author     Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-04 13:43:33 +0800
committer  Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-05 11:59:39 +0800
commit     812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch)
tree       04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/qa/suites/rados
parent     15280273faafb77777eab341909a3f495cf248d9 (diff)
initial code repo
This patch creates the initial code repo. For ceph, the luminous stable release is used as the base code, and subsequent changes and optimizations for ceph will be added on top of it. For opensds, any changes can currently be upstreamed into the original opensds repo (https://github.com/opensds/opensds), so stor4nfv will directly clone the opensds code to deploy the stor4nfv environment. The deployment scripts based on ceph and opensds will be put into the 'ci' directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
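As a rough illustration of the deployment flow described above (a sketch only: the opensds URL comes from this message, while the function name and destination directory are made up for the example), the 'ci' scripts would do something like:

    # Hypothetical sketch of the stor4nfv deploy bootstrap; not the actual ci script.
    import subprocess

    def clone(url, dest, branch=None):
        # git clone [--branch <branch>] <url> <dest>
        cmd = ["git", "clone"]
        if branch:
            cmd += ["--branch", branch]
        subprocess.check_call(cmd + [url, dest])

    clone("https://github.com/opensds/opensds", "opensds")  # cloned at deploy time, per above
    # ceph itself is vendored in this repo under src/ceph (luminous base), so no clone is needed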
Diffstat (limited to 'src/ceph/qa/suites/rados')
-rw-r--r-- src/ceph/qa/suites/rados/basic-luminous/% | 0
l--------- src/ceph/qa/suites/rados/basic-luminous/ceph.yaml | 1
l--------- src/ceph/qa/suites/rados/basic-luminous/clusters | 1
l--------- src/ceph/qa/suites/rados/basic-luminous/objectstore | 1
l--------- src/ceph/qa/suites/rados/basic-luminous/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml | 28
-rw-r--r-- src/ceph/qa/suites/rados/basic/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/basic/ceph.yaml | 3
-rw-r--r-- src/ceph/qa/suites/rados/basic/clusters/+ | 0
l--------- src/ceph/qa/suites/rados/basic/clusters/fixed-2.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/basic/clusters/openstack.yaml | 4
-rw-r--r-- src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml | 33
-rw-r--r-- src/ceph/qa/suites/rados/basic/d-require-luminous/at-mkfs.yaml | 0
l--------- src/ceph/qa/suites/rados/basic/mon_kv_backend | 1
-rw-r--r-- src/ceph/qa/suites/rados/basic/msgr-failures/few.yaml | 5
-rw-r--r-- src/ceph/qa/suites/rados/basic/msgr-failures/many.yaml | 5
-rw-r--r-- src/ceph/qa/suites/rados/basic/msgr/async.yaml | 6
-rw-r--r-- src/ceph/qa/suites/rados/basic/msgr/random.yaml | 6
-rw-r--r-- src/ceph/qa/suites/rados/basic/msgr/simple.yaml | 5
l--------- src/ceph/qa/suites/rados/basic/objectstore | 1
l--------- src/ceph/qa/suites/rados/basic/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/rados_cls_all.yaml | 13
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/rados_python.yaml | 15
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml | 7
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/readwrite.yaml | 17
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/repair_test.yaml | 30
-rw-r--r-- src/ceph/qa/suites/rados/basic/tasks/rgw_snaps.yaml | 38
-rw-r--r-- src/ceph/qa/suites/rados/mgr/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/mgr/clusters/2-node-mgr.yaml | 6
-rw-r--r-- src/ceph/qa/suites/rados/mgr/debug/mgr.yaml | 16
l--------- src/ceph/qa/suites/rados/mgr/objectstore | 1
-rw-r--r-- src/ceph/qa/suites/rados/mgr/tasks/dashboard.yaml | 16
-rw-r--r-- src/ceph/qa/suites/rados/mgr/tasks/failover.yaml | 16
-rw-r--r-- src/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml | 19
-rw-r--r-- src/ceph/qa/suites/rados/mgr/tasks/workunits.yaml | 16
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/ceph.yaml | 14
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/clusters/3-mons.yaml | 7
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/clusters/9-mons.yaml | 7
l--------- src/ceph/qa/suites/rados/monthrash/d-require-luminous | 1
l--------- src/ceph/qa/suites/rados/monthrash/mon_kv_backend | 1
l--------- src/ceph/qa/suites/rados/monthrash/msgr | 1
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml | 5
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml | 11
l--------- src/ceph/qa/suites/rados/monthrash/objectstore | 1
l--------- src/ceph/qa/suites/rados/monthrash/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml | 12
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/thrashers/many.yaml | 16
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/thrashers/one.yaml | 9
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml | 14
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml | 13
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml | 58
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml | 9
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml | 23
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml | 16
-rw-r--r-- src/ceph/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml | 13
-rw-r--r-- src/ceph/qa/suites/rados/multimon/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/multimon/clusters/21.yaml | 8
-rw-r--r-- src/ceph/qa/suites/rados/multimon/clusters/3.yaml | 7
-rw-r--r-- src/ceph/qa/suites/rados/multimon/clusters/6.yaml | 7
-rw-r--r-- src/ceph/qa/suites/rados/multimon/clusters/9.yaml | 8
l--------- src/ceph/qa/suites/rados/multimon/mon_kv_backend | 1
l--------- src/ceph/qa/suites/rados/multimon/msgr | 1
-rw-r--r-- src/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml | 5
-rw-r--r-- src/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml | 5
l--------- src/ceph/qa/suites/rados/multimon/objectstore | 1
l--------- src/ceph/qa/suites/rados/multimon/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml | 7
-rw-r--r-- src/ceph/qa/suites/rados/objectstore/alloc-hint.yaml | 21
-rw-r--r-- src/ceph/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml | 23
-rw-r--r-- src/ceph/qa/suites/rados/objectstore/filejournal.yaml | 13
-rw-r--r-- src/ceph/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml | 14
-rw-r--r-- src/ceph/qa/suites/rados/objectstore/filestore-idempotent.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/objectstore/fusestore.yaml | 9
-rw-r--r-- src/ceph/qa/suites/rados/objectstore/keyvaluedb.yaml | 8
-rw-r--r-- src/ceph/qa/suites/rados/objectstore/objectcacher-stress.yaml | 14
-rw-r--r-- src/ceph/qa/suites/rados/objectstore/objectstore.yaml | 12
-rw-r--r-- src/ceph/qa/suites/rados/rest/mgr-restful.yaml | 25
-rw-r--r-- src/ceph/qa/suites/rados/rest/rest_test.yaml | 44
-rw-r--r-- src/ceph/qa/suites/rados/singleton-bluestore/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml | 33
l--------- src/ceph/qa/suites/rados/singleton-bluestore/msgr | 1
-rw-r--r-- src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml | 5
-rw-r--r-- src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml | 5
l--------- src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp.yaml | 1
l--------- src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore.yaml | 1
l--------- src/ceph/qa/suites/rados/singleton-bluestore/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml | 20
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml | 48
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml | 8
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml | 34
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml | 34
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml | 20
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml | 21
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml | 44
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml | 9
-rw-r--r-- src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml | 29
l--------- src/ceph/qa/suites/rados/singleton-nomsgr/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/singleton/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml | 26
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml | 29
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml | 29
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml | 19
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml | 24
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml | 17
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml | 23
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml | 23
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml | 26
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml | 31
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml | 31
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml | 14
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml | 20
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml | 31
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml | 26
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml | 28
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml | 28
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/peer.yaml | 25
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml | 34
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/radostool.yaml | 26
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/random-eio.yaml | 41
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml | 31
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml | 51
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/reg11184.yaml | 28
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml | 17
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/rest-api.yaml | 35
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml | 19
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml | 44
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/thrash-rados/+ | 0
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml | 27
l--------- src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml | 70
-rw-r--r-- src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml | 32
l--------- src/ceph/qa/suites/rados/singleton/msgr | 1
-rw-r--r-- src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml | 5
-rw-r--r-- src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml | 7
l--------- src/ceph/qa/suites/rados/singleton/objectstore | 1
l--------- src/ceph/qa/suites/rados/singleton/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/standalone/crush.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/standalone/erasure-code.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/standalone/misc.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/standalone/mon.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/standalone/osd.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/standalone/scrub.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-big/% | 0
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-big/ceph.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/+ | 0
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml | 4
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml | 4
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-big/d-require-luminous | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-big/leveldb.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-big/msgr-failures | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-big/objectstore | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-big/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml | 19
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml | 20
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml | 16
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml | 15
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-isa/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/clusters | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/d-require-luminous | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/leveldb.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/msgr-failures | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/objectstore | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/rados.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/supported | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashers | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/% | 0
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/bluestore.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/clusters | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/d-require-luminous | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/fast | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/leveldb.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashers | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml | 23
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml | 29
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml | 28
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml | 22
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-shec/% | 0
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/+ | 0
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml | 4
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-shec/d-require-luminous | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-shec/leveldb.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-shec/msgr-failures | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-shec/objectstore | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-shec/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml | 18
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/ceph.yaml | 3
l--------- src/ceph/qa/suites/rados/thrash-erasure-code/clusters | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code/d-require-luminous | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/fast/fast.yaml | 5
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/fast/normal.yaml | 0
l--------- src/ceph/qa/suites/rados/thrash-erasure-code/leveldb.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code/msgr-failures | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code/objectstore | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml | 17
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml | 19
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml | 16
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml | 16
l--------- src/ceph/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml | 27
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml | 21
-rw-r--r-- src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml | 20
-rw-r--r-- src/ceph/qa/suites/rados/thrash-luminous/% | 0
l--------- src/ceph/qa/suites/rados/thrash-luminous/0-size-min-size-overrides | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/1-pg-log-overrides | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/backoff | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/ceph.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/clusters | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/msgr | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/objectstore | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/rados.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/rocksdb.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/thrashers | 1
l--------- src/ceph/qa/suites/rados/thrash-luminous/thrashosds-health.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect_set_object.yaml | 9
-rw-r--r-- src/ceph/qa/suites/rados/thrash/% | 0
l--------- src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml | 0
l--------- src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash/backoff/normal.yaml | 0
-rw-r--r-- src/ceph/qa/suites/rados/thrash/backoff/peering.yaml | 5
-rw-r--r-- src/ceph/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml | 6
-rw-r--r-- src/ceph/qa/suites/rados/thrash/ceph.yaml | 3
-rw-r--r-- src/ceph/qa/suites/rados/thrash/clusters/+ | 0
l--------- src/ceph/qa/suites/rados/thrash/clusters/fixed-2.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash/clusters/openstack.yaml | 4
-rw-r--r-- src/ceph/qa/suites/rados/thrash/d-require-luminous/at-end.yaml | 33
-rw-r--r-- src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-crush-compat.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-upmap.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs.yaml | 0
l--------- src/ceph/qa/suites/rados/thrash/msgr | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml | 6
-rw-r--r-- src/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml | 7
-rw-r--r-- src/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml | 9
l--------- src/ceph/qa/suites/rados/thrash/objectstore | 1
l--------- src/ceph/qa/suites/rados/thrash/rados.yaml | 1
l--------- src/ceph/qa/suites/rados/thrash/rocksdb.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash/thrashers/default.yaml | 17
-rw-r--r-- src/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml | 21
-rw-r--r-- src/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml | 22
-rw-r--r-- src/ceph/qa/suites/rados/thrash/thrashers/none.yaml | 0
-rw-r--r-- src/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml | 17
l--------- src/ceph/qa/suites/rados/thrash/thrashosds-health.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml | 13
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml | 31
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml | 30
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml | 34
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml | 39
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml | 34
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/cache.yaml | 31
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml | 18
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml | 16
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml | 33
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml | 24
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml | 13
-rw-r--r-- src/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml | 8
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/+ | 0
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/openstack.yaml | 6
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/start.yaml | 25
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml | 13
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/firsthalf.yaml | 16
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/default.yaml | 24
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/+ | 0
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-cls.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-import-export.yaml | 13
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/readwrite.yaml | 17
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/snaps-few-objects.yaml | 19
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/+ | 0
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/radosbench.yaml | 41
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/rbd_api.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/6-finish-upgrade.yaml | 23
l--------- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/7-luminous.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/+ | 0
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rbd-python.yaml | 9
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rgw-swift.yaml | 11
-rw-r--r-- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/snaps-many-objects.yaml | 16
l--------- src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/thrashosds-health.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/verify/% | 0
-rw-r--r-- src/ceph/qa/suites/rados/verify/ceph.yaml | 3
-rw-r--r-- src/ceph/qa/suites/rados/verify/clusters/+ | 0
l--------- src/ceph/qa/suites/rados/verify/clusters/fixed-2.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/verify/clusters/openstack.yaml | 4
l--------- src/ceph/qa/suites/rados/verify/d-require-luminous | 1
-rw-r--r-- src/ceph/qa/suites/rados/verify/d-thrash/default/+ | 0
-rw-r--r-- src/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml | 10
l--------- src/ceph/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/verify/d-thrash/none.yaml | 0
l--------- src/ceph/qa/suites/rados/verify/mon_kv_backend | 1
l--------- src/ceph/qa/suites/rados/verify/msgr | 1
-rw-r--r-- src/ceph/qa/suites/rados/verify/msgr-failures/few.yaml | 5
l--------- src/ceph/qa/suites/rados/verify/objectstore | 1
l--------- src/ceph/qa/suites/rados/verify/rados.yaml | 1
-rw-r--r-- src/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml | 10
-rw-r--r-- src/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml | 23
-rw-r--r-- src/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml | 13
-rw-r--r-- src/ceph/qa/suites/rados/verify/validater/lockdep.yaml | 5
-rw-r--r-- src/ceph/qa/suites/rados/verify/validater/valgrind.yaml | 22
329 files changed, 3534 insertions, 0 deletions
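The layout above follows teuthology's suite conventions: a '%' file marks a suite directory for convolution (the job matrix is the cartesian product of one fragment picked from each facet subdirectory), a '+' file means the sibling fragments are all concatenated into a single item, and the symlinks (l---------) reuse shared fragments such as objectstore and rados.yaml across suites. A simplified sketch of that expansion (illustrative only, not teuthology's actual implementation; it ignores nesting and '+' handling):

    import itertools
    import os

    def facets(suite_dir):
        # One choice list per entry: a subdirectory contributes its fragments,
        # a lone YAML file appears in every generated job.
        for entry in sorted(os.listdir(suite_dir)):
            if entry in ("%", "+"):
                continue  # structural markers, not fragments
            path = os.path.join(suite_dir, entry)
            if os.path.isdir(path):
                yield [os.path.join(path, f) for f in sorted(os.listdir(path))]
            else:
                yield [path]

    def jobs(suite_dir):
        # e.g. rados/basic: clusters x d-require-luminous x msgr x
        # msgr-failures x objectstore x tasks; each tuple of fragments
        # is merged into one job description.
        return list(itertools.product(*facets(suite_dir)))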
diff --git a/src/ceph/qa/suites/rados/basic-luminous/% b/src/ceph/qa/suites/rados/basic-luminous/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/%
diff --git a/src/ceph/qa/suites/rados/basic-luminous/ceph.yaml b/src/ceph/qa/suites/rados/basic-luminous/ceph.yaml
new file mode 120000
index 0000000..1e1b9ef
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/ceph.yaml
@@ -0,0 +1 @@
+../basic/ceph.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic-luminous/clusters b/src/ceph/qa/suites/rados/basic-luminous/clusters
new file mode 120000
index 0000000..ae92569
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/clusters
@@ -0,0 +1 @@
+../basic/clusters
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic-luminous/objectstore b/src/ceph/qa/suites/rados/basic-luminous/objectstore
new file mode 120000
index 0000000..f81a13b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/objectstore
@@ -0,0 +1 @@
+../basic/objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic-luminous/rados.yaml b/src/ceph/qa/suites/rados/basic-luminous/rados.yaml
new file mode 120000
index 0000000..9b356bf
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/rados.yaml
@@ -0,0 +1 @@
+../basic/rados.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml b/src/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml
new file mode 100644
index 0000000..d87f5bf
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml
@@ -0,0 +1,28 @@
+overrides:
+ ceph:
+ wait-for-scrub: false
+ log-whitelist:
+ - '!= data_digest'
+ - '!= omap_digest'
+ - '!= size'
+ - 'deep-scrub 0 missing, 1 inconsistent objects'
+ - 'deep-scrub [0-9]+ errors'
+ - 'repair 0 missing, 1 inconsistent objects'
+ - 'repair [0-9]+ errors, [0-9]+ fixed'
+ - 'shard [0-9]+ missing'
+ - 'deep-scrub 1 missing, 1 inconsistent objects'
+ - 'does not match object info size'
+ - 'attr name mistmatch'
+ - 'deep-scrub 1 missing, 0 inconsistent objects'
+ - 'failed to pick suitable auth object'
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OSD_SCRUB_ERRORS\)
+ - \(TOO_FEW_PGS\)
+ conf:
+ osd:
+ osd deep scrub update digest min age: 0
+tasks:
+- scrub_test:
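Note that log-whitelist entries like the ones above are matched as regular expressions against cluster log lines, which is why the health-check names are written with escaped parentheses (\(OSD_SCRUB_ERRORS\) and friends) to match the literal parentheses in the log output. A small illustration of the matching idea (not teuthology code):

    import re

    whitelist = [r"deep-scrub [0-9]+ errors", r"\(OSD_SCRUB_ERRORS\)"]
    line = "cluster [ERR] Health check failed: 2 scrub errors (OSD_SCRUB_ERRORS)"
    # The run only tolerates an error line if some whitelist pattern matches it.
    print(any(re.search(p, line) for p in whitelist))  # True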
diff --git a/src/ceph/qa/suites/rados/basic/% b/src/ceph/qa/suites/rados/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/%
diff --git a/src/ceph/qa/suites/rados/basic/ceph.yaml b/src/ceph/qa/suites/rados/basic/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/basic/clusters/+ b/src/ceph/qa/suites/rados/basic/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/clusters/+
diff --git a/src/ceph/qa/suites/rados/basic/clusters/fixed-2.yaml b/src/ceph/qa/suites/rados/basic/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic/clusters/openstack.yaml b/src/ceph/qa/suites/rados/basic/clusters/openstack.yaml
new file mode 100644
index 0000000..e559d91
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml
new file mode 100644
index 0000000..ef998cc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml
@@ -0,0 +1,33 @@
+# do not require luminous osds at mkfs time; only set flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets), and verify we are healthy.
+tasks:
+- full_sequential_finally:
+ - exec:
+ mon.a:
+ - ceph osd require-osd-release luminous
+ - ceph osd pool application enable base rados || true
+# make sure osds have latest map
+ - rados -p rbd bench 5 write -b 4096
+ - ceph.healthy:
+ - ceph.osd_scrub_pgs:
+ cluster: ceph
+ - exec:
+ mon.a:
+ - sleep 15
+ - ceph osd dump | grep purged_snapdirs
+ - ceph pg dump -f json-pretty
+ - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
+overrides:
+ ceph:
+ conf:
+ global:
+ mon debug no require luminous: true
+
+# setting luminous triggers peering, which *might* trigger health alerts
+ log-whitelist:
+ - overall HEALTH_
+ - \(PG_AVAILABILITY\)
+ - \(PG_DEGRADED\)
+ thrashosds:
+ chance_thrash_cluster_full: 0
diff --git a/src/ceph/qa/suites/rados/basic/d-require-luminous/at-mkfs.yaml b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-mkfs.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-mkfs.yaml
diff --git a/src/ceph/qa/suites/rados/basic/mon_kv_backend b/src/ceph/qa/suites/rados/basic/mon_kv_backend
new file mode 120000
index 0000000..6f5a7e6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/mon_kv_backend
@@ -0,0 +1 @@
+../../../mon_kv_backend
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/basic/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/basic/msgr-failures/many.yaml b/src/ceph/qa/suites/rados/basic/msgr-failures/many.yaml
new file mode 100644
index 0000000..038c3a7
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 1500
diff --git a/src/ceph/qa/suites/rados/basic/msgr/async.yaml b/src/ceph/qa/suites/rados/basic/msgr/async.yaml
new file mode 100644
index 0000000..9c77eaa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr/async.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms type: async
+ enable experimental unrecoverable data corrupting features: '*'
diff --git a/src/ceph/qa/suites/rados/basic/msgr/random.yaml b/src/ceph/qa/suites/rados/basic/msgr/random.yaml
new file mode 100644
index 0000000..64404b3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr/random.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms type: random
+ enable experimental unrecoverable data corrupting features: '*'
diff --git a/src/ceph/qa/suites/rados/basic/msgr/simple.yaml b/src/ceph/qa/suites/rados/basic/msgr/simple.yaml
new file mode 100644
index 0000000..5c4f853
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr/simple.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms type: simple
diff --git a/src/ceph/qa/suites/rados/basic/objectstore b/src/ceph/qa/suites/rados/basic/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic/rados.yaml b/src/ceph/qa/suites/rados/basic/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..316119c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml
@@ -0,0 +1,18 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - but it is still running
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(SMALLER_PGP_NUM\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+ - rados/test_pool_quota.sh
+
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_cls_all.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_cls_all.yaml
new file mode 100644
index 0000000..bbab083
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_cls_all.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - cls
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_python.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_python.yaml
new file mode 100644
index 0000000..8c70304
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_python.yaml
@@ -0,0 +1,15 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test_python.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml
new file mode 100644
index 0000000..bee513e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(TOO_FEW_PGS\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/stress_watch.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml
new file mode 100644
index 0000000..c19cc83
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml
@@ -0,0 +1,7 @@
+tasks:
+- exec:
+ client.0:
+ - ceph_test_rados_striper_api_io
+ - ceph_test_rados_striper_api_aio
+ - ceph_test_rados_striper_api_striping
+
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml
new file mode 100644
index 0000000..2dade6d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-big.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml
new file mode 100644
index 0000000..6b764a8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml
new file mode 100644
index 0000000..c82023c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mostlyread.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/readwrite.yaml b/src/ceph/qa/suites/rados/basic/tasks/readwrite.yaml
new file mode 100644
index 0000000..f135107
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/readwrite.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ crush_tunables: optimal
+ conf:
+ mon:
+ mon osd initial require min compat client: luminous
+ osd:
+ osd_discard_disconnected_ops: false
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
diff --git a/src/ceph/qa/suites/rados/basic/tasks/repair_test.yaml b/src/ceph/qa/suites/rados/basic/tasks/repair_test.yaml
new file mode 100644
index 0000000..9afbc04
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/repair_test.yaml
@@ -0,0 +1,30 @@
+overrides:
+ ceph:
+ wait-for-scrub: false
+ log-whitelist:
+ - candidate had a stat error
+ - candidate had a read error
+ - deep-scrub 0 missing, 1 inconsistent objects
+ - deep-scrub 0 missing, 4 inconsistent objects
+ - deep-scrub [0-9]+ errors
+ - '!= omap_digest'
+ - '!= data_digest'
+ - repair 0 missing, 1 inconsistent objects
+ - repair 0 missing, 4 inconsistent objects
+ - repair [0-9]+ errors, [0-9]+ fixed
+ - scrub 0 missing, 1 inconsistent objects
+ - scrub [0-9]+ errors
+ - 'size 1 != size'
+ - attr name mismatch
+ - Regular scrub request, deep-scrub details will be lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ conf:
+ osd:
+ filestore debug inject read err: true
+ bluestore debug inject read err: true
+tasks:
+- repair_test:
+
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rgw_snaps.yaml b/src/ceph/qa/suites/rados/basic/tasks/rgw_snaps.yaml
new file mode 100644
index 0000000..ec86491
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rgw_snaps.yaml
@@ -0,0 +1,38 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ debug rgw: 20
+ debug ms: 1
+ osd:
+ osd_max_omap_entries_per_request: 10
+tasks:
+- rgw:
+ client.0:
+- ceph_manager.wait_for_pools:
+ kwargs:
+ pools:
+ - .rgw.buckets
+ - .rgw.root
+ - default.rgw.control
+ - default.rgw.meta
+ - default.rgw.log
+- thrash_pool_snaps:
+ pools:
+ - .rgw.buckets
+ - .rgw.root
+ - default.rgw.control
+ - default.rgw.meta
+ - default.rgw.log
+- s3readwrite:
+ client.0:
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
diff --git a/src/ceph/qa/suites/rados/mgr/% b/src/ceph/qa/suites/rados/mgr/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/%
diff --git a/src/ceph/qa/suites/rados/mgr/clusters/2-node-mgr.yaml b/src/ceph/qa/suites/rados/mgr/clusters/2-node-mgr.yaml
new file mode 100644
index 0000000..abc90e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/clusters/2-node-mgr.yaml
@@ -0,0 +1,6 @@
+roles:
+- [mgr.x, mon.a, mon.c, mds.a, mds.c, osd.0, client.0]
+- [mgr.y, mgr.z, mon.b, mds.b, osd.1, osd.2, client.1]
+log-rotate:
+ ceph-mds: 10G
+ ceph-osd: 10G
diff --git a/src/ceph/qa/suites/rados/mgr/debug/mgr.yaml b/src/ceph/qa/suites/rados/mgr/debug/mgr.yaml
new file mode 100644
index 0000000..068021e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/debug/mgr.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ debug mon: 20
+ mgr:
+ debug mgr: 20
+ debug ms: 1
+ client:
+ debug client: 20
+ debug mgrc: 20
+ debug ms: 1
+ osd:
+ debug mgrc: 20
+ mds:
+ debug mgrc: 20
diff --git a/src/ceph/qa/suites/rados/mgr/objectstore b/src/ceph/qa/suites/rados/mgr/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/mgr/tasks/dashboard.yaml b/src/ceph/qa/suites/rados/mgr/tasks/dashboard.yaml
new file mode 100644
index 0000000..3065e11
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/tasks/dashboard.yaml
@@ -0,0 +1,16 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_dashboard
diff --git a/src/ceph/qa/suites/rados/mgr/tasks/failover.yaml b/src/ceph/qa/suites/rados/mgr/tasks/failover.yaml
new file mode 100644
index 0000000..34be471
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/tasks/failover.yaml
@@ -0,0 +1,16 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_failover
diff --git a/src/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml b/src/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml
new file mode 100644
index 0000000..ffdfe8b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml
@@ -0,0 +1,19 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - Reduced data availability
+ - Degraded data redundancy
+ - objects misplaced
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_module_selftest
diff --git a/src/ceph/qa/suites/rados/mgr/tasks/workunits.yaml b/src/ceph/qa/suites/rados/mgr/tasks/workunits.yaml
new file mode 100644
index 0000000..d7261f4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/tasks/workunits.yaml
@@ -0,0 +1,16 @@
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - workunit:
+ clients:
+ client.0:
+ - mgr
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/% b/src/ceph/qa/suites/rados/monthrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/%
diff --git a/src/ceph/qa/suites/rados/monthrash/ceph.yaml b/src/ceph/qa/suites/rados/monthrash/ceph.yaml
new file mode 100644
index 0000000..9c08e93
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/ceph.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon min osdmap epochs: 25
+ paxos service trim min: 5
+# thrashing monitors may make mgr have trouble w/ its keepalive
+ log-whitelist:
+ - daemon x is unresponsive
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/monthrash/clusters/3-mons.yaml b/src/ceph/qa/suites/rados/monthrash/clusters/3-mons.yaml
new file mode 100644
index 0000000..4b721ef
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/clusters/3-mons.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.0]
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/monthrash/clusters/9-mons.yaml b/src/ceph/qa/suites/rados/monthrash/clusters/9-mons.yaml
new file mode 100644
index 0000000..a2874c1
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/clusters/9-mons.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2]
+- [mon.f, mon.g, mon.h, mon.i, mgr.x, osd.3, osd.4, osd.5, client.0]
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/monthrash/d-require-luminous b/src/ceph/qa/suites/rados/monthrash/d-require-luminous
new file mode 120000
index 0000000..82036c6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/d-require-luminous
@@ -0,0 +1 @@
+../basic/d-require-luminous
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/mon_kv_backend b/src/ceph/qa/suites/rados/monthrash/mon_kv_backend
new file mode 120000
index 0000000..6f5a7e6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/mon_kv_backend
@@ -0,0 +1 @@
+../../../mon_kv_backend
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/msgr b/src/ceph/qa/suites/rados/monthrash/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/msgr
@@ -0,0 +1 @@
+../basic/msgr
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml b/src/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml
new file mode 100644
index 0000000..da25b7a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: mon
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
+ mgr:
+ debug monc: 10
diff --git a/src/ceph/qa/suites/rados/monthrash/objectstore b/src/ceph/qa/suites/rados/monthrash/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/rados.yaml b/src/ceph/qa/suites/rados/monthrash/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml
new file mode 100644
index 0000000..2d1ba88
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml
@@ -0,0 +1,12 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(TOO_FEW_PGS\)
+tasks:
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
+ thrash_store: true
+ thrash_many: true
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/many.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/many.yaml
new file mode 100644
index 0000000..fa829b3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/many.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ conf:
+ osd:
+ mon client ping interval: 4
+ mon client ping timeout: 12
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ thrash_many: true
+ freeze_mon_duration: 20
+ freeze_mon_probability: 10
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/one.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/one.yaml
new file mode 100644
index 0000000..041cee0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/one.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml
new file mode 100644
index 0000000..14f41f7
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ conf:
+ mon:
+ paxos min: 10
+ paxos trim min: 10
+tasks:
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
+ thrash_many: true
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml
new file mode 100644
index 0000000..08b1522
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ conf:
+ mon:
+ paxos min: 10
+ paxos trim min: 10
+tasks:
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml
new file mode 100644
index 0000000..c6b00b4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml
@@ -0,0 +1,58 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - slow request
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- exec:
+ client.0:
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml
new file mode 100644
index 0000000..940d3a8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- exec:
+ client.0:
+ - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml
new file mode 100644
index 0000000..3b821bc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml
@@ -0,0 +1,23 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_FULL\)
+ - \(REQUEST_SLOW\)
+ - \(MON_DOWN\)
+ - \(PG_
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(SMALLER_PGP_NUM\)
+ conf:
+ global:
+ debug objecter: 20
+ debug rados: 20
+ debug ms: 1
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml
new file mode 100644
index 0000000..b05eb38
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(PG_
+ - \(MON_DOWN\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - mon/pool_ops.sh
+ - mon/crush_ops.sh
+ - mon/osd.sh
+ - mon/caps.sh
+
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 0000000..aa82d97
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
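The op_weights in workloads like the one above act as relative weights: each generated operation's type is picked with probability proportional to its weight. Roughly (an illustrative sketch, not the ceph_test_rados implementation):

    import random

    op_weights = {"read": 100, "write": 100, "delete": 50, "snap_create": 50,
                  "snap_remove": 50, "rollback": 50, "copy_from": 50}
    ops, weights = zip(*op_weights.items())
    # 4000 ops drawn with probability weight/sum(weights) per type
    plan = random.choices(ops, weights=weights, k=4000)
    print(plan[:5])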
diff --git a/src/ceph/qa/suites/rados/multimon/% b/src/ceph/qa/suites/rados/multimon/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/%
diff --git a/src/ceph/qa/suites/rados/multimon/clusters/21.yaml b/src/ceph/qa/suites/rados/multimon/clusters/21.yaml
new file mode 100644
index 0000000..b6d04a0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/clusters/21.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s, osd.0]
+- [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t, mgr.x]
+- [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u, osd.1]
+openstack:
+- volumes: # attached to each instance
+ count: 1
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/multimon/clusters/3.yaml b/src/ceph/qa/suites/rados/multimon/clusters/3.yaml
new file mode 100644
index 0000000..7b8c626
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/clusters/3.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0]
+- [mon.b, mgr.x, osd.1]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/multimon/clusters/6.yaml b/src/ceph/qa/suites/rados/multimon/clusters/6.yaml
new file mode 100644
index 0000000..715cbe8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/clusters/6.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, mon.e, mgr.x, osd.0]
+- [mon.b, mon.d, mon.f, mgr.y, osd.1]
+openstack:
+- volumes: # attached to each instance
+ count: 1
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/multimon/clusters/9.yaml b/src/ceph/qa/suites/rados/multimon/clusters/9.yaml
new file mode 100644
index 0000000..d029c4c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/clusters/9.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.d, mon.g, osd.0]
+- [mon.b, mon.e, mon.h, mgr.x]
+- [mon.c, mon.f, mon.i, osd.1]
+openstack:
+- volumes: # attached to each instance
+ count: 1
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/multimon/mon_kv_backend b/src/ceph/qa/suites/rados/multimon/mon_kv_backend
new file mode 120000
index 0000000..6f5a7e6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/mon_kv_backend
@@ -0,0 +1 @@
+../../../mon_kv_backend
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/multimon/msgr b/src/ceph/qa/suites/rados/multimon/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/msgr
@@ -0,0 +1 @@
+../basic/msgr
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml b/src/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/rados/multimon/objectstore b/src/ceph/qa/suites/rados/multimon/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/multimon/rados.yaml b/src/ceph/qa/suites/rados/multimon/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml b/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml
new file mode 100644
index 0000000..ec761e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - slow request
+ - .*clock.*skew.*
+ - clocks not synchronized
+ - overall HEALTH_
+ - \(MON_CLOCK_SKEW\)
+- mon_clock_skew_check:
+ expect-skew: false
diff --git a/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
new file mode 100644
index 0000000..a2d3d0b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
@@ -0,0 +1,18 @@
+tasks:
+- install:
+- exec:
+ mon.b:
+ - date -u -s @$(expr $(date -u +%s) + 10)
+- ceph:
+ wait-for-healthy: false
+ log-whitelist:
+ - slow request
+ - .*clock.*skew.*
+ - clocks not synchronized
+ - overall HEALTH_
+ - \(MON_CLOCK_SKEW\)
+ - \(MGR_DOWN\)
+ - \(PG_
+ - No standby daemons available
+- mon_clock_skew_check:
+ expect-skew: true
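
The `exec` step is what manufactures the skew: `date -u -s @$(expr $(date -u +%s) + 10)` reads mon.b's current UTC epoch, adds 10 seconds, and sets the clock to the result, after which `mon_clock_skew_check` asserts that the MON_CLOCK_SKEW warning really fires; `wait-for-healthy: false` is needed because a skewed cluster never reaches HEALTH_OK on its own. The same idiom with a different offset, for illustration (hypothetical, not in this patch):

- exec:
    mon.b:
    # push mon.b's clock 60 seconds into the past instead
    - date -u -s @$(expr $(date -u +%s) - 60)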
diff --git a/src/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml b/src/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml
new file mode 100644
index 0000000..137f58d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+- mon_recovery:
diff --git a/src/ceph/qa/suites/rados/objectstore/alloc-hint.yaml b/src/ceph/qa/suites/rados/objectstore/alloc-hint.yaml
new file mode 100644
index 0000000..d40143c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/alloc-hint.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ fs: xfs
+ conf:
+ osd:
+ filestore xfs extsize: true
+
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/test_alloc_hint.sh
diff --git a/src/ceph/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml b/src/ceph/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml
new file mode 100644
index 0000000..f3163c9
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml
@@ -0,0 +1,23 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 6
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(TOO_FEW_PGS\)
+ - \(POOL_APP_NOT_ENABLED\)
+- ceph_objectstore_tool:
+ objects: 20
diff --git a/src/ceph/qa/suites/rados/objectstore/filejournal.yaml b/src/ceph/qa/suites/rados/objectstore/filejournal.yaml
new file mode 100644
index 0000000..b0af800
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/filejournal.yaml
@@ -0,0 +1,13 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- exec:
+ client.0:
+ - ceph_test_filejournal
diff --git a/src/ceph/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml b/src/ceph/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml
new file mode 100644
index 0000000..58b5197
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ global:
+ journal aio: true
+- filestore_idempotent:
diff --git a/src/ceph/qa/suites/rados/objectstore/filestore-idempotent.yaml b/src/ceph/qa/suites/rados/objectstore/filestore-idempotent.yaml
new file mode 100644
index 0000000..2d3f3c6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/filestore-idempotent.yaml
@@ -0,0 +1,11 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- filestore_idempotent:
diff --git a/src/ceph/qa/suites/rados/objectstore/fusestore.yaml b/src/ceph/qa/suites/rados/objectstore/fusestore.yaml
new file mode 100644
index 0000000..1c34fca
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/fusestore.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - objectstore/test_fuse.sh
+
diff --git a/src/ceph/qa/suites/rados/objectstore/keyvaluedb.yaml b/src/ceph/qa/suites/rados/objectstore/keyvaluedb.yaml
new file mode 100644
index 0000000..efff8d3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/keyvaluedb.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+ client.0:
+ - mkdir $TESTDIR/kvtest && cd $TESTDIR/kvtest && ceph_test_keyvaluedb
+ - rm -rf $TESTDIR/kvtest
diff --git a/src/ceph/qa/suites/rados/objectstore/objectcacher-stress.yaml b/src/ceph/qa/suites/rados/objectstore/objectcacher-stress.yaml
new file mode 100644
index 0000000..e407a39
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/objectcacher-stress.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- workunit:
+ clients:
+ all:
+ - osdc/stress_objectcacher.sh
diff --git a/src/ceph/qa/suites/rados/objectstore/objectstore.yaml b/src/ceph/qa/suites/rados/objectstore/objectstore.yaml
new file mode 100644
index 0000000..b544234
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/objectstore.yaml
@@ -0,0 +1,12 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- exec:
+ client.0:
+ - mkdir $TESTDIR/ostest && cd $TESTDIR/ostest && ulimit -c 0 && ulimit -Sn 4096 && ceph_test_objectstore --gtest_filter=-*/3
+ - rm -rf $TESTDIR/ostest
diff --git a/src/ceph/qa/suites/rados/rest/mgr-restful.yaml b/src/ceph/qa/suites/rados/rest/mgr-restful.yaml
new file mode 100644
index 0000000..049532e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/rest/mgr-restful.yaml
@@ -0,0 +1,25 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, mds.a, client.a]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
+- exec:
+ mon.a:
+ - ceph restful create-key admin
+ - ceph restful create-self-signed-cert
+ - ceph restful restart
+- workunit:
+ clients:
+ client.a:
+ - rest/test-restful.sh
+- exec:
+ mon.a:
+ - ceph restful delete-key admin
+ - ceph restful list-keys | jq ".admin" | grep null
+
diff --git a/src/ceph/qa/suites/rados/rest/rest_test.yaml b/src/ceph/qa/suites/rados/rest/rest_test.yaml
new file mode 100644
index 0000000..0fdb9dc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/rest/rest_test.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+ - mgr.x
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+ - client.0
+
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(SMALLER_PGP_NUM\)
+ - \(OBJECT_
+ - \(REQUEST_SLOW\)
+ - \(SLOW_OPS\)
+ - \(TOO_FEW_PGS\)
+ - but it is still running
+ conf:
+ client.rest0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+- rest-api: [client.0]
+- workunit:
+ clients:
+ client.0:
+ - rest/test.py
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/% b/src/ceph/qa/suites/rados/singleton-bluestore/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/%
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml
new file mode 100644
index 0000000..77eed22
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml
@@ -0,0 +1,33 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - had wrong cluster addr
+ - must scrub before tier agent can activate
+ - failsafe engaged, dropping updates
+ - failsafe disengaged, no longer dropping updates
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(SMALLER_PG_NUM\)
+- workunit:
+ clients:
+ all:
+ - cephtool
+ - mon/pool_ops.sh
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/msgr b/src/ceph/qa/suites/rados/singleton-bluestore/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp.yaml
new file mode 120000
index 0000000..b23b2a7
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp.yaml
@@ -0,0 +1 @@
+../../../../objectstore/bluestore-comp.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore.yaml
new file mode 120000
index 0000000..bd7d7e0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/rados.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/% b/src/ceph/qa/suites/rados/singleton-nomsgr/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/%
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
new file mode 100644
index 0000000..bbf330b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+ ceph:
+ log-whitelist:
+ - MDS in read-only mode
+ - force file system read-only
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_FULL\)
+ - \(MDS_READ_ONLY\)
+ - \(POOL_FULL\)
+tasks:
+- install:
+- ceph:
+- rgw:
+ - client.0
+- exec:
+ client.0:
+ - ceph_test_admin_socket_output --all
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
new file mode 100644
index 0000000..5864803
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -0,0 +1,48 @@
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+ debug client: 20
+ debug mds: 20
+ debug ms: 1
+- exec:
+ client.0:
+ - ceph osd pool create data_cache 4
+ - ceph osd tier add cephfs_data data_cache
+ - ceph osd tier cache-mode data_cache writeback
+ - ceph osd tier set-overlay cephfs_data data_cache
+ - ceph osd pool set data_cache hit_set_type bloom
+ - ceph osd pool set data_cache hit_set_count 8
+ - ceph osd pool set data_cache hit_set_period 3600
+ - ceph osd pool set data_cache min_read_recency_for_promote 0
+- ceph-fuse:
+- exec:
+ client.0:
+ - sudo chmod 777 $TESTDIR/mnt.0/
+ - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+ - ls -al $TESTDIR/mnt.0/foo
+ - truncate --size 0 $TESTDIR/mnt.0/foo
+ - ls -al $TESTDIR/mnt.0/foo
+ - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+ - ls -al $TESTDIR/mnt.0/foo
+ - cp $TESTDIR/mnt.0/foo /tmp/foo
+ - sync
+ - rados -p data_cache ls -
+ - sleep 10
+ - rados -p data_cache ls -
+ - rados -p data_cache cache-flush-evict-all
+ - rados -p data_cache ls -
+ - sleep 1
+- exec:
+ client.1:
+ - hexdump -C /tmp/foo | head
+ - hexdump -C $TESTDIR/mnt.1/foo | head
+ - cmp $TESTDIR/mnt.1/foo /tmp/foo
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
new file mode 100644
index 0000000..8634362
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - post-file.sh
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
new file mode 100644
index 0000000..e766bdc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+- exec:
+ client.0:
+ - ceph osd pool create base-pool 4
+ - ceph osd pool application enable base-pool rados
+ - ceph osd pool create cache-pool 4
+ - ceph osd tier add base-pool cache-pool
+ - ceph osd tier cache-mode cache-pool writeback
+ - ceph osd tier set-overlay base-pool cache-pool
+ - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
+ - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
+ - rbd snap create base-pool/bar@snap
+ - rados -p base-pool cache-flush-evict-all
+ - rbd export base-pool/bar $TESTDIR/bar
+ - rbd export base-pool/bar@snap $TESTDIR/snap
+ - cmp $TESTDIR/foo $TESTDIR/bar
+ - cmp $TESTDIR/foo $TESTDIR/snap
+ - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
new file mode 100644
index 0000000..b245866
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -0,0 +1,34 @@
+# verify #13098 fix
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+ ceph:
+ log-whitelist:
+ - is full
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(CACHE_POOL_NEAR_FULL\)
+tasks:
+- install:
+- ceph:
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+- exec:
+ client.0:
+ - ceph osd pool create ec-ca 1 1
+ - ceph osd pool create ec 1 1 erasure default
+ - ceph osd pool application enable ec rados
+ - ceph osd tier add ec ec-ca
+ - ceph osd tier cache-mode ec-ca readproxy
+ - ceph osd tier set-overlay ec ec-ca
+ - ceph osd pool set ec-ca hit_set_type bloom
+ - ceph osd pool set-quota ec-ca max_bytes 20480000
+ - ceph osd pool set-quota ec max_bytes 20480000
+ - ceph osd pool set ec-ca target_max_bytes 20480000
+ - timeout 30 rados -p ec-ca bench 30 write || true
+ - ceph osd pool set-quota ec-ca max_bytes 0
+ - ceph osd pool set-quota ec max_bytes 0
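
This is a targeted reproducer for issue #13098: both the erasure-coded base pool and its cache tier get ~20 MB quotas, `rados bench` then writes until the cache fills, and the `timeout 30 ... || true` idiom caps the run at 30 seconds while swallowing the non-zero exit that a full pool is expected to produce; the closing `set-quota ... max_bytes 0` commands lift the quotas so the job can tear down cleanly. The tolerate-expected-failure idiom in isolation:

- exec:
    client.0:
    # the write is *expected* to stall once the quota is hit; bound it
    # at 30s and ignore the resulting non-zero exit status
    - timeout 30 rados -p ec-ca bench 30 write || true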
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
new file mode 100644
index 0000000..a28582f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+# we may land on ext4
+ osd max object name len: 400
+ osd max object namespace len: 64
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- workunit:
+ clients:
+ all:
+ - rados/test_health_warnings.sh
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
new file mode 100644
index 0000000..98b5095
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+ client.0:
+ - ceph_test_async_driver
+ - ceph_test_msgr
+openstack:
+ - machine:
+ disk: 40 # GB
+ ram: 15000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 0
+ size: 1 # GB
+overrides:
+ ceph:
+ conf:
+ client:
+ debug ms: 20
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
new file mode 100644
index 0000000..f480dbb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+- - osd.3
+ - osd.4
+ - osd.5
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd min pg log entries: 25
+ osd max pg log entries: 100
+ osd max object name len: 460
+ osd max object namespace len: 64
+- exec:
+ client.0:
+ - sudo ceph osd pool create foo 64
+ - sudo ceph osd pool application enable foo rados
+ - rados -p foo bench 60 write -b 1024 --no-cleanup
+ - sudo ceph osd pool set foo size 3
+ - sudo ceph osd out 0 1
+- sleep:
+ duration: 60
+- exec:
+ client.0:
+ - sudo ceph osd in 0 1
+- sleep:
+ duration: 60
+- exec:
+ client.0:
+ - sudo ceph osd pool set foo size 2
+- sleep:
+ duration: 300
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
new file mode 100644
index 0000000..d49a597
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/test_pool_access.sh
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
new file mode 100644
index 0000000..b0d5de3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
@@ -0,0 +1,29 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ debuginfo: true
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(PG_
+ conf:
+ global:
+ osd heartbeat grace: 40
+ debug deliberately leak memory: true
+ osd max object name len: 460
+ osd max object namespace len: 64
+ mon:
+ mon osd crush smoke test: false
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ expect_valgrind_errors: true
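
Two details are easy to miss here: `flavor: notcmalloc` installs builds without tcmalloc (which valgrind's memcheck does not get along with), and combining `debug deliberately leak memory: true` with `expect_valgrind_errors: true` turns the job into a self-test of the leak-detection plumbing itself — the run fails unless valgrind actually reports the planted leak. A sketch of what widening memcheck coverage to the OSDs might look like (hypothetical; this patch only leak-checks the mon in full):

  valgrind:
    mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
    osd: [--tool=memcheck, --leak-check=full]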
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/rados.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/% b/src/ceph/qa/suites/rados/singleton/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/%
diff --git a/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml b/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml
new file mode 100644
index 0000000..13af813
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - client.a
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+- admin_socket:
+ osd.0:
+ version:
+ git_version:
+ help:
+ config show:
+ config help:
+ config set filestore_dump_file /tmp/foo:
+ perf dump:
+ perf schema:
+ get_heap_property tcmalloc.max_total_thread_cache_bytes:
+ set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864:
+ set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432:
diff --git a/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml b/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml
new file mode 100644
index 0000000..604a9e4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+ conf:
+ osd:
+ debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors:
diff --git a/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml b/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml
new file mode 100644
index 0000000..e2f0245
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+ conf:
+ osd:
+ debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors2:
diff --git a/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml b/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml
new file mode 100644
index 0000000..59085ff
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml
@@ -0,0 +1,19 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- dump_stuck:
diff --git a/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
new file mode 100644
index 0000000..68644c8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
@@ -0,0 +1,24 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+- ec_lost_unfound:
diff --git a/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml b/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
new file mode 100644
index 0000000..e8201ee
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - erasure-code/encode-decode-non-regression.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
new file mode 100644
index 0000000..bcaef78
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
@@ -0,0 +1,23 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+- rep_lost_unfound_delete:
diff --git a/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml b/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml
new file mode 100644
index 0000000..a4a309d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml
@@ -0,0 +1,23 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+- lost_unfound:
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
new file mode 100644
index 0000000..accdd96
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd: 2
+ osd max pg per osd hard ratio: 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: True
+ pg_num: 2
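
With two OSDs and `mon max pg per osd: 2` the cluster-wide PG budget is four; `create_rbd_pool: False` keeps the default rbd pool from consuming it, and `test_create_from_mon: True` asserts the mon itself rejects a pool creation that would blow the limit. The two sibling fragments that follow set `test_create_from_mon: False` to exercise the same cap later, during PG instantiation on a primary and on a replica. A hypothetical variant with a looser budget, for contrast:

overrides:
  ceph:
    conf:
      osd:
        mon max pg per osd: 4             # budget of 8 PGs across 2 OSDs
        osd max pg per osd hard ratio: 1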
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
new file mode 100644
index 0000000..1c48ada
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd: 1
+ osd max pg per osd hard ratio: 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: True
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
new file mode 100644
index 0000000..0cf37fd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd: 1
+ osd max pg per osd hard ratio: 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: False
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml
new file mode 100644
index 0000000..318af5e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/auth_caps.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml
new file mode 100644
index 0000000..7bb4f65
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/test_mon_config_key.py
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml
new file mode 100644
index 0000000..815c518
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+ osd pool default min size: 1
+ osd:
+ debug monc: 1
+ debug ms: 1
+ log-whitelist:
+ - overall HEALTH_
+ - Manager daemon
+ - \(MGR_DOWN\)
+- mon_seesaw:
+- ceph_manager.create_pool:
+ kwargs:
+ pool_name: test
+ pg_num: 1
+- ceph_manager.wait_for_clean:
+ kwargs:
+ timeout: 60
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml
new file mode 100644
index 0000000..5b37407
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+- osd_backfill:
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
new file mode 100644
index 0000000..ed5b216
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery.test_incomplete_pgs:
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml
new file mode 100644
index 0000000..634e884
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(SLOW_OPS\)
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery:
diff --git a/src/ceph/qa/suites/rados/singleton/all/peer.yaml b/src/ceph/qa/suites/rados/singleton/all/peer.yaml
new file mode 100644
index 0000000..645034a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/peer.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+ osd pool default min size: 1
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- peer:
diff --git a/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
new file mode 100644
index 0000000..10f18e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- exec:
+ client.0:
+ - sudo ceph osd pool create foo 128 128
+ - sudo ceph osd pool application enable foo rados
+ - sleep 5
+ - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
+ - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
+- ceph.wait_for_failure: [osd.0]
+- exec:
+ client.0:
+ - sudo ceph osd down 0
+- ceph.restart: [osd.0]
+- ceph.healthy:
diff --git a/src/ceph/qa/suites/rados/singleton/all/radostool.yaml b/src/ceph/qa/suites/rados/singleton/all/radostool.yaml
new file mode 100644
index 0000000..1827795
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/radostool.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - had wrong cluster addr
+ - reached quota
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_rados_tool.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml b/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml
new file mode 100644
index 0000000..a2ad997
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml
@@ -0,0 +1,41 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+- full_sequential:
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33
+ - sudo ceph osd pool create test 16 16
+ - sudo ceph osd pool set test size 3
+ - sudo ceph pg dump pgs --format=json-pretty
+ - radosbench:
+ clients: [client.0]
+ time: 360
+ type: rand
+ objectsize: 1048576
+ pool: test
+ create_pool: false
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0
diff --git a/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml
new file mode 100644
index 0000000..78d77c8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - no reply from
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 30
+ - rebuild_mondb:
+ - radosbench:
+ clients: [client.0]
+ time: 30
diff --git a/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml b/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
new file mode 100644
index 0000000..7507bf6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
@@ -0,0 +1,51 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 20 # GB
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+ osd recovery sleep: .1
+ osd min pg log entries: 100
+ osd max pg log entries: 1000
+ log-whitelist:
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(PG_
+ - overall HEALTH_
+- exec:
+ osd.0:
+ - ceph osd pool create foo 128
+ - ceph osd pool application enable foo foo
+ - rados -p foo bench 30 write -b 4096 --no-cleanup
+ - ceph osd out 0
+ - sleep 5
+ - ceph osd set noup
+- ceph.restart:
+ daemons: [osd.1]
+ wait-for-up: false
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - rados -p foo bench 3 write -b 4096 --no-cleanup
+ - ceph osd unset noup
+ - sleep 10
+ - ceph tell osd.* config set osd_recovery_sleep 0
+ - ceph tell osd.* config set osd_recovery_max_active 20
+- ceph.healthy:
+- exec:
+ osd.0:
+ - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log
diff --git a/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml b/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml
new file mode 100644
index 0000000..f3c8575
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ conf:
+ osd:
+ debug osd: 5
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - \(OBJECT_
+tasks:
+- install:
+- ceph:
+- reg11184:
diff --git a/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
new file mode 100644
index 0000000..3eddce8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
@@ -0,0 +1,17 @@
+roles:
+- [mon.a, mgr.x]
+- [osd.0, osd.1, osd.2, client.0]
+
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+- resolve_stuck_peering:
+
diff --git a/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml b/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml
new file mode 100644
index 0000000..d988d1a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml
@@ -0,0 +1,35 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - mds.a
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ conf:
+ client.rest0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+- rest-api: [client.0]
+- workunit:
+ clients:
+ all:
+ - rest/test.py
diff --git a/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
new file mode 100644
index 0000000..42c8ae3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ fs: ext4
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_envlibrados_for_rocksdb.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml
new file mode 100644
index 0000000..cac3cb3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ mon:
+ osd default pool size: 3
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(REQUEST_SLOW\)
+ - \(PG_
+ - \(OBJECT_MISPLACED\)
+ - \(OSD_
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+ random_eio: .33
+ min_live: 5
+ min_in: 5
+- radosbench:
+ clients: [client.0]
+ time: 720
+ type: rand
+ objectsize: 1048576
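
Unlike the plain thrashing suites, this one sets `random_eio: .33`, having the thrasher inject random EIO read errors with probability 0.33 while `radosbench` runs a 12-minute random-read workload of 1 MB objects; `min_live: 5` and `min_in: 5` keep enough of the six OSDs up and in for the cluster to stay serviceable throughout. A gentler hypothetical variant of the thrashing fragment:

- thrashosds:
    op_delay: 30
    clean_interval: 120
    chance_down: .5
    random_eio: .1    # hypothetical: EIO on ~10% of reads instead of a third
    min_live: 5
    min_in: 5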
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
new file mode 100644
index 0000000..37be8df
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix-small.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
new file mode 120000
index 0000000..0b1d7b0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
new file mode 100644
index 0000000..82c9b2d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
@@ -0,0 +1,70 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+ - \(CACHE_POOL_
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 500
+- background_exec:
+ mon.a:
+ - while true
+ - do sleep 30
+ - echo proxy
+ - sudo ceph osd tier cache-mode cache proxy
+ - sleep 10
+ - sudo ceph osd pool set cache cache_target_full_ratio .001
+ - echo cache-try-flush-evict-all
+ - rados -p cache cache-try-flush-evict-all
+ - sleep 5
+ - echo cache-flush-evict-all
+ - rados -p cache cache-flush-evict-all
+ - sleep 5
+ - echo remove overlay
+ - sudo ceph osd tier remove-overlay base
+ - sleep 20
+ - echo add writeback overlay
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd pool set cache cache_target_full_ratio .8
+ - sudo ceph osd tier set-overlay base cache
+ - sleep 30
+ - sudo ceph osd tier cache-mode cache readproxy
+ - done
+- rados:
+ clients: [client.0]
+ pools: [base]
+ max_seconds: 600
+ ops: 400000
+ objects: 10000
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
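
`background_exec` runs its command list as one background shell on mon.a, so the entries above assemble a single loop that cycles the cache tier through proxy and back to writeback (flushing, evicting, and even removing the overlay in between) for the entire 600-second `rados` run against the base pool — the point of the test is that client I/O survives arbitrary cache-mode churn. Joined together (echo progress markers dropped), the loop reads:

while true; do
    sleep 30
    sudo ceph osd tier cache-mode cache proxy
    sleep 10
    sudo ceph osd pool set cache cache_target_full_ratio .001
    rados -p cache cache-try-flush-evict-all; sleep 5
    rados -p cache cache-flush-evict-all; sleep 5
    sudo ceph osd tier remove-overlay base; sleep 20
    sudo ceph osd tier cache-mode cache writeback
    sudo ceph osd pool set cache cache_target_full_ratio .8
    sudo ceph osd tier set-overlay base cache
    sleep 30
    sudo ceph osd tier cache-mode cache readproxy
done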
diff --git a/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
new file mode 100644
index 0000000..48ef78f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+ osd pool default min size: 1
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+- watch_notify_same_primary:
+ clients: [client.0]
diff --git a/src/ceph/qa/suites/rados/singleton/msgr b/src/ceph/qa/suites/rados/singleton/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml b/src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml
new file mode 100644
index 0000000..3b495f9
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
+ mgr:
+ debug monc: 10
diff --git a/src/ceph/qa/suites/rados/singleton/objectstore b/src/ceph/qa/suites/rados/singleton/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/rados.yaml b/src/ceph/qa/suites/rados/singleton/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/standalone/crush.yaml b/src/ceph/qa/suites/rados/standalone/crush.yaml
new file mode 100644
index 0000000..a62a0dd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/crush.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - crush
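
The `standalone` facets are thin wrappers: `basedir: qa/standalone` points the workunit task away from the default `qa/workunits` tree, and each fragment simply runs every script under one subdirectory of it (`crush` here; `erasure-code`, `misc`, `mon`, `osd`, and `scrub` in the fragments that follow). Covering a new standalone directory would just mean one more such fragment (directory name hypothetical):

tasks:
- install:
- workunit:
    basedir: qa/standalone
    clients:
      all:
      - my-new-tests    # hypothetical subdirectory of qa/standalone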
diff --git a/src/ceph/qa/suites/rados/standalone/erasure-code.yaml b/src/ceph/qa/suites/rados/standalone/erasure-code.yaml
new file mode 100644
index 0000000..7d79753
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/erasure-code.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - erasure-code
diff --git a/src/ceph/qa/suites/rados/standalone/misc.yaml b/src/ceph/qa/suites/rados/standalone/misc.yaml
new file mode 100644
index 0000000..4aa9ee2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/misc.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - misc
diff --git a/src/ceph/qa/suites/rados/standalone/mon.yaml b/src/ceph/qa/suites/rados/standalone/mon.yaml
new file mode 100644
index 0000000..c19606f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/mon.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - mon
diff --git a/src/ceph/qa/suites/rados/standalone/osd.yaml b/src/ceph/qa/suites/rados/standalone/osd.yaml
new file mode 100644
index 0000000..e28b522
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/osd.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - osd
diff --git a/src/ceph/qa/suites/rados/standalone/scrub.yaml b/src/ceph/qa/suites/rados/standalone/scrub.yaml
new file mode 100644
index 0000000..7f6fad4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/scrub.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - scrub
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/% b/src/ceph/qa/suites/rados/thrash-erasure-code-big/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/+ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/+
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml
new file mode 100644
index 0000000..1c45ee3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml
@@ -0,0 +1,4 @@
+roles:
+- [osd.0, osd.1, osd.2, osd.3, client.0, mon.a]
+- [osd.4, osd.5, osd.6, osd.7, mon.b, mgr.x]
+- [osd.8, osd.9, osd.10, osd.11, mon.c]
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml
new file mode 100644
index 0000000..e559d91
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code-big/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/leveldb.yaml
new file mode 120000
index 0000000..264207f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/leveldb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code-big/msgr-failures
new file mode 120000
index 0000000..03689aa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/objectstore b/src/ceph/qa/suites/rados/thrash-erasure-code-big/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml
new file mode 100644
index 0000000..f2ccf7f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml
@@ -0,0 +1,18 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - slow request
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml
new file mode 100644
index 0000000..afc43b8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ mon:
+ mon osd pool ec fast read: 1
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml
new file mode 100644
index 0000000..3095cd8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml
@@ -0,0 +1,20 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+ conf:
+ mon:
+ mon min osdmap epochs: 2
+ osd:
+ osd map cache size: 1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+tasks:
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_test_map_discontinuity: 0.5
+ min_in: 8
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..572832d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml
new file mode 100644
index 0000000..148d9fe
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml
@@ -0,0 +1,15 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml
new file mode 120000
index 0000000..a4d836c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
new file mode 120000
index 0000000..86a2d3c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/% b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml
new file mode 100644
index 0000000..c2409f5
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml
@@ -0,0 +1 @@
+arch: x86_64
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/clusters b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/clusters
new file mode 120000
index 0000000..7aac47b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/clusters
@@ -0,0 +1 @@
+../thrash/clusters \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/leveldb.yaml
new file mode 120000
index 0000000..264207f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/leveldb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/msgr-failures
new file mode 120000
index 0000000..03689aa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/objectstore b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/supported b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/supported
new file mode 120000
index 0000000..c5d5935
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/supported
@@ -0,0 +1 @@
+../../../distros/supported \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashers b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashers
new file mode 120000
index 0000000..f461dad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashers
@@ -0,0 +1 @@
+../thrash/thrashers \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml
new file mode 120000
index 0000000..9d32cd8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/% b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/bluestore.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/bluestore.yaml
new file mode 120000
index 0000000..1249ffd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/bluestore.yaml
@@ -0,0 +1 @@
+../thrash-erasure-code/objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/clusters b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/clusters
new file mode 120000
index 0000000..646ea04
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/clusters
@@ -0,0 +1 @@
+../thrash-erasure-code/clusters \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/fast b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/fast
new file mode 120000
index 0000000..6170b30
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/fast
@@ -0,0 +1 @@
+../thrash-erasure-code/fast \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/leveldb.yaml
new file mode 120000
index 0000000..531ecf3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/leveldb.yaml
@@ -0,0 +1 @@
+../thrash-erasure-code/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures
new file mode 120000
index 0000000..70c9ca1
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures
@@ -0,0 +1 @@
+../thrash-erasure-code/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml
new file mode 120000
index 0000000..017df6f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml
@@ -0,0 +1 @@
+../thrash-erasure-code/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashers b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashers
new file mode 120000
index 0000000..40ff82c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashers
@@ -0,0 +1 @@
+../thrash-erasure-code/thrashers \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml
new file mode 100644
index 0000000..d2ad70a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml
@@ -0,0 +1,23 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ pool_snaps: true
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
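All four workloads in this suite share this preamble: EC overwrites were still experimental at the luminous base this repo pins, hence the global enable experimental unrecoverable data corrupting features switch; the suite also symlinks in bluestore.yaml above because filestore lacks the checksumming that makes EC overwrites safe, and thrashosds skips its objectstore-tool tests for these pools. On an existing pool the capability itself is a one-liner; a sketch in the same exec style (pool name assumed):

    tasks:
    - exec:
        client.0:
        - sudo ceph osd pool set ecpool allow_ec_overwrites true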
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml
new file mode 100644
index 0000000..b3f831b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml
@@ -0,0 +1,29 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ fast_read: true
+ op_weights:
+ read: 100
+ write: 100
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml
new file mode 100644
index 0000000..9baacef
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml
@@ -0,0 +1,28 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ op_weights:
+ read: 100
+ write: 100
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml
new file mode 100644
index 0000000..b7c5381
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml
@@ -0,0 +1,22 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/% b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/+ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/+
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml
new file mode 120000
index 0000000..961d9b5
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-4.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml
new file mode 100644
index 0000000..e559d91
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/leveldb.yaml
new file mode 120000
index 0000000..264207f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/leveldb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/msgr-failures
new file mode 120000
index 0000000..03689aa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/objectstore b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml
new file mode 100644
index 0000000..c6f02b3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml
@@ -0,0 +1,18 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - slow request
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
new file mode 120000
index 0000000..476d7cd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/% b/src/ceph/qa/suites/rados/thrash-erasure-code/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/clusters b/src/ceph/qa/suites/rados/thrash-erasure-code/clusters
new file mode 120000
index 0000000..7aac47b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/clusters
@@ -0,0 +1 @@
+../thrash/clusters \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/fast/fast.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/fast/fast.yaml
new file mode 100644
index 0000000..9c3f726
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/fast/fast.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ mon osd pool ec fast read: true
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/fast/normal.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/fast/normal.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/fast/normal.yaml
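The empty normal.yaml is deliberate: teuthology expands each suite directory as a cross product of its facet directories (the % file marks the product root), and an empty fragment contributes a no-override branch to the matrix. Counting just the axes this diff adds for thrash-erasure-code:

    fast      = 2  (fast, normal)
    thrashers = 4  (default, fastread, morepggrow, pggrow)
    workloads = 5  (two jerasure plugins, radosbench,
                    small-objects, small-objects-fast-read)
    2 * 4 * 5 = 40 jobs

before the symlinked clusters, msgr-failures, objectstore and d-require-luminous axes multiply the matrix further.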
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/leveldb.yaml
new file mode 120000
index 0000000..264207f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/leveldb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code/msgr-failures
new file mode 120000
index 0000000..03689aa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/objectstore b/src/ceph/qa/suites/rados/thrash-erasure-code/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml
new file mode 100644
index 0000000..caa467b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml
new file mode 100644
index 0000000..471e4af
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ mon:
+ mon osd pool ec fast read: 1
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..12c11fa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml
new file mode 100644
index 0000000..2bbe5e5
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 4
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml
new file mode 120000
index 0000000..f11eddb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 120000
index 0000000..b1407ae
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml
new file mode 100644
index 0000000..3c2ff7a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml
@@ -0,0 +1,27 @@
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml
new file mode 100644
index 0000000..e732ec6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml
@@ -0,0 +1,21 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ fast_read: true
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml
new file mode 100644
index 0000000..a8ac397
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml
@@ -0,0 +1,20 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
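Note the contrast with the -overwrites suite earlier in this diff: a classic EC pool rejects partial overwrites, so both small-objects workloads here set write: 0 and drive all mutation through append: 100, while their overwrites counterparts run write: 100. The entire behavioural delta is (sketch, all other keys identical):

    op_weights:
      write: 0      # plain EC pool: partial overwrites rejected
    # vs., with erasure_code_use_overwrites: true
    op_weights:
      write: 100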
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/% b/src/ceph/qa/suites/rados/thrash-luminous/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/%
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/0-size-min-size-overrides b/src/ceph/qa/suites/rados/thrash-luminous/0-size-min-size-overrides
new file mode 120000
index 0000000..ce15740
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/0-size-min-size-overrides
@@ -0,0 +1 @@
+../thrash/0-size-min-size-overrides/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/1-pg-log-overrides b/src/ceph/qa/suites/rados/thrash-luminous/1-pg-log-overrides
new file mode 120000
index 0000000..d68d36c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/1-pg-log-overrides
@@ -0,0 +1 @@
+../thrash/1-pg-log-overrides/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/backoff b/src/ceph/qa/suites/rados/thrash-luminous/backoff
new file mode 120000
index 0000000..dc7814b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/backoff
@@ -0,0 +1 @@
+../thrash/backoff/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/ceph.yaml b/src/ceph/qa/suites/rados/thrash-luminous/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/clusters b/src/ceph/qa/suites/rados/thrash-luminous/clusters
new file mode 120000
index 0000000..729c96f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/clusters
@@ -0,0 +1 @@
+../thrash/clusters/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/msgr b/src/ceph/qa/suites/rados/thrash-luminous/msgr
new file mode 120000
index 0000000..8113e02
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/msgr
@@ -0,0 +1 @@
+../thrash/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/objectstore b/src/ceph/qa/suites/rados/thrash-luminous/objectstore
new file mode 120000
index 0000000..38ae00c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/objectstore
@@ -0,0 +1 @@
+../thrash/objectstore/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/rados.yaml b/src/ceph/qa/suites/rados/thrash-luminous/rados.yaml
new file mode 120000
index 0000000..2834d2e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/rados.yaml
@@ -0,0 +1 @@
+../thrash/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/rocksdb.yaml b/src/ceph/qa/suites/rados/thrash-luminous/rocksdb.yaml
new file mode 120000
index 0000000..9aaadff
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/rocksdb.yaml
@@ -0,0 +1 @@
+../thrash/rocksdb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/thrashers b/src/ceph/qa/suites/rados/thrash-luminous/thrashers
new file mode 120000
index 0000000..f461dad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/thrashers
@@ -0,0 +1 @@
+../thrash/thrashers \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-luminous/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect.yaml b/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect.yaml
new file mode 100644
index 0000000..50bd812
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ set_redirect: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect_set_object.yaml b/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect_set_object.yaml
new file mode 100644
index 0000000..74595e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect_set_object.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ set_redirect: true
+ op_weights:
+ set_redirect: 100
+ copy_from: 100
diff --git a/src/ceph/qa/suites/rados/thrash/% b/src/ceph/qa/suites/rados/thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/%
diff --git a/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml
new file mode 120000
index 0000000..4c817a6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml
@@ -0,0 +1 @@
+../../../../overrides/2-size-1-min-size.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml
new file mode 120000
index 0000000..c429b07
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml
@@ -0,0 +1 @@
+../../../../overrides/2-size-2-min-size.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
new file mode 120000
index 0000000..8d529f0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
@@ -0,0 +1 @@
+../../../../overrides/3-size-2-min-size.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml b/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml
diff --git a/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml b/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml
new file mode 120000
index 0000000..62010f4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml
@@ -0,0 +1 @@
+../../../../overrides/short_pg_log.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/backoff/normal.yaml b/src/ceph/qa/suites/rados/thrash/backoff/normal.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/backoff/normal.yaml
diff --git a/src/ceph/qa/suites/rados/thrash/backoff/peering.yaml b/src/ceph/qa/suites/rados/thrash/backoff/peering.yaml
new file mode 100644
index 0000000..66d0611
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/backoff/peering.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
diff --git a/src/ceph/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml b/src/ceph/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml
new file mode 100644
index 0000000..e610990
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
+ osd backoff on degraded: true
diff --git a/src/ceph/qa/suites/rados/thrash/ceph.yaml b/src/ceph/qa/suites/rados/thrash/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/thrash/clusters/+ b/src/ceph/qa/suites/rados/thrash/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/clusters/+
diff --git a/src/ceph/qa/suites/rados/thrash/clusters/fixed-2.yaml b/src/ceph/qa/suites/rados/thrash/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/clusters/openstack.yaml b/src/ceph/qa/suites/rados/thrash/clusters/openstack.yaml
new file mode 100644
index 0000000..b0f3b9b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-end.yaml b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-end.yaml
new file mode 100644
index 0000000..ef998cc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-end.yaml
@@ -0,0 +1,33 @@
+# do not require luminous osds at mkfs time; only set flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets), and verify we are healthy.
+tasks:
+- full_sequential_finally:
+ - exec:
+ mon.a:
+ - ceph osd require-osd-release luminous
+ - ceph osd pool application enable base rados || true
+# make sure osds have latest map
+ - rados -p rbd bench 5 write -b 4096
+ - ceph.healthy:
+ - ceph.osd_scrub_pgs:
+ cluster: ceph
+ - exec:
+ mon.a:
+ - sleep 15
+ - ceph osd dump | grep purged_snapdirs
+ - ceph pg dump -f json-pretty
+ - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
+overrides:
+ ceph:
+ conf:
+ global:
+ mon debug no require luminous: true
+
+# setting luminous triggers peering, which *might* trigger health alerts
+ log-whitelist:
+ - overall HEALTH_
+ - \(PG_AVAILABILITY\)
+ - \(PG_DEGRADED\)
+ thrashosds:
+ chance_thrash_cluster_full: 0
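The grep pipeline above is the real pass/fail gate: after the final scrub every legacy SnapSet must have been converted, so num_legacy_snapsets has to read 0. The at-mkfs.yaml companion below is intentionally empty, since without mon debug no require luminous the flag is already set at mkfs and nothing is left to do at the end. To additionally confirm the flag landed, one more exec step could be appended; a sketch, not part of the original fragment:

    - exec:
        mon.a:
        - ceph osd dump | grep 'require_osd_release luminous'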
diff --git a/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-crush-compat.yaml b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-crush-compat.yaml
new file mode 100644
index 0000000..9eb7143
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-crush-compat.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ mgr:
+ debug osd: 20
+tasks:
+- exec:
+ mon.a:
+ - while ! ceph balancer status ; do sleep 1 ; done
+ - ceph balancer mode crush-compat
+ - ceph balancer on
diff --git a/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-upmap.yaml b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-upmap.yaml
new file mode 100644
index 0000000..a1e0afe
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-upmap.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ mgr:
+ debug osd: 20
+tasks:
+- exec:
+ mon.a:
+ - while ! ceph balancer status ; do sleep 1 ; done
+ - ceph balancer mode upmap
+ - ceph balancer on
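Both balancer facets poll ceph balancer status in a loop because the mgr module may not be up the moment the task starts. The upmap mode additionally stores pg-upmap entries in the osdmap, which pre-luminous clients cannot decode, so on a cluster that still admits older clients you would first raise the floor; a sketch in the same exec style:

    - exec:
        mon.a:
        - ceph osd set-require-min-compat-client luminous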
diff --git a/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs.yaml b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs.yaml
diff --git a/src/ceph/qa/suites/rados/thrash/msgr b/src/ceph/qa/suites/rados/thrash/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml b/src/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml
new file mode 100644
index 0000000..77fd730
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms tcp read timeout: 5
diff --git a/src/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml
new file mode 100644
index 0000000..477bffe
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ osd:
+ osd heartbeat use min delay socket: true
diff --git a/src/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml b/src/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml
new file mode 100644
index 0000000..a33ba89
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: osd
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
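These knobs are rates, not counts: ms inject socket failures: 2500 fails roughly one in every 2500 messages, and the delay settings add up to 1 s of latency to about 0.5% (probability .005) of OSD-bound messages, plus small internal queue delays. A gentler profile for local experiments might look like this sketch (values assumed, not taken from any suite file):

    overrides:
      ceph:
        conf:
          global:
            ms inject socket failures: 10000
            ms inject delay type: osd
            ms inject delay probability: .001
            ms inject delay max: 0.5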
diff --git a/src/ceph/qa/suites/rados/thrash/objectstore b/src/ceph/qa/suites/rados/thrash/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/rados.yaml b/src/ceph/qa/suites/rados/thrash/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/rocksdb.yaml b/src/ceph/qa/suites/rados/thrash/rocksdb.yaml
new file mode 120000
index 0000000..f26e095
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/rocksdb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/rocksdb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/default.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..9e2b5b1
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/default.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml
new file mode 100644
index 0000000..8962ff1
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml
@@ -0,0 +1,21 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+ conf:
+ mon:
+ mon min osdmap epochs: 2
+ osd:
+ osd map cache size: 1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd scrub during recovery: false
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 0.25
+ chance_pgpnum_fix: 0.25
+ chance_test_map_discontinuity: 2
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..91d2173
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml
@@ -0,0 +1,22 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ journal throttle high multiple: 2
+ journal throttle max multiple: 10
+ filestore queue throttle high multiple: 2
+ filestore queue throttle max multiple: 10
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+openstack:
+- volumes:
+ size: 50
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/none.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/none.yaml
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml
new file mode 100644
index 0000000..2a8087f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ filestore odsync write: true
+ osd max backfills: 2
+ osd snap trim sleep: .5
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
diff --git a/src/ceph/qa/suites/rados/thrash/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml b/src/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml
new file mode 100644
index 0000000..8c9764a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 150
+- admin_socket:
+ client.0:
+ objecter_requests:
+ test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
new file mode 100644
index 0000000..0cef207
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
@@ -0,0 +1,31 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2
+ - sudo ceph osd pool create base 4 4 erasure myprofile
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool set base min_size 2
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 5000
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 10000
+ objects: 6600
+ max_seconds: 1200
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
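The sizing here is what forces the tiering agent to earn its keep: the workload draws from a universe of 6600 objects of size 1024 against a cache tier capped at target_max_objects 5000, so once the run touches most of that universe, flush/evict cycles are guaranteed rather than incidental (and the 'must scrub before tier agent can activate' whitelist covers the startup window before hit-set scrubbing has run). Roughly:

    tier cap (objects)        = 5000
    workload object universe  = 6600
    forced flush/evictions   >= 6600 - 5000 = 1600 objects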
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
new file mode 100644
index 0000000..10d4735
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
@@ -0,0 +1,30 @@
+overrides:
+ ceph:
+ crush_tunables: firefly
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+ - sudo ceph osd pool set cache min_write_recency_for_promote 2
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
new file mode 100644
index 0000000..4349743
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
@@ -0,0 +1,34 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache readproxy
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
new file mode 100644
index 0000000..dc3385c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
@@ -0,0 +1,39 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+openstack:
+ - machine:
+ ram: 15000 # MB
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml
new file mode 100644
index 0000000..486d6db
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml
@@ -0,0 +1,34 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache.yaml
new file mode 100644
index 0000000..d63018f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache.yaml
@@ -0,0 +1,31 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml b/src/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml
new file mode 100644
index 0000000..1f0759d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml
@@ -0,0 +1,19 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd deep scrub update digest min age: 0
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml b/src/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml
new file mode 100644
index 0000000..23c705d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - \(POOL_APP_NOT_ENABLED\)
+ crush_tunables: hammer
+ conf:
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml b/src/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml
new file mode 100644
index 0000000..1b25004
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml
@@ -0,0 +1,33 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml b/src/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml
new file mode 100644
index 0000000..f5a18ae
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml
@@ -0,0 +1,24 @@
+overrides:
+ ceph:
+ crush_tunables: jewel
+ conf:
+ mon:
+ mon osd initial require min compat client: jewel
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml b/src/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 0000000..aa82d97
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml b/src/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml
new file mode 100644
index 0000000..606dcae
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml
@@ -0,0 +1,8 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_fadvise_dontneed: true
+ op_weights:
+ write: 100
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/% b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/%
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/+ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/+
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/openstack.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/start.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/start.yaml
new file mode 100644
index 0000000..2872225
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/start.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ fs: xfs
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - mgr.y
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..31ca3e5
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: install ceph/jewel latest
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install jewel"
+- ceph:
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+ log-whitelist:
+ - required past_interval bounds are empty
+- print: "**** done ceph"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/firsthalf.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..c244916
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ install the upgrade to ceph/-x on one node only
+ 1st half
+ restart: mon.a,b,c and osd.0,1,2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: false
+- ceph.restart:
+ daemons: [osd.0, osd.1, osd.2]
+ wait-for-healthy: false
+- print: "**** done ceph.restart 1st half"
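After this facet the cluster intentionally runs mixed versions: the mons and osd.0-2 are on the -x release while osd.3-5 remain on jewel. When debugging such a job, a hypothetical check could compare daemon versions directly:

    # hypothetical, for illustration only
    - exec:
        mon.a:
        - ceph tell osd.0 version
        - ceph tell osd.3 version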
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/default.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/default.yaml
new file mode 100644
index 0000000..ecf3d1b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+ randomly kill and revive osds,
+ with a small chance of increasing the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - split_tasks
+split_tasks:
+ sequential:
+ - thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+ - print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/+ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/+
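Each fragment in 4-workload/ defines split_tasks.sequential, and the empty '+' merges them, so the thrasher entry contributed by 3-thrash is followed by each workload in directory order; the parallel reference in 3-thrash then runs the merged sequence, presumably so the osd thrashing overlaps the upgrade workloads. A sketch of the merged shape, assuming teuthology appends list values when the same key recurs across fragments:

    split_tasks:
      sequential:
      - thrashosds: ...        # from 3-thrash/default.yaml
      - print: ...
      - workunit: ...          # 4-workload/rbd-cls.yaml
      - workunit: ...          # 4-workload/rbd-import-export.yaml
      - full_sequential: ...   # 4-workload/readwrite.yaml
      - full_sequential: ...   # 4-workload/snaps-few-objects.yaml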
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-cls.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-cls.yaml
new file mode 100644
index 0000000..e35bfc2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-cls.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+split_tasks:
+ sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 4-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-import-export.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..9d6c2e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-import-export.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+split_tasks:
+ sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 4-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/readwrite.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/readwrite.yaml
new file mode 100644
index 0000000..0382520
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/readwrite.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+split_tasks:
+ sequential:
+ - full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+ - print: "**** done rados/readwrite 4-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/snaps-few-objects.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..c96cfbe
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,19 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+split_tasks:
+ sequential:
+ - full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados/snaps-few-objects 4-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/+ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/+
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/radosbench.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/radosbench.yaml
new file mode 100644
index 0000000..2cfbf1d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/radosbench.yaml
@@ -0,0 +1,41 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+split_tasks:
+ sequential:
+ - full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - print: "**** done radosbench 5-workload"
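Eleven consecutive 150-second radosbench runs keep write load on the cluster for 11 x 150 s = 1650 s, i.e. 27.5 minutes, spanning the rest of the upgrade sequence; using several short runs rather than one long one presumably bounds how many benchmark objects exist (and need cleanup) at any moment.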
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/rbd_api.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/rbd_api.yaml
new file mode 100644
index 0000000..22a5f57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+split_tasks:
+ sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 5-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/6-finish-upgrade.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/6-finish-upgrade.yaml
new file mode 100644
index 0000000..a110815
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/6-finish-upgrade.yaml
@@ -0,0 +1,23 @@
+meta:
+- desc: |
+ install upgrade on the remaining node,
+ restarting the remaining osds
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_DEGRADED\)
+ - \(MDS_
+tasks:
+- install.upgrade:
+ osd.3:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-up: true
+ wait-for-healthy: false
+- ceph.restart:
+ daemons: [mds.a]
+ wait-for-up: true
+ wait-for-healthy: false
+- install.upgrade:
+ client.0:
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/7-luminous.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/7-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/7-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/+ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/+
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rbd-python.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rbd-python.yaml
new file mode 100644
index 0000000..56ba21d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rbd-python.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 8-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rgw-swift.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rgw-swift.yaml
new file mode 100644
index 0000000..e41f47a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rgw-swift.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 8-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 8-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/snaps-many-objects.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
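Worth comparing with the earlier snaps-few-objects fragments: 4000 ops over 500 objects is about 8 ops per object, whereas 4000 ops over 50 objects is about 80, so this post-upgrade pass trades per-object snap/rollback contention for coverage of many more objects.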
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/thrashosds-health.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/% b/src/ceph/qa/suites/rados/verify/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/%
diff --git a/src/ceph/qa/suites/rados/verify/ceph.yaml b/src/ceph/qa/suites/rados/verify/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/verify/clusters/+ b/src/ceph/qa/suites/rados/verify/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/clusters/+
diff --git a/src/ceph/qa/suites/rados/verify/clusters/fixed-2.yaml b/src/ceph/qa/suites/rados/verify/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/clusters/openstack.yaml b/src/ceph/qa/suites/rados/verify/clusters/openstack.yaml
new file mode 100644
index 0000000..e559d91
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/verify/d-require-luminous b/src/ceph/qa/suites/rados/verify/d-require-luminous
new file mode 120000
index 0000000..82036c6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-require-luminous
@@ -0,0 +1 @@
+../basic/d-require-luminous \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/d-thrash/default/+ b/src/ceph/qa/suites/rados/verify/d-thrash/default/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-thrash/default/+
diff --git a/src/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml b/src/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml
new file mode 100644
index 0000000..bcd3f39
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
diff --git a/src/ceph/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml b/src/ceph/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml
new file mode 120000
index 0000000..0b1d7b0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/d-thrash/none.yaml b/src/ceph/qa/suites/rados/verify/d-thrash/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-thrash/none.yaml
diff --git a/src/ceph/qa/suites/rados/verify/mon_kv_backend b/src/ceph/qa/suites/rados/verify/mon_kv_backend
new file mode 120000
index 0000000..6f5a7e6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/mon_kv_backend
@@ -0,0 +1 @@
+../../../mon_kv_backend \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/msgr b/src/ceph/qa/suites/rados/verify/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/verify/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
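The 'ms inject socket failures: 5000' setting makes the messenger artificially fail roughly one in every 5000 socket operations, exercising reconnect and resend paths at a modest rate. A hypothetical harsher variant would simply lower the divisor:

    # hypothetical override for illustration; 500 is an assumed value
    overrides:
      ceph:
        conf:
          global:
            ms inject socket failures: 500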
diff --git a/src/ceph/qa/suites/rados/verify/objectstore b/src/ceph/qa/suites/rados/verify/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/rados.yaml b/src/ceph/qa/suites/rados/verify/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml b/src/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml
new file mode 100644
index 0000000..266a4e4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(SMALLER_PGP_NUM\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- mon_recovery:
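The log-whitelist entries are regular expressions matched against cluster log lines, hence the escaped parentheses around the health codes; mon_recovery deliberately takes monitors down to exercise quorum re-formation, so MON_DOWN and the related flag warnings are expected rather than fatal. A hypothetical extra entry would follow the same escaped form, e.g. - \(REQUEST_SLOW\).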
diff --git a/src/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml b/src/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..05b843e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml
@@ -0,0 +1,23 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_FULL\)
+ - \(SMALLER_PGP_NUM\)
+ - \(REQUEST_SLOW\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+ conf:
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ debug monc: 20
+tasks:
+- workunit:
+ timeout: 6h
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/src/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml b/src/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml
new file mode 100644
index 0000000..bbab083
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - cls
diff --git a/src/ceph/qa/suites/rados/verify/validater/lockdep.yaml b/src/ceph/qa/suites/rados/verify/validater/lockdep.yaml
new file mode 100644
index 0000000..25f8435
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/validater/lockdep.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
diff --git a/src/ceph/qa/suites/rados/verify/validater/valgrind.yaml b/src/ceph/qa/suites/rados/verify/validater/valgrind.yaml
new file mode 100644
index 0000000..5622414
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/validater/valgrind.yaml
@@ -0,0 +1,22 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ debuginfo: true
+ ceph:
+ conf:
+ global:
+ osd heartbeat grace: 40
+ mon:
+ mon osd crush smoke test: false
+ log-whitelist:
+ - overall HEALTH_
+# valgrind is slow, so we might get PGs stuck peering, etc.
+ - \(PG_
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
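A few notes on this validater: flavor: notcmalloc selects packages built without tcmalloc, which memcheck does not cope with, and debuginfo: true pulls in symbols so leak reports are readable. The longer osd heartbeat grace and the disabled crush smoke test compensate for how much valgrind slows the daemons. The per-daemon lists are passed to valgrind as-is, so a stricter osd run would look like this hypothetical variant:

    overrides:
      valgrind:
        mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
        osd: [--tool=memcheck, --leak-check=full]   # hypothetical stricter args
        mds: [--tool=memcheck]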