path: root/src/ceph/qa/suites
author     Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-04 13:43:33 +0800
committer  Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-05 11:59:39 +0800
commit     812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch)
tree       04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/qa/suites
parent     15280273faafb77777eab341909a3f495cf248d9 (diff)
initial code repo
This patch creates the initial code repo. For ceph, the luminous stable release is used as the base code, and subsequent changes and optimizations for ceph will be added on top of it. For opensds, any changes can currently be upstreamed into the original opensds repo (https://github.com/opensds/opensds), so stor4nfv will directly clone the opensds code to deploy the stor4nfv environment. The scripts for deployment based on ceph and opensds are placed in the 'ci' directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
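For orientation: the entries in the diffstat below are teuthology QA suite fragments. By teuthology convention, a '%' file marks a directory whose subdirectories are combined as a cross product when jobs are generated, a '+' file marks a directory whose fragments are all concatenated into a single job, and the 'l---------' entries are symlinks to fragments shared across suites. A minimal sketch of such a fragment, assuming the usual teuthology layout (illustrative only, not the verbatim contents of any file listed below):

    # hypothetical suite fragment: raise a debug setting, then run a workunit
    overrides:
      ceph:
        conf:
          mds:
            debug mds: 20
    tasks:
    - install:
    - ceph:
    - workunit:
        clients:
          client.0:
          - suites/fsstress.sh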
Diffstat (limited to 'src/ceph/qa/suites')
-rw-r--r--  src/ceph/qa/suites/big/rados-thrash/% | 0
-rw-r--r--  src/ceph/qa/suites/big/rados-thrash/ceph/ceph.yaml | 3
-rw-r--r--  src/ceph/qa/suites/big/rados-thrash/clusters/big.yaml | 68
-rw-r--r--  src/ceph/qa/suites/big/rados-thrash/clusters/medium.yaml | 22
-rw-r--r--  src/ceph/qa/suites/big/rados-thrash/clusters/small.yaml | 6
l---------  src/ceph/qa/suites/big/rados-thrash/objectstore | 1
-rw-r--r--  src/ceph/qa/suites/big/rados-thrash/openstack.yaml | 8
-rw-r--r--  src/ceph/qa/suites/big/rados-thrash/thrashers/default.yaml | 10
-rw-r--r--  src/ceph/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml | 13
-rw-r--r--  src/ceph/qa/suites/buildpackages/any/% | 0
l---------  src/ceph/qa/suites/buildpackages/any/distros | 1
-rw-r--r--  src/ceph/qa/suites/buildpackages/any/tasks/release.yaml | 8
-rw-r--r--  src/ceph/qa/suites/buildpackages/tests/% | 0
l---------  src/ceph/qa/suites/buildpackages/tests/distros | 1
-rw-r--r--  src/ceph/qa/suites/buildpackages/tests/tasks/release.yaml | 20
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/% | 0
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml | 12
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml~10fc85089c... qa_tests - Added options to use both cases: mon.a and installer.0 | 12
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/4-node.yaml | 13
l---------  src/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_latest.yaml | 1
l---------  src/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_latest.yaml | 1
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml | 32
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml | 8
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml | 7
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml | 7
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/ceph-admin-commands.yaml | 7
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/rbd_import_export.yaml | 7
-rw-r--r--  src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/rest.yaml | 15
-rw-r--r--  src/ceph/qa/suites/ceph-deploy/basic/% | 0
-rw-r--r--  src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml | 3
-rw-r--r--  src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/disable_diff_journal_disk.yaml | 3
-rw-r--r--  src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_diff_journal_disk.yaml | 3
-rw-r--r--  src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml | 4
-rw-r--r--  src/ceph/qa/suites/ceph-deploy/basic/config_options/cephdeploy_conf.yaml | 6
l---------  src/ceph/qa/suites/ceph-deploy/basic/distros | 1
l---------  src/ceph/qa/suites/ceph-deploy/basic/objectstore/bluestore.yaml | 1
l---------  src/ceph/qa/suites/ceph-deploy/basic/objectstore/filestore-xfs.yaml | 1
-rw-r--r--  src/ceph/qa/suites/ceph-deploy/basic/python_versions/python_2.yaml | 3
-rw-r--r--  src/ceph/qa/suites/ceph-deploy/basic/python_versions/python_3.yaml | 3
-rw-r--r--  src/ceph/qa/suites/ceph-deploy/basic/tasks/ceph-admin-commands.yaml | 26
-rw-r--r--  src/ceph/qa/suites/ceph-disk/basic/% | 0
l---------  src/ceph/qa/suites/ceph-disk/basic/distros | 1
-rw-r--r--  src/ceph/qa/suites/ceph-disk/basic/tasks/ceph-disk.yaml | 41
-rw-r--r--  src/ceph/qa/suites/dummy/% | 0
-rw-r--r--  src/ceph/qa/suites/dummy/all/nop.yaml | 6
-rw-r--r--  src/ceph/qa/suites/experimental/multimds/% | 0
-rw-r--r--  src/ceph/qa/suites/experimental/multimds/clusters/7-multimds.yaml | 8
-rw-r--r--  src/ceph/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml | 15
-rw-r--r--  src/ceph/qa/suites/fs/32bits/% | 0
l---------  src/ceph/qa/suites/fs/32bits/begin.yaml | 1
l---------  src/ceph/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml | 1
l---------  src/ceph/qa/suites/fs/32bits/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/fs/32bits/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/fs/32bits/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/32bits/overrides/debug.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/32bits/overrides/faked-ino.yaml | 5
l---------  src/ceph/qa/suites/fs/32bits/overrides/frag_enable.yaml | 1
l---------  src/ceph/qa/suites/fs/32bits/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml | 1
l---------  src/ceph/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml | 11
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/% | 0
l---------  src/ceph/qa/suites/fs/basic_functional/begin.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/clusters/4-remote-clients.yaml | 10
l---------  src/ceph/qa/suites/fs/basic_functional/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_functional/objectstore/bluestore-ec-root.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_functional/objectstore/bluestore.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/basic_functional/overrides/debug.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_functional/overrides/frag_enable.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_functional/overrides/no_client_pidfile.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_functional/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_functional/overrides/whitelist_wrongly_marked_down.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml | 20
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml | 4
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml | 13
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/backtrace.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/cap-flush.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml | 16
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml | 6
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml | 19
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/client-readahad.yaml | 4
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml | 14
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/config-commands.yaml | 11
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml | 25
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml | 19
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml | 14
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/fragment.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml | 14
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_java.yaml | 14
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml | 10
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/mds-flush.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml | 32
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml | 6
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/pool-perm.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/quota.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/sessionmap.yaml | 13
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/strays.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml | 11
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/% | 0
l---------  src/ceph/qa/suites/fs/basic_workload/begin.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_workload/clusters/fixed-2-ucephfs.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/inline/no.yaml | 0
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/inline/yes.yaml | 4
l---------  src/ceph/qa/suites/fs/basic_workload/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_workload/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/omap_limit/10.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/omap_limit/10000.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/basic_workload/overrides/debug.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_workload/overrides/frag_enable.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_workload/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_workload/overrides/whitelist_wrongly_marked_down.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_kernel_untar_build.yaml | 14
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml | 11
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc_test_o_trunc.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml | 16
l---------  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_blogbench.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_dbench.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_ffsb.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsstress.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsx.yaml | 9
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsync.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iogen.yaml | 6
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iozone.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml | 16
-rw-r--r--  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_truncate_delay.yaml | 14
l---------  src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_trivial_sync.yaml | 1
l---------  src/ceph/qa/suites/fs/basic_workload/tasks/libcephfs_interface_tests.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/multiclient/% | 0
l---------  src/ceph/qa/suites/fs/multiclient/begin.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/multiclient/clusters/three_clients.yaml | 15
-rw-r--r--  src/ceph/qa/suites/fs/multiclient/clusters/two_clients.yaml | 14
l---------  src/ceph/qa/suites/fs/multiclient/mount/fuse.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/multiclient/mount/kclient.yaml.disabled | 7
l---------  src/ceph/qa/suites/fs/multiclient/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/fs/multiclient/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/multiclient/overrides/debug.yaml | 1
l---------  src/ceph/qa/suites/fs/multiclient/overrides/frag_enable.yaml | 1
l---------  src/ceph/qa/suites/fs/multiclient/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/multiclient/overrides/whitelist_wrongly_marked_down.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml | 10
-rw-r--r--  src/ceph/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled | 20
-rw-r--r--  src/ceph/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml | 26
-rw-r--r--  src/ceph/qa/suites/fs/multiclient/tasks/mdtest.yaml | 23
-rw-r--r--  src/ceph/qa/suites/fs/multifs/% | 0
l---------  src/ceph/qa/suites/fs/multifs/begin.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/multifs/clusters/2-remote-clients.yaml | 10
l---------  src/ceph/qa/suites/fs/multifs/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/fs/multifs/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/fs/multifs/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/multifs/overrides/debug.yaml | 1
l---------  src/ceph/qa/suites/fs/multifs/overrides/frag_enable.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/multifs/overrides/mon-debug.yaml | 5
l---------  src/ceph/qa/suites/fs/multifs/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/multifs/overrides/whitelist_wrongly_marked_down.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/multifs/tasks/failover.yaml | 12
-rw-r--r--  src/ceph/qa/suites/fs/permission/% | 0
l---------  src/ceph/qa/suites/fs/permission/begin.yaml | 1
l---------  src/ceph/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml | 1
l---------  src/ceph/qa/suites/fs/permission/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/fs/permission/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/fs/permission/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/permission/overrides/debug.yaml | 1
l---------  src/ceph/qa/suites/fs/permission/overrides/frag_enable.yaml | 1
l---------  src/ceph/qa/suites/fs/permission/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml | 12
-rw-r--r--  src/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml | 12
-rw-r--r--  src/ceph/qa/suites/fs/snaps/% | 0
l---------  src/ceph/qa/suites/fs/snaps/begin.yaml | 1
l---------  src/ceph/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml | 1
l---------  src/ceph/qa/suites/fs/snaps/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/fs/snaps/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/fs/snaps/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/snaps/overrides/debug.yaml | 1
l---------  src/ceph/qa/suites/fs/snaps/overrides/frag_enable.yaml | 1
l---------  src/ceph/qa/suites/fs/snaps/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/snaps/overrides/whitelist_wrongly_marked_down.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/snaps/tasks/snaptests.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/thrash/% | 0
l---------  src/ceph/qa/suites/fs/thrash/begin.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/thrash/ceph-thrash/default.yaml | 7
-rw-r--r--  src/ceph/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml | 10
l---------  src/ceph/qa/suites/fs/thrash/mount/fuse.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/thrash/msgr-failures/none.yaml | 0
-rw-r--r--  src/ceph/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml | 8
l---------  src/ceph/qa/suites/fs/thrash/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/fs/thrash/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/thrash/overrides/debug.yaml | 1
l---------  src/ceph/qa/suites/fs/thrash/overrides/frag_enable.yaml | 1
l---------  src/ceph/qa/suites/fs/thrash/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml | 5
l---------  src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml | 11
l---------  src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/traceless/% | 0
l---------  src/ceph/qa/suites/fs/traceless/begin.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/fs/traceless/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/traceless/overrides/debug.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/overrides/frag_enable.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml | 1
l---------  src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/traceless/traceless/50pc.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/verify/% | 0
l---------  src/ceph/qa/suites/fs/verify/begin.yaml | 1
l---------  src/ceph/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml | 1
l---------  src/ceph/qa/suites/fs/verify/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/fs/verify/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/fs/verify/overrides/+ | 0
l---------  src/ceph/qa/suites/fs/verify/overrides/debug.yaml | 1
l---------  src/ceph/qa/suites/fs/verify/overrides/frag_enable.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/verify/overrides/mon-debug.yaml | 6
l---------  src/ceph/qa/suites/fs/verify/overrides/whitelist_health.yaml | 1
l---------  src/ceph/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml | 1
l---------  src/ceph/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml | 1
l---------  src/ceph/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml | 1
-rw-r--r--  src/ceph/qa/suites/fs/verify/validater/lockdep.yaml | 5
-rw-r--r--  src/ceph/qa/suites/fs/verify/validater/valgrind.yaml | 27
-rw-r--r--  src/ceph/qa/suites/hadoop/basic/% | 0
-rw-r--r--  src/ceph/qa/suites/hadoop/basic/clusters/fixed-3.yaml | 17
l---------  src/ceph/qa/suites/hadoop/basic/filestore-xfs.yaml | 1
-rw-r--r--  src/ceph/qa/suites/hadoop/basic/tasks/repl.yaml | 8
-rw-r--r--  src/ceph/qa/suites/hadoop/basic/tasks/terasort.yaml | 10
-rw-r--r--  src/ceph/qa/suites/hadoop/basic/tasks/wordcount.yaml | 8
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/% | 0
l---------  src/ceph/qa/suites/kcephfs/cephfs/clusters/fixed-3-cephfs.yaml | 1
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/conf.yaml | 7
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/inline/no.yaml | 3
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/inline/yes.yaml | 6
l---------  src/ceph/qa/suites/kcephfs/cephfs/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml | 7
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml | 7
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml | 6
-rw-r--r--  src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml | 5
-rw-r--r--  src/ceph/qa/suites/kcephfs/mixed-clients/% | 0
-rw-r--r--  src/ceph/qa/suites/kcephfs/mixed-clients/clusters/2-clients.yaml | 9
-rw-r--r--  src/ceph/qa/suites/kcephfs/mixed-clients/conf.yaml | 7
l---------  src/ceph/qa/suites/kcephfs/mixed-clients/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml | 20
-rw-r--r--  src/ceph/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml | 20
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/% | 0
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/clusters/4-remote-clients.yaml | 12
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/debug/mds_client.yaml | 12
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/dirfrag/frag_enable.yaml | 11
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/mounts/kmounts.yaml | 4
l---------  src/ceph/qa/suites/kcephfs/recovery/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml | 13
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/backtrace.yaml | 5
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/client-limits.yaml | 20
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml | 14
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/config-commands.yaml | 12
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/damage.yaml | 25
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/data-scan.yaml | 18
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/failover.yaml | 10
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml | 14
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml | 14
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml | 5
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/mds-full.yaml | 19
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml | 5
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml | 14
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/strays.yaml | 5
-rw-r--r--  src/ceph/qa/suites/kcephfs/recovery/tasks/volume-client.yaml | 12
l---------  src/ceph/qa/suites/kcephfs/recovery/whitelist_health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/kcephfs/thrash/% | 0
l---------  src/ceph/qa/suites/kcephfs/thrash/clusters/fixed-3-cephfs.yaml | 1
-rw-r--r--  src/ceph/qa/suites/kcephfs/thrash/conf.yaml | 7
l---------  src/ceph/qa/suites/kcephfs/thrash/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/kcephfs/thrash/thrashers/default.yaml | 7
-rw-r--r--  src/ceph/qa/suites/kcephfs/thrash/thrashers/mds.yaml | 9
-rw-r--r--  src/ceph/qa/suites/kcephfs/thrash/thrashers/mon.yaml | 6
l---------  src/ceph/qa/suites/kcephfs/thrash/thrashosds-health.yaml | 1
l---------  src/ceph/qa/suites/kcephfs/thrash/whitelist_health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml | 11
-rw-r--r--  src/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml | 6
-rw-r--r--  src/ceph/qa/suites/knfs/basic/% | 0
-rw-r--r--  src/ceph/qa/suites/knfs/basic/ceph/base.yaml | 14
l---------  src/ceph/qa/suites/knfs/basic/clusters/extra-client.yaml | 1
-rw-r--r--  src/ceph/qa/suites/knfs/basic/mount/v3.yaml | 5
-rw-r--r--  src/ceph/qa/suites/knfs/basic/mount/v4.yaml | 5
-rw-r--r--  src/ceph/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml | 6
-rw-r--r--  src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml | 11
-rw-r--r--  src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml | 5
-rw-r--r--  src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml | 5
-rw-r--r--  src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml | 10
-rw-r--r--  src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml | 5
-rw-r--r--  src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/% | 0
l---------  src/ceph/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml | 1
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/conf.yaml | 7
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/install/ceph.yaml | 3
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_data_pool.yaml | 23
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_exclusive_option.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_fallocate.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml | 10
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml | 15
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_kfsx.yaml | 11
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml | 6
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/% | 0
l---------  src/ceph/qa/suites/krbd/rbd/clusters/fixed-3.yaml | 1
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/conf.yaml | 7
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/msgr-failures/many.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/tasks/rbd_fio.yaml | 11
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml | 9
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml | 9
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml | 10
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml | 9
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml | 10
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml | 9
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml | 10
-rw-r--r--  src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml | 8
-rw-r--r--  src/ceph/qa/suites/krbd/singleton/% | 0
-rw-r--r--  src/ceph/qa/suites/krbd/singleton/conf.yaml | 7
-rw-r--r--  src/ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml | 38
-rw-r--r--  src/ceph/qa/suites/krbd/thrash/% | 0
-rw-r--r--  src/ceph/qa/suites/krbd/thrash/ceph/ceph.yaml | 3
l---------  src/ceph/qa/suites/krbd/thrash/clusters/fixed-3.yaml | 1
-rw-r--r--  src/ceph/qa/suites/krbd/thrash/conf.yaml | 7
-rw-r--r--  src/ceph/qa/suites/krbd/thrash/thrashers/backoff.yaml | 14
-rw-r--r--  src/ceph/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml | 4
-rw-r--r--  src/ceph/qa/suites/krbd/thrash/thrashers/pggrow.yaml | 10
-rw-r--r--  src/ceph/qa/suites/krbd/thrash/thrashers/upmap.yaml | 16
l---------  src/ceph/qa/suites/krbd/thrash/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/krbd/thrash/workloads/rbd_fio.yaml | 11
-rw-r--r--  src/ceph/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml | 8
-rw-r--r--  src/ceph/qa/suites/krbd/unmap/% | 0
-rw-r--r--  src/ceph/qa/suites/krbd/unmap/ceph/ceph.yaml | 9
-rw-r--r--  src/ceph/qa/suites/krbd/unmap/clusters/separate-client.yaml | 16
-rw-r--r--  src/ceph/qa/suites/krbd/unmap/conf.yaml | 5
l---------  src/ceph/qa/suites/krbd/unmap/filestore-xfs.yaml | 1
-rw-r--r--  src/ceph/qa/suites/krbd/unmap/kernels/pre-single-major.yaml | 10
-rw-r--r--  src/ceph/qa/suites/krbd/unmap/kernels/single-major-off.yaml | 6
-rw-r--r--  src/ceph/qa/suites/krbd/unmap/kernels/single-major-on.yaml | 6
-rw-r--r--  src/ceph/qa/suites/krbd/unmap/tasks/unmap.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/wac/sysfs/% | 0
-rw-r--r--  src/ceph/qa/suites/krbd/wac/sysfs/ceph/ceph.yaml | 3
l---------  src/ceph/qa/suites/krbd/wac/sysfs/clusters/fixed-1.yaml | 1
-rw-r--r--  src/ceph/qa/suites/krbd/wac/sysfs/conf.yaml | 7
-rw-r--r--  src/ceph/qa/suites/krbd/wac/sysfs/tasks/stable_pages_required.yaml | 5
-rw-r--r--  src/ceph/qa/suites/krbd/wac/wac/% | 0
-rw-r--r--  src/ceph/qa/suites/krbd/wac/wac/ceph/ceph.yaml | 3
l---------  src/ceph/qa/suites/krbd/wac/wac/clusters/fixed-3.yaml | 1
-rw-r--r--  src/ceph/qa/suites/krbd/wac/wac/conf.yaml | 7
-rw-r--r--  src/ceph/qa/suites/krbd/wac/wac/tasks/wac.yaml | 11
-rw-r--r--  src/ceph/qa/suites/krbd/wac/wac/verify/many-resets.yaml | 10
-rw-r--r--  src/ceph/qa/suites/krbd/wac/wac/verify/no-resets.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/basic/% | 0
-rw-r--r--  src/ceph/qa/suites/marginal/basic/clusters/fixed-3.yaml | 4
-rw-r--r--  src/ceph/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml | 8
-rw-r--r--  src/ceph/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml | 8
-rw-r--r--  src/ceph/qa/suites/marginal/fs-misc/% | 0
-rw-r--r--  src/ceph/qa/suites/marginal/fs-misc/clusters/two_clients.yaml | 4
-rw-r--r--  src/ceph/qa/suites/marginal/fs-misc/tasks/locktest.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/mds_restart/% | 0
-rw-r--r--  src/ceph/qa/suites/marginal/mds_restart/clusters/one_mds.yaml | 4
-rw-r--r--  src/ceph/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml | 11
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/% | 0
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml | 7
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/mounts/kclient.yaml | 4
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/tasks/workunit_misc.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml | 11
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml | 15
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/thrash/exports.yaml | 5
-rw-r--r--  src/ceph/qa/suites/marginal/multimds/thrash/normal.yaml | 0
-rw-r--r--  src/ceph/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml | 4
l---------  src/ceph/qa/suites/mixed-clients/basic/objectstore | 1
-rw-r--r--  src/ceph/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml | 26
-rw-r--r--  src/ceph/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml | 26
-rw-r--r--  src/ceph/qa/suites/multimds/basic/% | 0
l---------  src/ceph/qa/suites/multimds/basic/begin.yaml | 1
l---------  src/ceph/qa/suites/multimds/basic/clusters/3-mds.yaml | 1
l---------  src/ceph/qa/suites/multimds/basic/clusters/9-mds.yaml | 1
l---------  src/ceph/qa/suites/multimds/basic/inline | 1
l---------  src/ceph/qa/suites/multimds/basic/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/multimds/basic/mount/kclient.yaml | 1
l---------  src/ceph/qa/suites/multimds/basic/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/multimds/basic/overrides/% | 0
l---------  src/ceph/qa/suites/multimds/basic/overrides/basic | 1
l---------  src/ceph/qa/suites/multimds/basic/overrides/fuse-default-perm-no.yaml | 1
-rw-r--r--  src/ceph/qa/suites/multimds/basic/q_check_counter/check_counter.yaml | 8
-rw-r--r--  src/ceph/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml | 4
-rw-r--r--  src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_kernel_untar_build.yaml | 10
-rw-r--r--  src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml | 7
-rw-r--r--  src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml | 12
l---------  src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_blogbench.yaml | 1
l---------  src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_dbench.yaml | 1
l---------  src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_ffsb.yaml | 1
l---------  src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsstress.yaml | 1
-rw-r--r--  src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsx.yaml | 5
-rw-r--r--  src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml | 16
-rw-r--r--  src/ceph/qa/suites/multimds/thrash/% | 0
l---------  src/ceph/qa/suites/multimds/thrash/begin.yaml | 1
l---------  src/ceph/qa/suites/multimds/thrash/ceph-thrash | 1
-rw-r--r--  src/ceph/qa/suites/multimds/thrash/clusters/3-mds-2-standby.yaml | 4
-rw-r--r--  src/ceph/qa/suites/multimds/thrash/clusters/9-mds-3-standby.yaml | 4
l---------  src/ceph/qa/suites/multimds/thrash/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/multimds/thrash/mount/kclient.yaml | 1
l---------  src/ceph/qa/suites/multimds/thrash/msgr-failures | 1
l---------  src/ceph/qa/suites/multimds/thrash/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/multimds/thrash/overrides/% | 0
l---------  src/ceph/qa/suites/multimds/thrash/overrides/fuse-default-perm-no.yaml | 1
l---------  src/ceph/qa/suites/multimds/thrash/overrides/thrash | 1
-rw-r--r--  src/ceph/qa/suites/multimds/thrash/overrides/thrash_debug.yaml | 7
l---------  src/ceph/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_fsstress.yaml | 1
l---------  src/ceph/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_pjd.yaml | 1
-rw-r--r--  src/ceph/qa/suites/multimds/verify/% | 0
l---------  src/ceph/qa/suites/multimds/verify/begin.yaml | 1
l---------  src/ceph/qa/suites/multimds/verify/clusters/3-mds.yaml | 1
l---------  src/ceph/qa/suites/multimds/verify/clusters/9-mds.yaml | 1
l---------  src/ceph/qa/suites/multimds/verify/mount/fuse.yaml | 1
l---------  src/ceph/qa/suites/multimds/verify/mount/kclient.yaml | 1
l---------  src/ceph/qa/suites/multimds/verify/objectstore-ec | 1
-rw-r--r--  src/ceph/qa/suites/multimds/verify/overrides/% | 0
l---------  src/ceph/qa/suites/multimds/verify/overrides/fuse-default-perm-no.yaml | 1
l---------  src/ceph/qa/suites/multimds/verify/overrides/verify | 1
l---------  src/ceph/qa/suites/multimds/verify/tasks | 1
l---------  src/ceph/qa/suites/multimds/verify/validater | 1
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/% | 0
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml | 5
l---------  src/ceph/qa/suites/powercycle/osd/objectstore | 1
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/powercycle/default.yaml | 7
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml | 13
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml | 12
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml | 7
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml | 14
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml | 6
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml | 7
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml | 6
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml | 12
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml | 15
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml | 11
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/radosbench.yaml | 38
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/readwrite.yaml | 9
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml | 13
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml | 13
l---------  src/ceph/qa/suites/powercycle/osd/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/powercycle/osd/whitelist_health.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/basic-luminous/% | 0
l---------  src/ceph/qa/suites/rados/basic-luminous/ceph.yaml | 1
l---------  src/ceph/qa/suites/rados/basic-luminous/clusters | 1
l---------  src/ceph/qa/suites/rados/basic-luminous/objectstore | 1
l---------  src/ceph/qa/suites/rados/basic-luminous/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/basic/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/basic/ceph.yaml | 3
-rw-r--r--  src/ceph/qa/suites/rados/basic/clusters/+ | 0
l---------  src/ceph/qa/suites/rados/basic/clusters/fixed-2.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/basic/clusters/openstack.yaml | 4
-rw-r--r--  src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml | 33
-rw-r--r--  src/ceph/qa/suites/rados/basic/d-require-luminous/at-mkfs.yaml | 0
l---------  src/ceph/qa/suites/rados/basic/mon_kv_backend | 1
-rw-r--r--  src/ceph/qa/suites/rados/basic/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/basic/msgr-failures/many.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/basic/msgr/async.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rados/basic/msgr/random.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rados/basic/msgr/simple.yaml | 5
l---------  src/ceph/qa/suites/rados/basic/objectstore | 1
l---------  src/ceph/qa/suites/rados/basic/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/rados_cls_all.yaml | 13
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/rados_python.yaml | 15
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml | 7
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/readwrite.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/repair_test.yaml | 30
-rw-r--r--  src/ceph/qa/suites/rados/basic/tasks/rgw_snaps.yaml | 38
-rw-r--r--  src/ceph/qa/suites/rados/mgr/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/mgr/clusters/2-node-mgr.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rados/mgr/debug/mgr.yaml | 16
l---------  src/ceph/qa/suites/rados/mgr/objectstore | 1
-rw-r--r--  src/ceph/qa/suites/rados/mgr/tasks/dashboard.yaml | 16
-rw-r--r--  src/ceph/qa/suites/rados/mgr/tasks/failover.yaml | 16
-rw-r--r--  src/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/mgr/tasks/workunits.yaml | 16
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/ceph.yaml | 14
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/clusters/3-mons.yaml | 7
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/clusters/9-mons.yaml | 7
l---------  src/ceph/qa/suites/rados/monthrash/d-require-luminous | 1
l---------  src/ceph/qa/suites/rados/monthrash/mon_kv_backend | 1
l---------  src/ceph/qa/suites/rados/monthrash/msgr | 1
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml | 11
l---------  src/ceph/qa/suites/rados/monthrash/objectstore | 1
l---------  src/ceph/qa/suites/rados/monthrash/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml | 12
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/thrashers/many.yaml | 16
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/thrashers/one.yaml | 9
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml | 14
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml | 13
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml | 58
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml | 9
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml | 16
-rw-r--r--  src/ceph/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml | 13
-rw-r--r--  src/ceph/qa/suites/rados/multimon/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/multimon/clusters/21.yaml | 8
-rw-r--r--  src/ceph/qa/suites/rados/multimon/clusters/3.yaml | 7
-rw-r--r--  src/ceph/qa/suites/rados/multimon/clusters/6.yaml | 7
-rw-r--r--  src/ceph/qa/suites/rados/multimon/clusters/9.yaml | 8
l---------  src/ceph/qa/suites/rados/multimon/mon_kv_backend | 1
l---------  src/ceph/qa/suites/rados/multimon/msgr | 1
-rw-r--r--  src/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml | 5
l---------  src/ceph/qa/suites/rados/multimon/objectstore | 1
l---------  src/ceph/qa/suites/rados/multimon/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml | 7
-rw-r--r--  src/ceph/qa/suites/rados/objectstore/alloc-hint.yaml | 21
-rw-r--r--  src/ceph/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/objectstore/filejournal.yaml | 13
-rw-r--r--  src/ceph/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml | 14
-rw-r--r--  src/ceph/qa/suites/rados/objectstore/filestore-idempotent.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/objectstore/fusestore.yaml | 9
-rw-r--r--  src/ceph/qa/suites/rados/objectstore/keyvaluedb.yaml | 8
-rw-r--r--  src/ceph/qa/suites/rados/objectstore/objectcacher-stress.yaml | 14
-rw-r--r--  src/ceph/qa/suites/rados/objectstore/objectstore.yaml | 12
-rw-r--r--  src/ceph/qa/suites/rados/rest/mgr-restful.yaml | 25
-rw-r--r--  src/ceph/qa/suites/rados/rest/rest_test.yaml | 44
-rw-r--r--  src/ceph/qa/suites/rados/singleton-bluestore/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml | 33
l---------  src/ceph/qa/suites/rados/singleton-bluestore/msgr | 1
-rw-r--r--  src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml | 5
l---------  src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp.yaml | 1
l---------  src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore.yaml | 1
l---------  src/ceph/qa/suites/rados/singleton-bluestore/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml | 20
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml | 48
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml | 8
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml | 34
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml | 34
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml | 20
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml | 21
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml | 44
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml | 9
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml | 29
l---------  src/ceph/qa/suites/rados/singleton-nomsgr/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/singleton/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml | 29
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml | 29
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml | 24
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml | 14
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml | 20
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/peer.yaml | 25
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml | 34
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/radostool.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/random-eio.yaml | 41
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml | 51
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/reg11184.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/rest-api.yaml | 35
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml | 44
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash-rados/+ | 0
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml | 27
l---------  src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml | 70
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml | 32
l---------  src/ceph/qa/suites/rados/singleton/msgr | 1
-rw-r--r--  src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml | 7
l---------  src/ceph/qa/suites/rados/singleton/objectstore | 1
l---------  src/ceph/qa/suites/rados/singleton/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/standalone/crush.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/standalone/erasure-code.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/standalone/misc.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/standalone/mon.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/standalone/osd.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/standalone/scrub.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-big/% | 0
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-big/ceph.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/+ | 0
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml | 4
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml | 4
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-big/d-require-luminous | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-big/leveldb.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-big/msgr-failures | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-big/objectstore | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-big/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml | 20
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml | 16
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml | 15
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-isa/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/clusters | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/d-require-luminous | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/leveldb.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/msgr-failures | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/objectstore | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/rados.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/supported | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashers | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/% | 0
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/bluestore.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/clusters | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/d-require-luminous | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/fast | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/leveldb.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashers | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml | 29
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml | 22
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-shec/% | 0
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/+ | 0
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml | 4
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-shec/d-require-luminous | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-shec/leveldb.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-shec/msgr-failures | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-shec/objectstore | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-shec/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml | 18
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/ceph.yaml | 3
l---------  src/ceph/qa/suites/rados/thrash-erasure-code/clusters | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code/d-require-luminous | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/fast/fast.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/fast/normal.yaml | 0
l---------  src/ceph/qa/suites/rados/thrash-erasure-code/leveldb.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code/msgr-failures | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code/objectstore | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml | 16
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml | 16
l---------  src/ceph/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml | 27
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml | 21
-rw-r--r--  src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml | 20
-rw-r--r--  src/ceph/qa/suites/rados/thrash-luminous/% | 0
l---------  src/ceph/qa/suites/rados/thrash-luminous/0-size-min-size-overrides | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/1-pg-log-overrides | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/backoff | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/ceph.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/clusters | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/msgr | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/objectstore | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/rados.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/rocksdb.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/thrashers | 1
l---------  src/ceph/qa/suites/rados/thrash-luminous/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect_set_object.yaml | 9
-rw-r--r--  src/ceph/qa/suites/rados/thrash/% | 0
l---------  src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml | 0
l---------  src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash/backoff/normal.yaml | 0
-rw-r--r--  src/ceph/qa/suites/rados/thrash/backoff/peering.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rados/thrash/ceph.yaml | 3
-rw-r--r--  src/ceph/qa/suites/rados/thrash/clusters/+ | 0
l---------  src/ceph/qa/suites/rados/thrash/clusters/fixed-2.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash/clusters/openstack.yaml | 4
-rw-r--r--  src/ceph/qa/suites/rados/thrash/d-require-luminous/at-end.yaml | 33
-rw-r--r--  src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-crush-compat.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-upmap.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs.yaml | 0
l---------  src/ceph/qa/suites/rados/thrash/msgr | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml | 7
-rw-r--r--  src/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml | 9
l---------  src/ceph/qa/suites/rados/thrash/objectstore | 1
l---------  src/ceph/qa/suites/rados/thrash/rados.yaml | 1
l---------  src/ceph/qa/suites/rados/thrash/rocksdb.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash/thrashers/default.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml | 21
-rw-r--r--  src/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml | 22
-rw-r--r--  src/ceph/qa/suites/rados/thrash/thrashers/none.yaml | 0
-rw-r--r--  src/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml | 17
l---------  src/ceph/qa/suites/rados/thrash/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml | 13
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml | 30
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml | 34
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml | 39
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml | 34
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/cache.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml | 18
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml | 16
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml | 33
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml | 24
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml | 13
-rw-r--r--  src/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml | 8
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/+ | 0
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/openstack.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/start.yaml | 25
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml | 13
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/firsthalf.yaml | 16
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/default.yaml | 24
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/+ | 0
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-cls.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-import-export.yaml | 13
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/readwrite.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/snaps-few-objects.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/+ | 0
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/radosbench.yaml | 41
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/rbd_api.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/6-finish-upgrade.yaml | 23
l---------  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/7-luminous.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/+ | 0
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rbd-python.yaml | 9
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rgw-swift.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/snaps-many-objects.yaml | 16
l---------  src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/verify/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/verify/ceph.yaml | 3
-rw-r--r--  src/ceph/qa/suites/rados/verify/clusters/+ | 0
l---------  src/ceph/qa/suites/rados/verify/clusters/fixed-2.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/verify/clusters/openstack.yaml | 4
l---------  src/ceph/qa/suites/rados/verify/d-require-luminous | 1
-rw-r--r--  src/ceph/qa/suites/rados/verify/d-thrash/default/+ | 0
-rw-r--r--  src/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml | 10
l---------  src/ceph/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/verify/d-thrash/none.yaml | 0
l---------  src/ceph/qa/suites/rados/verify/mon_kv_backend | 1
l---------  src/ceph/qa/suites/rados/verify/msgr | 1
-rw-r--r--  src/ceph/qa/suites/rados/verify/msgr-failures/few.yaml | 5
l---------  src/ceph/qa/suites/rados/verify/objectstore | 1
l---------  src/ceph/qa/suites/rados/verify/rados.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml | 10
-rw-r--r--  src/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml | 13
-rw-r--r--  src/ceph/qa/suites/rados/verify/validater/lockdep.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/verify/validater/valgrind.yaml | 22
-rw-r--r--  src/ceph/qa/suites/rbd/basic/% | 0
-rw-r--r--  src/ceph/qa/suites/rbd/basic/base/install.yaml | 3
-rw-r--r--  src/ceph/qa/suites/rbd/basic/cachepool/none.yaml | 0
-rw-r--r--  src/ceph/qa/suites/rbd/basic/cachepool/small.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rbd/basic/clusters/+ | 0
l---------  src/ceph/qa/suites/rbd/basic/clusters/fixed-1.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rbd/basic/clusters/openstack.yaml | 4
-rw-r--r--  src/ceph/qa/suites/rbd/basic/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rbd/basic/msgr-failures/many.yaml | 5
l---------  src/ceph/qa/suites/rbd/basic/objectstore | 1
-rw-r--r--  src/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml | 7
-rw-r--r--  src/ceph/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml | 9
-rw-r--r--  src/ceph/qa/suites/rbd/cli/% | 0
-rw-r--r--  src/ceph/qa/suites/rbd/cli/base/install.yaml | 3
l---------  src/ceph/qa/suites/rbd/cli/clusters | 1
-rw-r--r--  src/ceph/qa/suites/rbd/cli/features/defaults.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rbd/cli/features/format-1.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rbd/cli/features/journaling.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rbd/cli/features/layering.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rbd/cli/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rbd/cli/msgr-failures/many.yaml | 5
l---------  src/ceph/qa/suites/rbd/cli/objectstore | 1
-rw-r--r--  src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml | 27
-rw-r--r--  src/ceph/qa/suites/rbd/cli/pool/none.yaml | 0
-rw-r--r--  src/ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/% | 0
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/cache/none.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/cache/writeback.yaml | 6
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/cache/writethrough.yaml | 7
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/clusters/+ | 0
l---------  src/ceph/qa/suites/rbd/librbd/clusters/fixed-3.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/clusters/openstack.yaml | 4
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/config/copy-on-read.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/config/none.yaml | 0
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/config/skip-partial-discard.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/msgr-failures/few.yaml | 7
l---------  src/ceph/qa/suites/rbd/librbd/objectstore | 1
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/pool/ec-data-pool.yaml | 24
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/pool/none.yaml | 0
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml | 11
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml | 13
-rw-r--r--src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/librbd/workloads/fsx.yaml4
-rw-r--r--src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests.yaml7
-rw-r--r--src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml7
-rw-r--r--src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml7
-rw-r--r--src/ceph/qa/suites/rbd/librbd/workloads/rbd_fio.yaml10
-rw-r--r--src/ceph/qa/suites/rbd/maintenance/%0
-rw-r--r--src/ceph/qa/suites/rbd/maintenance/base/install.yaml3
-rw-r--r--src/ceph/qa/suites/rbd/maintenance/clusters/+0
l---------src/ceph/qa/suites/rbd/maintenance/clusters/fixed-3.yaml1
l---------src/ceph/qa/suites/rbd/maintenance/clusters/openstack.yaml1
l---------src/ceph/qa/suites/rbd/maintenance/filestore-xfs.yaml1
l---------src/ceph/qa/suites/rbd/maintenance/objectstore1
-rw-r--r--src/ceph/qa/suites/rbd/maintenance/qemu/xfstests.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml8
-rw-r--r--src/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml8
-rw-r--r--src/ceph/qa/suites/rbd/mirror-ha/%0
l---------src/ceph/qa/suites/rbd/mirror-ha/base1
l---------src/ceph/qa/suites/rbd/mirror-ha/cluster1
l---------src/ceph/qa/suites/rbd/mirror-ha/msgr-failures1
l---------src/ceph/qa/suites/rbd/mirror-ha/objectstore1
-rw-r--r--src/ceph/qa/suites/rbd/mirror-ha/workloads/rbd-mirror-ha-workunit.yaml16
-rw-r--r--src/ceph/qa/suites/rbd/mirror/%0
-rw-r--r--src/ceph/qa/suites/rbd/mirror/base/install.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/mirror/cluster/+0
-rw-r--r--src/ceph/qa/suites/rbd/mirror/cluster/2-node.yaml17
-rw-r--r--src/ceph/qa/suites/rbd/mirror/cluster/openstack.yaml4
l---------src/ceph/qa/suites/rbd/mirror/msgr-failures1
l---------src/ceph/qa/suites/rbd/mirror/objectstore1
-rw-r--r--src/ceph/qa/suites/rbd/mirror/rbd-mirror/one-per-cluster.yaml19
-rw-r--r--src/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-stress-workunit.yaml12
-rw-r--r--src/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit.yaml11
-rw-r--r--src/ceph/qa/suites/rbd/nbd/%0
l---------src/ceph/qa/suites/rbd/nbd/base1
-rw-r--r--src/ceph/qa/suites/rbd/nbd/cluster/+0
-rw-r--r--src/ceph/qa/suites/rbd/nbd/cluster/fixed-3.yaml4
l---------src/ceph/qa/suites/rbd/nbd/cluster/openstack.yaml1
l---------src/ceph/qa/suites/rbd/nbd/msgr-failures1
l---------src/ceph/qa/suites/rbd/nbd/objectstore1
l---------src/ceph/qa/suites/rbd/nbd/thrashers1
l---------src/ceph/qa/suites/rbd/nbd/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml15
-rw-r--r--src/ceph/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml10
-rw-r--r--src/ceph/qa/suites/rbd/openstack/%0
-rw-r--r--src/ceph/qa/suites/rbd/openstack/base/install.yaml7
-rw-r--r--src/ceph/qa/suites/rbd/openstack/clusters/+0
-rw-r--r--src/ceph/qa/suites/rbd/openstack/clusters/fixed-2.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/openstack/clusters/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/rbd/openstack/features/minimum.yaml6
l---------src/ceph/qa/suites/rbd/openstack/objectstore1
-rw-r--r--src/ceph/qa/suites/rbd/openstack/workloads/devstack-tempest-gate.yaml51
-rw-r--r--src/ceph/qa/suites/rbd/qemu/%0
-rw-r--r--src/ceph/qa/suites/rbd/qemu/cache/none.yaml6
-rw-r--r--src/ceph/qa/suites/rbd/qemu/cache/writeback.yaml6
-rw-r--r--src/ceph/qa/suites/rbd/qemu/cache/writethrough.yaml7
-rw-r--r--src/ceph/qa/suites/rbd/qemu/clusters/+0
l---------src/ceph/qa/suites/rbd/qemu/clusters/fixed-3.yaml1
-rw-r--r--src/ceph/qa/suites/rbd/qemu/clusters/openstack.yaml8
-rw-r--r--src/ceph/qa/suites/rbd/qemu/features/defaults.yaml6
-rw-r--r--src/ceph/qa/suites/rbd/qemu/features/journaling.yaml6
-rw-r--r--src/ceph/qa/suites/rbd/qemu/msgr-failures/few.yaml7
l---------src/ceph/qa/suites/rbd/qemu/objectstore1
-rw-r--r--src/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml21
-rw-r--r--src/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml24
-rw-r--r--src/ceph/qa/suites/rbd/qemu/pool/none.yaml0
-rw-r--r--src/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml11
-rw-r--r--src/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml17
-rw-r--r--src/ceph/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml6
-rw-r--r--src/ceph/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml6
-rw-r--r--src/ceph/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled6
-rw-r--r--src/ceph/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml8
-rw-r--r--src/ceph/qa/suites/rbd/singleton-bluestore/%0
-rw-r--r--src/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml14
l---------src/ceph/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-comp.yaml1
l---------src/ceph/qa/suites/rbd/singleton-bluestore/objectstore/bluestore.yaml1
-rw-r--r--src/ceph/qa/suites/rbd/singleton-bluestore/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/rbd/singleton/%0
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/admin_socket.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/formatted-output.yaml10
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/merge_diff.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/permissions.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml14
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml14
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml7
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml12
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/read-flags-writeback.yaml12
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/singleton/all/verify_pool.yaml9
l---------src/ceph/qa/suites/rbd/singleton/objectstore1
-rw-r--r--src/ceph/qa/suites/rbd/singleton/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/rbd/thrash/%0
-rw-r--r--src/ceph/qa/suites/rbd/thrash/base/install.yaml3
-rw-r--r--src/ceph/qa/suites/rbd/thrash/clusters/+0
l---------src/ceph/qa/suites/rbd/thrash/clusters/fixed-2.yaml1
-rw-r--r--src/ceph/qa/suites/rbd/thrash/clusters/openstack.yaml8
-rw-r--r--src/ceph/qa/suites/rbd/thrash/msgr-failures/few.yaml5
l---------src/ceph/qa/suites/rbd/thrash/objectstore1
-rw-r--r--src/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml21
-rw-r--r--src/ceph/qa/suites/rbd/thrash/thrashers/default.yaml8
l---------src/ceph/qa/suites/rbd/thrash/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/journal.yaml5
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml16
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml10
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml10
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml5
-rw-r--r--src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/%0
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/base/install.yaml3
l---------src/ceph/qa/suites/rbd/valgrind/clusters1
l---------src/ceph/qa/suites/rbd/valgrind/objectstore1
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/validator/memcheck.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml13
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/workloads/fsx.yaml4
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml9
-rw-r--r--src/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml11
-rw-r--r--src/ceph/qa/suites/rgw/hadoop-s3a/s3a-hadoop-v28.yaml31
-rw-r--r--src/ceph/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml32
-rw-r--r--src/ceph/qa/suites/rgw/multifs/%0
l---------src/ceph/qa/suites/rgw/multifs/clusters/fixed-2.yaml1
-rw-r--r--src/ceph/qa/suites/rgw/multifs/frontend/civetweb.yaml3
l---------src/ceph/qa/suites/rgw/multifs/objectstore1
-rw-r--r--src/ceph/qa/suites/rgw/multifs/overrides.yaml8
l---------src/ceph/qa/suites/rgw/multifs/rgw_pool_type1
-rw-r--r--src/ceph/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml10
-rw-r--r--src/ceph/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml10
-rw-r--r--src/ceph/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml16
-rw-r--r--src/ceph/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml16
-rw-r--r--src/ceph/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml13
-rw-r--r--src/ceph/qa/suites/rgw/multifs/tasks/rgw_swift.yaml7
-rw-r--r--src/ceph/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml10
-rw-r--r--src/ceph/qa/suites/rgw/multisite/%0
-rw-r--r--src/ceph/qa/suites/rgw/multisite/clusters.yaml3
-rw-r--r--src/ceph/qa/suites/rgw/multisite/frontend/civetweb.yaml3
-rw-r--r--src/ceph/qa/suites/rgw/multisite/overrides.yaml10
-rw-r--r--src/ceph/qa/suites/rgw/multisite/realms/three-zone.yaml20
-rw-r--r--src/ceph/qa/suites/rgw/multisite/realms/two-zonegroup.yaml27
-rw-r--r--src/ceph/qa/suites/rgw/multisite/tasks/test_multi.yaml20
-rw-r--r--src/ceph/qa/suites/rgw/multisite/valgrind.yaml17
-rw-r--r--src/ceph/qa/suites/rgw/singleton/%0
-rw-r--r--src/ceph/qa/suites/rgw/singleton/all/radosgw-admin.yaml20
-rw-r--r--src/ceph/qa/suites/rgw/singleton/frontend/civetweb.yaml3
l---------src/ceph/qa/suites/rgw/singleton/objectstore1
-rw-r--r--src/ceph/qa/suites/rgw/singleton/overrides.yaml8
l---------src/ceph/qa/suites/rgw/singleton/rgw_pool_type1
-rw-r--r--src/ceph/qa/suites/rgw/thrash/%0
-rw-r--r--src/ceph/qa/suites/rgw/thrash/civetweb.yaml3
l---------src/ceph/qa/suites/rgw/thrash/clusters/fixed-2.yaml1
-rw-r--r--src/ceph/qa/suites/rgw/thrash/install.yaml5
l---------src/ceph/qa/suites/rgw/thrash/objectstore1
-rw-r--r--src/ceph/qa/suites/rgw/thrash/thrasher/default.yaml8
l---------src/ceph/qa/suites/rgw/thrash/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/rgw/thrash/workload/rgw_bucket_quota.yaml7
-rw-r--r--src/ceph/qa/suites/rgw/thrash/workload/rgw_multipart_upload.yaml7
-rw-r--r--src/ceph/qa/suites/rgw/thrash/workload/rgw_readwrite.yaml13
-rw-r--r--src/ceph/qa/suites/rgw/thrash/workload/rgw_roundtrip.yaml13
-rw-r--r--src/ceph/qa/suites/rgw/thrash/workload/rgw_s3tests.yaml12
-rw-r--r--src/ceph/qa/suites/rgw/thrash/workload/rgw_swift.yaml4
-rw-r--r--src/ceph/qa/suites/rgw/thrash/workload/rgw_user_quota.yaml7
-rw-r--r--src/ceph/qa/suites/rgw/verify/%0
l---------src/ceph/qa/suites/rgw/verify/clusters/fixed-2.yaml1
-rw-r--r--src/ceph/qa/suites/rgw/verify/frontend/civetweb.yaml3
-rw-r--r--src/ceph/qa/suites/rgw/verify/msgr-failures/few.yaml5
l---------src/ceph/qa/suites/rgw/verify/objectstore1
-rw-r--r--src/ceph/qa/suites/rgw/verify/overrides.yaml10
l---------src/ceph/qa/suites/rgw/verify/rgw_pool_type1
-rw-r--r--src/ceph/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml22
-rw-r--r--src/ceph/qa/suites/rgw/verify/tasks/rgw_swift.yaml13
-rw-r--r--src/ceph/qa/suites/rgw/verify/validater/lockdep.yaml7
-rw-r--r--src/ceph/qa/suites/rgw/verify/validater/valgrind.yaml18
-rw-r--r--src/ceph/qa/suites/samba/%0
-rw-r--r--src/ceph/qa/suites/samba/clusters/samba-basic.yaml7
-rw-r--r--src/ceph/qa/suites/samba/install/install.yaml9
-rw-r--r--src/ceph/qa/suites/samba/mount/fuse.yaml6
-rw-r--r--src/ceph/qa/suites/samba/mount/kclient.yaml14
-rw-r--r--src/ceph/qa/suites/samba/mount/native.yaml2
-rw-r--r--src/ceph/qa/suites/samba/mount/noceph.yaml5
l---------src/ceph/qa/suites/samba/objectstore1
-rw-r--r--src/ceph/qa/suites/samba/workload/cifs-dbench.yaml8
-rw-r--r--src/ceph/qa/suites/samba/workload/cifs-fsstress.yaml8
-rw-r--r--src/ceph/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled9
-rw-r--r--src/ceph/qa/suites/samba/workload/smbtorture.yaml39
-rw-r--r--src/ceph/qa/suites/smoke/1node/%0
-rw-r--r--src/ceph/qa/suites/smoke/1node/clusters/+0
l---------src/ceph/qa/suites/smoke/1node/clusters/fixed-1.yaml1
-rw-r--r--src/ceph/qa/suites/smoke/1node/clusters/openstack.yaml8
l---------src/ceph/qa/suites/smoke/1node/distros/ubuntu_latest.yaml1
l---------src/ceph/qa/suites/smoke/1node/objectstore/filestore-xfs.yaml1
-rw-r--r--src/ceph/qa/suites/smoke/1node/tasks/ceph-deploy.yaml7
-rw-r--r--src/ceph/qa/suites/smoke/basic/%0
-rw-r--r--src/ceph/qa/suites/smoke/basic/clusters/+0
l---------src/ceph/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml1
-rw-r--r--src/ceph/qa/suites/smoke/basic/clusters/openstack.yaml8
l---------src/ceph/qa/suites/smoke/basic/objectstore/bluestore.yaml1
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml9
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml8
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml8
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml18
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml13
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml14
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml14
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml14
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml17
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/mon_thrash.yaml24
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rados_api_tests.yaml17
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rados_bench.yaml36
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml41
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rados_cls_all.yaml8
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml31
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rados_python.yaml10
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml9
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml11
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml11
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rbd_fsx.yaml17
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml10
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml17
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml17
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml13
-rw-r--r--src/ceph/qa/suites/smoke/basic/tasks/rgw_swift.yaml8
-rw-r--r--src/ceph/qa/suites/smoke/systemd/%0
-rw-r--r--src/ceph/qa/suites/smoke/systemd/clusters/+0
-rw-r--r--src/ceph/qa/suites/smoke/systemd/clusters/fixed-4.yaml5
-rw-r--r--src/ceph/qa/suites/smoke/systemd/clusters/openstack.yaml8
l---------src/ceph/qa/suites/smoke/systemd/distros/centos_latest.yaml1
l---------src/ceph/qa/suites/smoke/systemd/distros/ubuntu_latest.yaml1
l---------src/ceph/qa/suites/smoke/systemd/objectstore/filestore-xfs.yaml1
-rw-r--r--src/ceph/qa/suites/smoke/systemd/tasks/systemd.yaml8
-rw-r--r--src/ceph/qa/suites/stress/bench/%0
l---------src/ceph/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml1
-rw-r--r--src/ceph/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml8
-rw-r--r--src/ceph/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml8
-rw-r--r--src/ceph/qa/suites/stress/thrash/%0
-rw-r--r--src/ceph/qa/suites/stress/thrash/clusters/16-osd.yaml18
-rw-r--r--src/ceph/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml3
-rw-r--r--src/ceph/qa/suites/stress/thrash/clusters/8-osd.yaml10
-rw-r--r--src/ceph/qa/suites/stress/thrash/thrashers/default.yaml7
-rw-r--r--src/ceph/qa/suites/stress/thrash/thrashers/fast.yaml9
-rw-r--r--src/ceph/qa/suites/stress/thrash/thrashers/more-down.yaml8
-rw-r--r--src/ceph/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml6
-rw-r--r--src/ceph/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml6
-rw-r--r--src/ceph/qa/suites/stress/thrash/workloads/radosbench.yaml4
-rw-r--r--src/ceph/qa/suites/stress/thrash/workloads/readwrite.yaml9
-rw-r--r--src/ceph/qa/suites/teuthology/buildpackages/%0
l---------src/ceph/qa/suites/teuthology/buildpackages/distros1
-rw-r--r--src/ceph/qa/suites/teuthology/buildpackages/tasks/branch.yaml10
-rw-r--r--src/ceph/qa/suites/teuthology/buildpackages/tasks/default.yaml14
-rw-r--r--src/ceph/qa/suites/teuthology/buildpackages/tasks/tag.yaml11
-rw-r--r--src/ceph/qa/suites/teuthology/ceph/%0
-rw-r--r--src/ceph/qa/suites/teuthology/ceph/clusters/single.yaml2
l---------src/ceph/qa/suites/teuthology/ceph/distros1
-rw-r--r--src/ceph/qa/suites/teuthology/ceph/tasks/teuthology.yaml3
-rw-r--r--src/ceph/qa/suites/teuthology/integration.yaml2
-rw-r--r--src/ceph/qa/suites/teuthology/multi-cluster/%0
-rw-r--r--src/ceph/qa/suites/teuthology/multi-cluster/all/ceph.yaml25
-rw-r--r--src/ceph/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml21
-rw-r--r--src/ceph/qa/suites/teuthology/multi-cluster/all/upgrade.yaml51
-rw-r--r--src/ceph/qa/suites/teuthology/multi-cluster/all/workunit.yaml23
-rw-r--r--src/ceph/qa/suites/teuthology/no-ceph/%0
-rw-r--r--src/ceph/qa/suites/teuthology/no-ceph/clusters/single.yaml2
-rw-r--r--src/ceph/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml2
-rw-r--r--src/ceph/qa/suites/teuthology/nop/%0
-rw-r--r--src/ceph/qa/suites/teuthology/nop/all/nop.yaml3
-rw-r--r--src/ceph/qa/suites/teuthology/rgw/%0
l---------src/ceph/qa/suites/teuthology/rgw/distros1
-rw-r--r--src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml24
-rw-r--r--src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml24
-rw-r--r--src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml26
-rw-r--r--src/ceph/qa/suites/teuthology/workunits/yes.yaml8
-rw-r--r--src/ceph/qa/suites/tgt/basic/%1
-rw-r--r--src/ceph/qa/suites/tgt/basic/clusters/fixed-3.yaml4
-rw-r--r--src/ceph/qa/suites/tgt/basic/msgr-failures/few.yaml5
-rw-r--r--src/ceph/qa/suites/tgt/basic/msgr-failures/many.yaml5
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/blogbench.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/bonnie.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/dbench-short.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/dbench.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/ffsb.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/fio.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/fsstress.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/fsx.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/fsync-tester.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/iogen.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/iozone-sync.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/iozone.yaml9
-rw-r--r--src/ceph/qa/suites/tgt/basic/tasks/pjd.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/%0
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml26
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/%0
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml21
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/%0
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/start.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/jewel-client-x.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_api_tests.yaml21
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_cli_import_export.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/%0
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/start.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/jewel-client-x.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/defaults.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/layering.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/rbd_notification_tests.yaml21
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/%0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml21
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml20
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml20
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml36
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml5
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml29
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-by-daemon.yaml30
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml11
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/%0
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml83
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml5
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml52
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/%0
l---------src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml82
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/%0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml32
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml60
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml41
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml24
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml38
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml23
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml11
l---------src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml13
l---------src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/parallel/distros1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/%0
l---------src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml236
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%0
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml22
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml35
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/%0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml20
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml13
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml40
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml9
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml16
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/kraken-luminous.yaml61
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/%0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/start.yaml33
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/kraken.yaml39
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/blogbench.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/ec-rados-default.yaml24
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_loadgenbig.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_python.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-all.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml35
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/4-luminous.yaml4
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/5-workload.yaml11
l---------src/ceph/qa/suites/upgrade/kraken-x/parallel/6-luminous-with-mgr.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/blogbench.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados-snaps-few-objects.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_loadgenmix.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_mon_thrash.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_cls.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_import_export.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rgw_swift.yaml13
l---------src/ceph/qa/suites/upgrade/kraken-x/parallel/distros1
l---------src/ceph/qa/suites/upgrade/kraken-x/parallel/objectstore1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/%0
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/0-cluster1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/1-kraken-install1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/2-partial-upgrade1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/4-ec-workload.yaml22
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/5-finish-upgrade.yaml1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/6-luminous-with-mgr.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/7-final-workload.yaml35
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/distros1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/objectstore1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/%0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/openstack.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/start.yaml27
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/kraken.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/firsthalf.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/radosbench.yaml40
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-cls.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-import-export.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd_api.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/readwrite.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/snaps-few-objects.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/5-finish-upgrade.yaml9
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/6-luminous-with-mgr.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rbd-python.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rgw-swift.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/snaps-many-objects.yaml16
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/distros1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/bluestore.yaml1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/filestore-xfs.yaml1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/%0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml40
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml43
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml24
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml35
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_no_upgrated.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_upgrated.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml13
l---------src/ceph/qa/suites/upgrade/luminous-x/parallel/distros1
l---------src/ceph/qa/suites/upgrade/luminous-x/parallel/objectstore1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/%0
l---------src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/centos_latest.yaml1
l---------src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/ubuntu_latest.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/point-to-point-upgrade.yaml225
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/%0
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-ceph-install1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml22
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml35
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/distros1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/%0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml29
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml40
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rbd-python.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml16
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split/distros1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore.yaml1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml1
1411 files changed, 12314 insertions, 0 deletions
diff --git a/src/ceph/qa/suites/big/rados-thrash/% b/src/ceph/qa/suites/big/rados-thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/big/rados-thrash/%
diff --git a/src/ceph/qa/suites/big/rados-thrash/ceph/ceph.yaml b/src/ceph/qa/suites/big/rados-thrash/ceph/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/big/rados-thrash/ceph/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
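
A note on how these suite trees are consumed: teuthology expands each suite directory into concrete jobs. A file named "%" marks a directory whose facet subdirectories are combined as a cartesian product (one fragment from each facet per job); a file named "+" marks a directory whose fragments are all concatenated into a single job; and the "l---------" symlink entries (objectstore, distros, thrashosds-health.yaml, and so on) reuse fragment sets shared across suites. A sketch of a fully composed big/rados-thrash job follows the snaps-few-objects workload below.
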
diff --git a/src/ceph/qa/suites/big/rados-thrash/clusters/big.yaml b/src/ceph/qa/suites/big/rados-thrash/clusters/big.yaml
new file mode 100644
index 0000000..fd8c217
--- /dev/null
+++ b/src/ceph/qa/suites/big/rados-thrash/clusters/big.yaml
@@ -0,0 +1,68 @@
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a, mgr.x]
+- [osd.3, osd.4, osd.5, client.1, mon.b, mgr.y]
+- [osd.6, osd.7, osd.8, client.2, mon.c, mgr.z]
+- [osd.9, osd.10, osd.11, client.3, mon.d]
+- [osd.12, osd.13, osd.14, client.4, mon.e]
+- [osd.15, osd.16, osd.17, client.5]
+- [osd.18, osd.19, osd.20, client.6]
+- [osd.21, osd.22, osd.23, client.7]
+- [osd.24, osd.25, osd.26, client.8]
+- [osd.27, osd.28, osd.29, client.9]
+- [osd.30, osd.31, osd.32, client.10]
+- [osd.33, osd.34, osd.35, client.11]
+- [osd.36, osd.37, osd.38, client.12]
+- [osd.39, osd.40, osd.41, client.13]
+- [osd.42, osd.43, osd.44, client.14]
+- [osd.45, osd.46, osd.47, client.15]
+- [osd.48, osd.49, osd.50, client.16]
+- [osd.51, osd.52, osd.53, client.17]
+- [osd.54, osd.55, osd.56, client.18]
+- [osd.57, osd.58, osd.59, client.19]
+- [osd.60, osd.61, osd.62, client.20]
+- [osd.63, osd.64, osd.65, client.21]
+- [osd.66, osd.67, osd.68, client.22]
+- [osd.69, osd.70, osd.71, client.23]
+- [osd.72, osd.73, osd.74, client.24]
+- [osd.75, osd.76, osd.77, client.25]
+- [osd.78, osd.79, osd.80, client.26]
+- [osd.81, osd.82, osd.83, client.27]
+- [osd.84, osd.85, osd.86, client.28]
+- [osd.87, osd.88, osd.89, client.29]
+- [osd.90, osd.91, osd.92, client.30]
+- [osd.93, osd.94, osd.95, client.31]
+- [osd.96, osd.97, osd.98, client.32]
+- [osd.99, osd.100, osd.101, client.33]
+- [osd.102, osd.103, osd.104, client.34]
+- [osd.105, osd.106, osd.107, client.35]
+- [osd.108, osd.109, osd.110, client.36]
+- [osd.111, osd.112, osd.113, client.37]
+- [osd.114, osd.115, osd.116, client.38]
+- [osd.117, osd.118, osd.119, client.39]
+- [osd.120, osd.121, osd.122, client.40]
+- [osd.123, osd.124, osd.125, client.41]
+- [osd.126, osd.127, osd.128, client.42]
+- [osd.129, osd.130, osd.131, client.43]
+- [osd.132, osd.133, osd.134, client.44]
+- [osd.135, osd.136, osd.137, client.45]
+- [osd.138, osd.139, osd.140, client.46]
+- [osd.141, osd.142, osd.143, client.47]
+- [osd.144, osd.145, osd.146, client.48]
+- [osd.147, osd.148, osd.149, client.49]
+- [osd.150, osd.151, osd.152, client.50]
+#- [osd.153, osd.154, osd.155, client.51]
+#- [osd.156, osd.157, osd.158, client.52]
+#- [osd.159, osd.160, osd.161, client.53]
+#- [osd.162, osd.163, osd.164, client.54]
+#- [osd.165, osd.166, osd.167, client.55]
+#- [osd.168, osd.169, osd.170, client.56]
+#- [osd.171, osd.172, osd.173, client.57]
+#- [osd.174, osd.175, osd.176, client.58]
+#- [osd.177, osd.178, osd.179, client.59]
+#- [osd.180, osd.181, osd.182, client.60]
+#- [osd.183, osd.184, osd.185, client.61]
+#- [osd.186, osd.187, osd.188, client.62]
+#- [osd.189, osd.190, osd.191, client.63]
+#- [osd.192, osd.193, osd.194, client.64]
+#- [osd.195, osd.196, osd.197, client.65]
+#- [osd.198, osd.199, osd.200, client.66]
diff --git a/src/ceph/qa/suites/big/rados-thrash/clusters/medium.yaml b/src/ceph/qa/suites/big/rados-thrash/clusters/medium.yaml
new file mode 100644
index 0000000..ecded01
--- /dev/null
+++ b/src/ceph/qa/suites/big/rados-thrash/clusters/medium.yaml
@@ -0,0 +1,22 @@
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a, mgr.x]
+- [osd.3, osd.4, osd.5, client.1, mon.b, mgr.y]
+- [osd.6, osd.7, osd.8, client.2, mon.c, mgr.z]
+- [osd.9, osd.10, osd.11, client.3, mon.d]
+- [osd.12, osd.13, osd.14, client.4, mon.e]
+- [osd.15, osd.16, osd.17, client.5]
+- [osd.18, osd.19, osd.20, client.6]
+- [osd.21, osd.22, osd.23, client.7]
+- [osd.24, osd.25, osd.26, client.8]
+- [osd.27, osd.28, osd.29, client.9]
+- [osd.30, osd.31, osd.32, client.10]
+- [osd.33, osd.34, osd.35, client.11]
+- [osd.36, osd.37, osd.38, client.12]
+- [osd.39, osd.40, osd.41, client.13]
+- [osd.42, osd.43, osd.44, client.14]
+- [osd.45, osd.46, osd.47, client.15]
+- [osd.48, osd.49, osd.50, client.16]
+- [osd.51, osd.52, osd.53, client.17]
+- [osd.54, osd.55, osd.56, client.18]
+- [osd.57, osd.58, osd.59, client.19]
+- [osd.60, osd.61, osd.62, client.20]
diff --git a/src/ceph/qa/suites/big/rados-thrash/clusters/small.yaml b/src/ceph/qa/suites/big/rados-thrash/clusters/small.yaml
new file mode 100644
index 0000000..d0aecd0
--- /dev/null
+++ b/src/ceph/qa/suites/big/rados-thrash/clusters/small.yaml
@@ -0,0 +1,6 @@
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a, mgr.x]
+- [osd.3, osd.4, osd.5, client.1, mon.b, mgr.y]
+- [osd.6, osd.7, osd.8, client.2, mon.c, mgr.z]
+- [osd.9, osd.10, osd.11, client.3, mon.d]
+- [osd.12, osd.13, osd.14, client.4, mon.e]
diff --git a/src/ceph/qa/suites/big/rados-thrash/objectstore b/src/ceph/qa/suites/big/rados-thrash/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/big/rados-thrash/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/big/rados-thrash/openstack.yaml b/src/ceph/qa/suites/big/rados-thrash/openstack.yaml
new file mode 100644
index 0000000..4d6edcd
--- /dev/null
+++ b/src/ceph/qa/suites/big/rados-thrash/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+ - machine:
+ disk: 40 # GB
+ ram: 8000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/big/rados-thrash/thrashers/default.yaml b/src/ceph/qa/suites/big/rados-thrash/thrashers/default.yaml
new file mode 100644
index 0000000..bcd3f39
--- /dev/null
+++ b/src/ceph/qa/suites/big/rados-thrash/thrashers/default.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
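
This fragment configures the thrashosds task: timeout is roughly how long, in seconds, the thrasher waits for the cluster to recover after a disruption, while chance_pgnum_grow and chance_pgpnum_fix weight the chance of growing a pool's pg_num and of catching pgp_num up to it. A harsher hypothetical variant could also mark OSDs down; the sketch below is illustration only and not part of this patch, and chance_down is assumed from the thrashosds task's usual option set:

    # Hypothetical thrasher variant -- not added by this patch.
    tasks:
    - thrashosds:
        timeout: 1200
        chance_pgnum_grow: 1
        chance_pgpnum_fix: 1
        chance_down: 0.5   # assumed option: weight for marking a random OSD down
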
diff --git a/src/ceph/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml b/src/ceph/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 0000000..b73bb67
--- /dev/null
+++ b/src/ceph/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+ ops: 4000
+ max_seconds: 3600
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
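
Because big/rados-thrash carries a "%" file, each scheduled job takes one fragment per facet. Merged, a small-cluster job would look roughly like the sketch below; the actual merge is performed by teuthology at schedule time from ceph/ceph.yaml, clusters/small.yaml, thrashers/default.yaml, and this workload:

    # Sketch of one composed big/rados-thrash job (abridged, illustration only).
    roles:
    - [osd.0, osd.1, osd.2, client.0, mon.a, mgr.x]
    - [osd.3, osd.4, osd.5, client.1, mon.b, mgr.y]
    # ... remaining role lines from clusters/small.yaml
    overrides:
      ceph:
        log-whitelist:
        - but it is still running
        - objects unfound and apparently lost
    tasks:
    - install:
    - ceph:
    - thrashosds:
        timeout: 1200
        chance_pgnum_grow: 1
        chance_pgpnum_fix: 1
    - rados:
        ops: 4000
        max_seconds: 3600
        objects: 50
        op_weights:
          read: 100
          write: 100
          # ... delete/snap/rollback/copy_from weights as in the workload above
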
diff --git a/src/ceph/qa/suites/buildpackages/any/% b/src/ceph/qa/suites/buildpackages/any/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/buildpackages/any/%
diff --git a/src/ceph/qa/suites/buildpackages/any/distros b/src/ceph/qa/suites/buildpackages/any/distros
new file mode 120000
index 0000000..1ce8f29
--- /dev/null
+++ b/src/ceph/qa/suites/buildpackages/any/distros
@@ -0,0 +1 @@
+../../../distros/all \ No newline at end of file
diff --git a/src/ceph/qa/suites/buildpackages/any/tasks/release.yaml b/src/ceph/qa/suites/buildpackages/any/tasks/release.yaml
new file mode 100644
index 0000000..d7a3b62
--- /dev/null
+++ b/src/ceph/qa/suites/buildpackages/any/tasks/release.yaml
@@ -0,0 +1,8 @@
+# --suite buildpackages/any --ceph v10.0.1 --filter centos_7,ubuntu_14.04
+roles:
+ - [client.0]
+tasks:
+ - install:
+ - exec:
+ client.0:
+ - ceph --version | grep 'version '
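
The comment on the first line records how this suite is meant to be scheduled; with the teuthology CLI that presumably corresponds to something like: teuthology-suite --suite buildpackages/any --ceph v10.0.1 --filter centos_7,ubuntu_14.04. The task itself is deliberately minimal: install the freshly built packages on client.0, then check that the installed ceph binary reports a version string.
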
diff --git a/src/ceph/qa/suites/buildpackages/tests/% b/src/ceph/qa/suites/buildpackages/tests/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/buildpackages/tests/%
diff --git a/src/ceph/qa/suites/buildpackages/tests/distros b/src/ceph/qa/suites/buildpackages/tests/distros
new file mode 120000
index 0000000..1ce8f29
--- /dev/null
+++ b/src/ceph/qa/suites/buildpackages/tests/distros
@@ -0,0 +1 @@
+../../../distros/all \ No newline at end of file
diff --git a/src/ceph/qa/suites/buildpackages/tests/tasks/release.yaml b/src/ceph/qa/suites/buildpackages/tests/tasks/release.yaml
new file mode 100644
index 0000000..05e8778
--- /dev/null
+++ b/src/ceph/qa/suites/buildpackages/tests/tasks/release.yaml
@@ -0,0 +1,20 @@
+# --suite buildpackages/tests --ceph v10.0.1 --filter centos_7.2,ubuntu_14.04
+overrides:
+ ansible.cephlab:
+ playbook: users.yml
+ buildpackages:
+ good_machine:
+ disk: 20 # GB
+ ram: 2000 # MB
+ cpus: 2
+ min_machine:
+ disk: 10 # GB
+ ram: 1000 # MB
+ cpus: 1
+roles:
+ - [client.0]
+tasks:
+ - install:
+ - exec:
+ client.0:
+ - ceph --version | grep 'version '
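
The good_machine and min_machine blocks are resource hints for the buildpackages task: presumably the machine size it should prefer for build VMs and the smallest it will accept. The roles and tasks then mirror buildpackages/any: install on client.0 and verify the reported version.
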
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/% b/src/ceph/qa/suites/ceph-ansible/smoke/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/%
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml
new file mode 100644
index 0000000..86dd366
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ 3-node cluster
+    install and run ceph-ansible on the mon.a node along with ceph
+roles:
+- [mon.a, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5]
+- [mon.c, mgr.y, osd.6, osd.7, osd.8, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml~10fc85089c... qa_tests - Added options to use both cases: mon.a and installer.0 b/src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml~10fc85089c... qa_tests - Added options to use both cases: mon.a and installer.0
new file mode 100644
index 0000000..86dd366
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml~10fc85089c... qa_tests - Added options to use both cases: mon.a and installer.0
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ 3-node cluster
+    install and run ceph-ansible on the mon.a node along with ceph
+roles:
+- [mon.a, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5]
+- [mon.c, mgr.y, osd.6, osd.7, osd.8, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/4-node.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/4-node.yaml
new file mode 100644
index 0000000..b175443
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/4-node.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ 4-node cluster
+  install and run ceph-ansible on a standalone installer.0 node
+roles:
+- [mon.a, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5]
+- [mon.c, mgr.y, osd.6, osd.7, osd.8, client.0]
+- [installer.0]
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_latest.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_latest.yaml
new file mode 120000
index 0000000..b5973b9
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/centos_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_latest.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_latest.yaml
new file mode 120000
index 0000000..cc5b15b
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/ubuntu_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml
new file mode 100644
index 0000000..36d0a07
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml
@@ -0,0 +1,32 @@
+meta:
+- desc: "Build the ceph cluster using ceph-ansible"
+
+overrides:
+ ceph_ansible:
+ vars:
+ ceph_conf_overrides:
+ global:
+        osd pool default size: 2
+ mon pg warn min per osd: 2
+ osd pool default pg num: 64
+ osd pool default pgp num: 64
+ mon_max_pg_per_osd: 1024
+ ceph_test: true
+ ceph_stable_release: luminous
+ osd_scenario: collocated
+ journal_size: 1024
+ osd_auto_discovery: false
+ ceph_origin: repository
+ ceph_repository: dev
+ ceph_mgr_modules:
+ - status
+ - restful
+ cephfs_pools:
+ - name: "cephfs_data"
+ pgs: "64"
+ - name: "cephfs_metadata"
+ pgs: "64"
+tasks:
+- ssh-keys:
+- ceph_ansible:
+- install.ship_utilities:
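teuthology deep-merges the `overrides` section from every facet a job selects, so these vars combine with whichever 3-config fragment is chosen, and the ceph_conf_overrides entries land in the matching section of the generated ceph.conf ([global] here). A sketch of the effective ceph_ansible vars when 3-config/bluestore_with_dmcrypt.yaml is in the mix, assuming the standard overrides merge and showing only the keys that change:

    # sketch of the merged result (assumption: standard deep-merge of overrides)
    ceph_ansible:
      vars:
        osd_objectstore: bluestore     # added by the 3-config facet
        dmcrypt: True                  # added by the 3-config facet
        ceph_stable_release: luminous  # unchanged from this fragment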
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml
new file mode 100644
index 0000000..604e757
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: "use bluestore + dmcrypt option"
+
+overrides:
+ ceph_ansible:
+ vars:
+ osd_objectstore: bluestore
+ dmcrypt: True
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml
new file mode 100644
index 0000000..4bbd1c7
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: "without dmcrypt"
+
+overrides:
+ ceph_ansible:
+ vars:
+ dmcrypt: False
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml
new file mode 100644
index 0000000..12d63d3
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: "use dmcrypt option"
+
+overrides:
+ ceph_ansible:
+ vars:
+ dmcrypt: True
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/ceph-admin-commands.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/ceph-admin-commands.yaml
new file mode 100644
index 0000000..33642d5
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/ceph-admin-commands.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: "Run ceph-admin-commands.sh"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - ceph-tests/ceph-admin-commands.sh
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/rbd_import_export.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/rbd_import_export.yaml
new file mode 100644
index 0000000..9495934
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/rbd_import_export.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: "Run the rbd import/export tests"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
diff --git a/src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/rest.yaml b/src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/rest.yaml
new file mode 100644
index 0000000..8e38913
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/rest.yaml
@@ -0,0 +1,15 @@
+tasks:
+- exec:
+ mgr.x:
+ - systemctl stop ceph-mgr.target
+ - sleep 5
+ - ceph -s
+- exec:
+ mon.a:
+ - ceph restful create-key admin
+ - ceph restful create-self-signed-cert
+ - ceph restful restart
+- workunit:
+ clients:
+ client.0:
+ - rest/test-restful.sh
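The sequence above first proves the cluster survives an mgr restart, then provisions the restful module (API key, self-signed cert, restart) before the workunit drives the API. A hypothetical follow-up check, not part of the suite, would confirm the module advertises an endpoint:

    # hypothetical verification step (not in the suite)
    tasks:
    - exec:
        mon.a:
        - ceph mgr services   # should list a 'restful' https endpoint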
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/% b/src/ceph/qa/suites/ceph-deploy/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/%
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml b/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml
new file mode 100644
index 0000000..859a37f
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml
@@ -0,0 +1,3 @@
+overrides:
+ ceph-deploy:
+ dmcrypt: yes
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/disable_diff_journal_disk.yaml b/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/disable_diff_journal_disk.yaml
new file mode 100644
index 0000000..5c998c5
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/disable_diff_journal_disk.yaml
@@ -0,0 +1,3 @@
+overrides:
+ ceph-deploy:
+ separate_journal_disk:
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_diff_journal_disk.yaml b/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_diff_journal_disk.yaml
new file mode 100644
index 0000000..ea3f634
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_diff_journal_disk.yaml
@@ -0,0 +1,3 @@
+overrides:
+ ceph-deploy:
+ separate_journal_disk: yes
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml b/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml
new file mode 100644
index 0000000..59cb799
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml
@@ -0,0 +1,4 @@
+overrides:
+ ceph-deploy:
+ dmcrypt: yes
+ separate_journal_disk: yes
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/config_options/cephdeploy_conf.yaml b/src/ceph/qa/suites/ceph-deploy/basic/config_options/cephdeploy_conf.yaml
new file mode 100644
index 0000000..7f9f0b7
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/config_options/cephdeploy_conf.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ mon pg warn min per osd: 2
+ osd pool default size: 2
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/distros b/src/ceph/qa/suites/ceph-deploy/basic/distros
new file mode 120000
index 0000000..c5d5935
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/distros
@@ -0,0 +1 @@
+../../../distros/supported \ No newline at end of file
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/objectstore/bluestore.yaml b/src/ceph/qa/suites/ceph-deploy/basic/objectstore/bluestore.yaml
new file mode 120000
index 0000000..bd7d7e0
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/objectstore/filestore-xfs.yaml b/src/ceph/qa/suites/ceph-deploy/basic/objectstore/filestore-xfs.yaml
new file mode 120000
index 0000000..1af1dfd
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/objectstore/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../../objectstore/filestore-xfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/python_versions/python_2.yaml b/src/ceph/qa/suites/ceph-deploy/basic/python_versions/python_2.yaml
new file mode 100644
index 0000000..51c865b
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/python_versions/python_2.yaml
@@ -0,0 +1,3 @@
+overrides:
+ ceph-deploy:
+ python_version: "2"
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/python_versions/python_3.yaml b/src/ceph/qa/suites/ceph-deploy/basic/python_versions/python_3.yaml
new file mode 100644
index 0000000..22deeca
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/python_versions/python_3.yaml
@@ -0,0 +1,3 @@
+overrides:
+ ceph-deploy:
+ python_version: "3"
diff --git a/src/ceph/qa/suites/ceph-deploy/basic/tasks/ceph-admin-commands.yaml b/src/ceph/qa/suites/ceph-deploy/basic/tasks/ceph-admin-commands.yaml
new file mode 100644
index 0000000..fc4873c
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-deploy/basic/tasks/ceph-admin-commands.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - mds.0
+ - osd.0
+- - osd.1
+ - mon.b
+ - client.0
+openstack:
+ - machine:
+ disk: 10 # GB
+ ram: 2000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- ssh_keys:
+- print: "**** done ssh_keys"
+- ceph-deploy:
+- print: "**** done ceph-deploy"
+- workunit:
+ clients:
+ client.0:
+ - ceph-tests/ceph-admin-commands.sh
+- print: "**** done ceph-tests/ceph-admin-commands.sh"
diff --git a/src/ceph/qa/suites/ceph-disk/basic/% b/src/ceph/qa/suites/ceph-disk/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-disk/basic/%
diff --git a/src/ceph/qa/suites/ceph-disk/basic/distros b/src/ceph/qa/suites/ceph-disk/basic/distros
new file mode 120000
index 0000000..c5d5935
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-disk/basic/distros
@@ -0,0 +1 @@
+../../../distros/supported \ No newline at end of file
diff --git a/src/ceph/qa/suites/ceph-disk/basic/tasks/ceph-disk.yaml b/src/ceph/qa/suites/ceph-disk/basic/tasks/ceph-disk.yaml
new file mode 100644
index 0000000..c61c376
--- /dev/null
+++ b/src/ceph/qa/suites/ceph-disk/basic/tasks/ceph-disk.yaml
@@ -0,0 +1,41 @@
+roles:
+- - mon.a
+ - mgr.x
+ - client.0
+- - osd.0
+ - osd.1
+openstack:
+ - machine:
+ disk: 20 # GB
+ ram: 2000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs # this implicitly means /dev/vd? are used instead of directories
+ wait-for-scrub: false
+ log-whitelist:
+ - \(OSD_
+ - \(PG_
+ conf:
+ global:
+ mon pg warn min per osd: 2
+ osd pool default size: 2
+ osd crush chooseleaf type: 0 # failure domain == osd
+ osd pg bits: 2
+ osd pgp bits: 2
+#
+# Keep this around for debugging purposes. If uncommented, the target
+# will pause and the workunit can be run and debugged manually.
+#
+# - exec:
+# client.0:
+# - sleep 1000000000 # forever
+#
+- workunit:
+ clients:
+ all:
+ - ceph-disk/ceph-disk.sh
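With both OSDs in a single role group, `osd crush chooseleaf type: 0` lowers the failure domain from host to OSD so that size-2 pools can still reach active+clean. A hypothetical fragment for inspecting the resulting CRUSH rule (not part of the suite):

    # hypothetical debug fragment (not in the suite)
    tasks:
    - exec:
        client.0:
        - ceph osd crush rule dump | grep -i chooseleaf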
diff --git a/src/ceph/qa/suites/dummy/% b/src/ceph/qa/suites/dummy/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/dummy/%
diff --git a/src/ceph/qa/suites/dummy/all/nop.yaml b/src/ceph/qa/suites/dummy/all/nop.yaml
new file mode 100644
index 0000000..0f00ffc
--- /dev/null
+++ b/src/ceph/qa/suites/dummy/all/nop.yaml
@@ -0,0 +1,6 @@
+roles:
+ - [mon.a, mgr.x, mds.a, osd.0, osd.1, client.0]
+
+tasks:
+ - nop:
+
diff --git a/src/ceph/qa/suites/experimental/multimds/% b/src/ceph/qa/suites/experimental/multimds/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/experimental/multimds/%
diff --git a/src/ceph/qa/suites/experimental/multimds/clusters/7-multimds.yaml b/src/ceph/qa/suites/experimental/multimds/clusters/7-multimds.yaml
new file mode 100644
index 0000000..d4bb141
--- /dev/null
+++ b/src/ceph/qa/suites/experimental/multimds/clusters/7-multimds.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mgr.x, mds.a, mds.a-s]
+- [mon.b, mgr.y, mds.b, mds.b-s]
+- [mon.c, mgr.z, mds.c, mds.c-s]
+- [osd.0]
+- [osd.1]
+- [osd.2]
+- [client.0]
diff --git a/src/ceph/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml b/src/ceph/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml
new file mode 100644
index 0000000..bee01a8
--- /dev/null
+++ b/src/ceph/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml
@@ -0,0 +1,15 @@
+tasks:
+- install:
+- ceph:
+ conf:
+ mds:
+ mds thrash exports: 1
+ mds debug subtrees: 1
+ mds debug scatterstat: 1
+ mds verify scatter: 1
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - suites/fsstress.sh
+
diff --git a/src/ceph/qa/suites/fs/32bits/% b/src/ceph/qa/suites/fs/32bits/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/%
diff --git a/src/ceph/qa/suites/fs/32bits/begin.yaml b/src/ceph/qa/suites/fs/32bits/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml b/src/ceph/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml
new file mode 120000
index 0000000..c25795f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/32bits/mount/fuse.yaml b/src/ceph/qa/suites/fs/32bits/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/32bits/objectstore-ec b/src/ceph/qa/suites/fs/32bits/objectstore-ec
new file mode 120000
index 0000000..15dc98f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/32bits/overrides/+ b/src/ceph/qa/suites/fs/32bits/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/overrides/+
diff --git a/src/ceph/qa/suites/fs/32bits/overrides/debug.yaml b/src/ceph/qa/suites/fs/32bits/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/32bits/overrides/faked-ino.yaml b/src/ceph/qa/suites/fs/32bits/overrides/faked-ino.yaml
new file mode 100644
index 0000000..102df68
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/overrides/faked-ino.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ client use faked inos: true
diff --git a/src/ceph/qa/suites/fs/32bits/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/32bits/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/32bits/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/32bits/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 120000
index 0000000..dc3fd30
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml b/src/ceph/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..a1e2ada
--- /dev/null
+++ b/src/ceph/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse set user groups: true
+ fuse default permissions: false
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
diff --git a/src/ceph/qa/suites/fs/basic_functional/% b/src/ceph/qa/suites/fs/basic_functional/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/%
diff --git a/src/ceph/qa/suites/fs/basic_functional/begin.yaml b/src/ceph/qa/suites/fs/basic_functional/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_functional/clusters/4-remote-clients.yaml b/src/ceph/qa/suites/fs/basic_functional/clusters/4-remote-clients.yaml
new file mode 100644
index 0000000..1c540a4
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/clusters/4-remote-clients.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, mds.a, mds.b, client.1, client.2, client.3]
+- [client.0, osd.4, osd.5, osd.6, osd.7]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+log-rotate:
+ ceph-mds: 10G
+ ceph-osd: 10G
diff --git a/src/ceph/qa/suites/fs/basic_functional/mount/fuse.yaml b/src/ceph/qa/suites/fs/basic_functional/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_functional/objectstore/bluestore-ec-root.yaml b/src/ceph/qa/suites/fs/basic_functional/objectstore/bluestore-ec-root.yaml
new file mode 120000
index 0000000..36a4d69
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/objectstore/bluestore-ec-root.yaml
@@ -0,0 +1 @@
+../../../../cephfs/objectstore-ec/bluestore-ec-root.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_functional/objectstore/bluestore.yaml b/src/ceph/qa/suites/fs/basic_functional/objectstore/bluestore.yaml
new file mode 120000
index 0000000..bd7d7e0
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_functional/overrides/+ b/src/ceph/qa/suites/fs/basic_functional/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/overrides/+
diff --git a/src/ceph/qa/suites/fs/basic_functional/overrides/debug.yaml b/src/ceph/qa/suites/fs/basic_functional/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_functional/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/basic_functional/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_functional/overrides/no_client_pidfile.yaml b/src/ceph/qa/suites/fs/basic_functional/overrides/no_client_pidfile.yaml
new file mode 120000
index 0000000..7b8e4bd
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/overrides/no_client_pidfile.yaml
@@ -0,0 +1 @@
+../../../../overrides/no_client_pidfile.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_functional/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/basic_functional/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_functional/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/basic_functional/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml
new file mode 100644
index 0000000..94d5cc6
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml
@@ -0,0 +1,20 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - bad backtrace
+ - object missing on disk
+ - error reading table object
+ - error reading sessionmap
+ - unmatched fragstat
+ - unmatched rstat
+ - was unreadable, recreating it now
+ - Scrub error on inode
+ - Metadata damage detected
+ - MDS_FAILED
+ - MDS_DAMAGE
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_recovery_pool
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml
new file mode 100644
index 0000000..7fa5614
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml
@@ -0,0 +1,4 @@
+tasks:
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_dump_tree
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml
new file mode 100644
index 0000000..90d0e7b
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - force file system read-only
+ - bad backtrace
+ - MDS in read-only mode
+ - \(MDS_READ_ONLY\)
+
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_auto_repair
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/backtrace.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/backtrace.yaml
new file mode 100644
index 0000000..d740a5f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/backtrace.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_backtrace
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/cap-flush.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/cap-flush.yaml
new file mode 100644
index 0000000..0d26dc9
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/cap-flush.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_cap_flush
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml
new file mode 100644
index 0000000..30b3a96
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - Scrub error on inode
+ - Behind on trimming
+ - Metadata damage detected
+ - overall HEALTH_
+      - \(MDS_TRIM\)
+ conf:
+ mds:
+ mds log max segments: 1
+ mds cache max size: 1000
+tasks:
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_scrub_checks
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml
new file mode 100644
index 0000000..8801454
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+ timeout: 6h
+ clients:
+ all:
+ - fs/quota
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml
new file mode 100644
index 0000000..635d0b6
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml
@@ -0,0 +1,19 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - responding to mclientcaps\(revoke\)
+ - not advance its oldest_client_tid
+ - failing to advance its oldest client/flush tid
+ - Too many inodes in cache
+ - failing to respond to cache pressure
+ - slow requests are blocked
+ - failing to respond to capability release
+ - MDS cache is too large
+ - \(MDS_CLIENT_OLDEST_TID\)
+ - \(MDS_CACHE_OVERSIZED\)
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_client_limits
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/client-readahad.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/client-readahad.yaml
new file mode 100644
index 0000000..1d178e5
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/client-readahad.yaml
@@ -0,0 +1,4 @@
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_readahead
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml
new file mode 100644
index 0000000..f5e9a0b
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml
@@ -0,0 +1,14 @@
+
+# The task interferes with the network, so we need
+# to permit OSDs to complain about that.
+overrides:
+ ceph:
+ log-whitelist:
+ - evicting unresponsive client
+ - but it is still running
+ - slow request
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_client_recovery
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/config-commands.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/config-commands.yaml
new file mode 100644
index 0000000..2f51801
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/config-commands.yaml
@@ -0,0 +1,11 @@
+
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_config_commands
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml
new file mode 100644
index 0000000..3f4aac9
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml
@@ -0,0 +1,25 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - bad backtrace
+ - object missing on disk
+ - error reading table object
+ - error reading sessionmap
+ - Error loading MDS rank
+ - missing journal object
+ - Error recovering journal
+ - error decoding table object
+ - failed to read JournalPointer
+ - Corrupt directory entry
+ - Corrupt fnode header
+ - corrupt sessionmap header
+ - Corrupt dentry
+ - Scrub error on inode
+ - Metadata damage detected
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_damage
+
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml
new file mode 100644
index 0000000..64c8a23
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml
@@ -0,0 +1,19 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - bad backtrace
+ - object missing on disk
+ - error reading table object
+ - error reading sessionmap
+ - unmatched fragstat
+ - unmatched rstat
+ - was unreadable, recreating it now
+ - Scrub error on inode
+ - Metadata damage detected
+ - inconsistent rstat on inode
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_data_scan
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml
new file mode 100644
index 0000000..b92cf10
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - inode wrongly marked free
+ - bad backtrace on inode
+ - inode table repaired for inode
+ - Scrub error on inode
+ - Metadata damage detected
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_forward_scrub
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/fragment.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/fragment.yaml
new file mode 100644
index 0000000..482caad
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/fragment.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_fragment
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml
new file mode 100644
index 0000000..66f819d
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - bad backtrace on directory inode
+ - error reading table object
+ - Metadata damage detected
+ - slow requests are blocked
+ - Behind on trimming
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_journal_repair
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_java.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_java.yaml
new file mode 100644
index 0000000..aaffa03
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_java.yaml
@@ -0,0 +1,14 @@
+
+os_type: ubuntu
+os_version: "14.04"
+
+overrides:
+ ceph-fuse:
+ disabled: true
+ kclient:
+ disabled: true
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs-java/test.sh
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml
new file mode 100644
index 0000000..e5cbb14
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph-fuse:
+ disabled: true
+ kclient:
+ disabled: true
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - fs/test_python.sh
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/mds-flush.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/mds-flush.yaml
new file mode 100644
index 0000000..d59a8ad
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/mds-flush.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_flush
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml
new file mode 100644
index 0000000..5373500
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml
@@ -0,0 +1,32 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - OSD full dropping all updates
+ - OSD near full
+ - failsafe engaged, dropping updates
+ - failsafe disengaged, no longer dropping
+ - is full \(reached quota
+ conf:
+ mon:
+ mon osd nearfull ratio: 0.6
+ mon osd backfillfull ratio: 0.6
+ mon osd full ratio: 0.7
+ osd:
+ osd mon report interval max: 5
+ osd objectstore: memstore
+ osd failsafe full ratio: 1.0
+ memstore device bytes: 200000000
+ client.0:
+ debug client: 20
+ debug objecter: 20
+ debug objectcacher: 20
+ client.1:
+ debug client: 20
+ debug objecter: 20
+ debug objectcacher: 20
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_full
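The unusually low ratios only make sense against the tiny memstore backend: with memstore device bytes at 200000000, each OSD holds roughly 200 MB, so the thresholds trip quickly and the full-cluster paths get exercised in minutes.

    # capacity arithmetic behind this fragment (sketch)
    #   memstore device bytes: 200000000   -> ~200 MB per OSD
    #   mon osd nearfull ratio: 0.6        -> nearfull at ~0.6 * 200 MB = 120 MB
    #   mon osd full ratio: 0.7            -> full at ~0.7 * 200 MB = 140 MB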
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml
new file mode 100644
index 0000000..fd23aa8
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml
@@ -0,0 +1,6 @@
+tasks:
+- mds_creation_failure:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
+
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/pool-perm.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/pool-perm.yaml
new file mode 100644
index 0000000..f220626
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/pool-perm.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_pool_perm
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/quota.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/quota.yaml
new file mode 100644
index 0000000..89b10ce
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/quota.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_quota
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/sessionmap.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/sessionmap.yaml
new file mode 100644
index 0000000..054fdb7
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/sessionmap.yaml
@@ -0,0 +1,13 @@
+
+overrides:
+ ceph:
+ conf:
+ global:
+ ms type: simple
+ log-whitelist:
+ - client session with invalid root
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_sessionmap
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/strays.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/strays.yaml
new file mode 100644
index 0000000..2809fc1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/strays.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_strays
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml
new file mode 100644
index 0000000..183ef38
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_journal_migration
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml
new file mode 100644
index 0000000..e8c850a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml
@@ -0,0 +1,11 @@
+
+overrides:
+ ceph:
+ conf:
+ global:
+ ms type: simple
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_volume_client
diff --git a/src/ceph/qa/suites/fs/basic_workload/% b/src/ceph/qa/suites/fs/basic_workload/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/%
diff --git a/src/ceph/qa/suites/fs/basic_workload/begin.yaml b/src/ceph/qa/suites/fs/basic_workload/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/clusters/fixed-2-ucephfs.yaml b/src/ceph/qa/suites/fs/basic_workload/clusters/fixed-2-ucephfs.yaml
new file mode 120000
index 0000000..c25795f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/inline/no.yaml b/src/ceph/qa/suites/fs/basic_workload/inline/no.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/inline/no.yaml
diff --git a/src/ceph/qa/suites/fs/basic_workload/inline/yes.yaml b/src/ceph/qa/suites/fs/basic_workload/inline/yes.yaml
new file mode 100644
index 0000000..ae5222f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/inline/yes.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph mds set inline_data true --yes-i-really-mean-it
diff --git a/src/ceph/qa/suites/fs/basic_workload/mount/fuse.yaml b/src/ceph/qa/suites/fs/basic_workload/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/objectstore-ec b/src/ceph/qa/suites/fs/basic_workload/objectstore-ec
new file mode 120000
index 0000000..a330d66
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/omap_limit/10.yaml b/src/ceph/qa/suites/fs/basic_workload/omap_limit/10.yaml
new file mode 100644
index 0000000..0cd2c6f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/omap_limit/10.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd_max_omap_entries_per_request: 10 \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/omap_limit/10000.yaml b/src/ceph/qa/suites/fs/basic_workload/omap_limit/10000.yaml
new file mode 100644
index 0000000..0c7e4cf
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/omap_limit/10000.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd_max_omap_entries_per_request: 10000 \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/overrides/+ b/src/ceph/qa/suites/fs/basic_workload/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/overrides/+
diff --git a/src/ceph/qa/suites/fs/basic_workload/overrides/debug.yaml b/src/ceph/qa/suites/fs/basic_workload/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/basic_workload/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/basic_workload/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/basic_workload/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_kernel_untar_build.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_kernel_untar_build.yaml
new file mode 100644
index 0000000..1e71bb4
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_kernel_untar_build.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse_default_permissions: 0
+tasks:
+- check-counter:
+ counters:
+ mds:
+ - "mds.dir_split"
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
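check-counter fails the job unless the listed MDS perf counters ticked, so this task asserts that the untar-and-build workload actually triggered directory fragmentation (mds.dir_split), which the frag_enable override turns on. A hypothetical manual spot-check of the same counter via the admin socket (not part of the suite; assumes mds.a runs on the node where the command executes):

    # hypothetical spot-check (not in the suite)
    tasks:
    - exec:
        mon.a:
        - sudo ceph daemon mds.a perf dump | grep dir_split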
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml
new file mode 100644
index 0000000..fac769e
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml
@@ -0,0 +1,11 @@
+tasks:
+- check-counter:
+ counters:
+ mds:
+ - "mds.dir_split"
+- workunit:
+ timeout: 6h
+ clients:
+ all:
+ - fs/misc
+
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc_test_o_trunc.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc_test_o_trunc.yaml
new file mode 100644
index 0000000..c9de5c3
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc_test_o_trunc.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - fs/test_o_trunc.sh
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml
new file mode 100644
index 0000000..bfed71c
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml
@@ -0,0 +1,16 @@
+tasks:
+- check-counter:
+ counters:
+ mds:
+ - "mds.dir_split"
+- workunit:
+ timeout: 6h
+ clients:
+ all:
+ - fs/norstats
+
+overrides:
+ ceph:
+ conf:
+ client:
+ client dirsize rbytes: false
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_blogbench.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_blogbench.yaml
new file mode 120000
index 0000000..8f2e88a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_blogbench.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_blogbench.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_dbench.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_dbench.yaml
new file mode 120000
index 0000000..87c056d
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_dbench.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_dbench.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_ffsb.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_ffsb.yaml
new file mode 120000
index 0000000..3528bad
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_ffsb.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_ffsb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 120000
index 0000000..dc3fd30
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsx.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..b16cfb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsx.yaml
@@ -0,0 +1,9 @@
+tasks:
+- check-counter:
+ counters:
+ mds:
+ - "mds.dir_split"
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsync.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsync.yaml
new file mode 100644
index 0000000..7efa1ad
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsync.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iogen.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iogen.yaml
new file mode 100644
index 0000000..8d4c271
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iogen.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/iogen.sh
+
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iozone.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..9270f3c
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iozone.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..7cb0b0f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ debug ms: 1
+ debug client: 20
+ fuse set user groups: true
+ fuse default permissions: false
+ mds:
+ debug ms: 1
+ debug mds: 20
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_truncate_delay.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_truncate_delay.yaml
new file mode 100644
index 0000000..b47b565
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_truncate_delay.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ ms_inject_delay_probability: 1
+ ms_inject_delay_type: osd
+ ms_inject_delay_max: 5
+ client_oc_max_dirty_age: 1
+tasks:
+- exec:
+ client.0:
+ - cd $TESTDIR/mnt.* && dd if=/dev/zero of=./foo count=100
+ - sleep 2
+ - cd $TESTDIR/mnt.* && truncate --size 0 ./foo
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_trivial_sync.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_trivial_sync.yaml
new file mode 120000
index 0000000..55a4c85
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_trivial_sync.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_trivial_sync.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/basic_workload/tasks/libcephfs_interface_tests.yaml b/src/ceph/qa/suites/fs/basic_workload/tasks/libcephfs_interface_tests.yaml
new file mode 120000
index 0000000..582815a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_workload/tasks/libcephfs_interface_tests.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/libcephfs_interface_tests.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multiclient/% b/src/ceph/qa/suites/fs/multiclient/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/%
diff --git a/src/ceph/qa/suites/fs/multiclient/begin.yaml b/src/ceph/qa/suites/fs/multiclient/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multiclient/clusters/three_clients.yaml b/src/ceph/qa/suites/fs/multiclient/clusters/three_clients.yaml
new file mode 100644
index 0000000..a533af5
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/clusters/three_clients.yaml
@@ -0,0 +1,15 @@
+roles:
+- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2, osd.3]
+- [client.2]
+- [client.1]
+- [client.0]
+
+openstack:
+- volumes: # attached to each instance
+ count: 1
+ size: 10 # GB
+
+log-rotate:
+ ceph-mds: 10G
+ ceph-osd: 10G
+
diff --git a/src/ceph/qa/suites/fs/multiclient/clusters/two_clients.yaml b/src/ceph/qa/suites/fs/multiclient/clusters/two_clients.yaml
new file mode 100644
index 0000000..00f3815
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/clusters/two_clients.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2, osd.3]
+- [client.1]
+- [client.0]
+
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+log-rotate:
+ ceph-mds: 10G
+ ceph-osd: 10G
+
diff --git a/src/ceph/qa/suites/fs/multiclient/mount/fuse.yaml b/src/ceph/qa/suites/fs/multiclient/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multiclient/mount/kclient.yaml.disabled b/src/ceph/qa/suites/fs/multiclient/mount/kclient.yaml.disabled
new file mode 100644
index 0000000..f00f16a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/mount/kclient.yaml.disabled
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- kclient:
diff --git a/src/ceph/qa/suites/fs/multiclient/objectstore-ec b/src/ceph/qa/suites/fs/multiclient/objectstore-ec
new file mode 120000
index 0000000..a330d66
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multiclient/overrides/+ b/src/ceph/qa/suites/fs/multiclient/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/overrides/+
diff --git a/src/ceph/qa/suites/fs/multiclient/overrides/debug.yaml b/src/ceph/qa/suites/fs/multiclient/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multiclient/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/multiclient/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multiclient/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/multiclient/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multiclient/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/multiclient/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml b/src/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml
new file mode 100644
index 0000000..cb84e64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml
@@ -0,0 +1,10 @@
+tasks:
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_misc
+
+overrides:
+ ceph:
+ log-whitelist:
+ - evicting unresponsive client
+ - POOL_APP_NOT_ENABLED
diff --git a/src/ceph/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled b/src/ceph/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled
new file mode 100644
index 0000000..266447d
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled
@@ -0,0 +1,20 @@
+# make sure we get the same MPI version on all hosts
+os_type: ubuntu
+os_version: "14.04"
+
+tasks:
+- pexec:
+ clients:
+ - cd $TESTDIR
+ - wget http://download.ceph.com/qa/fsx-mpi.c
+ - mpicc fsx-mpi.c -o fsx-mpi
+ - rm fsx-mpi.c
+ - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
+- ssh_keys:
+- mpi:
+ exec: sudo $TESTDIR/fsx-mpi -o 1MB -N 50000 -p 10000 -l 1048576 $TESTDIR/gmnt/test
+ workdir: $TESTDIR/gmnt
+- pexec:
+ all:
+ - rm $TESTDIR/gmnt
+ - rm $TESTDIR/fsx-mpi
diff --git a/src/ceph/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml b/src/ceph/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml
new file mode 100644
index 0000000..94501b2
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml
@@ -0,0 +1,26 @@
+# make sure we get the same MPI version on all hosts
+os_type: ubuntu
+os_version: "14.04"
+
+tasks:
+- pexec:
+ clients:
+ - cd $TESTDIR
+ - wget http://download.ceph.com/qa/ior.tbz2
+ - tar xvfj ior.tbz2
+ - cd ior
+ - ./configure
+ - make
+ - make install DESTDIR=$TESTDIR/binary/
+ - cd $TESTDIR/
+ - rm ior.tbz2
+ - rm -r ior
+ - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
+- ssh_keys:
+- mpi:
+ exec: $TESTDIR/binary/usr/local/bin/ior -e -w -r -W -b 10m -a POSIX -o $TESTDIR/gmnt/ior.testfile
+- pexec:
+ all:
+ - rm -f $TESTDIR/gmnt/ior.testfile
+ - rm -f $TESTDIR/gmnt
+ - rm -rf $TESTDIR/binary
diff --git a/src/ceph/qa/suites/fs/multiclient/tasks/mdtest.yaml b/src/ceph/qa/suites/fs/multiclient/tasks/mdtest.yaml
new file mode 100644
index 0000000..fd337bd
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multiclient/tasks/mdtest.yaml
@@ -0,0 +1,23 @@
+# make sure we get the same MPI version on all hosts
+os_type: ubuntu
+os_version: "14.04"
+
+tasks:
+- pexec:
+ clients:
+ - cd $TESTDIR
+ - wget http://download.ceph.com/qa/mdtest-1.9.3.tgz
+ - mkdir mdtest-1.9.3
+ - cd mdtest-1.9.3
+ - tar xvfz $TESTDIR/mdtest-1.9.3.tgz
+ - rm $TESTDIR/mdtest-1.9.3.tgz
+ - MPI_CC=mpicc make
+ - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
+- ssh_keys:
+- mpi:
+ exec: $TESTDIR/mdtest-1.9.3/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R
+- pexec:
+ all:
+ - rm -f $TESTDIR/gmnt
+ - rm -rf $TESTDIR/mdtest-1.9.3
+ - rm -rf $TESTDIR/._mdtest-1.9.3
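For reference, a sketch of what the mdtest flags above do, as I read mdtest 1.9.x's usage (check `mdtest -h` for the authoritative wording):

    # mdtest invocation decoded (sketch)
    #   -d $TESTDIR/gmnt   work under the shared mount
    #   -I 20              items per directory in the tree
    #   -z 5               depth of the directory tree
    #   -b 2               branching factor of the tree
    #   -R                 randomly stat the created items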
diff --git a/src/ceph/qa/suites/fs/multifs/% b/src/ceph/qa/suites/fs/multifs/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/%
diff --git a/src/ceph/qa/suites/fs/multifs/begin.yaml b/src/ceph/qa/suites/fs/multifs/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multifs/clusters/2-remote-clients.yaml b/src/ceph/qa/suites/fs/multifs/clusters/2-remote-clients.yaml
new file mode 100644
index 0000000..2ae772c
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/clusters/2-remote-clients.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, mon.b, mds.a, mds.b, client.1]
+- [mds.c, mds.d, mon.c, client.0, osd.4, osd.5, osd.6, osd.7]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+log-rotate:
+ ceph-mds: 10G
+ ceph-osd: 10G
diff --git a/src/ceph/qa/suites/fs/multifs/mount/fuse.yaml b/src/ceph/qa/suites/fs/multifs/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multifs/objectstore-ec b/src/ceph/qa/suites/fs/multifs/objectstore-ec
new file mode 120000
index 0000000..a330d66
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multifs/overrides/+ b/src/ceph/qa/suites/fs/multifs/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/overrides/+
diff --git a/src/ceph/qa/suites/fs/multifs/overrides/debug.yaml b/src/ceph/qa/suites/fs/multifs/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multifs/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/multifs/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multifs/overrides/mon-debug.yaml b/src/ceph/qa/suites/fs/multifs/overrides/mon-debug.yaml
new file mode 100644
index 0000000..24b454c
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/overrides/mon-debug.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ debug mon: 20
diff --git a/src/ceph/qa/suites/fs/multifs/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/multifs/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multifs/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/multifs/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/multifs/tasks/failover.yaml b/src/ceph/qa/suites/fs/multifs/tasks/failover.yaml
new file mode 100644
index 0000000..8833fd6
--- /dev/null
+++ b/src/ceph/qa/suites/fs/multifs/tasks/failover.yaml
@@ -0,0 +1,12 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - not responding, replacing
+ - \(MDS_INSUFFICIENT_STANDBY\)
+ ceph-fuse:
+ disabled: true
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_failover
+
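
The log-whitelist entries above are patterns matched against warning and
error lines in the cluster log: anything they match is treated as expected
noise from deliberately failed-over MDS daemons instead of grounds for
failing the run. Conceptually (a simplification of the scan the framework
performs at teardown):

    # Sketch: filter whitelisted cluster-log noise before deciding pass/fail.
    import re

    whitelist = [r'not responding, replacing', r'\(MDS_INSUFFICIENT_STANDBY\)']

    def unexpected(log_lines):
        for line in log_lines:
            if ('WRN' in line or 'ERR' in line) and \
                    not any(re.search(p, line) for p in whitelist):
                yield line

    log = [
        'cluster [WRN] mds.a not responding, replacing it as rank 0',
        'cluster [WRN] Health check failed: ... (MDS_INSUFFICIENT_STANDBY)',
        'cluster [ERR] scrub mismatch on pg 1.0',
    ]
    print(list(unexpected(log)))   # only the scrub line remains
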
diff --git a/src/ceph/qa/suites/fs/permission/% b/src/ceph/qa/suites/fs/permission/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/%
diff --git a/src/ceph/qa/suites/fs/permission/begin.yaml b/src/ceph/qa/suites/fs/permission/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml b/src/ceph/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml
new file mode 120000
index 0000000..c25795f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/permission/mount/fuse.yaml b/src/ceph/qa/suites/fs/permission/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/permission/objectstore-ec b/src/ceph/qa/suites/fs/permission/objectstore-ec
new file mode 120000
index 0000000..a330d66
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/permission/overrides/+ b/src/ceph/qa/suites/fs/permission/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/overrides/+
diff --git a/src/ceph/qa/suites/fs/permission/overrides/debug.yaml b/src/ceph/qa/suites/fs/permission/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/permission/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/permission/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/permission/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/permission/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml b/src/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml
new file mode 100644
index 0000000..618498e
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml
@@ -0,0 +1,12 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse default permissions: false
+ client acl type: posix_acl
+tasks:
+- workunit:
+ clients:
+ all:
+ - fs/misc/acl.sh
+ - fs/misc/chmod.sh
diff --git a/src/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml b/src/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..2dd8ac7
--- /dev/null
+++ b/src/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,12 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse set user groups: true
+ fuse default permissions: false
+ client acl type: posix_acl
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
diff --git a/src/ceph/qa/suites/fs/snaps/% b/src/ceph/qa/suites/fs/snaps/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/%
diff --git a/src/ceph/qa/suites/fs/snaps/begin.yaml b/src/ceph/qa/suites/fs/snaps/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml b/src/ceph/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml
new file mode 120000
index 0000000..c25795f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/snaps/mount/fuse.yaml b/src/ceph/qa/suites/fs/snaps/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/snaps/objectstore-ec b/src/ceph/qa/suites/fs/snaps/objectstore-ec
new file mode 120000
index 0000000..a330d66
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/snaps/overrides/+ b/src/ceph/qa/suites/fs/snaps/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/overrides/+
diff --git a/src/ceph/qa/suites/fs/snaps/overrides/debug.yaml b/src/ceph/qa/suites/fs/snaps/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/snaps/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/snaps/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/snaps/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/snaps/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/snaps/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/snaps/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/snaps/tasks/snaptests.yaml b/src/ceph/qa/suites/fs/snaps/tasks/snaptests.yaml
new file mode 100644
index 0000000..790c93c
--- /dev/null
+++ b/src/ceph/qa/suites/fs/snaps/tasks/snaptests.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - fs/snaps
diff --git a/src/ceph/qa/suites/fs/thrash/% b/src/ceph/qa/suites/fs/thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/%
diff --git a/src/ceph/qa/suites/fs/thrash/begin.yaml b/src/ceph/qa/suites/fs/thrash/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/thrash/ceph-thrash/default.yaml b/src/ceph/qa/suites/fs/thrash/ceph-thrash/default.yaml
new file mode 100644
index 0000000..154615c
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/ceph-thrash/default.yaml
@@ -0,0 +1,7 @@
+tasks:
+- mds_thrash:
+
+overrides:
+ ceph:
+ log-whitelist:
+ - not responding, replacing
diff --git a/src/ceph/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml b/src/ceph/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml
new file mode 100644
index 0000000..d025248
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, mds.b-s-a]
+- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+log-rotate:
+ ceph-mds: 10G
+ ceph-osd: 10G
diff --git a/src/ceph/qa/suites/fs/thrash/mount/fuse.yaml b/src/ceph/qa/suites/fs/thrash/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/thrash/msgr-failures/none.yaml b/src/ceph/qa/suites/fs/thrash/msgr-failures/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/msgr-failures/none.yaml
diff --git a/src/ceph/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml b/src/ceph/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml
new file mode 100644
index 0000000..adcebc0
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml
@@ -0,0 +1,8 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+      ms inject delay type: osd mds
+ ms inject delay probability: .005
+ ms inject delay max: 1
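
These injection settings are rates, not counts: roughly one in every 2500
messenger operations is failed outright, and each message on the osd/mds path
is independently delayed (up to 1 second) with probability .005. Rough
expectations for a workload, plain arithmetic only:

    # Expected fault counts for N messages under the settings above.
    n_messages = 1_000_000
    print('expected socket failures:', n_messages / 2500)      # 400.0
    print('expected delayed messages:', n_messages * 0.005)    # 5000.0
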
diff --git a/src/ceph/qa/suites/fs/thrash/objectstore-ec b/src/ceph/qa/suites/fs/thrash/objectstore-ec
new file mode 120000
index 0000000..a330d66
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/thrash/overrides/+ b/src/ceph/qa/suites/fs/thrash/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/overrides/+
diff --git a/src/ceph/qa/suites/fs/thrash/overrides/debug.yaml b/src/ceph/qa/suites/fs/thrash/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/thrash/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/thrash/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/thrash/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/thrash/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml b/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml
new file mode 100644
index 0000000..790c93c
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - fs/snaps
diff --git a/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 120000
index 0000000..dc3fd30
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml b/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..a1e2ada
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse set user groups: true
+ fuse default permissions: false
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
diff --git a/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml b/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml
new file mode 120000
index 0000000..55a4c85
--- /dev/null
+++ b/src/ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_trivial_sync.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/% b/src/ceph/qa/suites/fs/traceless/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/%
diff --git a/src/ceph/qa/suites/fs/traceless/begin.yaml b/src/ceph/qa/suites/fs/traceless/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml b/src/ceph/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml
new file mode 120000
index 0000000..c25795f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/mount/fuse.yaml b/src/ceph/qa/suites/fs/traceless/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/objectstore-ec b/src/ceph/qa/suites/fs/traceless/objectstore-ec
new file mode 120000
index 0000000..a330d66
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/overrides/+ b/src/ceph/qa/suites/fs/traceless/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/overrides/+
diff --git a/src/ceph/qa/suites/fs/traceless/overrides/debug.yaml b/src/ceph/qa/suites/fs/traceless/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/traceless/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/traceless/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml b/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml
new file mode 120000
index 0000000..8f2e88a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_blogbench.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml b/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml
new file mode 120000
index 0000000..87c056d
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_dbench.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml b/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml
new file mode 120000
index 0000000..3528bad
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_ffsb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 120000
index 0000000..dc3fd30
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/traceless/traceless/50pc.yaml b/src/ceph/qa/suites/fs/traceless/traceless/50pc.yaml
new file mode 100644
index 0000000..e0418bc
--- /dev/null
+++ b/src/ceph/qa/suites/fs/traceless/traceless/50pc.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ mds:
+ mds inject traceless reply probability: .5
diff --git a/src/ceph/qa/suites/fs/verify/% b/src/ceph/qa/suites/fs/verify/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/%
diff --git a/src/ceph/qa/suites/fs/verify/begin.yaml b/src/ceph/qa/suites/fs/verify/begin.yaml
new file mode 120000
index 0000000..0c4ae31
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/begin.yaml
@@ -0,0 +1 @@
+../../../cephfs/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml b/src/ceph/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml
new file mode 120000
index 0000000..c25795f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/mount/fuse.yaml b/src/ceph/qa/suites/fs/verify/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/objectstore-ec b/src/ceph/qa/suites/fs/verify/objectstore-ec
new file mode 120000
index 0000000..a330d66
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/overrides/+ b/src/ceph/qa/suites/fs/verify/overrides/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/overrides/+
diff --git a/src/ceph/qa/suites/fs/verify/overrides/debug.yaml b/src/ceph/qa/suites/fs/verify/overrides/debug.yaml
new file mode 120000
index 0000000..9bc8eb1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/overrides/debug.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/debug.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/overrides/frag_enable.yaml b/src/ceph/qa/suites/fs/verify/overrides/frag_enable.yaml
new file mode 120000
index 0000000..e9b2d64
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/frag_enable.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/overrides/mon-debug.yaml b/src/ceph/qa/suites/fs/verify/overrides/mon-debug.yaml
new file mode 100644
index 0000000..6ed3e6d
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/overrides/mon-debug.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ debug ms: 1
+ debug mon: 20
diff --git a/src/ceph/qa/suites/fs/verify/overrides/whitelist_health.yaml b/src/ceph/qa/suites/fs/verify/overrides/whitelist_health.yaml
new file mode 120000
index 0000000..440e747
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml b/src/ceph/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 0000000..a26a657
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml b/src/ceph/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml
new file mode 120000
index 0000000..87c056d
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_dbench.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 120000
index 0000000..dc3fd30
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/fs/verify/validater/lockdep.yaml b/src/ceph/qa/suites/fs/verify/validater/lockdep.yaml
new file mode 100644
index 0000000..25f8435
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/validater/lockdep.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
diff --git a/src/ceph/qa/suites/fs/verify/validater/valgrind.yaml b/src/ceph/qa/suites/fs/verify/validater/valgrind.yaml
new file mode 100644
index 0000000..6982ced
--- /dev/null
+++ b/src/ceph/qa/suites/fs/verify/validater/valgrind.yaml
@@ -0,0 +1,24 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+overrides:
+  install:
+    ceph:
+      flavor: notcmalloc
+      debuginfo: true
+  ceph:
+    # Valgrind makes everything slow, so ignore slow requests
+    log-whitelist:
+      - slow requests are blocked
+    conf:
+      global:
+        osd heartbeat grace: 40
+      mon:
+        mon osd crush smoke test: false
+    valgrind:
+      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+      osd: [--tool=memcheck]
+      mds: [--tool=memcheck]
+  ceph-fuse:
+    client.0:
+      valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
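
valgrind.yaml keeps everything under one overrides: mapping deliberately:
YAML forbids duplicate keys in a mapping, and PyYAML (used to load these
fragments) silently keeps only the last occurrence, so a second top-level
overrides: block would discard the log-whitelist. A quick demonstration of
that pitfall:

    # Duplicate top-level keys are silently collapsed; the whitelist is lost.
    import yaml

    doc = ("overrides:\n"
           "  ceph:\n"
           "    log-whitelist: [slow requests are blocked]\n"
           "overrides:\n"
           "  install:\n"
           "    ceph: {flavor: notcmalloc}\n")
    print(yaml.safe_load(doc))
    # {'overrides': {'install': {'ceph': {'flavor': 'notcmalloc'}}}}
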
diff --git a/src/ceph/qa/suites/hadoop/basic/% b/src/ceph/qa/suites/hadoop/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/hadoop/basic/%
diff --git a/src/ceph/qa/suites/hadoop/basic/clusters/fixed-3.yaml b/src/ceph/qa/suites/hadoop/basic/clusters/fixed-3.yaml
new file mode 100644
index 0000000..6fc0af6
--- /dev/null
+++ b/src/ceph/qa/suites/hadoop/basic/clusters/fixed-3.yaml
@@ -0,0 +1,17 @@
+
+os_type: ubuntu
+os_version: "14.04"
+
+overrides:
+ ceph:
+ conf:
+ client:
+ client permissions: false
+roles:
+- [mon.0, mds.0, osd.0, hadoop.master.0]
+- [mon.1, mgr.x, osd.1, hadoop.slave.0]
+- [mon.2, mgr.y, hadoop.slave.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 1
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/hadoop/basic/filestore-xfs.yaml b/src/ceph/qa/suites/hadoop/basic/filestore-xfs.yaml
new file mode 120000
index 0000000..59ef7e4
--- /dev/null
+++ b/src/ceph/qa/suites/hadoop/basic/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../objectstore/filestore-xfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/hadoop/basic/tasks/repl.yaml b/src/ceph/qa/suites/hadoop/basic/tasks/repl.yaml
new file mode 100644
index 0000000..60cdcca
--- /dev/null
+++ b/src/ceph/qa/suites/hadoop/basic/tasks/repl.yaml
@@ -0,0 +1,8 @@
+tasks:
+- ssh_keys:
+- install:
+- ceph:
+- hadoop:
+- workunit:
+ clients:
+ client.0: [hadoop/repl.sh]
diff --git a/src/ceph/qa/suites/hadoop/basic/tasks/terasort.yaml b/src/ceph/qa/suites/hadoop/basic/tasks/terasort.yaml
new file mode 100644
index 0000000..4377894
--- /dev/null
+++ b/src/ceph/qa/suites/hadoop/basic/tasks/terasort.yaml
@@ -0,0 +1,10 @@
+tasks:
+- ssh_keys:
+- install:
+- ceph:
+- hadoop:
+- workunit:
+ clients:
+ client.0: [hadoop/terasort.sh]
+ env:
+ NUM_RECORDS: "10000000"
diff --git a/src/ceph/qa/suites/hadoop/basic/tasks/wordcount.yaml b/src/ceph/qa/suites/hadoop/basic/tasks/wordcount.yaml
new file mode 100644
index 0000000..b84941b
--- /dev/null
+++ b/src/ceph/qa/suites/hadoop/basic/tasks/wordcount.yaml
@@ -0,0 +1,8 @@
+tasks:
+- ssh_keys:
+- install:
+- ceph:
+- hadoop:
+- workunit:
+ clients:
+ client.0: [hadoop/wordcount.sh]
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/% b/src/ceph/qa/suites/kcephfs/cephfs/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/%
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/clusters/fixed-3-cephfs.yaml b/src/ceph/qa/suites/kcephfs/cephfs/clusters/fixed-3-cephfs.yaml
new file mode 120000
index 0000000..a482e65
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/clusters/fixed-3-cephfs.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3-cephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/conf.yaml b/src/ceph/qa/suites/kcephfs/cephfs/conf.yaml
new file mode 100644
index 0000000..b3ef404
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ mds:
+ debug mds: 20
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/inline/no.yaml b/src/ceph/qa/suites/kcephfs/cephfs/inline/no.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/inline/no.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/inline/yes.yaml b/src/ceph/qa/suites/kcephfs/cephfs/inline/yes.yaml
new file mode 100644
index 0000000..fce64c6
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/inline/yes.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+- exec:
+ client.0:
+ - sudo ceph mds set inline_data true --yes-i-really-mean-it
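
yes.yaml turns on CephFS inline data, where small file contents are stored
with the inode in the metadata pool instead of as data-pool objects; the
feature was experimental in luminous, hence --yes-i-really-mean-it. The exec
task simply runs each listed shell command on the named client. A local
stand-in for what it does (the real task runs the command over SSH on the
remote node):

    # Sketch of the exec task: run each command as the given client role.
    import shlex, subprocess

    commands = {'client.0': ['sudo ceph mds set inline_data true --yes-i-really-mean-it']}
    for role, cmds in commands.items():
        for cmd in cmds:
            print('[%s] $ %s' % (role, cmd))
            # Against a live cluster this would be:
            # subprocess.run(shlex.split(cmd), check=True)
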
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/objectstore-ec b/src/ceph/qa/suites/kcephfs/cephfs/objectstore-ec
new file mode 120000
index 0000000..15dc98f
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml
new file mode 100644
index 0000000..cc4b32a
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml
@@ -0,0 +1,7 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - direct_io
+
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml
new file mode 100644
index 0000000..84d15f6
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml
new file mode 100644
index 0000000..e3f4fb1
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - fs/misc
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml
new file mode 100644
index 0000000..5219fc9
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml
@@ -0,0 +1,7 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - fs/test_o_trunc.sh
+
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml
new file mode 100644
index 0000000..e815800
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - fs/snaps
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..8dd810a
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..059ffe1
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..bc49fc9
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..38d9604
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml
new file mode 100644
index 0000000..452641c
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..832e024
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..09abaeb
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
diff --git a/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml
new file mode 100644
index 0000000..d317a39
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml
@@ -0,0 +1,5 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
diff --git a/src/ceph/qa/suites/kcephfs/mixed-clients/% b/src/ceph/qa/suites/kcephfs/mixed-clients/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/mixed-clients/%
diff --git a/src/ceph/qa/suites/kcephfs/mixed-clients/clusters/2-clients.yaml b/src/ceph/qa/suites/kcephfs/mixed-clients/clusters/2-clients.yaml
new file mode 100644
index 0000000..90b6cf6
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/mixed-clients/clusters/2-clients.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1]
+- [mon.b, mon.c, osd.2, osd.3]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/kcephfs/mixed-clients/conf.yaml b/src/ceph/qa/suites/kcephfs/mixed-clients/conf.yaml
new file mode 100644
index 0000000..75b8558
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/mixed-clients/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ mds:
+ debug mds: 20 \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/mixed-clients/objectstore-ec b/src/ceph/qa/suites/kcephfs/mixed-clients/objectstore-ec
new file mode 120000
index 0000000..15dc98f
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/mixed-clients/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml b/src/ceph/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml
new file mode 100644
index 0000000..0121a01
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install:
+- ceph:
+- parallel:
+ - user-workload
+ - kclient-workload
+user-workload:
+ sequential:
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/iozone.sh
+kclient-workload:
+ sequential:
+ - kclient: [client.1]
+ - workunit:
+ clients:
+ client.1:
+ - suites/dbench.sh
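
The parallel task names two top-level sequential blocks and runs them
concurrently: a FUSE mount on client.0 runs iozone while a kernel mount on
client.1 runs dbench, so both client implementations hammer the filesystem at
the same time. Stripped of teuthology plumbing, the control flow is just
(a sketch; each step stands in for a task invocation):

    # Sketch of parallel-over-sequential scheduling in the fragment above.
    from concurrent.futures import ThreadPoolExecutor

    def run(step):
        print('running', step)

    def sequential(steps):
        for step in steps:
            run(step)

    workloads = {
        'user-workload':    [('ceph-fuse', 'client.0'), ('workunit', 'suites/iozone.sh')],
        'kclient-workload': [('kclient', 'client.1'), ('workunit', 'suites/dbench.sh')],
    }
    with ThreadPoolExecutor() as pool:
        for name in ('user-workload', 'kclient-workload'):
            pool.submit(sequential, workloads[name])
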
diff --git a/src/ceph/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml b/src/ceph/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml
new file mode 100644
index 0000000..7b0ce5b
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install:
+- ceph:
+- parallel:
+ - user-workload
+ - kclient-workload
+user-workload:
+ sequential:
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+kclient-workload:
+ sequential:
+ - kclient: [client.1]
+ - workunit:
+ clients:
+ client.1:
+ - kernel_untar_build.sh
diff --git a/src/ceph/qa/suites/kcephfs/recovery/% b/src/ceph/qa/suites/kcephfs/recovery/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/%
diff --git a/src/ceph/qa/suites/kcephfs/recovery/clusters/4-remote-clients.yaml b/src/ceph/qa/suites/kcephfs/recovery/clusters/4-remote-clients.yaml
new file mode 100644
index 0000000..b1072e3
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/clusters/4-remote-clients.yaml
@@ -0,0 +1,12 @@
+roles:
+- [mon.a, osd.0, osd.1, osd.2, osd.3, mds.a, mds.c, client.2]
+- [mgr.x, osd.4, osd.5, osd.6, osd.7, mds.b, mds.d, client.3]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+log-rotate:
+ ceph-mds: 10G
+ ceph-osd: 10G
diff --git a/src/ceph/qa/suites/kcephfs/recovery/debug/mds_client.yaml b/src/ceph/qa/suites/kcephfs/recovery/debug/mds_client.yaml
new file mode 100644
index 0000000..76cc4d8
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/debug/mds_client.yaml
@@ -0,0 +1,12 @@
+overrides:
+ ceph:
+ conf:
+ mds:
+ debug ms: 1
+ debug mds: 20
+ client.0:
+ debug ms: 1
+ debug client: 20
+ client.1:
+ debug ms: 1
+ debug client: 20
diff --git a/src/ceph/qa/suites/kcephfs/recovery/dirfrag/frag_enable.yaml b/src/ceph/qa/suites/kcephfs/recovery/dirfrag/frag_enable.yaml
new file mode 100644
index 0000000..9913fa1
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/dirfrag/frag_enable.yaml
@@ -0,0 +1,11 @@
+
+overrides:
+ ceph:
+ conf:
+ mds:
+ mds bal frag: true
+ mds bal fragment size max: 10000
+ mds bal split size: 100
+ mds bal merge size: 5
+ mds bal split bits: 3
+
diff --git a/src/ceph/qa/suites/kcephfs/recovery/mounts/kmounts.yaml b/src/ceph/qa/suites/kcephfs/recovery/mounts/kmounts.yaml
new file mode 100644
index 0000000..c18db8f
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/mounts/kmounts.yaml
@@ -0,0 +1,4 @@
+tasks:
+- install:
+- ceph:
+- kclient:
diff --git a/src/ceph/qa/suites/kcephfs/recovery/objectstore-ec b/src/ceph/qa/suites/kcephfs/recovery/objectstore-ec
new file mode 120000
index 0000000..15dc98f
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml
new file mode 100644
index 0000000..90d0e7b
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - force file system read-only
+ - bad backtrace
+ - MDS in read-only mode
+ - \(MDS_READ_ONLY\)
+
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_auto_repair
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/backtrace.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/backtrace.yaml
new file mode 100644
index 0000000..d740a5f
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/backtrace.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_backtrace
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/client-limits.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/client-limits.yaml
new file mode 100644
index 0000000..f816cee
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/client-limits.yaml
@@ -0,0 +1,20 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - responding to mclientcaps\(revoke\)
+ - not advance its oldest_client_tid
+ - failing to advance its oldest client/flush tid
+ - Too many inodes in cache
+ - failing to respond to cache pressure
+ - slow requests are blocked
+ - failing to respond to capability release
+ - MDS cache is too large
+ - \(MDS_CLIENT_OLDEST_TID\)
+ - \(MDS_CACHE_OVERSIZED\)
+
+tasks:
+ - cephfs_test_runner:
+ fail_on_skip: false
+ modules:
+ - tasks.cephfs.test_client_limits
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml
new file mode 100644
index 0000000..72ce013
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml
@@ -0,0 +1,14 @@
+
+# The task interferes with the network, so we need
+# to permit OSDs to complain about that.
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - slow request
+ - evicting unresponsive client
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_client_recovery
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/config-commands.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/config-commands.yaml
new file mode 100644
index 0000000..cb00216
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/config-commands.yaml
@@ -0,0 +1,12 @@
+
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
+
+tasks:
+ - cephfs_test_runner:
+ fail_on_skip: false
+ modules:
+ - tasks.cephfs.test_config_commands
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/damage.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/damage.yaml
new file mode 100644
index 0000000..3f4aac9
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/damage.yaml
@@ -0,0 +1,25 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - bad backtrace
+ - object missing on disk
+ - error reading table object
+ - error reading sessionmap
+ - Error loading MDS rank
+ - missing journal object
+ - Error recovering journal
+ - error decoding table object
+ - failed to read JournalPointer
+ - Corrupt directory entry
+ - Corrupt fnode header
+ - corrupt sessionmap header
+ - Corrupt dentry
+ - Scrub error on inode
+ - Metadata damage detected
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_damage
+
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/data-scan.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/data-scan.yaml
new file mode 100644
index 0000000..b2cd739
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/data-scan.yaml
@@ -0,0 +1,18 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - bad backtrace
+ - object missing on disk
+ - error reading table object
+ - error reading sessionmap
+ - unmatched fragstat
+ - was unreadable, recreating it now
+ - Scrub error on inode
+ - Metadata damage detected
+ - inconsistent rstat on inode
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_data_scan
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/failover.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/failover.yaml
new file mode 100644
index 0000000..2e4655b
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/failover.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - not responding, replacing
+ - \(MDS_INSUFFICIENT_STANDBY\)
+tasks:
+ - cephfs_test_runner:
+ fail_on_skip: false
+ modules:
+ - tasks.cephfs.test_failover
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml
new file mode 100644
index 0000000..b92cf10
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - inode wrongly marked free
+ - bad backtrace on inode
+ - inode table repaired for inode
+ - Scrub error on inode
+ - Metadata damage detected
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_forward_scrub
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml
new file mode 100644
index 0000000..66f819d
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - bad backtrace on directory inode
+ - error reading table object
+ - Metadata damage detected
+ - slow requests are blocked
+ - Behind on trimming
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_journal_repair
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml
new file mode 100644
index 0000000..d59a8ad
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_flush
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/mds-full.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/mds-full.yaml
new file mode 100644
index 0000000..558a206
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/mds-full.yaml
@@ -0,0 +1,19 @@
+
+overrides:
+ ceph:
+ log-whitelist:
+ - OSD full dropping all updates
+ - OSD near full
+ - failsafe engaged, dropping updates
+ - failsafe disengaged, no longer dropping
+ - is full \(reached quota
+ conf:
+ osd:
+ osd mon report interval max: 5
+ osd objectstore: memstore
+ memstore device bytes: 100000000
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_full
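
mds-full runs the OSDs on memstore with a deliberately tiny backing store so
the cluster can be driven to full quickly, and shortens the OSD-to-mon
reporting interval so the mons notice promptly. For scale (0.95 is the stock
mon_osd_full_ratio, assumed here):

    # How small the memstore really is, and where 'full' kicks in.
    device_bytes = 100_000_000
    full_ratio = 0.95                           # assumed default full ratio
    print('%.1f MiB per OSD' % (device_bytes / 2**20))          # ~95.4 MiB
    print('full at ~%.1f MiB' % (device_bytes * full_ratio / 2**20))
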
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml
new file mode 100644
index 0000000..f220626
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_pool_perm
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml
new file mode 100644
index 0000000..9be8338
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+ ceph:
+ conf:
+ global:
+ ms type: simple
+ log-whitelist:
+ - client session with invalid root
+
+tasks:
+ - cephfs_test_runner:
+ fail_on_skip: false
+ modules:
+ - tasks.cephfs.test_sessionmap
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/strays.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/strays.yaml
new file mode 100644
index 0000000..2809fc1
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/strays.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_strays
diff --git a/src/ceph/qa/suites/kcephfs/recovery/tasks/volume-client.yaml b/src/ceph/qa/suites/kcephfs/recovery/tasks/volume-client.yaml
new file mode 100644
index 0000000..d738eeb
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/tasks/volume-client.yaml
@@ -0,0 +1,12 @@
+
+overrides:
+ ceph:
+ conf:
+ global:
+ ms type: simple
+
+tasks:
+ - cephfs_test_runner:
+ fail_on_skip: false
+ modules:
+ - tasks.cephfs.test_volume_client
diff --git a/src/ceph/qa/suites/kcephfs/recovery/whitelist_health.yaml b/src/ceph/qa/suites/kcephfs/recovery/whitelist_health.yaml
new file mode 120000
index 0000000..90ca7b6
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/recovery/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/thrash/% b/src/ceph/qa/suites/kcephfs/thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/%
diff --git a/src/ceph/qa/suites/kcephfs/thrash/clusters/fixed-3-cephfs.yaml b/src/ceph/qa/suites/kcephfs/thrash/clusters/fixed-3-cephfs.yaml
new file mode 120000
index 0000000..a482e65
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/clusters/fixed-3-cephfs.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3-cephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/thrash/conf.yaml b/src/ceph/qa/suites/kcephfs/thrash/conf.yaml
new file mode 100644
index 0000000..75b8558
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ mds:
+ debug mds: 20 \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/thrash/objectstore-ec b/src/ceph/qa/suites/kcephfs/thrash/objectstore-ec
new file mode 120000
index 0000000..15dc98f
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/thrash/thrashers/default.yaml b/src/ceph/qa/suites/kcephfs/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..e628ba6
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/thrashers/default.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+- thrashosds:
diff --git a/src/ceph/qa/suites/kcephfs/thrash/thrashers/mds.yaml b/src/ceph/qa/suites/kcephfs/thrash/thrashers/mds.yaml
new file mode 100644
index 0000000..d5d1f43
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/thrashers/mds.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- mds_thrash:
+
+overrides:
+ ceph:
+ log-whitelist:
+ - not responding, replacing
diff --git a/src/ceph/qa/suites/kcephfs/thrash/thrashers/mon.yaml b/src/ceph/qa/suites/kcephfs/thrash/thrashers/mon.yaml
new file mode 100644
index 0000000..90612f2
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/thrashers/mon.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
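
mon.yaml thrashes monitors rather than OSDs or MDS daemons: the task
repeatedly kills a mon, waits revive_delay seconds before bringing it back,
then pauses thrash_delay seconds before picking the next victim, so quorum is
continually lost and re-formed under the workload. The loop those two knobs
control, sketched (function and loop shape assumed, not teuthology's actual
code):

    # Sketch of the mon-thrashing loop parameterized above.
    import random, time

    def mon_thrash(mons, revive_delay=20, thrash_delay=1, rounds=3):
        for _ in range(rounds):
            victim = random.choice(mons)
            print('killing', victim)
            time.sleep(revive_delay)    # cluster must cope without it
            print('reviving', victim)
            time.sleep(thrash_delay)    # breather before the next victim

    mon_thrash(['mon.a', 'mon.b', 'mon.c'])
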
diff --git a/src/ceph/qa/suites/kcephfs/thrash/thrashosds-health.yaml b/src/ceph/qa/suites/kcephfs/thrash/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/thrash/whitelist_health.yaml b/src/ceph/qa/suites/kcephfs/thrash/whitelist_health.yaml
new file mode 120000
index 0000000..90ca7b6
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/whitelist_health.yaml
@@ -0,0 +1 @@
+../../../cephfs/overrides/whitelist_health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml b/src/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..0c4a152
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ filestore flush min: 0
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
diff --git a/src/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml b/src/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..832e024
--- /dev/null
+++ b/src/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml
@@ -0,0 +1,6 @@
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
diff --git a/src/ceph/qa/suites/knfs/basic/% b/src/ceph/qa/suites/knfs/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/%
diff --git a/src/ceph/qa/suites/knfs/basic/ceph/base.yaml b/src/ceph/qa/suites/knfs/basic/ceph/base.yaml
new file mode 100644
index 0000000..7c2f0fc
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/ceph/base.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+
+tasks:
+- install:
+- ceph:
+- kclient: [client.0]
+- knfsd:
+ client.0:
+ options: [rw,no_root_squash,async]
diff --git a/src/ceph/qa/suites/knfs/basic/clusters/extra-client.yaml b/src/ceph/qa/suites/knfs/basic/clusters/extra-client.yaml
new file mode 120000
index 0000000..1582e30
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/clusters/extra-client.yaml
@@ -0,0 +1 @@
+../../../../clusters/extra-client.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/knfs/basic/mount/v3.yaml b/src/ceph/qa/suites/knfs/basic/mount/v3.yaml
new file mode 100644
index 0000000..1b61119
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/mount/v3.yaml
@@ -0,0 +1,5 @@
+tasks:
+- nfs:
+ client.1:
+ server: client.0
+ options: [rw,hard,intr,nfsvers=3]
diff --git a/src/ceph/qa/suites/knfs/basic/mount/v4.yaml b/src/ceph/qa/suites/knfs/basic/mount/v4.yaml
new file mode 100644
index 0000000..8840566
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/mount/v4.yaml
@@ -0,0 +1,5 @@
+tasks:
+- nfs:
+ client.1:
+ server: client.0
+ options: [rw,hard,intr,nfsvers=4]
diff --git a/src/ceph/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml b/src/ceph/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml
new file mode 100644
index 0000000..b9c0a5e
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+ timeout: 6h
+ clients:
+ client.1:
+ - kernel_untar_build.sh
diff --git a/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml
new file mode 100644
index 0000000..135c4a7
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml
@@ -0,0 +1,11 @@
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - fs/misc/chmod.sh
+ - fs/misc/i_complete_vs_rename.sh
+ - fs/misc/trivial_sync.sh
+ #- fs/misc/multiple_rsync.sh
+ #- fs/misc/xattrs.sh
+# Once we can run multiple_rsync.sh and xattrs.sh we can change to this
+# - misc
diff --git a/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..e554a3d
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/blogbench.sh
diff --git a/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..1da1b76
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/dbench-short.sh
diff --git a/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..3090f91
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ filestore flush min: 0
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/ffsb.sh
diff --git a/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..bbe7b7a
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..7c3eec2
--- /dev/null
+++ b/src/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/iozone.sh
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/% b/src/ceph/qa/suites/krbd/rbd-nomount/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/%
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml
new file mode 120000
index 0000000..a3ac9fc
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/conf.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/conf.yaml
new file mode 100644
index 0000000..8279674
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ client:
+ rbd default features: 5
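
rbd default features: 5 is a bitmask, layering (1) plus exclusive-lock (4);
the other image features (object-map, fast-diff, deep-flatten, journaling)
stay off because the kernel RBD client exercised by this suite does not
support them. Decoding the mask:

    # Decode the rbd feature bitmask set above.
    FEATURES = {1: 'layering', 2: 'striping', 4: 'exclusive-lock',
                8: 'object-map', 16: 'fast-diff', 32: 'deep-flatten',
                64: 'journaling'}
    value = 5
    print([name for bit, name in FEATURES.items() if value & bit])
    # ['layering', 'exclusive-lock']
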
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/install/ceph.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/install/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/install/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_data_pool.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_data_pool.yaml
new file mode 100644
index 0000000..1dab397
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_data_pool.yaml
@@ -0,0 +1,23 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd: # force bluestore since it's required for ec overwrites
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 30
+        debug bdev: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        enable experimental unrecoverable data corrupting features: "*"
+        osd debug randomize hobject sort order: false
+# this doesn't work with failures because the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
+tasks:
+- workunit:
+    clients:
+      all:
+      - rbd/krbd_data_pool.sh
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_exclusive_option.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_exclusive_option.yaml
new file mode 100644
index 0000000..567deeb
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_exclusive_option.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/krbd_exclusive_option.sh
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_fallocate.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_fallocate.yaml
new file mode 100644
index 0000000..a728698
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_fallocate.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/krbd_fallocate.sh
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml
new file mode 100644
index 0000000..675b98e
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml
@@ -0,0 +1,10 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/concurrent.sh
+# Options for rbd/concurrent.sh (default values shown)
+# env:
+# RBD_CONCURRENT_ITER: 100
+# RBD_CONCURRENT_COUNT: 5
+# RBD_CONCURRENT_DELAY: 5
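The commented block documents the knobs rbd/concurrent.sh reads; to change them, an `env` mapping is uncommented under the workunit task. A sketch with the documented defaults made explicit (values as strings, since they are exported as environment variables):

    tasks:
    - workunit:
        clients:
          all:
          - rbd/concurrent.sh
        env:
          RBD_CONCURRENT_ITER: '100'
          RBD_CONCURRENT_COUNT: '5'
          RBD_CONCURRENT_DELAY: '5'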
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml
new file mode 100644
index 0000000..ea421ee
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/huge-tickets.sh
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml
new file mode 100644
index 0000000..e5017e1
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml
@@ -0,0 +1,15 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/image_read.sh
+# Options for rbd/image_read.sh (default values shown)
+# env:
+# IMAGE_READ_LOCAL_FILES: 'false'
+# IMAGE_READ_FORMAT: '2'
+# IMAGE_READ_VERBOSE: 'true'
+# IMAGE_READ_PAGE_SIZE: '4096'
+# IMAGE_READ_OBJECT_ORDER: '22'
+# IMAGE_READ_TEST_CLONES: 'true'
+# IMAGE_READ_DOUBLE_ORDER: 'true'
+# IMAGE_READ_HALF_ORDER: 'false'
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml
new file mode 100644
index 0000000..aa15582
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/kernel.sh
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_kfsx.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_kfsx.yaml
new file mode 100644
index 0000000..0f4b24a
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_kfsx.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 10000
+ krbd: true
+ readbdy: 512
+ writebdy: 512
+ truncbdy: 512
+ holebdy: 512
+ punch_holes: true
+ randomized_striping: false
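rbd_fsx here drives the fsx consistency checker against a kernel-mapped image (`krbd: true`) for 10000 operations. The *bdy values align every read, write, truncate, and hole-punch to 512-byte boundaries, matching the kernel block layer's sector size, and `randomized_striping: false` keeps the image layout fixed so any failure is reproducible.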
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml
new file mode 100644
index 0000000..c152939
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/map-snapshot-io.sh
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml
new file mode 100644
index 0000000..c216099
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/map-unmap.sh
diff --git a/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml
new file mode 100644
index 0000000..c493cfa
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/simple_big.sh
+
diff --git a/src/ceph/qa/suites/krbd/rbd/% b/src/ceph/qa/suites/krbd/rbd/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/%
diff --git a/src/ceph/qa/suites/krbd/rbd/clusters/fixed-3.yaml b/src/ceph/qa/suites/krbd/rbd/clusters/fixed-3.yaml
new file mode 120000
index 0000000..a3ac9fc
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/clusters/fixed-3.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/krbd/rbd/conf.yaml b/src/ceph/qa/suites/krbd/rbd/conf.yaml
new file mode 100644
index 0000000..8279674
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ client:
+ rbd default features: 5
diff --git a/src/ceph/qa/suites/krbd/rbd/msgr-failures/few.yaml b/src/ceph/qa/suites/krbd/rbd/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/krbd/rbd/msgr-failures/many.yaml b/src/ceph/qa/suites/krbd/rbd/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/krbd/rbd/tasks/rbd_fio.yaml b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_fio.yaml
new file mode 100644
index 0000000..01088fa
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_fio.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph: null
+- rbd_fio:
+ client.0:
+ fio-io-size: 90%
+ formats: [2]
+ features: [[layering,exclusive-lock]]
+ io-engine: sync
+ rw: randrw
+ runtime: 900
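`- ceph: null` is just YAML's explicit empty value and is equivalent to the bare `- ceph:` used elsewhere in this patch:

    tasks:
    - install:
    - ceph:        # same task, no per-task config; "- ceph: null" is identical

The rbd_fio task then creates an image per entry in `formats` x `features`, maps it through krbd, and runs fio over 90% of the image with synchronous I/O for 900 seconds; compare the thrash workload later in this patch, which switches to libaio with a deep queue.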
diff --git a/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml
new file mode 100644
index 0000000..ef2a35d
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
diff --git a/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..d779eea
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
diff --git a/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..5204bb8
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
diff --git a/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..f9d62fe
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml
new file mode 100644
index 0000000..f765b74
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+ fs_type: ext4
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..98c0849
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
diff --git a/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..eb8f18d
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
diff --git a/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml
new file mode 100644
index 0000000..7c2796b
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
diff --git a/src/ceph/qa/suites/krbd/singleton/% b/src/ceph/qa/suites/krbd/singleton/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/singleton/%
diff --git a/src/ceph/qa/suites/krbd/singleton/conf.yaml b/src/ceph/qa/suites/krbd/singleton/conf.yaml
new file mode 100644
index 0000000..8279674
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/singleton/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ client:
+ rbd default features: 5
diff --git a/src/ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml b/src/ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml b/src/ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml b/src/ceph/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml
new file mode 100644
index 0000000..443aa0e
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml
@@ -0,0 +1,38 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+- rbd.xfstests:
+    client.0: &ref
+      test_image: 'test_image-0'
+      test_size: 5120 # MB
+      scratch_image: 'scratch_image-0'
+      scratch_size: 5120 # MB
+      fs_type: ext4
+      tests: '-g auto -x clone'
+      exclude:
+      - generic/042
+      - generic/392
+      - generic/044
+      - generic/045
+      - generic/046
+      - generic/223
+      - ext4/304
+      - generic/050 # krbd BLKROSET bug
+      - generic/388
+      - generic/405
+      - generic/422
+      - generic/448
+      randomize: true
+    client.1:
+      <<: *ref
+      test_image: 'test_image-1'
+      scratch_image: 'scratch_image-1'
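The xfstests fragment leans on plain YAML anchors: `&ref` names client.0's whole mapping and `<<: *ref` merges it into client.1, which then overrides only the image names, so both clients run the identical test matrix against their own images. The mechanism in isolation:

    client.0: &ref          # anchor the whole mapping
      fs_type: ext4
      tests: '-g auto -x clone'
    client.1:
      <<: *ref              # merge key: inherit every key from &ref
      test_image: 'test_image-1'   # then override per-client values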
diff --git a/src/ceph/qa/suites/krbd/thrash/% b/src/ceph/qa/suites/krbd/thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/%
diff --git a/src/ceph/qa/suites/krbd/thrash/ceph/ceph.yaml b/src/ceph/qa/suites/krbd/thrash/ceph/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/ceph/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/krbd/thrash/clusters/fixed-3.yaml b/src/ceph/qa/suites/krbd/thrash/clusters/fixed-3.yaml
new file mode 120000
index 0000000..a3ac9fc
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/clusters/fixed-3.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/krbd/thrash/conf.yaml b/src/ceph/qa/suites/krbd/thrash/conf.yaml
new file mode 100644
index 0000000..8279674
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ client:
+ rbd default features: 5
diff --git a/src/ceph/qa/suites/krbd/thrash/thrashers/backoff.yaml b/src/ceph/qa/suites/krbd/thrash/thrashers/backoff.yaml
new file mode 100644
index 0000000..44c86ba
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/thrashers/backoff.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd backoff on peering: true
+        osd backoff on degraded: true
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
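The backoff thrasher enables `osd backoff on peering/degraded`, so OSDs tell clients to back off rather than queue requests against PGs that are peering or degraded; the log-whitelist entries are regexes matched against the cluster log, silencing warnings that thrashing is expected to produce. thrashosds itself repeatedly marks OSDs down and revives them, with `chance_pgnum_grow`/`chance_pgpnum_fix` giving the relative odds of also growing pg_num or fixing pgp_num on each cycle.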
diff --git a/src/ceph/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml b/src/ceph/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml
new file mode 100644
index 0000000..2ce44c8
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml
@@ -0,0 +1,4 @@
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
diff --git a/src/ceph/qa/suites/krbd/thrash/thrashers/pggrow.yaml b/src/ceph/qa/suites/krbd/thrash/thrashers/pggrow.yaml
new file mode 100644
index 0000000..14346a2
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/thrashers/pggrow.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
diff --git a/src/ceph/qa/suites/krbd/thrash/thrashers/upmap.yaml b/src/ceph/qa/suites/krbd/thrash/thrashers/upmap.yaml
new file mode 100644
index 0000000..7f72986
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/thrashers/upmap.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    crush_tunables: optimal
+    conf:
+      mon:
+        mon osd initial require min compat client: luminous
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    chance_thrash_pg_upmap: 3
+    chance_thrash_pg_upmap_items: 3
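The upmap thrasher additionally mutates pg-upmap and pg-upmap-items entries, and both settings it carries are real prerequisites of that feature: upmap mappings are only honored with optimal (post-jewel) CRUSH tunables, and the monitors refuse to create them unless the required client compat level is at least luminous, here pre-set via `mon osd initial require min compat client: luminous` (the runtime equivalent is `ceph osd set-require-min-compat-client luminous`).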
diff --git a/src/ceph/qa/suites/krbd/thrash/thrashosds-health.yaml b/src/ceph/qa/suites/krbd/thrash/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/krbd/thrash/workloads/rbd_fio.yaml b/src/ceph/qa/suites/krbd/thrash/workloads/rbd_fio.yaml
new file mode 100644
index 0000000..157210f
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/workloads/rbd_fio.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rbd_fio:
+ client.0:
+ fio-io-size: 100%
+ formats: [2]
+ features: [[layering,exclusive-lock]]
+ io-engine: libaio
+ rw: randrw
+ bs: 1024
+ io-depth: 256
+ runtime: 1200
diff --git a/src/ceph/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml b/src/ceph/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..4ae7d69
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml
@@ -0,0 +1,8 @@
+tasks:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
diff --git a/src/ceph/qa/suites/krbd/unmap/% b/src/ceph/qa/suites/krbd/unmap/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/unmap/%
diff --git a/src/ceph/qa/suites/krbd/unmap/ceph/ceph.yaml b/src/ceph/qa/suites/krbd/unmap/ceph/ceph.yaml
new file mode 100644
index 0000000..c58aaca
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/unmap/ceph/ceph.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ crush_tunables: bobtail
+tasks:
+- install:
+- ceph:
+- exec:
+ client.0:
+ - "ceph osd getcrushmap -o /dev/stdout | crushtool -d - | sed -e 's/alg straw2/alg straw/g' | crushtool -c /dev/stdin -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin"
diff --git a/src/ceph/qa/suites/krbd/unmap/clusters/separate-client.yaml b/src/ceph/qa/suites/krbd/unmap/clusters/separate-client.yaml
new file mode 100644
index 0000000..be13431
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/unmap/clusters/separate-client.yaml
@@ -0,0 +1,16 @@
+# fixed-1.yaml, but with client.0 on a separate target
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ osd pool default size: 2
+ osd crush chooseleaf type: 0
+ osd pool default pg num: 128
+ osd pool default pgp num: 128
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/krbd/unmap/conf.yaml b/src/ceph/qa/suites/krbd/unmap/conf.yaml
new file mode 100644
index 0000000..8984e8d
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/unmap/conf.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default features: 1 # pre-single-major is v3.13, so layering only
diff --git a/src/ceph/qa/suites/krbd/unmap/filestore-xfs.yaml b/src/ceph/qa/suites/krbd/unmap/filestore-xfs.yaml
new file mode 120000
index 0000000..59ef7e4
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/unmap/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../objectstore/filestore-xfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/krbd/unmap/kernels/pre-single-major.yaml b/src/ceph/qa/suites/krbd/unmap/kernels/pre-single-major.yaml
new file mode 100644
index 0000000..a5636b4
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/unmap/kernels/pre-single-major.yaml
@@ -0,0 +1,10 @@
+overrides:
+ kernel:
+ client.0:
+ branch: nightly_pre-single-major # v3.12.z
+tasks:
+- exec:
+ client.0:
+ - "modprobe -r rbd"
+ - "modprobe --first-time rbd"
+ - "test ! -f /sys/module/rbd/parameters/single_major"
diff --git a/src/ceph/qa/suites/krbd/unmap/kernels/single-major-off.yaml b/src/ceph/qa/suites/krbd/unmap/kernels/single-major-off.yaml
new file mode 100644
index 0000000..9dc2488
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/unmap/kernels/single-major-off.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+ client.0:
+ - "modprobe -r rbd"
+ - "modprobe --first-time rbd single_major=0"
+ - "grep -q N /sys/module/rbd/parameters/single_major"
diff --git a/src/ceph/qa/suites/krbd/unmap/kernels/single-major-on.yaml b/src/ceph/qa/suites/krbd/unmap/kernels/single-major-on.yaml
new file mode 100644
index 0000000..c3889f3
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/unmap/kernels/single-major-on.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+ client.0:
+ - "modprobe -r rbd"
+ - "modprobe --first-time rbd single_major=1"
+ - "grep -q Y /sys/module/rbd/parameters/single_major"
diff --git a/src/ceph/qa/suites/krbd/unmap/tasks/unmap.yaml b/src/ceph/qa/suites/krbd/unmap/tasks/unmap.yaml
new file mode 100644
index 0000000..05cc5f3
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/unmap/tasks/unmap.yaml
@@ -0,0 +1,5 @@
+tasks:
+- cram:
+ clients:
+ client.0:
+ - http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=src/test/cli-integration/rbd/unmap.t
diff --git a/src/ceph/qa/suites/krbd/wac/sysfs/% b/src/ceph/qa/suites/krbd/wac/sysfs/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/sysfs/%
diff --git a/src/ceph/qa/suites/krbd/wac/sysfs/ceph/ceph.yaml b/src/ceph/qa/suites/krbd/wac/sysfs/ceph/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/sysfs/ceph/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/krbd/wac/sysfs/clusters/fixed-1.yaml b/src/ceph/qa/suites/krbd/wac/sysfs/clusters/fixed-1.yaml
new file mode 120000
index 0000000..549e880
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/sysfs/clusters/fixed-1.yaml
@@ -0,0 +1 @@
+../../../../../clusters/fixed-1.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/krbd/wac/sysfs/conf.yaml b/src/ceph/qa/suites/krbd/wac/sysfs/conf.yaml
new file mode 100644
index 0000000..8279674
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/sysfs/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ client:
+ rbd default features: 5
diff --git a/src/ceph/qa/suites/krbd/wac/sysfs/tasks/stable_pages_required.yaml b/src/ceph/qa/suites/krbd/wac/sysfs/tasks/stable_pages_required.yaml
new file mode 100644
index 0000000..3d23227
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/sysfs/tasks/stable_pages_required.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/krbd_stable_pages_required.sh
diff --git a/src/ceph/qa/suites/krbd/wac/wac/% b/src/ceph/qa/suites/krbd/wac/wac/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/wac/%
diff --git a/src/ceph/qa/suites/krbd/wac/wac/ceph/ceph.yaml b/src/ceph/qa/suites/krbd/wac/wac/ceph/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/wac/ceph/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/krbd/wac/wac/clusters/fixed-3.yaml b/src/ceph/qa/suites/krbd/wac/wac/clusters/fixed-3.yaml
new file mode 120000
index 0000000..af987da
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/wac/clusters/fixed-3.yaml
@@ -0,0 +1 @@
+../../../../../clusters/fixed-3.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/krbd/wac/wac/conf.yaml b/src/ceph/qa/suites/krbd/wac/wac/conf.yaml
new file mode 100644
index 0000000..8279674
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/wac/conf.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ client:
+ rbd default features: 5
diff --git a/src/ceph/qa/suites/krbd/wac/wac/tasks/wac.yaml b/src/ceph/qa/suites/krbd/wac/wac/tasks/wac.yaml
new file mode 100644
index 0000000..52dabc3
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/wac/tasks/wac.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+ client.0:
+ - "dmesg -C"
+- rbd:
+ all:
+ fs_type: ext4
+- workunit:
+ clients:
+ all:
+ - suites/wac.sh
diff --git a/src/ceph/qa/suites/krbd/wac/wac/verify/many-resets.yaml b/src/ceph/qa/suites/krbd/wac/wac/verify/many-resets.yaml
new file mode 100644
index 0000000..526897e
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/wac/verify/many-resets.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
+tasks:
+- exec:
+ client.0:
+ - "dmesg | grep -q 'libceph: osd.* socket closed'"
+ - "dmesg | grep -q 'libceph: osd.* socket error on write'"
diff --git a/src/ceph/qa/suites/krbd/wac/wac/verify/no-resets.yaml b/src/ceph/qa/suites/krbd/wac/wac/verify/no-resets.yaml
new file mode 100644
index 0000000..2728479
--- /dev/null
+++ b/src/ceph/qa/suites/krbd/wac/wac/verify/no-resets.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+ client.0:
+ - "! dmesg | grep -q 'libceph: osd.* socket closed'"
+ - "! dmesg | grep -q 'libceph: osd.* socket error on write'"
diff --git a/src/ceph/qa/suites/marginal/basic/% b/src/ceph/qa/suites/marginal/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/basic/%
diff --git a/src/ceph/qa/suites/marginal/basic/clusters/fixed-3.yaml b/src/ceph/qa/suites/marginal/basic/clusters/fixed-3.yaml
new file mode 100644
index 0000000..5e23c9e
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/basic/clusters/fixed-3.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
diff --git a/src/ceph/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml b/src/ceph/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..4f25d80
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
diff --git a/src/ceph/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml b/src/ceph/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..a0d2e76
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
diff --git a/src/ceph/qa/suites/marginal/fs-misc/% b/src/ceph/qa/suites/marginal/fs-misc/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/fs-misc/%
diff --git a/src/ceph/qa/suites/marginal/fs-misc/clusters/two_clients.yaml b/src/ceph/qa/suites/marginal/fs-misc/clusters/two_clients.yaml
new file mode 100644
index 0000000..19d312d
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/fs-misc/clusters/two_clients.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2]
+- [client.1]
+- [client.0]
diff --git a/src/ceph/qa/suites/marginal/fs-misc/tasks/locktest.yaml b/src/ceph/qa/suites/marginal/fs-misc/tasks/locktest.yaml
new file mode 100644
index 0000000..444bb1f
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/fs-misc/tasks/locktest.yaml
@@ -0,0 +1,5 @@
+tasks:
+- install:
+- ceph:
+- kclient:
+- locktest: [client.0, client.1]
diff --git a/src/ceph/qa/suites/marginal/mds_restart/% b/src/ceph/qa/suites/marginal/mds_restart/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/mds_restart/%
diff --git a/src/ceph/qa/suites/marginal/mds_restart/clusters/one_mds.yaml b/src/ceph/qa/suites/marginal/mds_restart/clusters/one_mds.yaml
new file mode 100644
index 0000000..45c3e80
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/mds_restart/clusters/one_mds.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.b, mon.c, mgr.x, osd.0, osd.1, osd.2]
+- [mds.a]
+- [client.0]
diff --git a/src/ceph/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml b/src/ceph/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml
new file mode 100644
index 0000000..d086d4c
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      mds:
+        mds log segment size: 16384
+        mds log max segments: 1
+- restart:
+    exec:
+      client.0:
+      - test-backtraces.py
diff --git a/src/ceph/qa/suites/marginal/multimds/% b/src/ceph/qa/suites/marginal/multimds/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/%
diff --git a/src/ceph/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml b/src/ceph/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml
new file mode 100644
index 0000000..2995ea9
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml
@@ -0,0 +1,5 @@
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, mds.b, mds.c, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
diff --git a/src/ceph/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml b/src/ceph/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml
new file mode 100644
index 0000000..083a07c
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml
@@ -0,0 +1,5 @@
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
diff --git a/src/ceph/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml b/src/ceph/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml
new file mode 100644
index 0000000..55d8beb
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        fuse_default_permissions: 0
+- ceph-fuse:
diff --git a/src/ceph/qa/suites/marginal/multimds/mounts/kclient.yaml b/src/ceph/qa/suites/marginal/multimds/mounts/kclient.yaml
new file mode 100644
index 0000000..c18db8f
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/mounts/kclient.yaml
@@ -0,0 +1,4 @@
+tasks:
+- install:
+- ceph:
+- kclient:
diff --git a/src/ceph/qa/suites/marginal/multimds/tasks/workunit_misc.yaml b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_misc.yaml
new file mode 100644
index 0000000..aa62b9e
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_misc.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - fs/misc
diff --git a/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..4c1fcc1
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
diff --git a/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml
new file mode 100644
index 0000000..41b2bc8
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
diff --git a/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..ddb18fb
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml
new file mode 100644
index 0000000..7efa1ad
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
diff --git a/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml
new file mode 100644
index 0000000..a1937ea
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse default permissions: false
+ fuse set user groups: true
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
diff --git a/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml
new file mode 100644
index 0000000..3aa5f88
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml
@@ -0,0 +1,15 @@
+tasks:
+- install:
+- ceph:
+    conf:
+      client:
+        ms_inject_delay_probability: 1
+        ms_inject_delay_type: osd
+        ms_inject_delay_max: 5
+        client_oc_max_dirty_age: 1
+- ceph-fuse:
+- exec:
+    client.0:
+    - dd if=/dev/zero of=./foo count=100
+    - sleep 2
+    - truncate --size 0 ./foo
diff --git a/src/ceph/qa/suites/marginal/multimds/thrash/exports.yaml b/src/ceph/qa/suites/marginal/multimds/thrash/exports.yaml
new file mode 100644
index 0000000..240b46d
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/thrash/exports.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ mds:
+ mds thrash exports: 1
diff --git a/src/ceph/qa/suites/marginal/multimds/thrash/normal.yaml b/src/ceph/qa/suites/marginal/multimds/thrash/normal.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/marginal/multimds/thrash/normal.yaml
diff --git a/src/ceph/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml b/src/ceph/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml
new file mode 100644
index 0000000..134bca1
--- /dev/null
+++ b/src/ceph/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1]
+- [mon.b, mon.c, osd.2, osd.3, client.0]
+- [client.1]
diff --git a/src/ceph/qa/suites/mixed-clients/basic/objectstore b/src/ceph/qa/suites/mixed-clients/basic/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/mixed-clients/basic/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml b/src/ceph/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml
new file mode 100644
index 0000000..bb347be
--- /dev/null
+++ b/src/ceph/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml
@@ -0,0 +1,26 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- install:
+    branch: dumpling
+- ceph:
+- parallel:
+  - user-workload
+  - kclient-workload
+user-workload:
+  sequential:
+  - ceph-fuse: [client.0]
+  - workunit:
+      clients:
+        client.0:
+        - suites/iozone.sh
+kclient-workload:
+  sequential:
+  - kclient: [client.1]
+  - workunit:
+      clients:
+        client.1:
+        - suites/dbench.sh
diff --git a/src/ceph/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml b/src/ceph/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml
new file mode 100644
index 0000000..2c32a61
--- /dev/null
+++ b/src/ceph/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml
@@ -0,0 +1,26 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms die on skipped message: false
+tasks:
+- install:
+    branch: dumpling
+- ceph:
+- parallel:
+  - user-workload
+  - kclient-workload
+user-workload:
+  sequential:
+  - ceph-fuse: [client.0]
+  - workunit:
+      clients:
+        client.0:
+        - suites/blogbench.sh
+kclient-workload:
+  sequential:
+  - kclient: [client.1]
+  - workunit:
+      clients:
+        client.1:
+        - kernel_untar_build.sh
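Both mixed-clients tasks share one shape: `parallel` runs its entries concurrently, and an entry that is not an inline task is resolved as a top-level key of the job YAML, so `user-workload` and `kclient-workload` name the `sequential` blocks defined beside `tasks`. Stripped to the mechanism:

    tasks:
    - parallel:
      - user-workload        # -> looked up as the top-level key below
      - kclient-workload
    user-workload:
      sequential:            # steps inside run one after another
      - ceph-fuse: [client.0]
    kclient-workload:
      sequential:
      - kclient: [client.1]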
diff --git a/src/ceph/qa/suites/multimds/basic/% b/src/ceph/qa/suites/multimds/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/%
diff --git a/src/ceph/qa/suites/multimds/basic/begin.yaml b/src/ceph/qa/suites/multimds/basic/begin.yaml
new file mode 120000
index 0000000..d64b08e
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/begin.yaml
@@ -0,0 +1 @@
+../../fs/basic_workload/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/clusters/3-mds.yaml b/src/ceph/qa/suites/multimds/basic/clusters/3-mds.yaml
new file mode 120000
index 0000000..97d5edc
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/clusters/3-mds.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/3-mds.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/clusters/9-mds.yaml b/src/ceph/qa/suites/multimds/basic/clusters/9-mds.yaml
new file mode 120000
index 0000000..c522294
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/clusters/9-mds.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/9-mds.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/inline b/src/ceph/qa/suites/multimds/basic/inline
new file mode 120000
index 0000000..e7d7133
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/inline
@@ -0,0 +1 @@
+../../fs/basic_workload/inline \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/mount/fuse.yaml b/src/ceph/qa/suites/multimds/basic/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/mount/kclient.yaml b/src/ceph/qa/suites/multimds/basic/mount/kclient.yaml
new file mode 120000
index 0000000..b6f2aad
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/mount/kclient.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/kclient.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/objectstore-ec b/src/ceph/qa/suites/multimds/basic/objectstore-ec
new file mode 120000
index 0000000..15dc98f
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/overrides/% b/src/ceph/qa/suites/multimds/basic/overrides/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/overrides/%
diff --git a/src/ceph/qa/suites/multimds/basic/overrides/basic b/src/ceph/qa/suites/multimds/basic/overrides/basic
new file mode 120000
index 0000000..7517355
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/overrides/basic
@@ -0,0 +1 @@
+../../../fs/basic_workload/overrides \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/overrides/fuse-default-perm-no.yaml b/src/ceph/qa/suites/multimds/basic/overrides/fuse-default-perm-no.yaml
new file mode 120000
index 0000000..1bada5b
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/overrides/fuse-default-perm-no.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/fuse/default-perm/no.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/q_check_counter/check_counter.yaml b/src/ceph/qa/suites/multimds/basic/q_check_counter/check_counter.yaml
new file mode 100644
index 0000000..1018b1e
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/q_check_counter/check_counter.yaml
@@ -0,0 +1,8 @@
+
+tasks:
+- check-counter:
+ counters:
+ mds:
+ - "mds.exported"
+ - "mds.imported"
+
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml
new file mode 100644
index 0000000..b5842b3
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml
@@ -0,0 +1,4 @@
+tasks:
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_exports
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_kernel_untar_build.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_kernel_untar_build.yaml
new file mode 100644
index 0000000..8dbc24a
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_kernel_untar_build.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse_default_permissions: 0
+tasks:
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml
new file mode 100644
index 0000000..5d54f3d
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+ timeout: 6h
+ clients:
+ all:
+ - fs/misc
+
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml
new file mode 100644
index 0000000..4833371
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml
@@ -0,0 +1,12 @@
+tasks:
+- workunit:
+ timeout: 6h
+ clients:
+ all:
+ - fs/norstats
+
+overrides:
+ ceph:
+ conf:
+ client:
+ client dirsize rbytes: false
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_blogbench.yaml
new file mode 120000
index 0000000..8f2e88a
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_blogbench.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_blogbench.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_dbench.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_dbench.yaml
new file mode 120000
index 0000000..87c056d
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_dbench.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_dbench.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_ffsb.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_ffsb.yaml
new file mode 120000
index 0000000..3528bad
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_ffsb.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_ffsb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 120000
index 0000000..dc3fd30
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1 @@
+../../../../cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsx.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..8b2b1ab
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsx.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
diff --git a/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..7cb0b0f
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        debug ms: 1
+        debug client: 20
+        fuse set user groups: true
+        fuse default permissions: false
+      mds:
+        debug ms: 1
+        debug mds: 20
+tasks:
+- workunit:
+    clients:
+      all:
+      - suites/pjd.sh
diff --git a/src/ceph/qa/suites/multimds/thrash/% b/src/ceph/qa/suites/multimds/thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/%
diff --git a/src/ceph/qa/suites/multimds/thrash/begin.yaml b/src/ceph/qa/suites/multimds/thrash/begin.yaml
new file mode 120000
index 0000000..42459d1
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/begin.yaml
@@ -0,0 +1 @@
+../../fs/thrash/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/thrash/ceph-thrash b/src/ceph/qa/suites/multimds/thrash/ceph-thrash
new file mode 120000
index 0000000..d632af9
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/ceph-thrash
@@ -0,0 +1 @@
+../../fs/thrash/ceph-thrash/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/thrash/clusters/3-mds-2-standby.yaml b/src/ceph/qa/suites/multimds/thrash/clusters/3-mds-2-standby.yaml
new file mode 100644
index 0000000..1a415ef
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/clusters/3-mds-2-standby.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2, mds.d-s]
+- [mon.b, mgr.x, mds.b, mds.c, osd.3, osd.4, osd.5, mds.e-s]
+- [client.0]
diff --git a/src/ceph/qa/suites/multimds/thrash/clusters/9-mds-3-standby.yaml b/src/ceph/qa/suites/multimds/thrash/clusters/9-mds-3-standby.yaml
new file mode 100644
index 0000000..4a8334c
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/clusters/9-mds-3-standby.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2, mds.j-s, mds.k-s]
+- [mon.b, mgr.x, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5, mds.l-s]
+- [client.0]
diff --git a/src/ceph/qa/suites/multimds/thrash/mount/fuse.yaml b/src/ceph/qa/suites/multimds/thrash/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/thrash/mount/kclient.yaml b/src/ceph/qa/suites/multimds/thrash/mount/kclient.yaml
new file mode 120000
index 0000000..b6f2aad
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/mount/kclient.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/kclient.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/thrash/msgr-failures b/src/ceph/qa/suites/multimds/thrash/msgr-failures
new file mode 120000
index 0000000..534e0d8
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/msgr-failures
@@ -0,0 +1 @@
+../../fs/thrash/msgr-failures/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/thrash/objectstore-ec b/src/ceph/qa/suites/multimds/thrash/objectstore-ec
new file mode 120000
index 0000000..15dc98f
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/thrash/overrides/% b/src/ceph/qa/suites/multimds/thrash/overrides/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/overrides/%
diff --git a/src/ceph/qa/suites/multimds/thrash/overrides/fuse-default-perm-no.yaml b/src/ceph/qa/suites/multimds/thrash/overrides/fuse-default-perm-no.yaml
new file mode 120000
index 0000000..1bada5b
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/overrides/fuse-default-perm-no.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/fuse/default-perm/no.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/thrash/overrides/thrash b/src/ceph/qa/suites/multimds/thrash/overrides/thrash
new file mode 120000
index 0000000..1f0c36d
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/overrides/thrash
@@ -0,0 +1 @@
+../../../fs/thrash/overrides/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/thrash/overrides/thrash_debug.yaml b/src/ceph/qa/suites/multimds/thrash/overrides/thrash_debug.yaml
new file mode 100644
index 0000000..2243037
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/overrides/thrash_debug.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ mds:
+ debug ms: 10
+ client:
+ debug ms: 10
diff --git a/src/ceph/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 120000
index 0000000..324538e
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1 @@
+../../../fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_pjd.yaml b/src/ceph/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 120000
index 0000000..a6852a2
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1 @@
+../../../fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/% b/src/ceph/qa/suites/multimds/verify/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/%
diff --git a/src/ceph/qa/suites/multimds/verify/begin.yaml b/src/ceph/qa/suites/multimds/verify/begin.yaml
new file mode 120000
index 0000000..a199d46
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/begin.yaml
@@ -0,0 +1 @@
+../../fs/verify/begin.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/clusters/3-mds.yaml b/src/ceph/qa/suites/multimds/verify/clusters/3-mds.yaml
new file mode 120000
index 0000000..97d5edc
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/clusters/3-mds.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/3-mds.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/clusters/9-mds.yaml b/src/ceph/qa/suites/multimds/verify/clusters/9-mds.yaml
new file mode 120000
index 0000000..c522294
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/clusters/9-mds.yaml
@@ -0,0 +1 @@
+../../../../cephfs/clusters/9-mds.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/mount/fuse.yaml b/src/ceph/qa/suites/multimds/verify/mount/fuse.yaml
new file mode 120000
index 0000000..af9ee0a
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/mount/fuse.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/fuse.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/mount/kclient.yaml b/src/ceph/qa/suites/multimds/verify/mount/kclient.yaml
new file mode 120000
index 0000000..b6f2aad
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/mount/kclient.yaml
@@ -0,0 +1 @@
+../../../../cephfs/mount/kclient.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/objectstore-ec b/src/ceph/qa/suites/multimds/verify/objectstore-ec
new file mode 120000
index 0000000..15dc98f
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/objectstore-ec
@@ -0,0 +1 @@
+../../../cephfs/objectstore-ec \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/overrides/% b/src/ceph/qa/suites/multimds/verify/overrides/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/overrides/%
diff --git a/src/ceph/qa/suites/multimds/verify/overrides/fuse-default-perm-no.yaml b/src/ceph/qa/suites/multimds/verify/overrides/fuse-default-perm-no.yaml
new file mode 120000
index 0000000..1bada5b
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/overrides/fuse-default-perm-no.yaml
@@ -0,0 +1 @@
+../../../../cephfs/overrides/fuse/default-perm/no.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/overrides/verify b/src/ceph/qa/suites/multimds/verify/overrides/verify
new file mode 120000
index 0000000..3ded92e
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/overrides/verify
@@ -0,0 +1 @@
+../../../fs/verify/overrides/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/tasks b/src/ceph/qa/suites/multimds/verify/tasks
new file mode 120000
index 0000000..f0edfbd
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/tasks
@@ -0,0 +1 @@
+../../fs/verify/tasks/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/multimds/verify/validater b/src/ceph/qa/suites/multimds/verify/validater
new file mode 120000
index 0000000..0c7f8a5
--- /dev/null
+++ b/src/ceph/qa/suites/multimds/verify/validater
@@ -0,0 +1 @@
+../../fs/verify/validater/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/powercycle/osd/% b/src/ceph/qa/suites/powercycle/osd/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/%
diff --git a/src/ceph/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml b/src/ceph/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml
new file mode 100644
index 0000000..8ba3c69
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml
@@ -0,0 +1,5 @@
+roles:
+- [mon.a, mon.b, mon.c, mgr.x, mgr.y, mds.0, client.0]
+- [osd.0]
+- [osd.1]
+- [osd.2]
diff --git a/src/ceph/qa/suites/powercycle/osd/objectstore b/src/ceph/qa/suites/powercycle/osd/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/powercycle/osd/powercycle/default.yaml b/src/ceph/qa/suites/powercycle/osd/powercycle/default.yaml
new file mode 100644
index 0000000..b632e83
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/powercycle/default.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+- thrashosds:
+ chance_down: 1.0
+ powercycle: true
+ timeout: 600
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml
new file mode 100644
index 0000000..3b1a892
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    conf:
+      client.0:
+        admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+    clients: [client.0]
+    time: 60
+- admin_socket:
+    client.0:
+      objecter_requests:
+        test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml
new file mode 100644
index 0000000..87f8f57
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml
@@ -0,0 +1,12 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse_default_permissions: 0
+tasks:
+- ceph-fuse:
+- workunit:
+ timeout: 6h
+ clients:
+ all:
+ - kernel_untar_build.sh
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml
new file mode 100644
index 0000000..683d3f5
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml
@@ -0,0 +1,7 @@
+tasks:
+- ceph-fuse:
+- workunit:
+ timeout: 6h
+ clients:
+ all:
+ - fs/misc
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml
new file mode 100644
index 0000000..9f3fa7b
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        filestore flush min: 0
+      mds:
+        debug ms: 1
+        debug mds: 20
+tasks:
+- ceph-fuse:
+- workunit:
+    clients:
+      all:
+      - suites/ffsb.sh
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..5908d95
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..9403151
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml
@@ -0,0 +1,7 @@
+tasks:
+- ceph-fuse:
+- workunit:
+ timeout: 6h
+ clients:
+ all:
+ - suites/fsx.sh
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml
new file mode 100644
index 0000000..c6043e2
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..664791c
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,12 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse default permissions: false
+ fuse set user groups: true
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml
new file mode 100644
index 0000000..f3efafa
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        ms_inject_delay_probability: 1
+        ms_inject_delay_type: osd
+        ms_inject_delay_max: 5
+        client_oc_max_dirty_age: 1
+tasks:
+- ceph-fuse:
+- exec:
+    client.0:
+    - dd if=/dev/zero of=./foo count=100
+    - sleep 2
+    - truncate --size 0 ./foo
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..06f3f57
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/radosbench.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/radosbench.yaml
new file mode 100644
index 0000000..91573f9
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/radosbench.yaml
@@ -0,0 +1,38 @@
+tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
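`full_sequential` runs each sub-task to completion before starting the next, so the twelve 90-second blocks above give roughly 18 minutes of uninterrupted radosbench load for the power-cycle thrasher to interrupt. Teuthology's YAML has no loop construct, hence the literal repetition; since the fragments are parsed by a standard YAML loader, anchors and aliases could express the same list more compactly. A sketch, assuming the loader resolves anchors (PyYAML does):

  tasks:
  - full_sequential:
    - radosbench: &bench       # define the benchmark block once
        clients: [client.0]
        time: 90
    - radosbench: *bench       # alias it for each further 90 s run
    - radosbench: *bench
    # ...repeat the alias for the remaining runs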
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/readwrite.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/readwrite.yaml
new file mode 100644
index 0000000..c53e52b
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/readwrite.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml
new file mode 100644
index 0000000..aa82d97
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
diff --git a/src/ceph/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml b/src/ceph/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml
new file mode 100644
index 0000000..1ffe4e1
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
diff --git a/src/ceph/qa/suites/powercycle/osd/thrashosds-health.yaml b/src/ceph/qa/suites/powercycle/osd/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/powercycle/osd/whitelist_health.yaml b/src/ceph/qa/suites/powercycle/osd/whitelist_health.yaml
new file mode 100644
index 0000000..0235037
--- /dev/null
+++ b/src/ceph/qa/suites/powercycle/osd/whitelist_health.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - \(MDS_TRIM\)
+      - Behind on trimming
diff --git a/src/ceph/qa/suites/rados/basic-luminous/% b/src/ceph/qa/suites/rados/basic-luminous/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/%
diff --git a/src/ceph/qa/suites/rados/basic-luminous/ceph.yaml b/src/ceph/qa/suites/rados/basic-luminous/ceph.yaml
new file mode 120000
index 0000000..1e1b9ef
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/ceph.yaml
@@ -0,0 +1 @@
+../basic/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic-luminous/clusters b/src/ceph/qa/suites/rados/basic-luminous/clusters
new file mode 120000
index 0000000..ae92569
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/clusters
@@ -0,0 +1 @@
+../basic/clusters \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic-luminous/objectstore b/src/ceph/qa/suites/rados/basic-luminous/objectstore
new file mode 120000
index 0000000..f81a13b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/objectstore
@@ -0,0 +1 @@
+../basic/objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic-luminous/rados.yaml b/src/ceph/qa/suites/rados/basic-luminous/rados.yaml
new file mode 120000
index 0000000..9b356bf
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/rados.yaml
@@ -0,0 +1 @@
+../basic/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml b/src/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml
new file mode 100644
index 0000000..d87f5bf
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml
@@ -0,0 +1,28 @@
+overrides:
+  ceph:
+    wait-for-scrub: false
+    log-whitelist:
+      - '!= data_digest'
+      - '!= omap_digest'
+      - '!= size'
+      - 'deep-scrub 0 missing, 1 inconsistent objects'
+      - 'deep-scrub [0-9]+ errors'
+      - 'repair 0 missing, 1 inconsistent objects'
+      - 'repair [0-9]+ errors, [0-9]+ fixed'
+      - 'shard [0-9]+ missing'
+      - 'deep-scrub 1 missing, 1 inconsistent objects'
+      - 'does not match object info size'
+      - 'attr name mistmatch'
+      - 'deep-scrub 1 missing, 0 inconsistent objects'
+      - 'failed to pick suitable auth object'
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OSD_SCRUB_ERRORS\)
+      - \(TOO_FEW_PGS\)
+    conf:
+      osd:
+        osd deep scrub update digest min age: 0
+tasks:
+- scrub_test:
diff --git a/src/ceph/qa/suites/rados/basic/% b/src/ceph/qa/suites/rados/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/%
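The empty file named `%` is teuthology's convolution marker: when present, the scheduler builds one job per combination of the facet subdirectories (clusters/, d-require-luminous/, msgr/, msgr-failures/, objectstore, tasks/, and so on), deep-merging the chosen fragments into a single job config. For instance, a job built from the msgr/simple.yaml and msgr-failures/few.yaml fragments shown further down would carry the merged override:

  overrides:
    ceph:
      conf:
        global:
          ms type: simple                  # from msgr/simple.yaml
          ms inject socket failures: 5000  # from msgr-failures/few.yaml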
diff --git a/src/ceph/qa/suites/rados/basic/ceph.yaml b/src/ceph/qa/suites/rados/basic/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/basic/clusters/+ b/src/ceph/qa/suites/rados/basic/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/clusters/+
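The empty `+` file is the companion convention: rather than picking one fragment from clusters/, every file in the directory is folded into each job, so the fixed-2.yaml symlink (whose target lies outside this diff) and openstack.yaml below always apply together. The effective cluster fragment is roughly:

  roles:
    # ...two-node role layout from the symlinked fixed-2.yaml (not shown here)
  openstack:
    - volumes: # attached to each instance
        count: 4
        size: 10 # GB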
diff --git a/src/ceph/qa/suites/rados/basic/clusters/fixed-2.yaml b/src/ceph/qa/suites/rados/basic/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic/clusters/openstack.yaml b/src/ceph/qa/suites/rados/basic/clusters/openstack.yaml
new file mode 100644
index 0000000..e559d91
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml
new file mode 100644
index 0000000..ef998cc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-end.yaml
@@ -0,0 +1,33 @@
+# do not require luminous osds at mkfs time; only set flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets), and verify we are healthy.
+tasks:
+- full_sequential_finally:
+  - exec:
+      mon.a:
+        - ceph osd require-osd-release luminous
+        - ceph osd pool application enable base rados || true
+# make sure osds have latest map
+        - rados -p rbd bench 5 write -b 4096
+  - ceph.healthy:
+  - ceph.osd_scrub_pgs:
+      cluster: ceph
+  - exec:
+      mon.a:
+        - sleep 15
+        - ceph osd dump | grep purged_snapdirs
+        - ceph pg dump -f json-pretty
+        - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
+overrides:
+  ceph:
+    conf:
+      global:
+        mon debug no require luminous: true
+
+# setting luminous triggers peering, which *might* trigger health alerts
+    log-whitelist:
+      - overall HEALTH_
+      - \(PG_AVAILABILITY\)
+      - \(PG_DEGRADED\)
+  thrashosds:
+    chance_thrash_cluster_full: 0
diff --git a/src/ceph/qa/suites/rados/basic/d-require-luminous/at-mkfs.yaml b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-mkfs.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/d-require-luminous/at-mkfs.yaml
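These two facets are an either/or pair: at-mkfs.yaml is deliberately empty (the default, requiring luminous OSDs from the start), while at-end.yaml suppresses the requirement via `mon debug no require luminous: true` and only sets the flag during teardown. `full_sequential_finally` appears to register its sub-tasks to run at the end of the job even when earlier tasks fail, which is what makes the closing flag-set, scrub, and legacy-snapset check dependable. A trimmed sketch of that tail:

  tasks:
  - full_sequential_finally:   # assumed: runs at teardown, even after failures
    - exec:
        mon.a:
          - ceph osd require-osd-release luminous   # flip the flag last
    - ceph.healthy:            # the cluster must settle back to health
    - ceph.osd_scrub_pgs:      # final scrub converts any legacy snapsets
        cluster: ceph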
diff --git a/src/ceph/qa/suites/rados/basic/mon_kv_backend b/src/ceph/qa/suites/rados/basic/mon_kv_backend
new file mode 120000
index 0000000..6f5a7e6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/mon_kv_backend
@@ -0,0 +1 @@
+../../../mon_kv_backend \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/basic/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/basic/msgr-failures/many.yaml b/src/ceph/qa/suites/rados/basic/msgr-failures/many.yaml
new file mode 100644
index 0000000..038c3a7
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 1500
diff --git a/src/ceph/qa/suites/rados/basic/msgr/async.yaml b/src/ceph/qa/suites/rados/basic/msgr/async.yaml
new file mode 100644
index 0000000..9c77eaa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr/async.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: async
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/src/ceph/qa/suites/rados/basic/msgr/random.yaml b/src/ceph/qa/suites/rados/basic/msgr/random.yaml
new file mode 100644
index 0000000..64404b3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr/random.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: random
+        enable experimental unrecoverable data corrupting features: '*'
diff --git a/src/ceph/qa/suites/rados/basic/msgr/simple.yaml b/src/ceph/qa/suites/rados/basic/msgr/simple.yaml
new file mode 100644
index 0000000..5c4f853
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/msgr/simple.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: simple
diff --git a/src/ceph/qa/suites/rados/basic/objectstore b/src/ceph/qa/suites/rados/basic/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic/rados.yaml b/src/ceph/qa/suites/rados/basic/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..316119c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml
@@ -0,0 +1,18 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+      - but it is still running
+      - overall HEALTH_
+      - \(POOL_FULL\)
+      - \(SMALLER_PGP_NUM\)
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(CACHE_POOL_NEAR_FULL\)
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rados/test.sh
+        - rados/test_pool_quota.sh
+
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_cls_all.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_cls_all.yaml
new file mode 100644
index 0000000..bbab083
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_cls_all.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+          replica_log rgw sdk statelog timeindex user version"
+        osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+          replica_log rgw sdk statelog timeindex user version"
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - cls
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_python.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_python.yaml
new file mode 100644
index 0000000..8c70304
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_python.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(PG_
+      - \(OSD_
+      - \(OBJECT_
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rados/test_python.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml
new file mode 100644
index 0000000..bee513e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(TOO_FEW_PGS\)
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rados/stress_watch.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml
new file mode 100644
index 0000000..c19cc83
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml
@@ -0,0 +1,7 @@
+tasks:
+- exec:
+    client.0:
+      - ceph_test_rados_striper_api_io
+      - ceph_test_rados_striper_api_aio
+      - ceph_test_rados_striper_api_striping
+
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml
new file mode 100644
index 0000000..2dade6d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+    clients:
+      all:
+        - rados/load-gen-big.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml
new file mode 100644
index 0000000..6b764a8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+    clients:
+      all:
+        - rados/load-gen-mix.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml
new file mode 100644
index 0000000..c82023c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+    clients:
+      all:
+        - rados/load-gen-mostlyread.sh
diff --git a/src/ceph/qa/suites/rados/basic/tasks/readwrite.yaml b/src/ceph/qa/suites/rados/basic/tasks/readwrite.yaml
new file mode 100644
index 0000000..f135107
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/readwrite.yaml
@@ -0,0 +1,17 @@
+overrides:
+  ceph:
+    crush_tunables: optimal
+    conf:
+      mon:
+        mon osd initial require min compat client: luminous
+      osd:
+        osd_discard_disconnected_ops: false
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
diff --git a/src/ceph/qa/suites/rados/basic/tasks/repair_test.yaml b/src/ceph/qa/suites/rados/basic/tasks/repair_test.yaml
new file mode 100644
index 0000000..9afbc04
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/repair_test.yaml
@@ -0,0 +1,30 @@
+overrides:
+  ceph:
+    wait-for-scrub: false
+    log-whitelist:
+      - candidate had a stat error
+      - candidate had a read error
+      - deep-scrub 0 missing, 1 inconsistent objects
+      - deep-scrub 0 missing, 4 inconsistent objects
+      - deep-scrub [0-9]+ errors
+      - '!= omap_digest'
+      - '!= data_digest'
+      - repair 0 missing, 1 inconsistent objects
+      - repair 0 missing, 4 inconsistent objects
+      - repair [0-9]+ errors, [0-9]+ fixed
+      - scrub 0 missing, 1 inconsistent objects
+      - scrub [0-9]+ errors
+      - 'size 1 != size'
+      - attr name mismatch
+      - Regular scrub request, deep-scrub details will be lost
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+    conf:
+      osd:
+        filestore debug inject read err: true
+        bluestore debug inject read err: true
+tasks:
+- repair_test:
+
diff --git a/src/ceph/qa/suites/rados/basic/tasks/rgw_snaps.yaml b/src/ceph/qa/suites/rados/basic/tasks/rgw_snaps.yaml
new file mode 100644
index 0000000..ec86491
--- /dev/null
+++ b/src/ceph/qa/suites/rados/basic/tasks/rgw_snaps.yaml
@@ -0,0 +1,38 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        debug rgw: 20
+        debug ms: 1
+      osd:
+        osd_max_omap_entries_per_request: 10
+tasks:
+- rgw:
+    client.0:
+- ceph_manager.wait_for_pools:
+    kwargs:
+      pools:
+        - .rgw.buckets
+        - .rgw.root
+        - default.rgw.control
+        - default.rgw.meta
+        - default.rgw.log
+- thrash_pool_snaps:
+    pools:
+      - .rgw.buckets
+      - .rgw.root
+      - default.rgw.control
+      - default.rgw.meta
+      - default.rgw.log
+- s3readwrite:
+    client.0:
+      rgw_server: client.0
+      readwrite:
+        bucket: rwtest
+        readers: 10
+        writers: 3
+        duration: 300
+        files:
+          num: 10
+          size: 2000
+          stddev: 500
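Task order matters here: the gateway comes up first because RGW creates its pools on demand, `ceph_manager.wait_for_pools` blocks until those pools exist, and `thrash_pool_snaps` then churns snapshots on them (presumably in the background, as thrasher tasks do) while `s3readwrite` pushes S3 traffic through the same gateway. Abridged:

  tasks:
  - rgw:                            # start a gateway on client.0
      client.0:
  - ceph_manager.wait_for_pools:    # block until RGW has created its pools
      kwargs:
        pools: [.rgw.buckets, .rgw.root, default.rgw.log]   # abridged list
  - thrash_pool_snaps:              # snapshot churn on the same pools
      pools: [.rgw.buckets, .rgw.root, default.rgw.log]
  - s3readwrite:                    # concurrent S3 read/write workload
      client.0:
        rgw_server: client.0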
diff --git a/src/ceph/qa/suites/rados/mgr/% b/src/ceph/qa/suites/rados/mgr/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/%
diff --git a/src/ceph/qa/suites/rados/mgr/clusters/2-node-mgr.yaml b/src/ceph/qa/suites/rados/mgr/clusters/2-node-mgr.yaml
new file mode 100644
index 0000000..abc90e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/clusters/2-node-mgr.yaml
@@ -0,0 +1,6 @@
+roles:
+- [mgr.x, mon.a, mon.c, mds.a, mds.c, osd.0, client.0]
+- [mgr.y, mgr.z, mon.b, mds.b, osd.1, osd.2, client.1]
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/src/ceph/qa/suites/rados/mgr/debug/mgr.yaml b/src/ceph/qa/suites/rados/mgr/debug/mgr.yaml
new file mode 100644
index 0000000..068021e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/debug/mgr.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: 20
+      mgr:
+        debug mgr: 20
+        debug ms: 1
+      client:
+        debug client: 20
+        debug mgrc: 20
+        debug ms: 1
+      osd:
+        debug mgrc: 20
+      mds:
+        debug mgrc: 20
diff --git a/src/ceph/qa/suites/rados/mgr/objectstore b/src/ceph/qa/suites/rados/mgr/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/mgr/tasks/dashboard.yaml b/src/ceph/qa/suites/rados/mgr/tasks/dashboard.yaml
new file mode 100644
index 0000000..3065e11
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/tasks/dashboard.yaml
@@ -0,0 +1,16 @@
+
+tasks:
+  - install:
+  - ceph:
+      # tests may leave mgrs broken, so don't try and call into them
+      # to invoke e.g. pg dump during teardown.
+      wait-for-scrub: false
+      log-whitelist:
+        - overall HEALTH_
+        - \(MGR_DOWN\)
+        - \(PG_
+        - replacing it with standby
+        - No standby daemons available
+  - cephfs_test_runner:
+      modules:
+        - tasks.mgr.test_dashboard
diff --git a/src/ceph/qa/suites/rados/mgr/tasks/failover.yaml b/src/ceph/qa/suites/rados/mgr/tasks/failover.yaml
new file mode 100644
index 0000000..34be471
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/tasks/failover.yaml
@@ -0,0 +1,16 @@
+
+tasks:
+  - install:
+  - ceph:
+      # tests may leave mgrs broken, so don't try and call into them
+      # to invoke e.g. pg dump during teardown.
+      wait-for-scrub: false
+      log-whitelist:
+        - overall HEALTH_
+        - \(MGR_DOWN\)
+        - \(PG_
+        - replacing it with standby
+        - No standby daemons available
+  - cephfs_test_runner:
+      modules:
+        - tasks.mgr.test_failover
diff --git a/src/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml b/src/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml
new file mode 100644
index 0000000..ffdfe8b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml
@@ -0,0 +1,19 @@
+
+tasks:
+  - install:
+  - ceph:
+      # tests may leave mgrs broken, so don't try and call into them
+      # to invoke e.g. pg dump during teardown.
+      wait-for-scrub: false
+      log-whitelist:
+        - overall HEALTH_
+        - \(MGR_DOWN\)
+        - \(PG_
+        - replacing it with standby
+        - No standby daemons available
+        - Reduced data availability
+        - Degraded data redundancy
+        - objects misplaced
+  - cephfs_test_runner:
+      modules:
+        - tasks.mgr.test_module_selftest
diff --git a/src/ceph/qa/suites/rados/mgr/tasks/workunits.yaml b/src/ceph/qa/suites/rados/mgr/tasks/workunits.yaml
new file mode 100644
index 0000000..d7261f4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/mgr/tasks/workunits.yaml
@@ -0,0 +1,16 @@
+tasks:
+  - install:
+  - ceph:
+      # tests may leave mgrs broken, so don't try and call into them
+      # to invoke e.g. pg dump during teardown.
+      wait-for-scrub: false
+      log-whitelist:
+        - overall HEALTH_
+        - \(MGR_DOWN\)
+        - \(PG_
+        - replacing it with standby
+        - No standby daemons available
+  - workunit:
+      clients:
+        client.0:
+          - mgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/% b/src/ceph/qa/suites/rados/monthrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/%
diff --git a/src/ceph/qa/suites/rados/monthrash/ceph.yaml b/src/ceph/qa/suites/rados/monthrash/ceph.yaml
new file mode 100644
index 0000000..9c08e93
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/ceph.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon min osdmap epochs: 25
+        paxos service trim min: 5
+# thrashing monitors may make mgr have trouble w/ its keepalive
+    log-whitelist:
+      - daemon x is unresponsive
+      - overall HEALTH_
+      - \(MGR_DOWN\)
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/monthrash/clusters/3-mons.yaml b/src/ceph/qa/suites/rados/monthrash/clusters/3-mons.yaml
new file mode 100644
index 0000000..4b721ef
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/clusters/3-mons.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.0]
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/monthrash/clusters/9-mons.yaml b/src/ceph/qa/suites/rados/monthrash/clusters/9-mons.yaml
new file mode 100644
index 0000000..a2874c1
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/clusters/9-mons.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2]
+- [mon.f, mon.g, mon.h, mon.i, mgr.x, osd.3, osd.4, osd.5, client.0]
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/monthrash/d-require-luminous b/src/ceph/qa/suites/rados/monthrash/d-require-luminous
new file mode 120000
index 0000000..82036c6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/d-require-luminous
@@ -0,0 +1 @@
+../basic/d-require-luminous \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/mon_kv_backend b/src/ceph/qa/suites/rados/monthrash/mon_kv_backend
new file mode 120000
index 0000000..6f5a7e6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/mon_kv_backend
@@ -0,0 +1 @@
+../../../mon_kv_backend \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/msgr b/src/ceph/qa/suites/rados/monthrash/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml b/src/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml
new file mode 100644
index 0000000..da25b7a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 2500
+        ms inject delay type: mon
+        ms inject delay probability: .005
+        ms inject delay max: 1
+        ms inject internal delays: .002
+      mgr:
+        debug monc: 10
diff --git a/src/ceph/qa/suites/rados/monthrash/objectstore b/src/ceph/qa/suites/rados/monthrash/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/rados.yaml b/src/ceph/qa/suites/rados/monthrash/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml
new file mode 100644
index 0000000..2d1ba88
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml
@@ -0,0 +1,12 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(MON_DOWN\)
+      - \(TOO_FEW_PGS\)
+tasks:
+- mon_thrash:
+    revive_delay: 90
+    thrash_delay: 1
+    thrash_store: true
+    thrash_many: true
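Relative to the sync-many thrasher below, this facet adds `thrash_store: true`; as the facet name suggests, the thrasher also clobbers a downed monitor's local store so that, on revival, it must perform a full store sync from the quorum rather than a short catch-up. The parameter glosses in this sketch are inferred from the thrasher's naming:

  tasks:
  - mon_thrash:
      revive_delay: 90      # seconds before a killed mon is brought back
      thrash_delay: 1       # pause between thrash events
      thrash_store: true    # wipe the mon store to force a full resync
      thrash_many: true     # take down more than one mon at a time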
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/many.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/many.yaml
new file mode 100644
index 0000000..fa829b3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/many.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(MON_DOWN\)
+    conf:
+      osd:
+        mon client ping interval: 4
+        mon client ping timeout: 12
+tasks:
+- mon_thrash:
+    revive_delay: 20
+    thrash_delay: 1
+    thrash_many: true
+    freeze_mon_duration: 20
+    freeze_mon_probability: 10
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/one.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/one.yaml
new file mode 100644
index 0000000..041cee0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/one.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(MON_DOWN\)
+tasks:
+- mon_thrash:
+    revive_delay: 20
+    thrash_delay: 1
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml
new file mode 100644
index 0000000..14f41f7
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(MON_DOWN\)
+    conf:
+      mon:
+        paxos min: 10
+        paxos trim min: 10
+tasks:
+- mon_thrash:
+    revive_delay: 90
+    thrash_delay: 1
+    thrash_many: true
diff --git a/src/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml b/src/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml
new file mode 100644
index 0000000..08b1522
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(MON_DOWN\)
+    conf:
+      mon:
+        paxos min: 10
+        paxos trim min: 10
+tasks:
+- mon_thrash:
+    revive_delay: 90
+    thrash_delay: 1
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml
new file mode 100644
index 0000000..c6b00b4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml
@@ -0,0 +1,58 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - slow request
+      - overall HEALTH_
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- exec:
+    client.0:
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
+      - ceph_test_rados_delete_pools_parallel
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml
new file mode 100644
index 0000000..940d3a8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- exec:
+    client.0:
+      - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml
new file mode 100644
index 0000000..3b821bc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml
@@ -0,0 +1,23 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - reached quota
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(CACHE_POOL_NEAR_FULL\)
+      - \(POOL_FULL\)
+      - \(REQUEST_SLOW\)
+      - \(MON_DOWN\)
+      - \(PG_
+      - \(POOL_APP_NOT_ENABLED\)
+      - \(SMALLER_PGP_NUM\)
+    conf:
+      global:
+        debug objecter: 20
+        debug rados: 20
+        debug ms: 1
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rados/test.sh
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml
new file mode 100644
index 0000000..b05eb38
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(PG_
+      - \(MON_DOWN\)
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - mon/pool_ops.sh
+        - mon/crush_ops.sh
+        - mon/osd.sh
+        - mon/caps.sh
+
diff --git a/src/ceph/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml b/src/ceph/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 0000000..aa82d97
--- /dev/null
+++ b/src/ceph/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
diff --git a/src/ceph/qa/suites/rados/multimon/% b/src/ceph/qa/suites/rados/multimon/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/%
diff --git a/src/ceph/qa/suites/rados/multimon/clusters/21.yaml b/src/ceph/qa/suites/rados/multimon/clusters/21.yaml
new file mode 100644
index 0000000..b6d04a0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/clusters/21.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s, osd.0]
+- [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t, mgr.x]
+- [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u, osd.1]
+openstack:
+- volumes: # attached to each instance
+    count: 1
+    size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/multimon/clusters/3.yaml b/src/ceph/qa/suites/rados/multimon/clusters/3.yaml
new file mode 100644
index 0000000..7b8c626
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/clusters/3.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0]
+- [mon.b, mgr.x, osd.1]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/multimon/clusters/6.yaml b/src/ceph/qa/suites/rados/multimon/clusters/6.yaml
new file mode 100644
index 0000000..715cbe8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/clusters/6.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, mon.e, mgr.x, osd.0]
+- [mon.b, mon.d, mon.f, mgr.y, osd.1]
+openstack:
+- volumes: # attached to each instance
+    count: 1
+    size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/multimon/clusters/9.yaml b/src/ceph/qa/suites/rados/multimon/clusters/9.yaml
new file mode 100644
index 0000000..d029c4c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/clusters/9.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.d, mon.g, osd.0]
+- [mon.b, mon.e, mon.h, mgr.x]
+- [mon.c, mon.f, mon.i, osd.1]
+openstack:
+- volumes: # attached to each instance
+    count: 1
+    size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/multimon/mon_kv_backend b/src/ceph/qa/suites/rados/multimon/mon_kv_backend
new file mode 120000
index 0000000..6f5a7e6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/mon_kv_backend
@@ -0,0 +1 @@
+../../../mon_kv_backend \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/multimon/msgr b/src/ceph/qa/suites/rados/multimon/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml b/src/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/rados/multimon/objectstore b/src/ceph/qa/suites/rados/multimon/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/multimon/rados.yaml b/src/ceph/qa/suites/rados/multimon/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml b/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml
new file mode 100644
index 0000000..ec761e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - slow request
+      - .*clock.*skew.*
+      - clocks not synchronized
+      - overall HEALTH_
+      - \(MON_CLOCK_SKEW\)
+- mon_clock_skew_check:
+    expect-skew: false
diff --git a/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
new file mode 100644
index 0000000..a2d3d0b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
@@ -0,0 +1,18 @@
+tasks:
+- install:
+- exec:
+    mon.b:
+      - date -u -s @$(expr $(date -u +%s) + 10)
+- ceph:
+    wait-for-healthy: false
+    log-whitelist:
+      - slow request
+      - .*clock.*skew.*
+      - clocks not synchronized
+      - overall HEALTH_
+      - \(MON_CLOCK_SKEW\)
+      - \(MGR_DOWN\)
+      - \(PG_
+      - No standby daemons available
+- mon_clock_skew_check:
+    expect-skew: true
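The skew is manufactured rather than simulated: before the cluster starts, an exec task pushes mon.b's wall clock ten seconds ahead, so `wait-for-healthy: false` is needed (the cluster comes up warning about skew) and the final check asserts the warning really appears. The key lines, annotated:

  - exec:
      mon.b:
        - date -u -s @$(expr $(date -u +%s) + 10)   # set mon.b's clock 10 s ahead
  - ceph:
      wait-for-healthy: false    # the skew warning is expected, not fatal
  - mon_clock_skew_check:
      expect-skew: true          # fail the run unless the skew is detected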
diff --git a/src/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml b/src/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml
new file mode 100644
index 0000000..137f58d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(MON_DOWN\)
+- mon_recovery:
diff --git a/src/ceph/qa/suites/rados/objectstore/alloc-hint.yaml b/src/ceph/qa/suites/rados/objectstore/alloc-hint.yaml
new file mode 100644
index 0000000..d40143c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/alloc-hint.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        filestore xfs extsize: true
+
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/test_alloc_hint.sh
diff --git a/src/ceph/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml b/src/ceph/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml
new file mode 100644
index 0000000..f3163c9
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml
@@ -0,0 +1,23 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 6
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+    log-whitelist:
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(TOO_FEW_PGS\)
+      - \(POOL_APP_NOT_ENABLED\)
+- ceph_objectstore_tool:
+    objects: 20
diff --git a/src/ceph/qa/suites/rados/objectstore/filejournal.yaml b/src/ceph/qa/suites/rados/objectstore/filejournal.yaml
new file mode 100644
index 0000000..b0af800
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/filejournal.yaml
@@ -0,0 +1,13 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- exec:
+    client.0:
+      - ceph_test_filejournal
diff --git a/src/ceph/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml b/src/ceph/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml
new file mode 100644
index 0000000..58b5197
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/filestore-idempotent-aio-journal.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      global:
+        journal aio: true
+- filestore_idempotent:
diff --git a/src/ceph/qa/suites/rados/objectstore/filestore-idempotent.yaml b/src/ceph/qa/suites/rados/objectstore/filestore-idempotent.yaml
new file mode 100644
index 0000000..2d3f3c6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/filestore-idempotent.yaml
@@ -0,0 +1,11 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- filestore_idempotent:
diff --git a/src/ceph/qa/suites/rados/objectstore/fusestore.yaml b/src/ceph/qa/suites/rados/objectstore/fusestore.yaml
new file mode 100644
index 0000000..1c34fca
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/fusestore.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+        - objectstore/test_fuse.sh
+
diff --git a/src/ceph/qa/suites/rados/objectstore/keyvaluedb.yaml b/src/ceph/qa/suites/rados/objectstore/keyvaluedb.yaml
new file mode 100644
index 0000000..efff8d3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/keyvaluedb.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+    client.0:
+      - mkdir $TESTDIR/kvtest && cd $TESTDIR/kvtest && ceph_test_keyvaluedb
+      - rm -rf $TESTDIR/kvtest
diff --git a/src/ceph/qa/suites/rados/objectstore/objectcacher-stress.yaml b/src/ceph/qa/suites/rados/objectstore/objectcacher-stress.yaml
new file mode 100644
index 0000000..e407a39
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/objectcacher-stress.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- workunit:
+    clients:
+      all:
+        - osdc/stress_objectcacher.sh
diff --git a/src/ceph/qa/suites/rados/objectstore/objectstore.yaml b/src/ceph/qa/suites/rados/objectstore/objectstore.yaml
new file mode 100644
index 0000000..b544234
--- /dev/null
+++ b/src/ceph/qa/suites/rados/objectstore/objectstore.yaml
@@ -0,0 +1,12 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+tasks:
+- install:
+- exec:
+    client.0:
+      - mkdir $TESTDIR/ostest && cd $TESTDIR/ostest && ulimit -c 0 && ulimit -Sn 4096 && ceph_test_objectstore --gtest_filter=-*/3
+      - rm -rf $TESTDIR/ostest
diff --git a/src/ceph/qa/suites/rados/rest/mgr-restful.yaml b/src/ceph/qa/suites/rados/rest/mgr-restful.yaml
new file mode 100644
index 0000000..049532e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/rest/mgr-restful.yaml
@@ -0,0 +1,25 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, mds.a, client.a]
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(MGR_DOWN\)
+      - \(PG_
+      - \(OSD_
+      - \(OBJECT_
+- exec:
+    mon.a:
+      - ceph restful create-key admin
+      - ceph restful create-self-signed-cert
+      - ceph restful restart
+- workunit:
+    clients:
+      client.a:
+        - rest/test-restful.sh
+- exec:
+    mon.a:
+      - ceph restful delete-key admin
+      - ceph restful list-keys | jq ".admin" | grep null
+
diff --git a/src/ceph/qa/suites/rados/rest/rest_test.yaml b/src/ceph/qa/suites/rados/rest/rest_test.yaml
new file mode 100644
index 0000000..0fdb9dc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/rest/rest_test.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+  - mgr.x
+  - mds.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+  - client.0
+
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
+
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    log-whitelist:
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(POOL_
+      - \(CACHE_POOL_
+      - \(SMALLER_PGP_NUM\)
+      - \(OBJECT_
+      - \(REQUEST_SLOW\)
+      - \(SLOW_OPS\)
+      - \(TOO_FEW_PGS\)
+      - but it is still running
+    conf:
+      client.rest0:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+- rest-api: [client.0]
+- workunit:
+    clients:
+      client.0:
+        - rest/test.py
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/% b/src/ceph/qa/suites/rados/singleton-bluestore/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/%
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml
new file mode 100644
index 0000000..77eed22
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml
@@ -0,0 +1,33 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - but it is still running
+      - had wrong client addr
+      - had wrong cluster addr
+      - must scrub before tier agent can activate
+      - failsafe engaged, dropping updates
+      - failsafe disengaged, no longer dropping updates
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(SMALLER_PG_NUM\)
+- workunit:
+    clients:
+      all:
+        - cephtool
+        - mon/pool_ops.sh
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/msgr b/src/ceph/qa/suites/rados/singleton-bluestore/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp.yaml
new file mode 120000
index 0000000..b23b2a7
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp.yaml
@@ -0,0 +1 @@
+../../../../objectstore/bluestore-comp.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore.yaml
new file mode 120000
index 0000000..bd7d7e0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton-bluestore/rados.yaml b/src/ceph/qa/suites/rados/singleton-bluestore/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-bluestore/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/% b/src/ceph/qa/suites/rados/singleton-nomsgr/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/%
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
new file mode 100644
index 0000000..bbf330b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+      - MDS in read-only mode
+      - force file system read-only
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_FULL\)
+      - \(MDS_READ_ONLY\)
+      - \(POOL_FULL\)
+tasks:
+- install:
+- ceph:
+- rgw:
+    - client.0
+- exec:
+    client.0:
+      - ceph_test_admin_socket_output --all
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
new file mode 100644
index 0000000..5864803
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -0,0 +1,48 @@
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+        debug client: 20
+        debug mds: 20
+        debug ms: 1
+- exec:
+    client.0:
+      - ceph osd pool create data_cache 4
+      - ceph osd tier add cephfs_data data_cache
+      - ceph osd tier cache-mode data_cache writeback
+      - ceph osd tier set-overlay cephfs_data data_cache
+      - ceph osd pool set data_cache hit_set_type bloom
+      - ceph osd pool set data_cache hit_set_count 8
+      - ceph osd pool set data_cache hit_set_period 3600
+      - ceph osd pool set data_cache min_read_recency_for_promote 0
+- ceph-fuse:
+- exec:
+    client.0:
+      - sudo chmod 777 $TESTDIR/mnt.0/
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - truncate --size 0 $TESTDIR/mnt.0/foo
+      - ls -al $TESTDIR/mnt.0/foo
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - cp $TESTDIR/mnt.0/foo /tmp/foo
+      - sync
+      - rados -p data_cache ls -
+      - sleep 10
+      - rados -p data_cache ls -
+      - rados -p data_cache cache-flush-evict-all
+      - rados -p data_cache ls -
+      - sleep 1
+- exec:
+    client.1:
+      - hexdump -C /tmp/foo | head
+      - hexdump -C $TESTDIR/mnt.1/foo | head
+      - cmp $TESTDIR/mnt.1/foo /tmp/foo
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
new file mode 100644
index 0000000..8634362
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+        - post-file.sh
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
new file mode 100644
index 0000000..e766bdc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - ceph osd pool create base-pool 4
+      - ceph osd pool application enable base-pool rados
+      - ceph osd pool create cache-pool 4
+      - ceph osd tier add base-pool cache-pool
+      - ceph osd tier cache-mode cache-pool writeback
+      - ceph osd tier set-overlay base-pool cache-pool
+      - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
+      - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
+      - rbd snap create base-pool/bar@snap
+      - rados -p base-pool cache-flush-evict-all
+      - rbd export base-pool/bar $TESTDIR/bar
+      - rbd export base-pool/bar@snap $TESTDIR/snap
+      - cmp $TESTDIR/foo $TESTDIR/bar
+      - cmp $TESTDIR/foo $TESTDIR/snap
+      - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
new file mode 100644
index 0000000..b245866
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -0,0 +1,34 @@
+# verify #13098 fix
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+      - is full
+      - overall HEALTH_
+      - \(POOL_FULL\)
+      - \(POOL_NEAR_FULL\)
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(CACHE_POOL_NEAR_FULL\)
+tasks:
+- install:
+- ceph:
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - ceph osd pool create ec-ca 1 1
+      - ceph osd pool create ec 1 1 erasure default
+      - ceph osd pool application enable ec rados
+      - ceph osd tier add ec ec-ca
+      - ceph osd tier cache-mode ec-ca readproxy
+      - ceph osd tier set-overlay ec ec-ca
+      - ceph osd pool set ec-ca hit_set_type bloom
+      - ceph osd pool set-quota ec-ca max_bytes 20480000
+      - ceph osd pool set-quota ec max_bytes 20480000
+      - ceph osd pool set ec-ca target_max_bytes 20480000
+      - timeout 30 rados -p ec-ca bench 30 write || true
+      - ceph osd pool set-quota ec-ca max_bytes 0
+      - ceph osd pool set-quota ec max_bytes 0
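The sequence exercises pool-full handling on a cache tier over erasure coding (the fix referenced in the header comment): both pools get a 20480000-byte (~19.5 MiB) quota plus a matching `target_max_bytes`, a bounded bench write is allowed to run into the full condition, and the quotas are lifted afterwards. Condensed and annotated:

  - exec:
      client.0:
        - ceph osd pool set-quota ec-ca max_bytes 20480000   # cap the cache pool
        - ceph osd pool set-quota ec max_bytes 20480000      # and the EC base pool
        - timeout 30 rados -p ec-ca bench 30 write || true   # fill it; failure is expected
        - ceph osd pool set-quota ec-ca max_bytes 0          # 0 removes the quota
        - ceph osd pool set-quota ec max_bytes 0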
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
new file mode 100644
index 0000000..a28582f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0]
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+# we may land on ext4
+        osd max object name len: 400
+        osd max object namespace len: 64
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+- workunit:
+    clients:
+      all:
+        - rados/test_health_warnings.sh
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
new file mode 100644
index 0000000..98b5095
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+    client.0:
+      - ceph_test_async_driver
+      - ceph_test_msgr
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 15000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 0
+      size: 1 # GB
+overrides:
+  ceph:
+    conf:
+      client:
+        debug ms: 20
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
new file mode 100644
index 0000000..f480dbb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+- - osd.3
+  - osd.4
+  - osd.5
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(PG_
+      - \(OSD_
+      - \(OBJECT_
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd min pg log entries: 25
+        osd max pg log entries: 100
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - sudo ceph osd pool create foo 64
+      - sudo ceph osd pool application enable foo rados
+      - rados -p foo bench 60 write -b 1024 --no-cleanup
+      - sudo ceph osd pool set foo size 3
+      - sudo ceph osd out 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+      - sudo ceph osd in 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+      - sudo ceph osd pool set foo size 2
+- sleep:
+    duration: 300
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
new file mode 100644
index 0000000..d49a597
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/test_pool_access.sh
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
new file mode 100644
index 0000000..b0d5de3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
@@ -0,0 +1,29 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+overrides:
+  install:
+    ceph:
+      flavor: notcmalloc
+      debuginfo: true
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(PG_
+    conf:
+      global:
+        osd heartbeat grace: 40
+        debug deliberately leak memory: true
+        osd max object name len: 460
+        osd max object namespace len: 64
+      mon:
+        mon osd crush smoke test: false
+    valgrind:
+      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+      osd: [--tool=memcheck]
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    expect_valgrind_errors: true
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/rados.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/% b/src/ceph/qa/suites/rados/singleton/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/%
diff --git a/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml b/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml
new file mode 100644
index 0000000..13af813
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - client.a
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+- admin_socket:
+    osd.0:
+      version:
+      git_version:
+      help:
+      config show:
+      config help:
+      config set filestore_dump_file /tmp/foo:
+      perf dump:
+      perf schema:
+      get_heap_property tcmalloc.max_total_thread_cache_byte:
+      set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864:
+      set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432:
diff --git a/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml b/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml
new file mode 100644
index 0000000..604a9e4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(OBJECT_DEGRADED\)
+      - \(POOL_APP_NOT_ENABLED\)
+    conf:
+      osd:
+        debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors:
diff --git a/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml b/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml
new file mode 100644
index 0000000..e2f0245
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+ conf:
+ osd:
+ debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors2:
diff --git a/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml b/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml
new file mode 100644
index 0000000..59085ff
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml
@@ -0,0 +1,19 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- dump_stuck:
diff --git a/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
new file mode 100644
index 0000000..68644c8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
@@ -0,0 +1,24 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+- ec_lost_unfound:
diff --git a/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml b/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
new file mode 100644
index 0000000..e8201ee
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - erasure-code/encode-decode-non-regression.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
new file mode 100644
index 0000000..bcaef78
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
@@ -0,0 +1,23 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+- rep_lost_unfound_delete:
diff --git a/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml b/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml
new file mode 100644
index 0000000..a4a309d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml
@@ -0,0 +1,23 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+- lost_unfound:
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
new file mode 100644
index 0000000..accdd96
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+        mon max pg per osd: 2
+        osd max pg per osd hard ratio: 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: True
+ pg_num: 2
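
The from-mon variant checks the limit at pool-creation time, and the
numbers are chosen to sit exactly at the cap. A sketch of the arithmetic
the mon is expected to perform:

    # pg_instances = pg_num * pool size     = 2 * 2 = 4
    # per_osd      = pg_instances / in_osds = 4 / 2 = 2
    # cap          = mon max pg per osd     = 2      -> exactly at the limit
    # any further pool (or pg_num increase) would push per_osd above the
    # cap, and the mon is expected to refuse the request
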
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
new file mode 100644
index 0000000..1c48ada
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+        mon max pg per osd: 1
+        osd max pg per osd hard ratio: 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: True
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
new file mode 100644
index 0000000..0cf37fd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+        mon max pg per osd: 1
+        osd max pg per osd hard ratio: 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: False
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml
new file mode 100644
index 0000000..318af5e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/auth_caps.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml
new file mode 100644
index 0000000..7bb4f65
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/test_mon_config_key.py
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml
new file mode 100644
index 0000000..815c518
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+        osd pool default min size: 1
+ osd:
+ debug monc: 1
+ debug ms: 1
+ log-whitelist:
+    - overall HEALTH_
+ - Manager daemon
+ - \(MGR_DOWN\)
+- mon_seesaw:
+- ceph_manager.create_pool:
+ kwargs:
+ pool_name: test
+ pg_num: 1
+- ceph_manager.wait_for_clean:
+ kwargs:
+ timeout: 60
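
The dotted task names here are teuthology shorthand: 'ceph_manager.create_pool'
appears to dispatch to the corresponding helper on the run's CephManager
object, passing the 'kwargs' mapping through as keyword arguments, so the
seesawed monitor cluster is exercised with real mon traffic afterwards. A
sketch of the convention:

    - ceph_manager.create_pool:     # ~ CephManager.create_pool(**kwargs)
        kwargs:
          pool_name: test
          pg_num: 1
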
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml
new file mode 100644
index 0000000..5b37407
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+- osd_backfill:
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
new file mode 100644
index 0000000..ed5b216
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery.test_incomplete_pgs:
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml
new file mode 100644
index 0000000..634e884
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(SLOW_OPS\)
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery:
diff --git a/src/ceph/qa/suites/rados/singleton/all/peer.yaml b/src/ceph/qa/suites/rados/singleton/all/peer.yaml
new file mode 100644
index 0000000..645034a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/peer.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+        osd pool default min size: 1
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- peer:
diff --git a/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
new file mode 100644
index 0000000..10f18e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+- exec:
+ client.0:
+ - sudo ceph osd pool create foo 128 128
+ - sudo ceph osd pool application enable foo rados
+ - sleep 5
+ - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
+ - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
+- ceph.wait_for_failure: [osd.0]
+- exec:
+ client.0:
+ - sudo ceph osd down 0
+- ceph.restart: [osd.0]
+- ceph.healthy:
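
The sequence above is easier to follow as a timeline; paraphrasing the
intent of each step:

    # 1. create pool 'foo' (128 PGs) and begin deleting it
    # 2. --osd-inject-failure-on-pg-removal makes osd.0 assert partway
    #    through tearing down the pool's PGs
    # 3. ceph.wait_for_failure confirms the daemon actually died
    # 4. after 'ceph osd down 0' and a restart, PG removal must resume
    #    cleanly and the cluster must reach health OK (ceph.healthy)
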
diff --git a/src/ceph/qa/suites/rados/singleton/all/radostool.yaml b/src/ceph/qa/suites/rados/singleton/all/radostool.yaml
new file mode 100644
index 0000000..1827795
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/radostool.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - had wrong cluster addr
+ - reached quota
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_rados_tool.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml b/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml
new file mode 100644
index 0000000..a2ad997
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml
@@ -0,0 +1,41 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+    - \(POOL_APP_NOT_ENABLED\)
+- full_sequential:
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33
+ - sudo ceph osd pool create test 16 16
+ - sudo ceph osd pool set test size 3
+ - sudo ceph pg dump pgs --format=json-pretty
+ - radosbench:
+ clients: [client.0]
+ time: 360
+ type: rand
+ objectsize: 1048576
+ pool: test
+ create_pool: false
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0
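
The two exec blocks bracket the workload with an error-injection window;
paraphrasing the numbers:

    # filestore/bluestore_debug_random_read_err = 0.33
    #   -> roughly 1 in 3 reads served by osd.1 returns EIO
    # the pool is set to size 3, so the read path is expected to recover
    # from the other two replicas while radosbench runs for 360 s
    # both probabilities are reset to 0.0 before the job ends
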
diff --git a/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml
new file mode 100644
index 0000000..78d77c8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - no reply from
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 30
+ - rebuild_mondb:
+ - radosbench:
+ clients: [client.0]
+ time: 30
diff --git a/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml b/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
new file mode 100644
index 0000000..7507bf6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
@@ -0,0 +1,51 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 20 # GB
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+ osd recovery sleep: .1
+ osd min pg log entries: 100
+ osd max pg log entries: 1000
+ log-whitelist:
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(PG_
+    - overall HEALTH_
+- exec:
+ osd.0:
+ - ceph osd pool create foo 128
+ - ceph osd pool application enable foo foo
+ - rados -p foo bench 30 write -b 4096 --no-cleanup
+ - ceph osd out 0
+ - sleep 5
+ - ceph osd set noup
+- ceph.restart:
+ daemons: [osd.1]
+ wait-for-up: false
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - rados -p foo bench 3 write -b 4096 --no-cleanup
+ - ceph osd unset noup
+ - sleep 10
+ - ceph tell osd.* config set osd_recovery_sleep 0
+ - ceph tell osd.* config set osd_recovery_max_active 20
+- ceph.healthy:
+- exec:
+ osd.0:
+ - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log
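
This job manufactures a recovery backlog and then proves preemption
actually fired; a paraphrase of the choreography:

    # 1. write 30 s of 4 KB objects, mark osd.0 out, set noup
    # 2. restart osd.1 while noup holds it down, so its PG logs fall
    #    behind (min/max pg log entries 100/1000 force log-based recovery)
    # 3. unset noup, then crank osd_recovery_sleep to 0 and
    #    osd_recovery_max_active to 20 so recoveries compete
    # 4. the final egrep over the OSD logs fails the job unless at least
    #    one 'defer backfill' / 'defer recovery' preemption was logged
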
diff --git a/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml b/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml
new file mode 100644
index 0000000..f3c8575
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ conf:
+ osd:
+ debug osd: 5
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - \(OBJECT_
+tasks:
+- install:
+- ceph:
+- reg11184:
diff --git a/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
new file mode 100644
index 0000000..3eddce8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
@@ -0,0 +1,17 @@
+roles:
+- [mon.a, mgr.x]
+- [osd.0, osd.1, osd.2, client.0]
+
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+- resolve_stuck_peering:
+
diff --git a/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml b/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml
new file mode 100644
index 0000000..d988d1a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml
@@ -0,0 +1,35 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - mds.a
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ conf:
+ client.rest0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+- rest-api: [client.0]
+- workunit:
+ clients:
+ all:
+ - rest/test.py
diff --git a/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
new file mode 100644
index 0000000..42c8ae3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ fs: ext4
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_envlibrados_for_rocksdb.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml
new file mode 100644
index 0000000..cac3cb3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ mon:
+ osd default pool size: 3
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(REQUEST_SLOW\)
+    - \(PG_
+    - \(OBJECT_MISPLACED\)
+    - \(OSD_
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+ random_eio: .33
+ min_live: 5
+ min_in: 5
+- radosbench:
+ clients: [client.0]
+ time: 720
+ type: rand
+ objectsize: 1048576
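
The thrashosds knobs here combine ordinary OSD thrashing with read-error
injection; my reading of the parameters, as comments:

    - thrashosds:
        op_delay: 30         # seconds between thrash operations
        clean_interval: 120  # periodically let the cluster go fully clean
        chance_down: .5      # probability a thrash op takes an OSD down
        random_eio: .33      # inject random EIO on a surviving OSD
        min_live: 5          # never thrash below 5 live ...
        min_in: 5            # ... or 5 'in' OSDs (of the 6 in 'roles')
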
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
new file mode 100644
index 0000000..37be8df
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix-small.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
new file mode 120000
index 0000000..0b1d7b0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
new file mode 100644
index 0000000..82c9b2d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
@@ -0,0 +1,70 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+    - \(CACHE_POOL_
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 500
+- background_exec:
+ mon.a:
+ - while true
+ - do sleep 30
+ - echo proxy
+ - sudo ceph osd tier cache-mode cache proxy
+ - sleep 10
+ - sudo ceph osd pool set cache cache_target_full_ratio .001
+ - echo cache-try-flush-evict-all
+ - rados -p cache cache-try-flush-evict-all
+ - sleep 5
+ - echo cache-flush-evict-all
+ - rados -p cache cache-flush-evict-all
+ - sleep 5
+ - echo remove overlay
+ - sudo ceph osd tier remove-overlay base
+ - sleep 20
+ - echo add writeback overlay
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd pool set cache cache_target_full_ratio .8
+ - sudo ceph osd tier set-overlay base cache
+ - sleep 30
+ - sudo ceph osd tier cache-mode cache readproxy
+ - done
+- rados:
+ clients: [client.0]
+ pools: [base]
+ max_seconds: 600
+ ops: 400000
+ objects: 10000
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
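
The background_exec loop flips the cache tier through every mode while the
foreground 'rados' workload keeps hammering the base pool; the cycle it
repeats roughly every 30 s:

    # writeback -> proxy -> shrink cache_target_full_ratio to .001
    # -> cache-try-flush-evict-all / cache-flush-evict-all
    # -> remove-overlay (no cache tier at all)
    # -> writeback again with the ratio restored -> readproxy -> repeat
    # the client workload must survive every transition
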
diff --git a/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
new file mode 100644
index 0000000..48ef78f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+        osd pool default min size: 1
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+- watch_notify_same_primary:
+ clients: [client.0]
diff --git a/src/ceph/qa/suites/rados/singleton/msgr b/src/ceph/qa/suites/rados/singleton/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml b/src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml
new file mode 100644
index 0000000..3b495f9
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
+ mgr:
+ debug monc: 10
diff --git a/src/ceph/qa/suites/rados/singleton/objectstore b/src/ceph/qa/suites/rados/singleton/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/rados.yaml b/src/ceph/qa/suites/rados/singleton/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/standalone/crush.yaml b/src/ceph/qa/suites/rados/standalone/crush.yaml
new file mode 100644
index 0000000..a62a0dd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/crush.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - crush
diff --git a/src/ceph/qa/suites/rados/standalone/erasure-code.yaml b/src/ceph/qa/suites/rados/standalone/erasure-code.yaml
new file mode 100644
index 0000000..7d79753
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/erasure-code.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - erasure-code
diff --git a/src/ceph/qa/suites/rados/standalone/misc.yaml b/src/ceph/qa/suites/rados/standalone/misc.yaml
new file mode 100644
index 0000000..4aa9ee2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/misc.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - misc
diff --git a/src/ceph/qa/suites/rados/standalone/mon.yaml b/src/ceph/qa/suites/rados/standalone/mon.yaml
new file mode 100644
index 0000000..c19606f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/mon.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - mon
diff --git a/src/ceph/qa/suites/rados/standalone/osd.yaml b/src/ceph/qa/suites/rados/standalone/osd.yaml
new file mode 100644
index 0000000..e28b522
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/osd.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - osd
diff --git a/src/ceph/qa/suites/rados/standalone/scrub.yaml b/src/ceph/qa/suites/rados/standalone/scrub.yaml
new file mode 100644
index 0000000..7f6fad4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/standalone/scrub.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - scrub
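
The six standalone fragments are identical apart from the final path
component: 'basedir: qa/standalone' redirects the workunit task away from
its default of qa/workunits, and naming a directory appears to run every
script beneath it. The shared shape, with the subdirectory left as a
placeholder:

    tasks:
    - install:
    - workunit:
        basedir: qa/standalone   # instead of the default qa/workunits
        clients:
          all:
            - <subdir>   # one of: crush, erasure-code, misc, mon, osd, scrub
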
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/% b/src/ceph/qa/suites/rados/thrash-erasure-code-big/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/+ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/+
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml
new file mode 100644
index 0000000..1c45ee3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml
@@ -0,0 +1,4 @@
+roles:
+- [osd.0, osd.1, osd.2, osd.3, client.0, mon.a]
+- [osd.4, osd.5, osd.6, osd.7, mon.b, mgr.x]
+- [osd.8, osd.9, osd.10, osd.11, mon.c]
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml
new file mode 100644
index 0000000..e559d91
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code-big/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/leveldb.yaml
new file mode 120000
index 0000000..264207f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/leveldb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code-big/msgr-failures
new file mode 120000
index 0000000..03689aa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/objectstore b/src/ceph/qa/suites/rados/thrash-erasure-code-big/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml
new file mode 100644
index 0000000..f2ccf7f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml
@@ -0,0 +1,18 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - slow request
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 8
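
min_in is sized against the erasure-code geometry of this suite's
workloads; a sketch of the reasoning, assuming the k=4,m=2 profiles
symlinked below:

    # a k=4,m=2 PG needs k+m = 6 distinct OSDs just to be complete;
    # with 12 OSDs in the cluster, min_in: 8 leaves the thrasher room to
    # down/out several OSDs while every PG can still be placed and
    # recovered
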
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml
new file mode 100644
index 0000000..afc43b8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ mon:
+ mon osd pool ec fast read: 1
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml
new file mode 100644
index 0000000..3095cd8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml
@@ -0,0 +1,20 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+ conf:
+ mon:
+ mon min osdmap epochs: 2
+ osd:
+ osd map cache size: 1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+tasks:
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_test_map_discontinuity: 0.5
+ min_in: 8
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..572832d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml
new file mode 100644
index 0000000..148d9fe
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml
@@ -0,0 +1,15 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml
new file mode 120000
index 0000000..a4d836c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
new file mode 120000
index 0000000..86a2d3c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/% b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml
new file mode 100644
index 0000000..c2409f5
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml
@@ -0,0 +1 @@
+arch: x86_64
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/clusters b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/clusters
new file mode 120000
index 0000000..7aac47b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/clusters
@@ -0,0 +1 @@
+../thrash/clusters \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/leveldb.yaml
new file mode 120000
index 0000000..264207f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/leveldb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/msgr-failures
new file mode 120000
index 0000000..03689aa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/objectstore b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/supported b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/supported
new file mode 120000
index 0000000..c5d5935
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/supported
@@ -0,0 +1 @@
+../../../distros/supported \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashers b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashers
new file mode 120000
index 0000000..f461dad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashers
@@ -0,0 +1 @@
+../thrash/thrashers \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml
new file mode 120000
index 0000000..9d32cd8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/% b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/bluestore.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/bluestore.yaml
new file mode 120000
index 0000000..1249ffd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/bluestore.yaml
@@ -0,0 +1 @@
+../thrash-erasure-code/objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/clusters b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/clusters
new file mode 120000
index 0000000..646ea04
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/clusters
@@ -0,0 +1 @@
+../thrash-erasure-code/clusters \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/fast b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/fast
new file mode 120000
index 0000000..6170b30
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/fast
@@ -0,0 +1 @@
+../thrash-erasure-code/fast \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/leveldb.yaml
new file mode 120000
index 0000000..531ecf3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/leveldb.yaml
@@ -0,0 +1 @@
+../thrash-erasure-code/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures
new file mode 120000
index 0000000..70c9ca1
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures
@@ -0,0 +1 @@
+../thrash-erasure-code/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml
new file mode 120000
index 0000000..017df6f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml
@@ -0,0 +1 @@
+../thrash-erasure-code/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashers b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashers
new file mode 120000
index 0000000..40ff82c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashers
@@ -0,0 +1 @@
+../thrash-erasure-code/thrashers \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml
new file mode 100644
index 0000000..d2ad70a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml
@@ -0,0 +1,23 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ pool_snaps: true
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
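
Overwrites on erasure-coded pools were still gated in luminous, which is
why every workload in this directory carries the same preamble; the pieces
that make it work, annotated:

    overrides:
      ceph:
        conf:
          global:
            # luminous gate for EC overwrite support
            enable experimental unrecoverable data corrupting features: '*'
      thrashosds:
        # ceph-objectstore-tool tests are skipped for these pools
        disable_objectstore_tool_tests: true
    # erasure_code_use_overwrites: true in the rados task then creates the
    # pool with overwrites allowed; the bluestore.yaml symlink at the suite
    # root pins the objectstore, since filestore never supported this
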
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml
new file mode 100644
index 0000000..b3f831b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml
@@ -0,0 +1,29 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ fast_read: true
+ op_weights:
+ read: 100
+ write: 100
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml
new file mode 100644
index 0000000..9baacef
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml
@@ -0,0 +1,28 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ op_weights:
+ read: 100
+ write: 100
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml
new file mode 100644
index 0000000..b7c5381
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml
@@ -0,0 +1,22 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/% b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/+ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/+
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml
new file mode 120000
index 0000000..961d9b5
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-4.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml
new file mode 100644
index 0000000..e559d91
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/leveldb.yaml
new file mode 120000
index 0000000..264207f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/leveldb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/msgr-failures
new file mode 120000
index 0000000..03689aa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/objectstore b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml
new file mode 100644
index 0000000..c6f02b3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml
@@ -0,0 +1,18 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - slow request
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
new file mode 120000
index 0000000..476d7cd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/% b/src/ceph/qa/suites/rados/thrash-erasure-code/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/%
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/ceph.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/clusters b/src/ceph/qa/suites/rados/thrash-erasure-code/clusters
new file mode 120000
index 0000000..7aac47b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/clusters
@@ -0,0 +1 @@
+../thrash/clusters \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/d-require-luminous b/src/ceph/qa/suites/rados/thrash-erasure-code/d-require-luminous
new file mode 120000
index 0000000..737aee8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/d-require-luminous
@@ -0,0 +1 @@
+../thrash/d-require-luminous/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/fast/fast.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/fast/fast.yaml
new file mode 100644
index 0000000..9c3f726
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/fast/fast.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ mon osd pool ec fast read: true
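
Fast read changes the EC read path: the primary fans the read out to all
shards and replies as soon as the first k arrive, trading extra network
traffic for lower tail latency. This fragment flips the default for newly
created EC pools cluster-wide:

    overrides:
      ceph:
        conf:
          global:
            mon osd pool ec fast read: true  # new EC pools default to fast_read
    # per-pool equivalent (for illustration):
    #   ceph osd pool set <pool> fast_read 1
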
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/fast/normal.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/fast/normal.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/fast/normal.yaml
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/leveldb.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/leveldb.yaml
new file mode 120000
index 0000000..264207f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/leveldb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/leveldb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/msgr-failures b/src/ceph/qa/suites/rados/thrash-erasure-code/msgr-failures
new file mode 120000
index 0000000..03689aa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/objectstore b/src/ceph/qa/suites/rados/thrash-erasure-code/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/rados.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml
new file mode 100644
index 0000000..caa467b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml
new file mode 100644
index 0000000..471e4af
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ mon:
+ mon osd pool ec fast read: 1
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..12c11fa
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml
new file mode 100644
index 0000000..2bbe5e5
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 4
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml
new file mode 120000
index 0000000..f11eddb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 120000
index 0000000..b1407ae
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1 @@
+../../../../erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml
new file mode 100644
index 0000000..3c2ff7a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml
@@ -0,0 +1,27 @@
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
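+# full_sequential runs the five 150s benches back to back, keeping ~12.5 minutes of write load on fresh EC pools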
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml
new file mode 100644
index 0000000..e732ec6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml
@@ -0,0 +1,21 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ fast_read: true
+ op_weights:
+ read: 100
+ write: 0
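+    # no in-place writes: EC pools without overwrite support can only append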
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml
new file mode 100644
index 0000000..a8ac397
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml
@@ -0,0 +1,20 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/% b/src/ceph/qa/suites/rados/thrash-luminous/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/%
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/0-size-min-size-overrides b/src/ceph/qa/suites/rados/thrash-luminous/0-size-min-size-overrides
new file mode 120000
index 0000000..ce15740
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/0-size-min-size-overrides
@@ -0,0 +1 @@
+../thrash/0-size-min-size-overrides/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/1-pg-log-overrides b/src/ceph/qa/suites/rados/thrash-luminous/1-pg-log-overrides
new file mode 120000
index 0000000..d68d36c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/1-pg-log-overrides
@@ -0,0 +1 @@
+../thrash/1-pg-log-overrides/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/backoff b/src/ceph/qa/suites/rados/thrash-luminous/backoff
new file mode 120000
index 0000000..dc7814b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/backoff
@@ -0,0 +1 @@
+../thrash/backoff/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/ceph.yaml b/src/ceph/qa/suites/rados/thrash-luminous/ceph.yaml
new file mode 120000
index 0000000..a2fd139
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/clusters b/src/ceph/qa/suites/rados/thrash-luminous/clusters
new file mode 120000
index 0000000..729c96f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/clusters
@@ -0,0 +1 @@
+../thrash/clusters/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/msgr b/src/ceph/qa/suites/rados/thrash-luminous/msgr
new file mode 120000
index 0000000..8113e02
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/msgr
@@ -0,0 +1 @@
+../thrash/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/objectstore b/src/ceph/qa/suites/rados/thrash-luminous/objectstore
new file mode 120000
index 0000000..38ae00c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/objectstore
@@ -0,0 +1 @@
+../thrash/objectstore/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/rados.yaml b/src/ceph/qa/suites/rados/thrash-luminous/rados.yaml
new file mode 120000
index 0000000..2834d2e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/rados.yaml
@@ -0,0 +1 @@
+../thrash/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/rocksdb.yaml b/src/ceph/qa/suites/rados/thrash-luminous/rocksdb.yaml
new file mode 120000
index 0000000..9aaadff
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/rocksdb.yaml
@@ -0,0 +1 @@
+../thrash/rocksdb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/thrashers b/src/ceph/qa/suites/rados/thrash-luminous/thrashers
new file mode 120000
index 0000000..f461dad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/thrashers
@@ -0,0 +1 @@
+../thrash/thrashers \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash-luminous/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect.yaml b/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect.yaml
new file mode 100644
index 0000000..50bd812
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ set_redirect: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
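+# set_redirect mixes the luminous object-redirect primitives into an otherwise ordinary thrash workload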
diff --git a/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect_set_object.yaml b/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect_set_object.yaml
new file mode 100644
index 0000000..74595e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash-luminous/workloads/redirect_set_object.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ set_redirect: true
+ op_weights:
+ set_redirect: 100
+ copy_from: 100
diff --git a/src/ceph/qa/suites/rados/thrash/% b/src/ceph/qa/suites/rados/thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/%
diff --git a/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml
new file mode 120000
index 0000000..4c817a6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-1-min-size.yaml
@@ -0,0 +1 @@
+../../../../overrides/2-size-1-min-size.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml
new file mode 120000
index 0000000..c429b07
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml
@@ -0,0 +1 @@
+../../../../overrides/2-size-2-min-size.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
new file mode 120000
index 0000000..8d529f0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
@@ -0,0 +1 @@
+../../../../overrides/3-size-2-min-size.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml b/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml
diff --git a/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml b/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml
new file mode 120000
index 0000000..62010f4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml
@@ -0,0 +1 @@
+../../../../overrides/short_pg_log.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/backoff/normal.yaml b/src/ceph/qa/suites/rados/thrash/backoff/normal.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/backoff/normal.yaml
diff --git a/src/ceph/qa/suites/rados/thrash/backoff/peering.yaml b/src/ceph/qa/suites/rados/thrash/backoff/peering.yaml
new file mode 100644
index 0000000..66d0611
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/backoff/peering.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
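+      # osds send backoffs so clients pause instead of queueing ops against pgs that are still peering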
diff --git a/src/ceph/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml b/src/ceph/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml
new file mode 100644
index 0000000..e610990
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
+ osd backoff on degraded: true
diff --git a/src/ceph/qa/suites/rados/thrash/ceph.yaml b/src/ceph/qa/suites/rados/thrash/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/thrash/clusters/+ b/src/ceph/qa/suites/rados/thrash/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/clusters/+
diff --git a/src/ceph/qa/suites/rados/thrash/clusters/fixed-2.yaml b/src/ceph/qa/suites/rados/thrash/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/clusters/openstack.yaml b/src/ceph/qa/suites/rados/thrash/clusters/openstack.yaml
new file mode 100644
index 0000000..b0f3b9b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-end.yaml b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-end.yaml
new file mode 100644
index 0000000..ef998cc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-end.yaml
@@ -0,0 +1,33 @@
+# do not require luminous osds at mkfs time; only set flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets), and verify we are healthy.
+tasks:
+- full_sequential_finally:
+ - exec:
+ mon.a:
+ - ceph osd require-osd-release luminous
+ - ceph osd pool application enable base rados || true
+# make sure osds have latest map
+ - rados -p rbd bench 5 write -b 4096
+ - ceph.healthy:
+ - ceph.osd_scrub_pgs:
+ cluster: ceph
+ - exec:
+ mon.a:
+ - sleep 15
+ - ceph osd dump | grep purged_snapdirs
+ - ceph pg dump -f json-pretty
+ - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
+overrides:
+ ceph:
+ conf:
+ global:
+ mon debug no require luminous: true
+
+# setting luminous triggers peering, which *might* trigger health alerts
+ log-whitelist:
+ - overall HEALTH_
+ - \(PG_AVAILABILITY\)
+ - \(PG_DEGRADED\)
+ thrashosds:
+ chance_thrash_cluster_full: 0
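+    # full-flag thrashing is disabled, presumably so the final bench write and scrub can complete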
diff --git a/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-crush-compat.yaml b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-crush-compat.yaml
new file mode 100644
index 0000000..9eb7143
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-crush-compat.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ mgr:
+ debug osd: 20
+tasks:
+- exec:
+ mon.a:
+ - while ! ceph balancer status ; do sleep 1 ; done
+ - ceph balancer mode crush-compat
+ - ceph balancer on
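+  # the poll loop waits for the mgr balancer module to respond before configuring it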
diff --git a/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-upmap.yaml b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-upmap.yaml
new file mode 100644
index 0000000..a1e0afe
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs-balancer-upmap.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ mgr:
+ debug osd: 20
+tasks:
+- exec:
+ mon.a:
+ - while ! ceph balancer status ; do sleep 1 ; done
+ - ceph balancer mode upmap
+ - ceph balancer on
diff --git a/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs.yaml b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/d-require-luminous/at-mkfs.yaml
diff --git a/src/ceph/qa/suites/rados/thrash/msgr b/src/ceph/qa/suites/rados/thrash/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml b/src/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml
new file mode 100644
index 0000000..77fd730
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms tcp read timeout: 5
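+      # a socket failure is injected roughly once per 2500 messenger ops; the 5s timeout reaps dead sockets quickly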
diff --git a/src/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml
new file mode 100644
index 0000000..477bffe
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ osd:
+ osd heartbeat use min delay socket: true
diff --git a/src/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml b/src/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml
new file mode 100644
index 0000000..a33ba89
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: osd
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
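+      # additionally delay ~0.5% of osd-bound messages by up to 1s to shake out ordering assumptions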
diff --git a/src/ceph/qa/suites/rados/thrash/objectstore b/src/ceph/qa/suites/rados/thrash/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/rados.yaml b/src/ceph/qa/suites/rados/thrash/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/rocksdb.yaml b/src/ceph/qa/suites/rados/thrash/rocksdb.yaml
new file mode 120000
index 0000000..f26e095
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/rocksdb.yaml
@@ -0,0 +1 @@
+../../../mon_kv_backend/rocksdb.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/default.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..9e2b5b1
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/default.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
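+  # pgnum_grow occasionally raises pg_num; pgpnum_fix then brings pgp_num up to match so data actually resplits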
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml
new file mode 100644
index 0000000..8962ff1
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml
@@ -0,0 +1,21 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+ conf:
+ mon:
+ mon min osdmap epochs: 2
+ osd:
+ osd map cache size: 1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd scrub during recovery: false
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 0.25
+ chance_pgpnum_fix: 0.25
+ chance_test_map_discontinuity: 2
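+  # a 1-entry map cache plus injected discontinuities force osds to recover across missed osdmap epochs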
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml
new file mode 100644
index 0000000..91d2173
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml
@@ -0,0 +1,22 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ journal throttle high multiple: 2
+ journal throttle max multiple: 10
+ filestore queue throttle high multiple: 2
+ filestore queue throttle max multiple: 10
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+openstack:
+- volumes:
+ size: 50
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/none.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/none.yaml
diff --git a/src/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml b/src/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml
new file mode 100644
index 0000000..2a8087f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ filestore odsync write: true
+ osd max backfills: 2
+ osd snap trim sleep: .5
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
diff --git a/src/ceph/qa/suites/rados/thrash/thrashosds-health.yaml b/src/ceph/qa/suites/rados/thrash/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml b/src/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml
new file mode 100644
index 0000000..8c9764a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 150
+- admin_socket:
+ client.0:
+ objecter_requests:
+ test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
new file mode 100644
index 0000000..0cef207
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
@@ -0,0 +1,31 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2
+ - sudo ceph osd pool create base 4 4 erasure myprofile
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool set base min_size 2
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 5000
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 10000
+ objects: 6600
+ max_seconds: 1200
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
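+# pushing up to 6600 objects against a 5000-object cap keeps the tiering agent flushing and evicting throughout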
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
new file mode 100644
index 0000000..10d4735
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
@@ -0,0 +1,30 @@
+overrides:
+ ceph:
+ crush_tunables: firefly
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+ - sudo ceph osd pool set cache min_write_recency_for_promote 2
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
new file mode 100644
index 0000000..4349743
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
@@ -0,0 +1,34 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache readproxy
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
new file mode 100644
index 0000000..dc3385c
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
@@ -0,0 +1,39 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
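+  # recency 0 promotes an object into the cache tier on its first read or write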
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+openstack:
+ - machine:
+ ram: 15000 # MB
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml
new file mode 100644
index 0000000..486d6db
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml
@@ -0,0 +1,34 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/cache.yaml b/src/ceph/qa/suites/rados/thrash/workloads/cache.yaml
new file mode 100644
index 0000000..d63018f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/cache.yaml
@@ -0,0 +1,31 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml b/src/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml
new file mode 100644
index 0000000..1f0759d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml
@@ -0,0 +1,19 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd deep scrub update digest min age: 0
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml b/src/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml
new file mode 100644
index 0000000..23c705d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - \(POOL_APP_NOT_ENABLED\)
+ crush_tunables: hammer
+ conf:
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml b/src/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml
new file mode 100644
index 0000000..1b25004
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml
@@ -0,0 +1,33 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml b/src/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml
new file mode 100644
index 0000000..f5a18ae
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml
@@ -0,0 +1,24 @@
+overrides:
+ ceph:
+ crush_tunables: jewel
+ conf:
+ mon:
+ mon osd initial require min compat client: jewel
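+      # presumably paired with the jewel crush tunables above to keep older clients connectable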
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml b/src/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 0000000..aa82d97
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml b/src/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml
new file mode 100644
index 0000000..606dcae
--- /dev/null
+++ b/src/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml
@@ -0,0 +1,8 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_fadvise_dontneed: true
+ op_weights:
+ write: 100
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/% b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/%
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/+ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/+
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/openstack.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/start.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/start.yaml
new file mode 100644
index 0000000..2872225
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/start.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ fs: xfs
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - mgr.y
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..31ca3e5
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: install ceph/jewel latest
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
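+  # none of these packages exist in jewel, so they must be excluded for the install to resolve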
+- print: "**** done install jewel"
+- ceph:
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+ log-whitelist:
+ - required past_interval bounds are empty
+- print: "**** done ceph"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/firsthalf.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..c244916
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ install upgrade ceph/-x on one node only
+ 1st half
+  restart: mon.a/b/c and osd.0,1,2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: false
+- ceph.restart:
+ daemons: [osd.0, osd.1, osd.2]
+ wait-for-healthy: false
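+  # the cluster is mixed-version mid-upgrade, so health is not expected yet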
+- print: "**** done ceph.restart 1st half"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/default.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/default.yaml
new file mode 100644
index 0000000..ecf3d1b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - split_tasks
+split_tasks:
+ sequential:
+ - thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+ - print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/+ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/+
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-cls.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-cls.yaml
new file mode 100644
index 0000000..e35bfc2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-cls.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+split_tasks:
+ sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-import-export.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..9d6c2e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/rbd-import-export.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+split_tasks:
+ sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/readwrite.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/readwrite.yaml
new file mode 100644
index 0000000..0382520
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/readwrite.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+split_tasks:
+ sequential:
+ - full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+ - print: "**** done rados/readwrite 5-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/snaps-few-objects.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..c96cfbe
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,19 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+split_tasks:
+ sequential:
+ - full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/+ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/+
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/radosbench.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/radosbench.yaml
new file mode 100644
index 0000000..2cfbf1d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/radosbench.yaml
@@ -0,0 +1,41 @@
+meta:
+- desc: |
+  generate sustained write load with rados bench:
+  eleven sequential 150-second runs
+split_tasks:
+ sequential:
+ - full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - print: "**** done radosbench 7-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/rbd_api.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/rbd_api.yaml
new file mode 100644
index 0000000..22a5f57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+split_tasks:
+ sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/6-finish-upgrade.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/6-finish-upgrade.yaml
new file mode 100644
index 0000000..a110815
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/6-finish-upgrade.yaml
@@ -0,0 +1,23 @@
+meta:
+- desc: |
+ install upgrade on remaining node
+  restarting remaining osds
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_DEGRADED\)
+ - \(MDS_
+tasks:
+- install.upgrade:
+ osd.3:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-up: true
+ wait-for-healthy: false
+- ceph.restart:
+ daemons: [mds.a]
+ wait-for-up: true
+ wait-for-healthy: false
+- install.upgrade:
+ client.0:
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/7-luminous.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/7-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/7-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/+ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/+
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rbd-python.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rbd-python.yaml
new file mode 100644
index 0000000..56ba21d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rbd-python.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rgw-swift.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rgw-swift.yaml
new file mode 100644
index 0000000..e41f47a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/rgw-swift.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/snaps-many-objects.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/thrashosds-health.yaml b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/rados/upgrade/jewel-x-singleton/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/% b/src/ceph/qa/suites/rados/verify/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/%
diff --git a/src/ceph/qa/suites/rados/verify/ceph.yaml b/src/ceph/qa/suites/rados/verify/ceph.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rados/verify/clusters/+ b/src/ceph/qa/suites/rados/verify/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/clusters/+
diff --git a/src/ceph/qa/suites/rados/verify/clusters/fixed-2.yaml b/src/ceph/qa/suites/rados/verify/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/clusters/openstack.yaml b/src/ceph/qa/suites/rados/verify/clusters/openstack.yaml
new file mode 100644
index 0000000..e559d91
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/rados/verify/d-require-luminous b/src/ceph/qa/suites/rados/verify/d-require-luminous
new file mode 120000
index 0000000..82036c6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-require-luminous
@@ -0,0 +1 @@
+../basic/d-require-luminous \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/d-thrash/default/+ b/src/ceph/qa/suites/rados/verify/d-thrash/default/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-thrash/default/+
diff --git a/src/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml b/src/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml
new file mode 100644
index 0000000..bcd3f39
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
diff --git a/src/ceph/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml b/src/ceph/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml
new file mode 120000
index 0000000..0b1d7b0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/d-thrash/none.yaml b/src/ceph/qa/suites/rados/verify/d-thrash/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/d-thrash/none.yaml
diff --git a/src/ceph/qa/suites/rados/verify/mon_kv_backend b/src/ceph/qa/suites/rados/verify/mon_kv_backend
new file mode 120000
index 0000000..6f5a7e6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/mon_kv_backend
@@ -0,0 +1 @@
+../../../mon_kv_backend \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/msgr b/src/ceph/qa/suites/rados/verify/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/msgr
@@ -0,0 +1 @@
+../basic/msgr \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/verify/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/verify/objectstore b/src/ceph/qa/suites/rados/verify/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/rados.yaml b/src/ceph/qa/suites/rados/verify/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml b/src/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml
new file mode 100644
index 0000000..266a4e4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(SMALLER_PGP_NUM\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- mon_recovery:
diff --git a/src/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml b/src/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..05b843e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml
@@ -0,0 +1,23 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_FULL\)
+ - \(SMALLER_PGP_NUM\)
+ - \(REQUEST_SLOW\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+ conf:
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ debug monc: 20
+tasks:
+- workunit:
+ timeout: 6h
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/src/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml b/src/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml
new file mode 100644
index 0000000..bbab083
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - cls
diff --git a/src/ceph/qa/suites/rados/verify/validater/lockdep.yaml b/src/ceph/qa/suites/rados/verify/validater/lockdep.yaml
new file mode 100644
index 0000000..25f8435
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/validater/lockdep.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
diff --git a/src/ceph/qa/suites/rados/verify/validater/valgrind.yaml b/src/ceph/qa/suites/rados/verify/validater/valgrind.yaml
new file mode 100644
index 0000000..5622414
--- /dev/null
+++ b/src/ceph/qa/suites/rados/verify/validater/valgrind.yaml
@@ -0,0 +1,22 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ debuginfo: true
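+    # notcmalloc builds drop tcmalloc, which valgrind cannot cope with; debuginfo makes memcheck stacks readable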
+ ceph:
+ conf:
+ global:
+ osd heartbeat grace: 40
+ mon:
+ mon osd crush smoke test: false
+ log-whitelist:
+ - overall HEALTH_
+# valgrind is slow; we might get PGs stuck peering, etc.
+ - \(PG_
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
diff --git a/src/ceph/qa/suites/rbd/basic/% b/src/ceph/qa/suites/rbd/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/%
diff --git a/src/ceph/qa/suites/rbd/basic/base/install.yaml b/src/ceph/qa/suites/rbd/basic/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rbd/basic/cachepool/none.yaml b/src/ceph/qa/suites/rbd/basic/cachepool/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/cachepool/none.yaml
diff --git a/src/ceph/qa/suites/rbd/basic/cachepool/small.yaml b/src/ceph/qa/suites/rbd/basic/cachepool/small.yaml
new file mode 100644
index 0000000..1b50565
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/cachepool/small.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
diff --git a/src/ceph/qa/suites/rbd/basic/clusters/+ b/src/ceph/qa/suites/rbd/basic/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/clusters/+
diff --git a/src/ceph/qa/suites/rbd/basic/clusters/fixed-1.yaml b/src/ceph/qa/suites/rbd/basic/clusters/fixed-1.yaml
new file mode 120000
index 0000000..435ea3c
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/clusters/fixed-1.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-1.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/basic/clusters/openstack.yaml b/src/ceph/qa/suites/rbd/basic/clusters/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rbd/basic/msgr-failures/few.yaml b/src/ceph/qa/suites/rbd/basic/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rbd/basic/msgr-failures/many.yaml b/src/ceph/qa/suites/rbd/basic/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/rbd/basic/objectstore b/src/ceph/qa/suites/rbd/basic/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml b/src/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml
new file mode 100644
index 0000000..fe1e26d
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
diff --git a/src/ceph/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml b/src/ceph/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml
new file mode 100644
index 0000000..51b35e2
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+ - cls/test_cls_lock.sh
+ - cls/test_cls_journal.sh
diff --git a/src/ceph/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml b/src/ceph/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml
new file mode 100644
index 0000000..d2c80ad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_lock_fence.sh
diff --git a/src/ceph/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml b/src/ceph/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml
new file mode 100644
index 0000000..7ab3185
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - \(REQUEST_SLOW\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
diff --git a/src/ceph/qa/suites/rbd/cli/% b/src/ceph/qa/suites/rbd/cli/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/%
diff --git a/src/ceph/qa/suites/rbd/cli/base/install.yaml b/src/ceph/qa/suites/rbd/cli/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rbd/cli/clusters b/src/ceph/qa/suites/rbd/cli/clusters
new file mode 120000
index 0000000..ae92569
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/clusters
@@ -0,0 +1 @@
+../basic/clusters \ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/cli/features/defaults.yaml b/src/ceph/qa/suites/rbd/cli/features/defaults.yaml
new file mode 100644
index 0000000..fd42254
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/features/defaults.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default format: 2
+ rbd default features: 61
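+      # 61 = layering(1) + exclusive-lock(4) + object-map(8) + fast-diff(16) + deep-flatten(32)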
diff --git a/src/ceph/qa/suites/rbd/cli/features/format-1.yaml b/src/ceph/qa/suites/rbd/cli/features/format-1.yaml
new file mode 100644
index 0000000..9c53208
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/features/format-1.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default format: 1
diff --git a/src/ceph/qa/suites/rbd/cli/features/journaling.yaml b/src/ceph/qa/suites/rbd/cli/features/journaling.yaml
new file mode 100644
index 0000000..322a728
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/features/journaling.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default format: 2
+ rbd default features: 125
diff --git a/src/ceph/qa/suites/rbd/cli/features/layering.yaml b/src/ceph/qa/suites/rbd/cli/features/layering.yaml
new file mode 100644
index 0000000..420e3d5
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/features/layering.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default format: 2
+ rbd default features: 1
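
The rbd default features values used across these facets are bitmasks
over the librbd feature bits (layering=1, striping=2, exclusive-lock=4,
object-map=8, fast-diff=16, deep-flatten=32, journaling=64). A quick
shell check of the composite values above:

    # 61 = layering + exclusive-lock + object-map + fast-diff + deep-flatten
    echo $((1 | 4 | 8 | 16 | 32))   # -> 61
    # 125 is the same set with journaling added
    echo $((61 | 64))               # -> 125
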
diff --git a/src/ceph/qa/suites/rbd/cli/msgr-failures/few.yaml b/src/ceph/qa/suites/rbd/cli/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rbd/cli/msgr-failures/many.yaml b/src/ceph/qa/suites/rbd/cli/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/rbd/cli/objectstore b/src/ceph/qa/suites/rbd/cli/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml b/src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml
new file mode 100644
index 0000000..376bf08
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml
@@ -0,0 +1,27 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+ - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool set datapool allow_ec_overwrites true
+ - rbd pool init datapool
+
+overrides:
+ thrashosds:
+ bdev_inject_crash: 2
+ bdev_inject_crash_probability: .5
+ ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ conf:
+ client:
+ rbd default data pool: datapool
+ osd: # force bluestore since it's required for ec overwrites
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ enable experimental unrecoverable data corrupting features: "*"
+ osd debug randomize hobject sort order: false
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
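
This facet exercises erasure-coded data pools: with k=2, m=1 the pool
survives one OSD failure at 1.5x raw space instead of replication's 3x,
and allow_ec_overwrites (bluestore-only, hence the forced objectstore
above) lets RBD write to the EC pool directly. A sketch of the
equivalent manual setup; the image name 'ecimg' is a made-up example:

    # Image metadata stays in the replicated 'rbd' pool; data is
    # directed at the EC pool via --data-pool.
    sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
    sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
    sudo ceph osd pool set datapool allow_ec_overwrites true
    rbd pool init datapool
    rbd create --size 1G --data-pool datapool ecimg
    rbd info ecimg   # should report data_pool: datapool
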
diff --git a/src/ceph/qa/suites/rbd/cli/pool/none.yaml b/src/ceph/qa/suites/rbd/cli/pool/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/pool/none.yaml
diff --git a/src/ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml b/src/ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
new file mode 100644
index 0000000..c5647db
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create datapool 4
+ - rbd pool init datapool
+
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default data pool: datapool
diff --git a/src/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml b/src/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml
new file mode 100644
index 0000000..1b50565
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
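
The small-cache-pool facet fronts the rbd pool with a deliberately tiny
writeback cache tier; capping target_max_objects at 250 keeps flush and
evict cycles (and the whitelisted CACHE_POOL_NEAR_FULL noise) coming
throughout the workload. Should the tier need unwinding by hand, a
sketch of the usual teardown order:

    # Stop new writes landing in the cache, drain it, then detach.
    sudo ceph osd tier cache-mode cache forward --yes-i-really-mean-it
    sudo rados -p cache cache-flush-evict-all
    sudo ceph osd tier remove-overlay rbd
    sudo ceph osd tier remove rbd cache
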
diff --git a/src/ceph/qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml b/src/ceph/qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml
new file mode 100644
index 0000000..be43b3e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/cli_generic.sh
diff --git a/src/ceph/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml b/src/ceph/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..b08f261
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
diff --git a/src/ceph/qa/suites/rbd/librbd/% b/src/ceph/qa/suites/rbd/librbd/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/%
diff --git a/src/ceph/qa/suites/rbd/librbd/cache/none.yaml b/src/ceph/qa/suites/rbd/librbd/cache/none.yaml
new file mode 100644
index 0000000..42fd9c9
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/cache/none.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: false
diff --git a/src/ceph/qa/suites/rbd/librbd/cache/writeback.yaml b/src/ceph/qa/suites/rbd/librbd/cache/writeback.yaml
new file mode 100644
index 0000000..86fe06a
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/cache/writeback.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
diff --git a/src/ceph/qa/suites/rbd/librbd/cache/writethrough.yaml b/src/ceph/qa/suites/rbd/librbd/cache/writethrough.yaml
new file mode 100644
index 0000000..6dc29e1
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/cache/writethrough.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
+ rbd cache max dirty: 0
diff --git a/src/ceph/qa/suites/rbd/librbd/clusters/+ b/src/ceph/qa/suites/rbd/librbd/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/clusters/+
diff --git a/src/ceph/qa/suites/rbd/librbd/clusters/fixed-3.yaml b/src/ceph/qa/suites/rbd/librbd/clusters/fixed-3.yaml
new file mode 120000
index 0000000..a3ac9fc
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/clusters/fixed-3.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/librbd/clusters/openstack.yaml b/src/ceph/qa/suites/rbd/librbd/clusters/openstack.yaml
new file mode 100644
index 0000000..b0f3b9b
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rbd/librbd/config/copy-on-read.yaml b/src/ceph/qa/suites/rbd/librbd/config/copy-on-read.yaml
new file mode 100644
index 0000000..ce99e7e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/config/copy-on-read.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd clone copy on read: true
diff --git a/src/ceph/qa/suites/rbd/librbd/config/none.yaml b/src/ceph/qa/suites/rbd/librbd/config/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/config/none.yaml
diff --git a/src/ceph/qa/suites/rbd/librbd/config/skip-partial-discard.yaml b/src/ceph/qa/suites/rbd/librbd/config/skip-partial-discard.yaml
new file mode 100644
index 0000000..cd63204
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/config/skip-partial-discard.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd skip partial discard: true
diff --git a/src/ceph/qa/suites/rbd/librbd/msgr-failures/few.yaml b/src/ceph/qa/suites/rbd/librbd/msgr-failures/few.yaml
new file mode 100644
index 0000000..55b6df5
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ log-whitelist:
+ - but it is still running
diff --git a/src/ceph/qa/suites/rbd/librbd/objectstore b/src/ceph/qa/suites/rbd/librbd/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/librbd/pool/ec-data-pool.yaml b/src/ceph/qa/suites/rbd/librbd/pool/ec-data-pool.yaml
new file mode 100644
index 0000000..f39a5bb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/pool/ec-data-pool.yaml
@@ -0,0 +1,24 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+ - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool set datapool allow_ec_overwrites true
+ - rbd pool init datapool
+
+overrides:
+ thrashosds:
+ bdev_inject_crash: 2
+ bdev_inject_crash_probability: .5
+ ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd default data pool: datapool
+ osd: # force bluestore since it's required for ec overwrites
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ enable experimental unrecoverable data corrupting features: "*"
+ osd debug randomize hobject sort order: false
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
diff --git a/src/ceph/qa/suites/rbd/librbd/pool/none.yaml b/src/ceph/qa/suites/rbd/librbd/pool/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/pool/none.yaml
diff --git a/src/ceph/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml b/src/ceph/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml
new file mode 100644
index 0000000..c5647db
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create datapool 4
+ - rbd pool init datapool
+
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default data pool: datapool
diff --git a/src/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml b/src/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml
new file mode 100644
index 0000000..1b50565
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
diff --git a/src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml b/src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml
new file mode 100644
index 0000000..04af9c8
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "1"
diff --git a/src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml b/src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml
new file mode 100644
index 0000000..6ae7f46
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "61"
diff --git a/src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml b/src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml
new file mode 100644
index 0000000..578115e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "125"
diff --git a/src/ceph/qa/suites/rbd/librbd/workloads/fsx.yaml b/src/ceph/qa/suites/rbd/librbd/workloads/fsx.yaml
new file mode 100644
index 0000000..6d8cd5f
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/workloads/fsx.yaml
@@ -0,0 +1,4 @@
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 20000
diff --git a/src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests.yaml b/src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests.yaml
new file mode 100644
index 0000000..a7b3ce7
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "1"
diff --git a/src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml b/src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml
new file mode 100644
index 0000000..40b2312
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "61"
diff --git a/src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml b/src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml
new file mode 100644
index 0000000..d0e905f
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml
@@ -0,0 +1,7 @@
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "125"
diff --git a/src/ceph/qa/suites/rbd/librbd/workloads/rbd_fio.yaml b/src/ceph/qa/suites/rbd/librbd/workloads/rbd_fio.yaml
new file mode 100644
index 0000000..ff788c6
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/librbd/workloads/rbd_fio.yaml
@@ -0,0 +1,10 @@
+tasks:
+- rbd_fio:
+ client.0:
+ fio-io-size: 80%
+ formats: [2]
+ features: [[layering],[layering,exclusive-lock,object-map]]
+ io-engine: rbd
+ test-clone-io: 1
+ rw: randrw
+ runtime: 900
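
The rbd_fio task drives fio's native rbd ioengine (librbd, no kernel
mapping) in random read/write mode for 900 seconds over both feature
sets listed, sizing I/O at 80% of the image. Roughly the same run as a
direct fio invocation, against a hypothetical pre-created image
'fio-img':

    # Requires fio built with rbd support; 'admin' means client.admin.
    fio --name=rbd-randrw --ioengine=rbd --clientname=admin \
        --pool=rbd --rbdname=fio-img \
        --rw=randrw --bs=4k --size=80% --runtime=900 --time_based
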
diff --git a/src/ceph/qa/suites/rbd/maintenance/% b/src/ceph/qa/suites/rbd/maintenance/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/%
diff --git a/src/ceph/qa/suites/rbd/maintenance/base/install.yaml b/src/ceph/qa/suites/rbd/maintenance/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rbd/maintenance/clusters/+ b/src/ceph/qa/suites/rbd/maintenance/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/clusters/+
diff --git a/src/ceph/qa/suites/rbd/maintenance/clusters/fixed-3.yaml b/src/ceph/qa/suites/rbd/maintenance/clusters/fixed-3.yaml
new file mode 120000
index 0000000..a3ac9fc
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/clusters/fixed-3.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/maintenance/clusters/openstack.yaml b/src/ceph/qa/suites/rbd/maintenance/clusters/openstack.yaml
new file mode 120000
index 0000000..3e5028f
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/clusters/openstack.yaml
@@ -0,0 +1 @@
+../../qemu/clusters/openstack.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/maintenance/filestore-xfs.yaml b/src/ceph/qa/suites/rbd/maintenance/filestore-xfs.yaml
new file mode 120000
index 0000000..59ef7e4
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../objectstore/filestore-xfs.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/maintenance/objectstore b/src/ceph/qa/suites/rbd/maintenance/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/maintenance/qemu/xfstests.yaml b/src/ceph/qa/suites/rbd/maintenance/qemu/xfstests.yaml
new file mode 100644
index 0000000..ffa012e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/qemu/xfstests.yaml
@@ -0,0 +1,13 @@
+tasks:
+- parallel:
+ - io_workload
+ - op_workload
+io_workload:
+ sequential:
+ - qemu:
+ client.0:
+ clone: true
+ type: block
+ disks: 3
+ test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/run_xfstests_qemu.sh
+exclude_arch: armv7l
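
Note the indirection here: the parallel task names two top-level
sections, io_workload (defined in this file) and op_workload (supplied
by whichever workloads/ facet is merged in), so each generated job runs
the qemu xfstests I/O alongside one maintenance operation. To preview
the combinations teuthology would build without scheduling anything,
something like the following (branch and machine type are illustrative):

    teuthology-suite --suite rbd/maintenance --ceph luminous \
        --machine-type mira --dry-run
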
diff --git a/src/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml b/src/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml
new file mode 100644
index 0000000..d7e1c1e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml
@@ -0,0 +1,8 @@
+op_workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/qemu_dynamic_features.sh
+ env:
+ IMAGE_NAME: client.0.1-clone
diff --git a/src/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml b/src/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml
new file mode 100644
index 0000000..dc8671b
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd cache: false
+op_workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/qemu_dynamic_features.sh
+ env:
+ IMAGE_NAME: client.0.1-clone
diff --git a/src/ceph/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml b/src/ceph/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml
new file mode 100644
index 0000000..308158f
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml
@@ -0,0 +1,8 @@
+op_workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/qemu_rebuild_object_map.sh
+ env:
+ IMAGE_NAME: client.0.1-clone
diff --git a/src/ceph/qa/suites/rbd/mirror-ha/% b/src/ceph/qa/suites/rbd/mirror-ha/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror-ha/%
diff --git a/src/ceph/qa/suites/rbd/mirror-ha/base b/src/ceph/qa/suites/rbd/mirror-ha/base
new file mode 120000
index 0000000..09e88d4
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror-ha/base
@@ -0,0 +1 @@
+../mirror/base
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/mirror-ha/cluster b/src/ceph/qa/suites/rbd/mirror-ha/cluster
new file mode 120000
index 0000000..47e95a2
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror-ha/cluster
@@ -0,0 +1 @@
+../mirror/cluster
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/mirror-ha/msgr-failures b/src/ceph/qa/suites/rbd/mirror-ha/msgr-failures
new file mode 120000
index 0000000..c0ae027
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror-ha/msgr-failures
@@ -0,0 +1 @@
+../mirror/msgr-failures
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/mirror-ha/objectstore b/src/ceph/qa/suites/rbd/mirror-ha/objectstore
new file mode 120000
index 0000000..1a5e90e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror-ha/objectstore
@@ -0,0 +1 @@
+../mirror/objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/mirror-ha/workloads/rbd-mirror-ha-workunit.yaml b/src/ceph/qa/suites/rbd/mirror-ha/workloads/rbd-mirror-ha-workunit.yaml
new file mode 100644
index 0000000..406318f
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror-ha/workloads/rbd-mirror-ha-workunit.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: run the rbd_mirror_ha.sh workunit to test the rbd-mirror daemon
+tasks:
+- exec:
+ cluster1.client.mirror:
+ - ceph --cluster cluster1 auth caps client.mirror mon 'profile rbd' osd 'profile rbd'
+ cluster2.client.mirror:
+ - ceph --cluster cluster2 auth caps client.mirror mon 'profile rbd' osd 'profile rbd'
+- workunit:
+ clients:
+ cluster1.client.mirror: [rbd/rbd_mirror_ha.sh]
+ env:
+ # override workunit setting of CEPH_ARGS='--cluster'
+ CEPH_ARGS: ''
+ RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
+ timeout: 6h
diff --git a/src/ceph/qa/suites/rbd/mirror/% b/src/ceph/qa/suites/rbd/mirror/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/%
diff --git a/src/ceph/qa/suites/rbd/mirror/base/install.yaml b/src/ceph/qa/suites/rbd/mirror/base/install.yaml
new file mode 100644
index 0000000..365c3a8
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/base/install.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: run two ceph clusters and install rbd-mirror
+tasks:
+- install:
+ extra_packages: [rbd-mirror]
+- ceph:
+ cluster: cluster1
+- ceph:
+ cluster: cluster2
diff --git a/src/ceph/qa/suites/rbd/mirror/cluster/+ b/src/ceph/qa/suites/rbd/mirror/cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/cluster/+
diff --git a/src/ceph/qa/suites/rbd/mirror/cluster/2-node.yaml b/src/ceph/qa/suites/rbd/mirror/cluster/2-node.yaml
new file mode 100644
index 0000000..fbc76bd
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/cluster/2-node.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: 2 ceph clusters with 1 mon and 3 osds each
+roles:
+- - cluster1.mon.a
+ - cluster1.mgr.x
+ - cluster1.osd.0
+ - cluster1.osd.1
+ - cluster1.osd.2
+ - cluster1.client.0
+ - cluster2.client.0
+- - cluster2.mon.a
+ - cluster2.mgr.x
+ - cluster2.osd.0
+ - cluster2.osd.1
+ - cluster2.osd.2
+ - cluster1.client.mirror
+ - cluster2.client.mirror
diff --git a/src/ceph/qa/suites/rbd/mirror/cluster/openstack.yaml b/src/ceph/qa/suites/rbd/mirror/cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rbd/mirror/msgr-failures b/src/ceph/qa/suites/rbd/mirror/msgr-failures
new file mode 120000
index 0000000..db59eb4
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/msgr-failures
@@ -0,0 +1 @@
+../basic/msgr-failures
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/mirror/objectstore b/src/ceph/qa/suites/rbd/mirror/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/mirror/rbd-mirror/one-per-cluster.yaml b/src/ceph/qa/suites/rbd/mirror/rbd-mirror/one-per-cluster.yaml
new file mode 100644
index 0000000..1e762a6
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/rbd-mirror/one-per-cluster.yaml
@@ -0,0 +1,19 @@
+meta:
+- desc: run one rbd-mirror daemon per cluster
+overrides:
+ ceph:
+ conf:
+ client.mirror:
+ # override to make these names predictable
+ admin socket: /var/run/ceph/$cluster-$name.asok
+ pid file: /var/run/ceph/$cluster-$name.pid
+tasks:
+- exec:
+ cluster1.client.mirror:
+ - ceph --cluster cluster1 auth caps client.mirror mon 'profile rbd' osd 'profile rbd'
+ cluster2.client.mirror:
+ - ceph --cluster cluster2 auth caps client.mirror mon 'profile rbd' osd 'profile rbd'
+- rbd-mirror:
+ client: cluster1.client.mirror
+- rbd-mirror:
+ client: cluster2.client.mirror
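
With two logical clusters colocated on the same hosts, this facet
restricts client.mirror to the rbd profile caps and starts one
rbd-mirror daemon per cluster; the workunits then exercise
journal-based replication between them. The pool-level wiring the tests
drive underneath looks roughly like:

    # Enable pool-mode mirroring on both sides and cross-register peers.
    rbd --cluster cluster1 mirror pool enable rbd pool
    rbd --cluster cluster2 mirror pool enable rbd pool
    rbd --cluster cluster1 mirror pool peer add rbd client.mirror@cluster2
    rbd --cluster cluster2 mirror pool peer add rbd client.mirror@cluster1
    rbd --cluster cluster1 mirror pool status rbd
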
diff --git a/src/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-stress-workunit.yaml b/src/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-stress-workunit.yaml
new file mode 100644
index 0000000..cdc4864
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-stress-workunit.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon
+tasks:
+- workunit:
+ clients:
+ cluster1.client.mirror: [rbd/rbd_mirror_stress.sh]
+ env:
+ # override workunit setting of CEPH_ARGS='--cluster'
+ CEPH_ARGS: ''
+ RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
+ RBD_MIRROR_USE_RBD_MIRROR: '1'
+ timeout: 6h
diff --git a/src/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit.yaml b/src/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit.yaml
new file mode 100644
index 0000000..2e16642
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: run the rbd_mirror.sh workunit to test the rbd-mirror daemon
+tasks:
+- workunit:
+ clients:
+ cluster1.client.mirror: [rbd/rbd_mirror.sh]
+ env:
+ # override workunit setting of CEPH_ARGS='--cluster'
+ CEPH_ARGS: ''
+ RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
+ RBD_MIRROR_USE_RBD_MIRROR: '1'
diff --git a/src/ceph/qa/suites/rbd/nbd/% b/src/ceph/qa/suites/rbd/nbd/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/%
diff --git a/src/ceph/qa/suites/rbd/nbd/base b/src/ceph/qa/suites/rbd/nbd/base
new file mode 120000
index 0000000..fd10a85
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/base
@@ -0,0 +1 @@
+../thrash/base
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/nbd/cluster/+ b/src/ceph/qa/suites/rbd/nbd/cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/cluster/+
diff --git a/src/ceph/qa/suites/rbd/nbd/cluster/fixed-3.yaml b/src/ceph/qa/suites/rbd/nbd/cluster/fixed-3.yaml
new file mode 100644
index 0000000..1825891
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/cluster/fixed-3.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5]
+- [client.0]
diff --git a/src/ceph/qa/suites/rbd/nbd/cluster/openstack.yaml b/src/ceph/qa/suites/rbd/nbd/cluster/openstack.yaml
new file mode 120000
index 0000000..48becbb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/cluster/openstack.yaml
@@ -0,0 +1 @@
+../../thrash/clusters/openstack.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/nbd/msgr-failures b/src/ceph/qa/suites/rbd/nbd/msgr-failures
new file mode 120000
index 0000000..03689aa
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/nbd/objectstore b/src/ceph/qa/suites/rbd/nbd/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/nbd/thrashers b/src/ceph/qa/suites/rbd/nbd/thrashers
new file mode 120000
index 0000000..f461dad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/thrashers
@@ -0,0 +1 @@
+../thrash/thrashers
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/nbd/thrashosds-health.yaml b/src/ceph/qa/suites/rbd/nbd/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml b/src/ceph/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml
new file mode 100644
index 0000000..b6e9d5b
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml
@@ -0,0 +1,15 @@
+os_type: ubuntu
+overrides:
+ install:
+ ceph:
+ extra_packages: [rbd-nbd]
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 6000
+ nbd: True
+ holebdy: 512
+ punch_holes: true
+ readbdy: 512
+ truncbdy: 512
+ writebdy: 512
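
Here fsx runs against an image mapped through rbd-nbd (hence the extra
package and nbd: True), with every I/O boundary pinned to 512 bytes to
match the block device's sector granularity. The mapping step the task
automates is, by hand, roughly:

    # Map a hypothetical image through the NBD userspace bridge,
    # exercise the block device, then unmap.
    sudo rbd-nbd map rbd/fsx-img    # prints e.g. /dev/nbd0
    sudo dd if=/dev/urandom of=/dev/nbd0 bs=512 count=1000 oflag=direct
    sudo rbd-nbd unmap /dev/nbd0
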
diff --git a/src/ceph/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml b/src/ceph/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml
new file mode 100644
index 0000000..897d07c
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml
@@ -0,0 +1,10 @@
+os_type: ubuntu
+overrides:
+ install:
+ ceph:
+ extra_packages: [rbd-nbd]
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/rbd-nbd.sh
diff --git a/src/ceph/qa/suites/rbd/openstack/% b/src/ceph/qa/suites/rbd/openstack/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/openstack/%
diff --git a/src/ceph/qa/suites/rbd/openstack/base/install.yaml b/src/ceph/qa/suites/rbd/openstack/base/install.yaml
new file mode 100644
index 0000000..90f80dc
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/openstack/base/install.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+overrides:
+ ceph:
+ log-whitelist:
+ - (POOL_APP_NOT_ENABLED)
diff --git a/src/ceph/qa/suites/rbd/openstack/clusters/+ b/src/ceph/qa/suites/rbd/openstack/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/openstack/clusters/+
diff --git a/src/ceph/qa/suites/rbd/openstack/clusters/fixed-2.yaml b/src/ceph/qa/suites/rbd/openstack/clusters/fixed-2.yaml
new file mode 100644
index 0000000..473c205
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/openstack/clusters/fixed-2.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ osd pool default size: 2
+ osd crush chooseleaf type: 0
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2]
+- [client.0]
diff --git a/src/ceph/qa/suites/rbd/openstack/clusters/openstack.yaml b/src/ceph/qa/suites/rbd/openstack/clusters/openstack.yaml
new file mode 100644
index 0000000..3310e9d
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/openstack/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 1
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rbd/openstack/features/minimum.yaml b/src/ceph/qa/suites/rbd/openstack/features/minimum.yaml
new file mode 100644
index 0000000..420e3d5
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/openstack/features/minimum.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default format: 2
+ rbd default features: 1
diff --git a/src/ceph/qa/suites/rbd/openstack/objectstore b/src/ceph/qa/suites/rbd/openstack/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/openstack/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/openstack/workloads/devstack-tempest-gate.yaml b/src/ceph/qa/suites/rbd/openstack/workloads/devstack-tempest-gate.yaml
new file mode 100644
index 0000000..26ddda9
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/openstack/workloads/devstack-tempest-gate.yaml
@@ -0,0 +1,51 @@
+tasks:
+- qemu:
+ all:
+ type: filesystem
+ cpus: 4
+ memory: 12288
+ disks:
+ - image_size: 30720
+ - image_size: 30720
+ test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/workunits/rbd/run_devstack_tempest.sh
+ image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+ cloud_config_archive:
+ - type: text/cloud-config
+ content: |
+ users:
+ - name: stack
+ lock_passwd: False
+ shell: /bin/bash
+ sudo: ["ALL=(root) NOPASSWD:ALL\nDefaults:stack,tempest !requiretty"]
+ - name: tempest
+ lock_passwd: False
+ shell: /bin/bash
+ sudo:
+ - "ALL=(root) NOPASSWD:/sbin/ip"
+ - "ALL=(root) NOPASSWD:/sbin/iptables"
+ - "ALL=(root) NOPASSWD:/usr/bin/ovsdb-client"
+ - |
+ #!/bin/bash -ex
+ wget -q -O- "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc" | apt-key add -
+ wget -q -O /etc/apt/sources.list.d/ceph.list "https://shaman.ceph.com/api/repos/ceph/{ceph_branch}/{ceph_sha1}/ubuntu/xenial/repo"
+ apt-get update
+
+ mount --bind /mnt/test_b /opt
+ mkdir /opt/stack
+ chown -R stack:stack /home/stack
+ chown -R stack:stack /opt/stack
+
+ mkdir /mnt/log/stack
+ chmod a+rwx /mnt/log/stack
+ chown -R stack:stack /mnt/log/stack
+
+ apt-get install -y ceph-common librbd1
+
+ mkdir /mnt/log/stack/ceph
+ chown -R stack:stack /mnt/log/stack/ceph
+ chmod a+rwx /mnt/log/stack/ceph
+
+ # sanity check that the cluster is reachable from the VM
+ echo '[client]' >> /etc/ceph/ceph.conf
+ echo 'log file = /mnt/log/stack/ceph/$name.$pid.log' >> /etc/ceph/ceph.conf
+ rbd --debug-ms=10 --debug-rbd=20 info client.0.1
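
The cloud_config_archive hands cloud-init two parts: a users/sudoers
definition and a bootstrap script that pins the guest to matching ceph
packages from shaman before devstack and tempest run. The
{ceph_branch}/{ceph_sha1} placeholders are substituted by the task;
resolved by hand, the repo lookup is roughly (placeholder ref shown,
the task fills in real values):

    curl -fsSL "https://shaman.ceph.com/api/repos/ceph/luminous/latest/ubuntu/xenial/repo" \
        | sudo tee /etc/apt/sources.list.d/ceph.list
    sudo apt-get update
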
diff --git a/src/ceph/qa/suites/rbd/qemu/% b/src/ceph/qa/suites/rbd/qemu/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/%
diff --git a/src/ceph/qa/suites/rbd/qemu/cache/none.yaml b/src/ceph/qa/suites/rbd/qemu/cache/none.yaml
new file mode 100644
index 0000000..42fd9c9
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/cache/none.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: false
diff --git a/src/ceph/qa/suites/rbd/qemu/cache/writeback.yaml b/src/ceph/qa/suites/rbd/qemu/cache/writeback.yaml
new file mode 100644
index 0000000..86fe06a
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/cache/writeback.yaml
@@ -0,0 +1,6 @@
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
diff --git a/src/ceph/qa/suites/rbd/qemu/cache/writethrough.yaml b/src/ceph/qa/suites/rbd/qemu/cache/writethrough.yaml
new file mode 100644
index 0000000..6dc29e1
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/cache/writethrough.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
+ rbd cache max dirty: 0
diff --git a/src/ceph/qa/suites/rbd/qemu/clusters/+ b/src/ceph/qa/suites/rbd/qemu/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/clusters/+
diff --git a/src/ceph/qa/suites/rbd/qemu/clusters/fixed-3.yaml b/src/ceph/qa/suites/rbd/qemu/clusters/fixed-3.yaml
new file mode 120000
index 0000000..a3ac9fc
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/clusters/fixed-3.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/qemu/clusters/openstack.yaml b/src/ceph/qa/suites/rbd/qemu/clusters/openstack.yaml
new file mode 100644
index 0000000..9c39c7e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/clusters/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+ - machine:
+ disk: 40 # GB
+ ram: 30000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rbd/qemu/features/defaults.yaml b/src/ceph/qa/suites/rbd/qemu/features/defaults.yaml
new file mode 100644
index 0000000..fd42254
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/features/defaults.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default format: 2
+ rbd default features: 61
diff --git a/src/ceph/qa/suites/rbd/qemu/features/journaling.yaml b/src/ceph/qa/suites/rbd/qemu/features/journaling.yaml
new file mode 100644
index 0000000..322a728
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/features/journaling.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default format: 2
+ rbd default features: 125
diff --git a/src/ceph/qa/suites/rbd/qemu/msgr-failures/few.yaml b/src/ceph/qa/suites/rbd/qemu/msgr-failures/few.yaml
new file mode 100644
index 0000000..55b6df5
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ log-whitelist:
+ - but it is still running
diff --git a/src/ceph/qa/suites/rbd/qemu/objectstore b/src/ceph/qa/suites/rbd/qemu/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml b/src/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml
new file mode 100644
index 0000000..c75e6fd
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml
@@ -0,0 +1,21 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+ - sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
+ - sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
+ - rbd pool init rbd
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml b/src/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml
new file mode 100644
index 0000000..f39a5bb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml
@@ -0,0 +1,24 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+ - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool set datapool allow_ec_overwrites true
+ - rbd pool init datapool
+
+overrides:
+ thrashosds:
+ bdev_inject_crash: 2
+ bdev_inject_crash_probability: .5
+ ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd default data pool: datapool
+ osd: # force bluestore since it's required for ec overwrites
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ enable experimental unrecoverable data corrupting features: "*"
+ osd debug randomize hobject sort order: false
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/none.yaml b/src/ceph/qa/suites/rbd/qemu/pool/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/none.yaml
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml b/src/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml
new file mode 100644
index 0000000..c5647db
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create datapool 4
+ - rbd pool init datapool
+
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default data pool: datapool
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml b/src/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml
new file mode 100644
index 0000000..1b50565
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
diff --git a/src/ceph/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml b/src/ceph/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml
new file mode 100644
index 0000000..e06a587
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml
@@ -0,0 +1,6 @@
+tasks:
+- qemu:
+ all:
+ clone: true
+ test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/workunits/suites/bonnie.sh
+exclude_arch: armv7l
diff --git a/src/ceph/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml b/src/ceph/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml
new file mode 100644
index 0000000..a78801d
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml
@@ -0,0 +1,6 @@
+tasks:
+- qemu:
+ all:
+ clone: true
+ test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/workunits/suites/fsstress.sh
+exclude_arch: armv7l
diff --git a/src/ceph/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled b/src/ceph/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled
new file mode 100644
index 0000000..c436ba1
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled
@@ -0,0 +1,6 @@
+tasks:
+- qemu:
+ all:
+ test: http://git.ceph.com/?p={repo};a=blob_plain;h={branch};f=qa/workunits/suites/iozone.sh
+ image_size: 20480
+exclude_arch: armv7l
diff --git a/src/ceph/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml b/src/ceph/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml
new file mode 100644
index 0000000..2fc6fb6
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml
@@ -0,0 +1,8 @@
+tasks:
+- qemu:
+ all:
+ clone: true
+ type: block
+ disks: 3
+ test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/run_xfstests_qemu.sh
+exclude_arch: armv7l
diff --git a/src/ceph/qa/suites/rbd/singleton-bluestore/% b/src/ceph/qa/suites/rbd/singleton-bluestore/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton-bluestore/%
diff --git a/src/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml b/src/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml
new file mode 100644
index 0000000..9af52e0
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+- [mon.b, mgr.y, osd.3, osd.4, osd.5]
+- [mon.c, mgr.z, osd.6, osd.7, osd.8]
+- [osd.9, osd.10, osd.11]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - 'application not enabled'
+- workunit:
+ timeout: 30m
+ clients:
+ all: [rbd/issue-20295.sh]
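
This singleton pins a 12-OSD, three-monitor layout onto bluestore (via
the objectstore facets below) and replays the reproducer for tracker
issue 20295 under a short 30m timeout. Outside teuthology the workunit
is just a script in the ceph tree, runnable against any test cluster:

    # From a luminous checkout, against a cluster reachable via ceph.conf.
    git clone --depth 1 -b luminous https://github.com/ceph/ceph.git
    cd ceph/qa/workunits
    ./rbd/issue-20295.sh
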
diff --git a/src/ceph/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-comp.yaml b/src/ceph/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-comp.yaml
new file mode 120000
index 0000000..b23b2a7
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-comp.yaml
@@ -0,0 +1 @@
+../../../../objectstore/bluestore-comp.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/singleton-bluestore/objectstore/bluestore.yaml b/src/ceph/qa/suites/rbd/singleton-bluestore/objectstore/bluestore.yaml
new file mode 120000
index 0000000..bd7d7e0
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton-bluestore/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../objectstore/bluestore.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/singleton-bluestore/openstack.yaml b/src/ceph/qa/suites/rbd/singleton-bluestore/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton-bluestore/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rbd/singleton/% b/src/ceph/qa/suites/rbd/singleton/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/%
diff --git a/src/ceph/qa/suites/rbd/singleton/all/admin_socket.yaml b/src/ceph/qa/suites/rbd/singleton/all/admin_socket.yaml
new file mode 100644
index 0000000..22dbd8c
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/admin_socket.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- workunit:
+ clients:
+ all: [rbd/test_admin_socket.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/formatted-output.yaml b/src/ceph/qa/suites/rbd/singleton/all/formatted-output.yaml
new file mode 100644
index 0000000..f6a1991
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/formatted-output.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- cram:
+ clients:
+ client.0:
+ - http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=src/test/cli-integration/rbd/formatted-output.t
diff --git a/src/ceph/qa/suites/rbd/singleton/all/merge_diff.yaml b/src/ceph/qa/suites/rbd/singleton/all/merge_diff.yaml
new file mode 100644
index 0000000..31b269d
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/merge_diff.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- workunit:
+ clients:
+ all: [rbd/merge_diff.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/permissions.yaml b/src/ceph/qa/suites/rbd/singleton/all/permissions.yaml
new file mode 100644
index 0000000..c00a5c9
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/permissions.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- workunit:
+ clients:
+ all: [rbd/permissions.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml b/src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml
new file mode 100644
index 0000000..bfb2039
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml
@@ -0,0 +1,13 @@
+exclude_arch: armv7l
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd cache: false
+- workunit:
+ clients:
+ all: [rbd/qemu-iotests.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml b/src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml
new file mode 100644
index 0000000..bf1b4be
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml
@@ -0,0 +1,13 @@
+exclude_arch: armv7l
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd cache: true
+- workunit:
+ clients:
+ all: [rbd/qemu-iotests.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml b/src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml
new file mode 100644
index 0000000..908a678
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml
@@ -0,0 +1,14 @@
+exclude_arch: armv7l
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd cache: true
+ rbd cache max dirty: 0
+- workunit:
+ clients:
+ all: [rbd/qemu-iotests.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml b/src/ceph/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml
new file mode 100644
index 0000000..f14bd74
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd validate pool: false
+- workunit:
+ clients:
+ all:
+ - mon/rbd_snaps_ops.sh
+
diff --git a/src/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml b/src/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml
new file mode 100644
index 0000000..0800cbf
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml
@@ -0,0 +1,13 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all: [rbd/test_rbd_mirror.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml b/src/ceph/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml
new file mode 100644
index 0000000..0053e66
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml
@@ -0,0 +1,7 @@
+roles:
+- [client.0]
+tasks:
+- install:
+- workunit:
+ clients:
+ all: [rbd/test_rbdmap_RBDMAPFILE.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml b/src/ceph/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml
new file mode 100644
index 0000000..cf602cb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml
@@ -0,0 +1,12 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd cache: false
+- workunit:
+ clients:
+ all: [rbd/read-flags.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/read-flags-writeback.yaml b/src/ceph/qa/suites/rbd/singleton/all/read-flags-writeback.yaml
new file mode 100644
index 0000000..e763bcc
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/read-flags-writeback.yaml
@@ -0,0 +1,12 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd cache: true
+- workunit:
+ clients:
+ all: [rbd/read-flags.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml b/src/ceph/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml
new file mode 100644
index 0000000..fc499d4
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml
@@ -0,0 +1,13 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd cache: true
+ rbd cache max dirty: 0
+- workunit:
+ clients:
+ all: [rbd/read-flags.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/all/verify_pool.yaml b/src/ceph/qa/suites/rbd/singleton/all/verify_pool.yaml
new file mode 100644
index 0000000..5ab06f7
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/all/verify_pool.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- workunit:
+ clients:
+ all: [rbd/verify_pool.sh]
diff --git a/src/ceph/qa/suites/rbd/singleton/objectstore b/src/ceph/qa/suites/rbd/singleton/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/singleton/openstack.yaml b/src/ceph/qa/suites/rbd/singleton/openstack.yaml
new file mode 100644
index 0000000..21eca2b
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/singleton/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rbd/thrash/% b/src/ceph/qa/suites/rbd/thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/%
diff --git a/src/ceph/qa/suites/rbd/thrash/base/install.yaml b/src/ceph/qa/suites/rbd/thrash/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rbd/thrash/clusters/+ b/src/ceph/qa/suites/rbd/thrash/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/clusters/+
diff --git a/src/ceph/qa/suites/rbd/thrash/clusters/fixed-2.yaml b/src/ceph/qa/suites/rbd/thrash/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/thrash/clusters/openstack.yaml b/src/ceph/qa/suites/rbd/thrash/clusters/openstack.yaml
new file mode 100644
index 0000000..40fef47
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/clusters/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+ - machine:
+ disk: 40 # GB
+ ram: 8000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/rbd/thrash/msgr-failures/few.yaml b/src/ceph/qa/suites/rbd/thrash/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rbd/thrash/objectstore b/src/ceph/qa/suites/rbd/thrash/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml b/src/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml
new file mode 100644
index 0000000..e723e09
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml
@@ -0,0 +1,21 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - (CACHE_POOL_NEAR_FULL)
+ - (CACHE_POOL_NO_HIT_SET)
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
+- thrashosds:
+ timeout: 1200
diff --git a/src/ceph/qa/suites/rbd/thrash/thrashers/default.yaml b/src/ceph/qa/suites/rbd/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..3f1615c
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/thrashers/default.yaml
@@ -0,0 +1,8 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
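
Both thrashers run the chosen workload while thrashosds repeatedly
marks OSDs down or out and brings them back, bounded by the 1200s
timeout; the whitelist entries cover the log noise such churn
necessarily produces. In its crudest manual form, the kind of churn the
task automates:

    # Hand-rolled OSD churn, purely illustrative of what thrashosds
    # does with far more variety and safety checks.
    for i in 0 1 2; do
      ceph osd out $i && sleep 30
      ceph osd in $i && sleep 30
    done
    ceph -s   # confirm health recovers once the loop ends
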
diff --git a/src/ceph/qa/suites/rbd/thrash/thrashosds-health.yaml b/src/ceph/qa/suites/rbd/thrash/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/journal.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/journal.yaml
new file mode 100644
index 0000000..4dae106
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/journal.yaml
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/journal.sh
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml
new file mode 100644
index 0000000..6ae7f46
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "61"
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml
new file mode 100644
index 0000000..a902154
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml
@@ -0,0 +1,16 @@
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "61"
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+ conf:
+ client:
+ rbd clone copy on read: true
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml
new file mode 100644
index 0000000..578115e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "125"
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml
new file mode 100644
index 0000000..04af9c8
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "1"
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml
new file mode 100644
index 0000000..98e0b39
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 6000
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd cache: true
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml
new file mode 100644
index 0000000..463ba99
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml
@@ -0,0 +1,10 @@
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 6000
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd cache: true
+ rbd cache max dirty: 0
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml
new file mode 100644
index 0000000..0c284ca
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml
@@ -0,0 +1,10 @@
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 6000
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd cache: true
+ rbd clone copy on read: true
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml
new file mode 100644
index 0000000..13e9a78
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml
@@ -0,0 +1,5 @@
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 6000
+ journal_replay: True
diff --git a/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml
new file mode 100644
index 0000000..968665e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 6000
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd cache: false
diff --git a/src/ceph/qa/suites/rbd/valgrind/% b/src/ceph/qa/suites/rbd/valgrind/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/%
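
The empty '%' file marks this directory for teuthology's matrix convolution: the scheduler takes the cross product of the sibling facet directories (base/, clusters, objectstore, validator/, workloads/) and merges one fragment from each into every generated job. An illustrative merge of one rbd/valgrind combination (a sketch of the composed job, not an actual suite fragment):

tasks:
- install:          # from base/install.yaml, with flavor/debuginfo
- ceph:             # tuned by the validator and objectstore facets
- workunit:
    clients:
      client.0:
      - rbd/test_librbd.sh
    env:
      VALGRIND: "--tool=memcheck --leak-check=full"
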
diff --git a/src/ceph/qa/suites/rbd/valgrind/base/install.yaml b/src/ceph/qa/suites/rbd/valgrind/base/install.yaml
new file mode 100644
index 0000000..2030acb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/base/install.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/src/ceph/qa/suites/rbd/valgrind/clusters b/src/ceph/qa/suites/rbd/valgrind/clusters
new file mode 120000
index 0000000..ae92569
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/clusters
@@ -0,0 +1 @@
+../basic/clusters \ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/valgrind/objectstore b/src/ceph/qa/suites/rbd/valgrind/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rbd/valgrind/validator/memcheck.yaml b/src/ceph/qa/suites/rbd/valgrind/validator/memcheck.yaml
new file mode 100644
index 0000000..c660dce
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/validator/memcheck.yaml
@@ -0,0 +1,13 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ debuginfo: true
+ rbd_fsx:
+ valgrind: ["--tool=memcheck"]
+ workunit:
+ env:
+ VALGRIND: "--tool=memcheck --leak-check=full"
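
This validator routes valgrind into each workload type differently; the wiring sketched below is the assumed behaviour of the install, rbd_fsx, and workunit tasks rather than anything stated in the fragment itself:

# install.ceph.flavor: notcmalloc -> packages built without tcmalloc,
#                                    whose allocator confuses memcheck
# install.ceph.debuginfo: true    -> debug symbols for usable stacks
# rbd_fsx.valgrind                -> the fsx binary runs under memcheck
# workunit.env.VALGRIND           -> workunit scripts wrap their test
#                                    binaries with these flags
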
diff --git a/src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml b/src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml
new file mode 100644
index 0000000..04af9c8
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "1"
diff --git a/src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml b/src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml
new file mode 100644
index 0000000..6ae7f46
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "61"
diff --git a/src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml b/src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml
new file mode 100644
index 0000000..578115e
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "125"
diff --git a/src/ceph/qa/suites/rbd/valgrind/workloads/fsx.yaml b/src/ceph/qa/suites/rbd/valgrind/workloads/fsx.yaml
new file mode 100644
index 0000000..5c745a2
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/workloads/fsx.yaml
@@ -0,0 +1,4 @@
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ size: 134217728
diff --git a/src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml b/src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml
new file mode 100644
index 0000000..0fa86ad
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml
@@ -0,0 +1,9 @@
+os_type: centos
+os_version: "7.3"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "1"
diff --git a/src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml b/src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml
new file mode 100644
index 0000000..ec1eddd
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml
@@ -0,0 +1,9 @@
+os_type: centos
+os_version: "7.3"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "61"
diff --git a/src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml b/src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml
new file mode 100644
index 0000000..b7de1bc
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml
@@ -0,0 +1,9 @@
+os_type: centos
+os_version: "7.3"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "125"
diff --git a/src/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml b/src/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml
new file mode 100644
index 0000000..e094343
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_rbd_mirror.sh
diff --git a/src/ceph/qa/suites/rgw/hadoop-s3a/s3a-hadoop-v28.yaml b/src/ceph/qa/suites/rgw/hadoop-s3a/s3a-hadoop-v28.yaml
new file mode 100644
index 0000000..41dfa40
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/hadoop-s3a/s3a-hadoop-v28.yaml
@@ -0,0 +1,31 @@
+os_type: centos
+os_version: "7.3"
+machine_type: vps
+overrides:
+ ceph_ansible:
+ vars:
+ ceph_conf_overrides:
+ global:
+          osd pool default size: 2
+ osd pool default pg num: 128
+ osd pool default pgp num: 128
+ debug rgw: 20
+ debug ms: 1
+ ceph_test: true
+ ceph_dev: true
+ ceph_dev_key: https://download.ceph.com/keys/autobuild.asc
+ ceph_origin: upstream
+ journal_collocation: true
+ osd_auto_discovery: false
+ journal_size: 1024
+
+roles:
+- [mon.a, osd.0, osd.1, osd.2, rgw.0]
+- [osd.3, osd.4, osd.5]
+- [osd.6, osd.7, osd.8]
+- [mgr.x]
+tasks:
+- ssh-keys:
+- ceph-ansible:
+- s3a-hadoop:
+ hadoop-version: '2.8.0'
diff --git a/src/ceph/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml b/src/ceph/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml
new file mode 100644
index 0000000..1c17a69
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml
@@ -0,0 +1,32 @@
+machine_type: ovh
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+overrides:
+ ceph_ansible:
+ vars:
+ ceph_conf_overrides:
+ global:
+          osd pool default size: 2
+ osd pool default pg num: 128
+ osd pool default pgp num: 128
+ debug rgw: 20
+ debug ms: 1
+ ceph_test: true
+ journal_collocation: true
+ osd_auto_discovery: false
+ journal_size: 1024
+ ceph_stable_release: luminous
+ osd_scenario: collocated
+ ceph_origin: repository
+ ceph_repository: dev
+roles:
+- [mon.a, osd.0, osd.1, osd.2, rgw.0]
+- [osd.3, osd.4, osd.5]
+- [osd.6, osd.7, osd.8]
+- [mgr.x]
+tasks:
+- ssh-keys:
+- ceph-ansible:
+- s3a-hadoop:
diff --git a/src/ceph/qa/suites/rgw/multifs/% b/src/ceph/qa/suites/rgw/multifs/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/%
diff --git a/src/ceph/qa/suites/rgw/multifs/clusters/fixed-2.yaml b/src/ceph/qa/suites/rgw/multifs/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/multifs/frontend/civetweb.yaml b/src/ceph/qa/suites/rgw/multifs/frontend/civetweb.yaml
new file mode 100644
index 0000000..5845a0e
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/frontend/civetweb.yaml
@@ -0,0 +1,3 @@
+overrides:
+ rgw:
+ frontend: civetweb
diff --git a/src/ceph/qa/suites/rgw/multifs/objectstore b/src/ceph/qa/suites/rgw/multifs/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/multifs/overrides.yaml b/src/ceph/qa/suites/rgw/multifs/overrides.yaml
new file mode 100644
index 0000000..161e1e3
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/overrides.yaml
@@ -0,0 +1,8 @@
+overrides:
+ ceph:
+ wait-for-scrub: false
+ conf:
+ client:
+ debug rgw: 20
+ rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+ rgw crypt require ssl: false
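
The two testkey-* values are static, base64-encoded 32-byte (AES-256-sized) keys that the s3tests SSE-KMS cases reference by name, and 'rgw crypt require ssl' is disabled because the test endpoint speaks plain HTTP. A hypothetical client request against this gateway would select a key like so (illustrative headers only):

# x-amz-server-side-encryption: aws:kms
# x-amz-server-side-encryption-aws-kms-key-id: testkey-1
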
diff --git a/src/ceph/qa/suites/rgw/multifs/rgw_pool_type b/src/ceph/qa/suites/rgw/multifs/rgw_pool_type
new file mode 120000
index 0000000..0506f61
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/rgw_pool_type
@@ -0,0 +1 @@
+../../../rgw_pool_type \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml
new file mode 100644
index 0000000..c518d0e
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml
@@ -0,0 +1,10 @@
+# Amazon/S3.pm (cpan) not available as an rpm
+os_type: ubuntu
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- workunit:
+ clients:
+ client.0:
+ - rgw/s3_bucket_quota.pl
diff --git a/src/ceph/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml
new file mode 100644
index 0000000..b042aa8
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml
@@ -0,0 +1,10 @@
+# Amazon::S3 is not available on el7
+os_type: ubuntu
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- workunit:
+ clients:
+ client.0:
+ - rgw/s3_multipart_upload.pl
diff --git a/src/ceph/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml
new file mode 100644
index 0000000..c7efaa1
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3readwrite:
+ client.0:
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
diff --git a/src/ceph/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml
new file mode 100644
index 0000000..47b3c18
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml
@@ -0,0 +1,16 @@
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3roundtrip:
+ client.0:
+ rgw_server: client.0
+ roundtrip:
+ bucket: rttest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
diff --git a/src/ceph/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml
new file mode 100644
index 0000000..da05a5e
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml
@@ -0,0 +1,13 @@
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3tests:
+ client.0:
+ force-branch: ceph-luminous
+ rgw_server: client.0
+overrides:
+ ceph:
+ conf:
+ client:
+ rgw lc debug interval: 10
diff --git a/src/ceph/qa/suites/rgw/multifs/tasks/rgw_swift.yaml b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_swift.yaml
new file mode 100644
index 0000000..569741b
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_swift.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- swift:
+ client.0:
+ rgw_server: client.0
diff --git a/src/ceph/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml
new file mode 100644
index 0000000..ef9d6df
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml
@@ -0,0 +1,10 @@
+# Amazon/S3.pm (cpan) not available as an rpm
+os_type: ubuntu
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- workunit:
+ clients:
+ client.0:
+ - rgw/s3_user_quota.pl
diff --git a/src/ceph/qa/suites/rgw/multisite/% b/src/ceph/qa/suites/rgw/multisite/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multisite/%
diff --git a/src/ceph/qa/suites/rgw/multisite/clusters.yaml b/src/ceph/qa/suites/rgw/multisite/clusters.yaml
new file mode 100644
index 0000000..536ef7c
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multisite/clusters.yaml
@@ -0,0 +1,3 @@
+roles:
+- [c1.mon.a, c1.mgr.x, c1.osd.0, c1.osd.1, c1.osd.2, c1.client.0, c1.client.1]
+- [c2.mon.a, c2.mgr.x, c2.osd.0, c2.osd.1, c2.osd.2, c2.client.0, c2.client.1]
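
Each role here is '<cluster>.<daemon>.<id>', so teuthology stands up two independent clusters named c1 and c2 on the two hosts; later fragments address them by that prefix, as the tasks file further down in this patch does:

tasks:
- ceph: {cluster: c1}
- ceph: {cluster: c2}
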
diff --git a/src/ceph/qa/suites/rgw/multisite/frontend/civetweb.yaml b/src/ceph/qa/suites/rgw/multisite/frontend/civetweb.yaml
new file mode 100644
index 0000000..5845a0e
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multisite/frontend/civetweb.yaml
@@ -0,0 +1,3 @@
+overrides:
+ rgw:
+ frontend: civetweb
diff --git a/src/ceph/qa/suites/rgw/multisite/overrides.yaml b/src/ceph/qa/suites/rgw/multisite/overrides.yaml
new file mode 100644
index 0000000..1ac9181
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multisite/overrides.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ wait-for-scrub: false
+ conf:
+ client:
+ debug rgw: 20
+ rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo=
+ rgw crypt require ssl: false
+ rgw:
+ compression type: random
diff --git a/src/ceph/qa/suites/rgw/multisite/realms/three-zone.yaml b/src/ceph/qa/suites/rgw/multisite/realms/three-zone.yaml
new file mode 100644
index 0000000..a8a7ca1
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multisite/realms/three-zone.yaml
@@ -0,0 +1,20 @@
+overrides:
+ rgw-multisite:
+ realm:
+ name: test-realm
+ is default: true
+ zonegroups:
+ - name: test-zonegroup
+ is_master: true
+ is_default: true
+ endpoints: [c1.client.0]
+ zones:
+ - name: test-zone1
+ is_master: true
+ is_default: true
+ endpoints: [c1.client.0]
+ - name: test-zone2
+ is_default: true
+ endpoints: [c2.client.0]
+ - name: test-zone3
+ endpoints: [c1.client.1]
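
One zone per realm acts as the metadata master (test-zone1 here); the other zones pull realm metadata from it and sync objects over the listed endpoints, giving a single zonegroup replicated across three zones on two clusters. The layout, as an annotated aside:

# test-realm
# └── test-zonegroup (master; endpoints on c1.client.0)
#     ├── test-zone1  master zone, c1.client.0
#     ├── test-zone2  c2.client.0 (cross-cluster sync peer)
#     └── test-zone3  c1.client.1 (second zone on cluster c1)
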
diff --git a/src/ceph/qa/suites/rgw/multisite/realms/two-zonegroup.yaml b/src/ceph/qa/suites/rgw/multisite/realms/two-zonegroup.yaml
new file mode 100644
index 0000000..dc5a786
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multisite/realms/two-zonegroup.yaml
@@ -0,0 +1,27 @@
+overrides:
+ rgw-multisite:
+ realm:
+ name: test-realm
+ is default: true
+ zonegroups:
+ - name: a
+ is_master: true
+ is_default: true
+ endpoints: [c1.client.0]
+ zones:
+ - name: a1
+ is_master: true
+ is_default: true
+ endpoints: [c1.client.0]
+ - name: a2
+ endpoints: [c1.client.1]
+ - name: b
+ is_default: true
+ endpoints: [c2.client.0]
+ zones:
+ - name: b1
+ is_master: true
+ is_default: true
+ endpoints: [c2.client.0]
+ - name: b2
+ endpoints: [c2.client.1]
diff --git a/src/ceph/qa/suites/rgw/multisite/tasks/test_multi.yaml b/src/ceph/qa/suites/rgw/multisite/tasks/test_multi.yaml
new file mode 100644
index 0000000..a8f8978
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multisite/tasks/test_multi.yaml
@@ -0,0 +1,20 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+tasks:
+- install:
+- ceph: {cluster: c1}
+- ceph: {cluster: c2}
+- rgw:
+ c1.client.0:
+ valgrind: [--tool=memcheck]
+ c1.client.1:
+ valgrind: [--tool=memcheck]
+ c2.client.0:
+ valgrind: [--tool=memcheck]
+ c2.client.1:
+ valgrind: [--tool=memcheck]
+- rgw-multisite:
+- rgw-multisite-tests:
+ config:
+ reconfigure_delay: 60
diff --git a/src/ceph/qa/suites/rgw/multisite/valgrind.yaml b/src/ceph/qa/suites/rgw/multisite/valgrind.yaml
new file mode 100644
index 0000000..08fad9d
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/multisite/valgrind.yaml
@@ -0,0 +1,17 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ ceph:
+ conf:
+ global:
+ osd heartbeat grace: 40
+ mon:
+ mon osd crush smoke test: false
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
diff --git a/src/ceph/qa/suites/rgw/singleton/% b/src/ceph/qa/suites/rgw/singleton/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/singleton/%
diff --git a/src/ceph/qa/suites/rgw/singleton/all/radosgw-admin.yaml b/src/ceph/qa/suites/rgw/singleton/all/radosgw-admin.yaml
new file mode 100644
index 0000000..aada29b
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/singleton/all/radosgw-admin.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, osd.0]
+- [mgr.x, client.0, osd.1, osd.2, osd.3]
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ debug ms: 1
+ rgw gc obj min wait: 15
+ osd:
+ debug ms: 1
+ debug objclass : 20
+- rgw:
+ client.0:
+- radosgw-admin:
diff --git a/src/ceph/qa/suites/rgw/singleton/frontend/civetweb.yaml b/src/ceph/qa/suites/rgw/singleton/frontend/civetweb.yaml
new file mode 100644
index 0000000..5845a0e
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/singleton/frontend/civetweb.yaml
@@ -0,0 +1,3 @@
+overrides:
+ rgw:
+ frontend: civetweb
diff --git a/src/ceph/qa/suites/rgw/singleton/objectstore b/src/ceph/qa/suites/rgw/singleton/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/singleton/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/singleton/overrides.yaml b/src/ceph/qa/suites/rgw/singleton/overrides.yaml
new file mode 100644
index 0000000..fc35b09
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/singleton/overrides.yaml
@@ -0,0 +1,8 @@
+overrides:
+ ceph:
+ wait-for-scrub: false
+ conf:
+ client:
+ debug rgw: 20
+ rgw:
+ frontend: civetweb
diff --git a/src/ceph/qa/suites/rgw/singleton/rgw_pool_type b/src/ceph/qa/suites/rgw/singleton/rgw_pool_type
new file mode 120000
index 0000000..77fa7e7
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/singleton/rgw_pool_type
@@ -0,0 +1 @@
+../../../rgw_pool_type/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/thrash/% b/src/ceph/qa/suites/rgw/thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/%
diff --git a/src/ceph/qa/suites/rgw/thrash/civetweb.yaml b/src/ceph/qa/suites/rgw/thrash/civetweb.yaml
new file mode 100644
index 0000000..5845a0e
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/civetweb.yaml
@@ -0,0 +1,3 @@
+overrides:
+ rgw:
+ frontend: civetweb
diff --git a/src/ceph/qa/suites/rgw/thrash/clusters/fixed-2.yaml b/src/ceph/qa/suites/rgw/thrash/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/thrash/install.yaml b/src/ceph/qa/suites/rgw/thrash/install.yaml
new file mode 100644
index 0000000..84a1d70
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/install.yaml
@@ -0,0 +1,5 @@
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+
diff --git a/src/ceph/qa/suites/rgw/thrash/objectstore b/src/ceph/qa/suites/rgw/thrash/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/thrash/thrasher/default.yaml b/src/ceph/qa/suites/rgw/thrash/thrasher/default.yaml
new file mode 100644
index 0000000..d880d53
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/thrasher/default.yaml
@@ -0,0 +1,8 @@
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ op_delay: 30
+ chance_test_min_size: 0
+ ceph_objectstore_tool: false
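
These knobs go straight to the thrashosds task; their meaning, summarized here as assumed semantics worth checking against qa/tasks/thrashosds.py:

# timeout: 1200                 seconds to wait for PGs to go clean
#                               after thrashing before failing the job
# chance_pgnum_grow: 1          relative weight of "grow a pool's pg_num"
# chance_pgpnum_fix: 1          relative weight of "set pgp_num = pg_num"
# op_delay: 30                  seconds between thrash operations
# chance_test_min_size: 0       never run the min_size revival test
# ceph_objectstore_tool: false  skip PG export/import while OSDs are down
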
diff --git a/src/ceph/qa/suites/rgw/thrash/thrashosds-health.yaml b/src/ceph/qa/suites/rgw/thrash/thrashosds-health.yaml
new file mode 120000
index 0000000..ebf7f34
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/thrash/workload/rgw_bucket_quota.yaml b/src/ceph/qa/suites/rgw/thrash/workload/rgw_bucket_quota.yaml
new file mode 100644
index 0000000..32e6af5
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/workload/rgw_bucket_quota.yaml
@@ -0,0 +1,7 @@
+# Amazon/S3.pm (cpan) not available as an rpm
+os_type: ubuntu
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rgw/s3_bucket_quota.pl
diff --git a/src/ceph/qa/suites/rgw/thrash/workload/rgw_multipart_upload.yaml b/src/ceph/qa/suites/rgw/thrash/workload/rgw_multipart_upload.yaml
new file mode 100644
index 0000000..b792336
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/workload/rgw_multipart_upload.yaml
@@ -0,0 +1,7 @@
+# Amazon::S3 is not available on el7
+os_type: ubuntu
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rgw/s3_multipart_upload.pl
diff --git a/src/ceph/qa/suites/rgw/thrash/workload/rgw_readwrite.yaml b/src/ceph/qa/suites/rgw/thrash/workload/rgw_readwrite.yaml
new file mode 100644
index 0000000..e4e6828
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/workload/rgw_readwrite.yaml
@@ -0,0 +1,13 @@
+tasks:
+- s3readwrite:
+ client.0:
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
diff --git a/src/ceph/qa/suites/rgw/thrash/workload/rgw_roundtrip.yaml b/src/ceph/qa/suites/rgw/thrash/workload/rgw_roundtrip.yaml
new file mode 100644
index 0000000..621683a
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/workload/rgw_roundtrip.yaml
@@ -0,0 +1,13 @@
+tasks:
+- s3roundtrip:
+ client.0:
+ rgw_server: client.0
+ roundtrip:
+ bucket: rttest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
diff --git a/src/ceph/qa/suites/rgw/thrash/workload/rgw_s3tests.yaml b/src/ceph/qa/suites/rgw/thrash/workload/rgw_s3tests.yaml
new file mode 100644
index 0000000..82ac7c1
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/workload/rgw_s3tests.yaml
@@ -0,0 +1,12 @@
+tasks:
+- s3tests:
+ client.0:
+ force-branch: ceph-luminous
+ rgw_server: client.0
+overrides:
+ ceph:
+ conf:
+ client:
+ rgw lc debug interval: 10
+ rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+ rgw crypt require ssl: false
diff --git a/src/ceph/qa/suites/rgw/thrash/workload/rgw_swift.yaml b/src/ceph/qa/suites/rgw/thrash/workload/rgw_swift.yaml
new file mode 100644
index 0000000..45e2fc9
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/workload/rgw_swift.yaml
@@ -0,0 +1,4 @@
+tasks:
+- swift:
+ client.0:
+ rgw_server: client.0
diff --git a/src/ceph/qa/suites/rgw/thrash/workload/rgw_user_quota.yaml b/src/ceph/qa/suites/rgw/thrash/workload/rgw_user_quota.yaml
new file mode 100644
index 0000000..0a98882
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/thrash/workload/rgw_user_quota.yaml
@@ -0,0 +1,7 @@
+# Amazon/S3.pm (cpan) not available as an rpm
+os_type: ubuntu
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rgw/s3_user_quota.pl
diff --git a/src/ceph/qa/suites/rgw/verify/% b/src/ceph/qa/suites/rgw/verify/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/%
diff --git a/src/ceph/qa/suites/rgw/verify/clusters/fixed-2.yaml b/src/ceph/qa/suites/rgw/verify/clusters/fixed-2.yaml
new file mode 120000
index 0000000..cd0791a
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-2.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/verify/frontend/civetweb.yaml b/src/ceph/qa/suites/rgw/verify/frontend/civetweb.yaml
new file mode 100644
index 0000000..5845a0e
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/frontend/civetweb.yaml
@@ -0,0 +1,3 @@
+overrides:
+ rgw:
+ frontend: civetweb
diff --git a/src/ceph/qa/suites/rgw/verify/msgr-failures/few.yaml b/src/ceph/qa/suites/rgw/verify/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rgw/verify/objectstore b/src/ceph/qa/suites/rgw/verify/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/objectstore
@@ -0,0 +1 @@
+../../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/verify/overrides.yaml b/src/ceph/qa/suites/rgw/verify/overrides.yaml
new file mode 100644
index 0000000..a9ffd29
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/overrides.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ debug rgw: 20
+ rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+ rgw crypt require ssl: false
+ rgw:
+ frontend: civetweb
+ compression type: random
diff --git a/src/ceph/qa/suites/rgw/verify/rgw_pool_type b/src/ceph/qa/suites/rgw/verify/rgw_pool_type
new file mode 120000
index 0000000..77fa7e7
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/rgw_pool_type
@@ -0,0 +1 @@
+../../../rgw_pool_type/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml b/src/ceph/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml
new file mode 100644
index 0000000..cf41338
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml
@@ -0,0 +1,22 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+tasks:
+- install:
+ flavor: notcmalloc
+- ceph:
+- rgw:
+ client.0:
+ valgrind: [--tool=memcheck]
+- s3tests:
+ client.0:
+ force-branch: ceph-luminous
+ rgw_server: client.0
+overrides:
+ ceph:
+ conf:
+ global:
+ osd_min_pg_log_entries: 10
+ osd_max_pg_log_entries: 10
+ client:
+ rgw lc debug interval: 10
diff --git a/src/ceph/qa/suites/rgw/verify/tasks/rgw_swift.yaml b/src/ceph/qa/suites/rgw/verify/tasks/rgw_swift.yaml
new file mode 100644
index 0000000..9b3aa6f
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/tasks/rgw_swift.yaml
@@ -0,0 +1,13 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+tasks:
+- install:
+ flavor: notcmalloc
+- ceph:
+- rgw:
+ client.0:
+ valgrind: [--tool=memcheck]
+- swift:
+ client.0:
+ rgw_server: client.0
diff --git a/src/ceph/qa/suites/rgw/verify/validater/lockdep.yaml b/src/ceph/qa/suites/rgw/verify/validater/lockdep.yaml
new file mode 100644
index 0000000..941fe12
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/validater/lockdep.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ lockdep: true
+ mon:
+ lockdep: true
diff --git a/src/ceph/qa/suites/rgw/verify/validater/valgrind.yaml b/src/ceph/qa/suites/rgw/verify/validater/valgrind.yaml
new file mode 100644
index 0000000..b2095b0
--- /dev/null
+++ b/src/ceph/qa/suites/rgw/verify/validater/valgrind.yaml
@@ -0,0 +1,18 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ debuginfo: true
+ ceph:
+ conf:
+ global:
+ osd heartbeat grace: 40
+ mon:
+ mon osd crush smoke test: false
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
diff --git a/src/ceph/qa/suites/samba/% b/src/ceph/qa/suites/samba/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/samba/%
diff --git a/src/ceph/qa/suites/samba/clusters/samba-basic.yaml b/src/ceph/qa/suites/samba/clusters/samba-basic.yaml
new file mode 100644
index 0000000..af432f6
--- /dev/null
+++ b/src/ceph/qa/suites/samba/clusters/samba-basic.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1]
+- [samba.0, client.0, client.1]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/samba/install/install.yaml b/src/ceph/qa/suites/samba/install/install.yaml
new file mode 100644
index 0000000..c53f9c5
--- /dev/null
+++ b/src/ceph/qa/suites/samba/install/install.yaml
@@ -0,0 +1,9 @@
+# we currently can't install Samba on RHEL; need a gitbuilder and code updates
+os_type: ubuntu
+
+tasks:
+- install:
+- install:
+ project: samba
+ extra_packages: ['samba']
+- ceph:
diff --git a/src/ceph/qa/suites/samba/mount/fuse.yaml b/src/ceph/qa/suites/samba/mount/fuse.yaml
new file mode 100644
index 0000000..d00ffdb
--- /dev/null
+++ b/src/ceph/qa/suites/samba/mount/fuse.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse: [client.0]
+- samba:
+ samba.0:
+ ceph: "{testdir}/mnt.0"
+
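
'{testdir}' is expanded per remote with teuthology's working directory, so the samba task exports the tree that ceph-fuse just mounted for client.0. Expanded, the mapping would look roughly like this (the path is the conventional default, assumed here):

- samba:
    samba.0:
      ceph: "/home/ubuntu/cephtest/mnt.0"
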
diff --git a/src/ceph/qa/suites/samba/mount/kclient.yaml b/src/ceph/qa/suites/samba/mount/kclient.yaml
new file mode 100644
index 0000000..8baa09f
--- /dev/null
+++ b/src/ceph/qa/suites/samba/mount/kclient.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+kernel:
+ client:
+ branch: testing
+tasks:
+- kclient: [client.0]
+- samba:
+ samba.0:
+ ceph: "{testdir}/mnt.0"
+
diff --git a/src/ceph/qa/suites/samba/mount/native.yaml b/src/ceph/qa/suites/samba/mount/native.yaml
new file mode 100644
index 0000000..09b8c1c
--- /dev/null
+++ b/src/ceph/qa/suites/samba/mount/native.yaml
@@ -0,0 +1,2 @@
+tasks:
+- samba:
diff --git a/src/ceph/qa/suites/samba/mount/noceph.yaml b/src/ceph/qa/suites/samba/mount/noceph.yaml
new file mode 100644
index 0000000..3cad474
--- /dev/null
+++ b/src/ceph/qa/suites/samba/mount/noceph.yaml
@@ -0,0 +1,5 @@
+tasks:
+- localdir: [client.0]
+- samba:
+ samba.0:
+ ceph: "{testdir}/mnt.0"
diff --git a/src/ceph/qa/suites/samba/objectstore b/src/ceph/qa/suites/samba/objectstore
new file mode 120000
index 0000000..39d9417
--- /dev/null
+++ b/src/ceph/qa/suites/samba/objectstore
@@ -0,0 +1 @@
+../../objectstore \ No newline at end of file
diff --git a/src/ceph/qa/suites/samba/workload/cifs-dbench.yaml b/src/ceph/qa/suites/samba/workload/cifs-dbench.yaml
new file mode 100644
index 0000000..c13c1c0
--- /dev/null
+++ b/src/ceph/qa/suites/samba/workload/cifs-dbench.yaml
@@ -0,0 +1,8 @@
+tasks:
+- cifs-mount:
+ client.1:
+ share: ceph
+- workunit:
+ clients:
+ client.1:
+ - suites/dbench.sh
diff --git a/src/ceph/qa/suites/samba/workload/cifs-fsstress.yaml b/src/ceph/qa/suites/samba/workload/cifs-fsstress.yaml
new file mode 100644
index 0000000..ff003af
--- /dev/null
+++ b/src/ceph/qa/suites/samba/workload/cifs-fsstress.yaml
@@ -0,0 +1,8 @@
+tasks:
+- cifs-mount:
+ client.1:
+ share: ceph
+- workunit:
+ clients:
+ client.1:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled b/src/ceph/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled
new file mode 100644
index 0000000..ab9ff8a
--- /dev/null
+++ b/src/ceph/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled
@@ -0,0 +1,9 @@
+tasks:
+- cifs-mount:
+ client.1:
+ share: ceph
+- workunit:
+ clients:
+ client.1:
+ - kernel_untar_build.sh
+
diff --git a/src/ceph/qa/suites/samba/workload/smbtorture.yaml b/src/ceph/qa/suites/samba/workload/smbtorture.yaml
new file mode 100644
index 0000000..823489a
--- /dev/null
+++ b/src/ceph/qa/suites/samba/workload/smbtorture.yaml
@@ -0,0 +1,39 @@
+tasks:
+- pexec:
+ client.1:
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid
diff --git a/src/ceph/qa/suites/smoke/1node/% b/src/ceph/qa/suites/smoke/1node/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/1node/%
diff --git a/src/ceph/qa/suites/smoke/1node/clusters/+ b/src/ceph/qa/suites/smoke/1node/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/1node/clusters/+
diff --git a/src/ceph/qa/suites/smoke/1node/clusters/fixed-1.yaml b/src/ceph/qa/suites/smoke/1node/clusters/fixed-1.yaml
new file mode 120000
index 0000000..435ea3c
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/1node/clusters/fixed-1.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-1.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/smoke/1node/clusters/openstack.yaml b/src/ceph/qa/suites/smoke/1node/clusters/openstack.yaml
new file mode 100644
index 0000000..39e43d0
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/1node/clusters/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+ - machine:
+ disk: 40 # GB
+ ram: 8000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/smoke/1node/distros/ubuntu_latest.yaml b/src/ceph/qa/suites/smoke/1node/distros/ubuntu_latest.yaml
new file mode 120000
index 0000000..21601ef
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/1node/distros/ubuntu_latest.yaml
@@ -0,0 +1 @@
+../../../../distros/supported/ubuntu_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/smoke/1node/objectstore/filestore-xfs.yaml b/src/ceph/qa/suites/smoke/1node/objectstore/filestore-xfs.yaml
new file mode 120000
index 0000000..1af1dfd
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/1node/objectstore/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../../objectstore/filestore-xfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/smoke/1node/tasks/ceph-deploy.yaml b/src/ceph/qa/suites/smoke/1node/tasks/ceph-deploy.yaml
new file mode 100644
index 0000000..5a30923
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/1node/tasks/ceph-deploy.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: |
+    Run ceph-deploy CLI tests on one node
+    and verify the CLI works and the cluster can reach
+    HEALTH_OK state (implicitly verifying the daemons via init).
+tasks:
+- ceph_deploy.single_node_test: null
diff --git a/src/ceph/qa/suites/smoke/basic/% b/src/ceph/qa/suites/smoke/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/%
diff --git a/src/ceph/qa/suites/smoke/basic/clusters/+ b/src/ceph/qa/suites/smoke/basic/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/clusters/+
diff --git a/src/ceph/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml b/src/ceph/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml
new file mode 120000
index 0000000..a482e65
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3-cephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/smoke/basic/clusters/openstack.yaml b/src/ceph/qa/suites/smoke/basic/clusters/openstack.yaml
new file mode 100644
index 0000000..7d652b4
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/clusters/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+ - machine:
+ disk: 40 # GB
+ ram: 8000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/smoke/basic/objectstore/bluestore.yaml b/src/ceph/qa/suites/smoke/basic/objectstore/bluestore.yaml
new file mode 120000
index 0000000..bd7d7e0
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml
new file mode 100644
index 0000000..2ee4177
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..b58487c
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml b/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..dc6df2f
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse: [client.0]
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml b/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..a76154d
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml
@@ -0,0 +1,18 @@
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ mds:
+ debug mds: 20
+ debug ms: 1
+ client:
+ debug client: 20
+ debug ms: 1
+ fuse default permissions: false
+ fuse set user groups: true
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml b/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml
new file mode 100644
index 0000000..2182007
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - direct_io
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml b/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml
new file mode 100644
index 0000000..01d7470
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml b/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml
new file mode 100644
index 0000000..42d6b97
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml b/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml
new file mode 100644
index 0000000..6818a2a
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml b/src/ceph/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml
new file mode 100644
index 0000000..aa2e767
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ debug ms: 1
+ debug client: 20
+ mds:
+ debug ms: 1
+ debug mds: 20
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs/test.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/mon_thrash.yaml b/src/ceph/qa/suites/smoke/basic/tasks/mon_thrash.yaml
new file mode 100644
index 0000000..591931d
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/mon_thrash.yaml
@@ -0,0 +1,24 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - \(POOL_APP_NOT_ENABLED\)
+ conf:
+ global:
+ ms inject delay max: 1
+ ms inject delay probability: 0.005
+ ms inject delay type: mon
+ ms inject internal delays: 0.002
+ ms inject socket failures: 2500
+tasks:
+- install: null
+- ceph:
+ fs: xfs
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
+ thrash_many: true
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rados_api_tests.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rados_api_tests.yaml
new file mode 100644
index 0000000..d17f60d
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rados_api_tests.yaml
@@ -0,0 +1,17 @@
+tasks:
+- install: null
+- ceph:
+ fs: ext4
+ log-whitelist:
+ - reached quota
+ - but it is still running
+ - objects unfound and apparently lost
+    - \(POOL_APP_NOT_ENABLED\)
+- thrashosds:
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
+ timeout: 1200
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rados_bench.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rados_bench.yaml
new file mode 100644
index 0000000..f46ffb9
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rados_bench.yaml
@@ -0,0 +1,36 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject delay max: 1
+ ms inject delay probability: 0.005
+ ms inject delay type: osd
+ ms inject internal delays: 0.002
+ ms inject socket failures: 2500
+tasks:
+- install: null
+- ceph:
+ fs: xfs
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+- thrashosds:
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
+ timeout: 1200
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml
new file mode 100644
index 0000000..e7d9e89
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml
@@ -0,0 +1,41 @@
+tasks:
+- install: null
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+- thrashosds:
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
+ timeout: 1200
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+- rados:
+ clients:
+ - client.0
+ objects: 500
+ op_weights:
+ copy_from: 50
+ delete: 50
+ cache_evict: 50
+ cache_flush: 50
+ read: 100
+ rollback: 50
+ snap_create: 50
+ snap_remove: 50
+ cache_try_flush: 50
+ write: 100
+ ops: 4000
+ pool_snaps: true
+ pools:
+ - base
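
The exec block wires pool 'cache' as a writeback tier in front of 'base' before the snapshot workload starts, and the tiny object target keeps the tier churning, which is exactly what the test wants to exercise. Annotated (comments only; same commands as above):

# tier add / cache-mode / set-overlay -> route base's I/O through cache
# hit_set type/count/period           -> bloom-filter hit tracking,
#                                        8 sets of one hour each
# target_max_objects: 250             -> the tier fills almost at once,
#                                        forcing constant flush/evict
#                                        under the snap workload
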
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rados_cls_all.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rados_cls_all.yaml
new file mode 100644
index 0000000..7f18a7e
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rados_cls_all.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- workunit:
+ clients:
+ client.0:
+ - cls
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml
new file mode 100644
index 0000000..a2e23ae
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml
@@ -0,0 +1,31 @@
+tasks:
+- install: null
+- ceph:
+ fs: xfs
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+- thrashosds:
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+ timeout: 1200
+- rados:
+ clients:
+ - client.0
+ ec_pool: true
+ max_in_flight: 64
+ max_seconds: 600
+ objects: 1024
+ op_weights:
+ append: 100
+ copy_from: 50
+ delete: 50
+ read: 100
+ rmattr: 25
+ rollback: 50
+ setattr: 25
+ snap_create: 50
+ snap_remove: 50
+ write: 0
+ ops: 400000
+ size: 16384
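
Note the op weights: plain overwrites are disabled (write: 0) and appends carry the load, because erasure-coded pools in this release cannot partially overwrite objects unless EC overwrites are explicitly enabled or a cache tier fronts the pool. As a comment-only reminder:

# EC pool mutation rules assumed here:
#   append: 100  supported (objects grow by whole appends)
#   write: 0     partial overwrite would be rejected on the EC pool
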
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rados_python.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rados_python.yaml
new file mode 100644
index 0000000..c6d2cee
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rados_python.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rados/test_python.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml
new file mode 100644
index 0000000..0d472a3
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+ fs: ext4
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml
new file mode 100644
index 0000000..a0dda21
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "1"
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..e9f38d3
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rbd_fsx.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rbd_fsx.yaml
new file mode 100644
index 0000000..ed737a3
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rbd_fsx.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd cache: true
+ global:
+ ms inject socket failures: 5000
+tasks:
+- install: null
+- ceph:
+ fs: xfs
+- thrashosds:
+ timeout: 1200
+- rbd_fsx:
+ clients:
+ - client.0
+ ops: 2000
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml
new file mode 100644
index 0000000..9714a6e
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "1"
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml
new file mode 100644
index 0000000..237aa4b
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+ client:
+ rbd default features: 5
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml
new file mode 100644
index 0000000..dc8fb6f
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml
@@ -0,0 +1,16 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    cache-pools: true
+    frontend: civetweb
+  ceph:
+    conf:
+      client:
+        rgw lc debug interval: 10
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3tests:
+    client.0:
+      rgw_server: client.0
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml
new file mode 100644
index 0000000..cc83ee3
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml
@@ -0,0 +1,13 @@
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- rgw: [client.0]
+- s3tests:
+ client.0:
+ rgw_server: client.0
+overrides:
+ ceph:
+ conf:
+ client:
+ rgw lc debug interval: 10
diff --git a/src/ceph/qa/suites/smoke/basic/tasks/rgw_swift.yaml b/src/ceph/qa/suites/smoke/basic/tasks/rgw_swift.yaml
new file mode 100644
index 0000000..57c7226
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/basic/tasks/rgw_swift.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+ fs: ext4
+- rgw: [client.0]
+- swift:
+ client.0:
+ rgw_server: client.0
diff --git a/src/ceph/qa/suites/smoke/systemd/% b/src/ceph/qa/suites/smoke/systemd/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/systemd/%
diff --git a/src/ceph/qa/suites/smoke/systemd/clusters/+ b/src/ceph/qa/suites/smoke/systemd/clusters/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/systemd/clusters/+
diff --git a/src/ceph/qa/suites/smoke/systemd/clusters/fixed-4.yaml b/src/ceph/qa/suites/smoke/systemd/clusters/fixed-4.yaml
new file mode 100644
index 0000000..43b4de7
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/systemd/clusters/fixed-4.yaml
@@ -0,0 +1,5 @@
+roles:
+- [mon.a, mgr.x, osd.0]
+- [osd.1, osd.2]
+- [mds.a, osd.3]
+- [mon.b, client.0]
diff --git a/src/ceph/qa/suites/smoke/systemd/clusters/openstack.yaml b/src/ceph/qa/suites/smoke/systemd/clusters/openstack.yaml
new file mode 100644
index 0000000..4d6edcd
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/systemd/clusters/openstack.yaml
@@ -0,0 +1,8 @@
+openstack:
+ - machine:
+ disk: 40 # GB
+ ram: 8000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/src/ceph/qa/suites/smoke/systemd/distros/centos_latest.yaml b/src/ceph/qa/suites/smoke/systemd/distros/centos_latest.yaml
new file mode 120000
index 0000000..99ec2bb
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/systemd/distros/centos_latest.yaml
@@ -0,0 +1 @@
+../../../../distros/supported/centos_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/smoke/systemd/distros/ubuntu_latest.yaml b/src/ceph/qa/suites/smoke/systemd/distros/ubuntu_latest.yaml
new file mode 120000
index 0000000..21601ef
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/systemd/distros/ubuntu_latest.yaml
@@ -0,0 +1 @@
+../../../../distros/supported/ubuntu_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/smoke/systemd/objectstore/filestore-xfs.yaml b/src/ceph/qa/suites/smoke/systemd/objectstore/filestore-xfs.yaml
new file mode 120000
index 0000000..1af1dfd
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/systemd/objectstore/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../../objectstore/filestore-xfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/smoke/systemd/tasks/systemd.yaml b/src/ceph/qa/suites/smoke/systemd/tasks/systemd.yaml
new file mode 100644
index 0000000..67b6170
--- /dev/null
+++ b/src/ceph/qa/suites/smoke/systemd/tasks/systemd.yaml
@@ -0,0 +1,8 @@
+tasks:
+- ssh-keys:
+- ceph-deploy:
+- systemd:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
diff --git a/src/ceph/qa/suites/stress/bench/% b/src/ceph/qa/suites/stress/bench/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/stress/bench/%
diff --git a/src/ceph/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml b/src/ceph/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml
new file mode 120000
index 0000000..a482e65
--- /dev/null
+++ b/src/ceph/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml
@@ -0,0 +1 @@
+../../../../clusters/fixed-3-cephfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml b/src/ceph/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml
new file mode 100644
index 0000000..eafec39
--- /dev/null
+++ b/src/ceph/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - snaps
diff --git a/src/ceph/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml b/src/ceph/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml
new file mode 100644
index 0000000..a0d2e76
--- /dev/null
+++ b/src/ceph/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
diff --git a/src/ceph/qa/suites/stress/thrash/% b/src/ceph/qa/suites/stress/thrash/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/%
diff --git a/src/ceph/qa/suites/stress/thrash/clusters/16-osd.yaml b/src/ceph/qa/suites/stress/thrash/clusters/16-osd.yaml
new file mode 100644
index 0000000..7623233
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/clusters/16-osd.yaml
@@ -0,0 +1,18 @@
+roles:
+- [mon.a, mds.a, osd.0]
+- [mon.b, mgr.x, osd.1]
+- [mon.c, mgr.y, osd.2]
+- [osd.3]
+- [osd.4]
+- [osd.5]
+- [osd.6]
+- [osd.7]
+- [osd.8]
+- [osd.9]
+- [osd.10]
+- [osd.11]
+- [osd.12]
+- [osd.13]
+- [osd.14]
+- [osd.15]
+- [client.0]
diff --git a/src/ceph/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml b/src/ceph/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml
new file mode 100644
index 0000000..8c3556a
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml
@@ -0,0 +1,3 @@
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mon.c, client.0]
diff --git a/src/ceph/qa/suites/stress/thrash/clusters/8-osd.yaml b/src/ceph/qa/suites/stress/thrash/clusters/8-osd.yaml
new file mode 100644
index 0000000..9f51c6b
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/clusters/8-osd.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mds.a, osd.0]
+- [mon.b, mgr.x, osd.1]
+- [mon.c, osd.2]
+- [osd.3]
+- [osd.4]
+- [osd.5]
+- [osd.6]
+- [osd.7]
+- [client.0]
diff --git a/src/ceph/qa/suites/stress/thrash/thrashers/default.yaml b/src/ceph/qa/suites/stress/thrash/thrashers/default.yaml
new file mode 100644
index 0000000..e628ba6
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/thrashers/default.yaml
@@ -0,0 +1,7 @@
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+- thrashosds:
diff --git a/src/ceph/qa/suites/stress/thrash/thrashers/fast.yaml b/src/ceph/qa/suites/stress/thrash/thrashers/fast.yaml
new file mode 100644
index 0000000..6bc9dff
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/thrashers/fast.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+- thrashosds:
+ op_delay: 1
+ chance_down: 10
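The thrasher variants differ only in their knobs: default.yaml takes the thrashosds defaults, fast.yaml shortens the delay between thrash operations while keeping the down-probability low, and more-down.yaml (the next file) raises the chance that an OSD is taken down each cycle. A sketch of an intermediate setting, with the interpretation of each knob stated as an assumption:

    tasks:
    - thrashosds:
        op_delay: 5        # seconds between thrash operations (assumed unit)
        chance_down: 30    # percent chance a cycle marks an OSD down (assumed semantics)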
diff --git a/src/ceph/qa/suites/stress/thrash/thrashers/more-down.yaml b/src/ceph/qa/suites/stress/thrash/thrashers/more-down.yaml
new file mode 100644
index 0000000..6042bf6
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/thrashers/more-down.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+- thrashosds:
+ chance_down: 50
diff --git a/src/ceph/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml b/src/ceph/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml
new file mode 100644
index 0000000..912f12d
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/bonnie.sh
diff --git a/src/ceph/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml b/src/ceph/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml
new file mode 100644
index 0000000..18a6051
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml
@@ -0,0 +1,6 @@
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
diff --git a/src/ceph/qa/suites/stress/thrash/workloads/radosbench.yaml b/src/ceph/qa/suites/stress/thrash/workloads/radosbench.yaml
new file mode 100644
index 0000000..3940870
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/workloads/radosbench.yaml
@@ -0,0 +1,4 @@
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 1800
diff --git a/src/ceph/qa/suites/stress/thrash/workloads/readwrite.yaml b/src/ceph/qa/suites/stress/thrash/workloads/readwrite.yaml
new file mode 100644
index 0000000..c53e52b
--- /dev/null
+++ b/src/ceph/qa/suites/stress/thrash/workloads/readwrite.yaml
@@ -0,0 +1,9 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
diff --git a/src/ceph/qa/suites/teuthology/buildpackages/% b/src/ceph/qa/suites/teuthology/buildpackages/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/buildpackages/%
diff --git a/src/ceph/qa/suites/teuthology/buildpackages/distros b/src/ceph/qa/suites/teuthology/buildpackages/distros
new file mode 120000
index 0000000..dd0d7f1
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/buildpackages/distros
@@ -0,0 +1 @@
+../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/teuthology/buildpackages/tasks/branch.yaml b/src/ceph/qa/suites/teuthology/buildpackages/tasks/branch.yaml
new file mode 100644
index 0000000..1dad96f
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/buildpackages/tasks/branch.yaml
@@ -0,0 +1,10 @@
+roles:
+ - [mon.a, mgr.x, client.0]
+tasks:
+ - install:
+ # branch has precedence over sha1
+ branch: hammer
+ sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling
+ - exec:
+ client.0:
+ - ceph --version | grep 'version 0.94'
diff --git a/src/ceph/qa/suites/teuthology/buildpackages/tasks/default.yaml b/src/ceph/qa/suites/teuthology/buildpackages/tasks/default.yaml
new file mode 100644
index 0000000..cb583c7
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/buildpackages/tasks/default.yaml
@@ -0,0 +1,14 @@
+roles:
+ - [client.0]
+tasks:
+ - install:
+ tag: v0.94.1
+ - exec:
+ client.0:
+ - ceph --version | grep 'version 0.94.1'
+ - install.upgrade:
+ client.0:
+ tag: v0.94.3
+ - exec:
+ client.0:
+ - ceph --version | grep 'version 0.94.3'
diff --git a/src/ceph/qa/suites/teuthology/buildpackages/tasks/tag.yaml b/src/ceph/qa/suites/teuthology/buildpackages/tasks/tag.yaml
new file mode 100644
index 0000000..2bfb8a9
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/buildpackages/tasks/tag.yaml
@@ -0,0 +1,11 @@
+roles:
+ - [mon.a, mgr.x, client.0]
+tasks:
+ - install:
+ # tag has precedence over branch and sha1
+ tag: v0.94.1
+ branch: firefly
+ sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling
+ - exec:
+ client.0:
+ - ceph --version | grep 'version 0.94.1'
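Taken together, branch.yaml and tag.yaml pin down the install task's source-selection order (branch wins over sha1; tag wins over both), while default.yaml exercises install.upgrade between two tags; each job proves the selection by grepping ceph --version. A condensed sketch of the precedence (the sha1 value is a placeholder, not a real commit):

    - install:
        tag: v0.94.1      # used if present
        branch: firefly   # consulted only when no tag is given
        sha1: <commit>    # consulted only when neither tag nor branch is given
    - exec:
        client.0:
        - ceph --version | grep 'version 0.94.1'   # confirms the tag won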
diff --git a/src/ceph/qa/suites/teuthology/ceph/% b/src/ceph/qa/suites/teuthology/ceph/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/ceph/%
diff --git a/src/ceph/qa/suites/teuthology/ceph/clusters/single.yaml b/src/ceph/qa/suites/teuthology/ceph/clusters/single.yaml
new file mode 100644
index 0000000..0c6a40d
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/ceph/clusters/single.yaml
@@ -0,0 +1,2 @@
+roles:
+ - [mon.a, mgr.x, client.0]
diff --git a/src/ceph/qa/suites/teuthology/ceph/distros b/src/ceph/qa/suites/teuthology/ceph/distros
new file mode 120000
index 0000000..dd0d7f1
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/ceph/distros
@@ -0,0 +1 @@
+../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/teuthology/ceph/tasks/teuthology.yaml b/src/ceph/qa/suites/teuthology/ceph/tasks/teuthology.yaml
new file mode 100644
index 0000000..00081c8
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/ceph/tasks/teuthology.yaml
@@ -0,0 +1,3 @@
+tasks:
+ - install:
+ - tests:
diff --git a/src/ceph/qa/suites/teuthology/integration.yaml b/src/ceph/qa/suites/teuthology/integration.yaml
new file mode 100644
index 0000000..8a7f1c7
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/integration.yaml
@@ -0,0 +1,2 @@
+tasks:
+- teuthology_integration:
diff --git a/src/ceph/qa/suites/teuthology/multi-cluster/% b/src/ceph/qa/suites/teuthology/multi-cluster/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/multi-cluster/%
diff --git a/src/ceph/qa/suites/teuthology/multi-cluster/all/ceph.yaml b/src/ceph/qa/suites/teuthology/multi-cluster/all/ceph.yaml
new file mode 100644
index 0000000..4659ef3
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/multi-cluster/all/ceph.yaml
@@ -0,0 +1,25 @@
+roles:
+- - ceph.mon.a
+ - ceph.mon.b
+ - ceph.mgr.x
+ - backup.osd.0
+ - backup.osd.1
+ - backup.osd.2
+ - backup.client.0
+- - backup.mon.a
+ - backup.mgr.x
+ - ceph.osd.0
+ - ceph.osd.1
+ - ceph.osd.2
+ - ceph.client.0
+ - client.1
+ - osd.3
+tasks:
+- install:
+- ceph:
+ cluster: backup
+- ceph:
+- workunit:
+ clients:
+ ceph.client.0: [true.sh]
+ backup.client.0: [true.sh]
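Cluster-qualified role names drive these multi-cluster tests: a role such as backup.osd.0 belongs to the cluster named backup, bare names such as osd.3 default to the cluster named ceph, and each ceph task stanza brings up one named cluster. A minimal sketch of the pattern:

    roles:
    - - ceph.mon.a       # mon for the default "ceph" cluster
      - backup.osd.0     # OSD for the second cluster, colocated on the same node
    tasks:
    - ceph:
        cluster: backup  # bring up the named cluster first
    - ceph:              # then the default cluster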
diff --git a/src/ceph/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml b/src/ceph/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml
new file mode 100644
index 0000000..52002f5
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml
@@ -0,0 +1,21 @@
+roles:
+- - backup.mon.a
+ - backup.mon.b
+ - backup.mgr.x
+ - backup.osd.0
+ - backup.osd.1
+ - backup.osd.2
+- - backup.mon.c
+ - backup.osd.3
+ - backup.osd.4
+ - backup.osd.5
+ - backup.client.0
+tasks:
+- install:
+- ceph:
+ cluster: backup
+- thrashosds:
+ cluster: backup
+- workunit:
+ clients:
+ all: [true.sh]
diff --git a/src/ceph/qa/suites/teuthology/multi-cluster/all/upgrade.yaml b/src/ceph/qa/suites/teuthology/multi-cluster/all/upgrade.yaml
new file mode 100644
index 0000000..42cd93b
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/multi-cluster/all/upgrade.yaml
@@ -0,0 +1,51 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - failed to encode map
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+roles:
+- - ceph.mon.a
+ - ceph.mon.b
+ - ceph.mgr.x
+ - backup.osd.0
+ - backup.osd.1
+ - backup.osd.2
+ - backup.client.0
+- - backup.mon.a
+ - backup.mgr.x
+ - ceph.osd.0
+ - ceph.osd.1
+ - ceph.osd.2
+ - ceph.client.0
+ - client.1
+ - osd.3
+tasks:
+- install:
+ branch: infernalis
+- ceph:
+ cluster: backup
+- ceph:
+- workunit:
+ clients:
+ backup.client.0: [true.sh]
+ ceph.client.0: [true.sh]
+- install.upgrade:
+ ceph.mon.a:
+ branch: jewel
+ backup.mon.a:
+ branch: jewel
+- ceph.restart: [ceph.mon.a, ceph.mon.b, ceph.osd.0, ceph.osd.1, ceph.osd.2, osd.3]
+- exec:
+ ceph.client.0:
+ - ceph --version | grep -F 'version 10.'
+ client.1:
+ - ceph --cluster backup --version | grep -F 'version 10.'
+ backup.client.0:
+ # cli upgraded
+ - ceph --cluster backup --id 0 --version | grep -F 'version 10.'
+ - ceph --version | grep -F 'version 10.'
+ # backup cluster mon not upgraded
+ - ceph --cluster backup --id 0 tell mon.a version | grep -F 'version 9.2.'
+ - ceph tell mon.a version | grep -F 'version 10.'
diff --git a/src/ceph/qa/suites/teuthology/multi-cluster/all/workunit.yaml b/src/ceph/qa/suites/teuthology/multi-cluster/all/workunit.yaml
new file mode 100644
index 0000000..b1288e3
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/multi-cluster/all/workunit.yaml
@@ -0,0 +1,23 @@
+roles:
+- - backup.mon.a
+ - backup.mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+ - backup.client.0
+- - mon.a
+ - mgr.x
+ - backup.osd.0
+ - backup.osd.1
+ - backup.osd.2
+ - client.1
+ - backup.client.1
+tasks:
+- install:
+- workunit:
+ clients:
+ all: [true.sh]
+- workunit:
+ clients:
+ backup.client.1: [true.sh]
diff --git a/src/ceph/qa/suites/teuthology/no-ceph/% b/src/ceph/qa/suites/teuthology/no-ceph/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/no-ceph/%
diff --git a/src/ceph/qa/suites/teuthology/no-ceph/clusters/single.yaml b/src/ceph/qa/suites/teuthology/no-ceph/clusters/single.yaml
new file mode 100644
index 0000000..0c6a40d
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/no-ceph/clusters/single.yaml
@@ -0,0 +1,2 @@
+roles:
+ - [mon.a, mgr.x, client.0]
diff --git a/src/ceph/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml b/src/ceph/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml
new file mode 100644
index 0000000..1391458
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml
@@ -0,0 +1,2 @@
+tasks:
+ - tests:
diff --git a/src/ceph/qa/suites/teuthology/nop/% b/src/ceph/qa/suites/teuthology/nop/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/nop/%
diff --git a/src/ceph/qa/suites/teuthology/nop/all/nop.yaml b/src/ceph/qa/suites/teuthology/nop/all/nop.yaml
new file mode 100644
index 0000000..4a5b227
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/nop/all/nop.yaml
@@ -0,0 +1,3 @@
+tasks:
+ - nop:
+
diff --git a/src/ceph/qa/suites/teuthology/rgw/% b/src/ceph/qa/suites/teuthology/rgw/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/rgw/%
diff --git a/src/ceph/qa/suites/teuthology/rgw/distros b/src/ceph/qa/suites/teuthology/rgw/distros
new file mode 120000
index 0000000..dd0d7f1
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/rgw/distros
@@ -0,0 +1 @@
+../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml b/src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml
new file mode 100644
index 0000000..01580e3
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml
@@ -0,0 +1,24 @@
+# this runs s3tests against rgw, using civetweb
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.1]
+
+tasks:
+- install:
+ branch: master
+- ceph:
+- rgw: [client.0]
+- s3tests:
+ client.0:
+ rgw_server: client.0
+ force-branch: master
+overrides:
+ ceph:
+ fs: xfs
+ conf:
+ client:
+ debug rgw: 20
+ rgw lc debug interval: 10
+ rgw:
+ ec-data-pool: false
+ frontend: civetweb
diff --git a/src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml b/src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml
new file mode 100644
index 0000000..d8a5050
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml
@@ -0,0 +1,24 @@
+# this runs s3tests against rgw, using mod_fastcgi
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.1]
+
+tasks:
+- install:
+ branch: master
+- ceph:
+- rgw: [client.0]
+- s3tests:
+ client.0:
+ rgw_server: client.0
+ force-branch: master
+overrides:
+ ceph:
+ fs: xfs
+ conf:
+ client:
+ debug rgw: 20
+ rgw lc debug interval: 10
+ rgw:
+ ec-data-pool: false
+ frontend: apache
diff --git a/src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml b/src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml
new file mode 100644
index 0000000..1def7b0
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml
@@ -0,0 +1,26 @@
+# this runs s3tests against rgw, using mod_proxy_fcgi
+# the choice between uds or tcp with mod_proxy_fcgi depends on the distro
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.1]
+
+tasks:
+- install:
+ branch: master
+- ceph:
+- rgw: [client.0]
+- s3tests:
+ client.0:
+ rgw_server: client.0
+ force-branch: master
+overrides:
+ ceph:
+ fs: xfs
+ conf:
+ client:
+ debug rgw: 20
+ rgw lc debug interval: 10
+ rgw:
+ ec-data-pool: false
+ frontend: apache
+ use_fcgi: true
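The three s3tests task files above are identical except for the rgw frontend under test: civetweb serves HTTP directly, while the fastcgi and fcgi variants put rgw behind apache, with use_fcgi selecting mod_proxy_fcgi over mod_fastcgi. The distinguishing keys, isolated as a sketch:

    rgw:
      ec-data-pool: false
      frontend: civetweb   # or: apache, for both of the fastcgi variants
      use_fcgi: true       # present only in the mod_proxy_fcgi variant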
diff --git a/src/ceph/qa/suites/teuthology/workunits/yes.yaml b/src/ceph/qa/suites/teuthology/workunits/yes.yaml
new file mode 100644
index 0000000..45098db
--- /dev/null
+++ b/src/ceph/qa/suites/teuthology/workunits/yes.yaml
@@ -0,0 +1,8 @@
+roles:
+ - [client.0]
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - true.sh
diff --git a/src/ceph/qa/suites/tgt/basic/% b/src/ceph/qa/suites/tgt/basic/%
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/%
@@ -0,0 +1 @@
+
diff --git a/src/ceph/qa/suites/tgt/basic/clusters/fixed-3.yaml b/src/ceph/qa/suites/tgt/basic/clusters/fixed-3.yaml
new file mode 100644
index 0000000..5e23c9e
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/clusters/fixed-3.yaml
@@ -0,0 +1,4 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
diff --git a/src/ceph/qa/suites/tgt/basic/msgr-failures/few.yaml b/src/ceph/qa/suites/tgt/basic/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/tgt/basic/msgr-failures/many.yaml b/src/ceph/qa/suites/tgt/basic/msgr-failures/many.yaml
new file mode 100644
index 0000000..86f8dde
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/msgr-failures/many.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/blogbench.yaml b/src/ceph/qa/suites/tgt/basic/tasks/blogbench.yaml
new file mode 100644
index 0000000..f77a78b
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/blogbench.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/bonnie.yaml b/src/ceph/qa/suites/tgt/basic/tasks/bonnie.yaml
new file mode 100644
index 0000000..2cbfcf8
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/bonnie.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/bonnie.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/dbench-short.yaml b/src/ceph/qa/suites/tgt/basic/tasks/dbench-short.yaml
new file mode 100644
index 0000000..fcb721a
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/dbench-short.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/dbench-short.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/dbench.yaml b/src/ceph/qa/suites/tgt/basic/tasks/dbench.yaml
new file mode 100644
index 0000000..7f73217
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/dbench.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/ffsb.yaml b/src/ceph/qa/suites/tgt/basic/tasks/ffsb.yaml
new file mode 100644
index 0000000..f50a3a1
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/ffsb.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/fio.yaml b/src/ceph/qa/suites/tgt/basic/tasks/fio.yaml
new file mode 100644
index 0000000..e7346ce
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/fio.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/fio.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/fsstress.yaml b/src/ceph/qa/suites/tgt/basic/tasks/fsstress.yaml
new file mode 100644
index 0000000..c77f511
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/fsstress.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/fsx.yaml b/src/ceph/qa/suites/tgt/basic/tasks/fsx.yaml
new file mode 100644
index 0000000..04732c8
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/fsx.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/fsync-tester.yaml b/src/ceph/qa/suites/tgt/basic/tasks/fsync-tester.yaml
new file mode 100644
index 0000000..ea627b7
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/fsync-tester.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/iogen.yaml b/src/ceph/qa/suites/tgt/basic/tasks/iogen.yaml
new file mode 100644
index 0000000..1065c74
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/iogen.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/iogen.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/iozone-sync.yaml b/src/ceph/qa/suites/tgt/basic/tasks/iozone-sync.yaml
new file mode 100644
index 0000000..ac241a4
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/iozone-sync.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/iozone-sync.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/iozone.yaml b/src/ceph/qa/suites/tgt/basic/tasks/iozone.yaml
new file mode 100644
index 0000000..cf5604c
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/iozone.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
diff --git a/src/ceph/qa/suites/tgt/basic/tasks/pjd.yaml b/src/ceph/qa/suites/tgt/basic/tasks/pjd.yaml
new file mode 100644
index 0000000..ba5c631
--- /dev/null
+++ b/src/ceph/qa/suites/tgt/basic/tasks/pjd.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
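Every task file in tgt/basic shares the same scaffold: tgt exports an RBD-backed iSCSI target, the iscsi task handles the initiator side and mounts the LUN, and the workunit then runs one filesystem benchmark against that mount, so only the final script name varies. The shared shape, with the varying line marked (the script name is a placeholder):

    tasks:
    - install:
    - ceph:
    - tgt:        # userspace iSCSI target backed by RBD
    - iscsi:      # initiator login plus filesystem setup on the LUN
    - workunit:
        clients:
          all:
          - suites/<benchmark>.sh   # the only per-file difference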
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/% b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/%
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml
new file mode 100644
index 0000000..ea9c37d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+overrides:
+ ceph:
+ log-whitelist:
+ - failed to encode map
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml
new file mode 100644
index 0000000..ffd4194
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install hammer"
+upgrade_workload:
+ sequential:
+ - install.upgrade:
+ exclude_packages: ['ceph-test-dbg']
+ client.0:
+ - print: "**** done install.upgrade client.0"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml
new file mode 100644
index 0000000..6638d14
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml
@@ -0,0 +1,26 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default features: 13
+tasks:
+- exec:
+ client.0:
+ - "cp $(which ceph_test_librbd_api) $TESTDIR/ceph_test_librbd_api"
+- sequential:
+ - upgrade_workload
+- ceph:
+- print: "**** done ceph"
+- exec:
+ client.0:
+ - "cp --force $TESTDIR/ceph_test_librbd_api $(which ceph_test_librbd_api)"
+ - "rm -rf $TESTDIR/ceph_test_librbd_api"
+- print: "**** done reverting to hammer ceph_test_librbd_api"
+- workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd_api.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd/test_librbd_api.sh"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..dfaa0e8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml
@@ -0,0 +1,13 @@
+tasks:
+- sequential:
+ - upgrade_workload
+- ceph:
+- print: "**** done ceph"
+- workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --image-feature layering,exclusive-lock,object-map
+- print: "**** done rbd/import_export.sh"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/% b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/%
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml
new file mode 100644
index 0000000..4c9f324
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+- - client.1
+overrides:
+ ceph:
+ log-whitelist:
+ - failed to encode map
+ fs: xfs
+ conf:
+ client:
+ rbd default features: 1
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml
new file mode 100644
index 0000000..a625642
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install hammer"
+- install.upgrade:
+ exclude_packages: ['ceph-test-dbg']
+ client.1:
+- print: "**** done install.upgrade client.1"
+- ceph:
+- print: "**** done ceph"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml
new file mode 100644
index 0000000..984dfa0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml
@@ -0,0 +1,21 @@
+tasks:
+- workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/notify_master.sh
+ client.1:
+ - rbd/notify_slave.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd: old librbd -> new librbd"
+- workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/notify_slave.sh
+ client.1:
+ - rbd/notify_master.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd: new librbd -> old librbd"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/% b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/%
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/start.yaml
new file mode 100644
index 0000000..a4cd754
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/start.yaml
@@ -0,0 +1,13 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - osd.0
+ - osd.1
+ - osd.2
+- - client.0
+overrides:
+ ceph:
+ log-whitelist:
+ - failed to encode map
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/jewel-client-x.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/jewel-client-x.yaml
new file mode 100644
index 0000000..87ea402
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/jewel-client-x.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install jewel"
+upgrade_workload:
+ sequential:
+ - install.upgrade:
+ exclude_packages: ['ceph-test', 'ceph-test-dbg']
+ client.0:
+ - print: "**** done install.upgrade to -x on client.0"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_api_tests.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_api_tests.yaml
new file mode 100644
index 0000000..8939f3a
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_api_tests.yaml
@@ -0,0 +1,21 @@
+tasks:
+- exec:
+ client.0:
+ - "cp $(which ceph_test_librbd_api) $TESTDIR/ceph_test_librbd_api"
+- sequential:
+ - upgrade_workload
+- ceph:
+- print: "**** done ceph"
+- exec:
+ client.0:
+ - "cp --force $TESTDIR/ceph_test_librbd_api $(which ceph_test_librbd_api)"
+ - "rm -rf $TESTDIR/ceph_test_librbd_api"
+- print: "**** done reverting to jewel ceph_test_librbd_api"
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd_api.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd/test_librbd_api.sh"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_cli_import_export.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..545354f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_cli_import_export.yaml
@@ -0,0 +1,13 @@
+tasks:
+- sequential:
+ - upgrade_workload
+- ceph:
+- print: "**** done ceph"
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --image-feature layering,exclusive-lock,object-map
+- print: "**** done rbd/import_export.sh"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/% b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/%
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/start.yaml
new file mode 100644
index 0000000..4db664b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/start.yaml
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+- - client.1
+overrides:
+ ceph:
+ log-whitelist:
+ - failed to encode map
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/jewel-client-x.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/jewel-client-x.yaml
new file mode 100644
index 0000000..4ce73a4
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/jewel-client-x.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install jewel"
+- install.upgrade:
+ exclude_packages: ['ceph-test', 'ceph-test-dbg']
+ client.1:
+- print: "**** done install.upgrade to -x on client.1"
+- ceph:
+- print: "**** done ceph task"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/defaults.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/defaults.yaml
new file mode 100644
index 0000000..dff6623
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/defaults.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default features: 61
+
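The numeric rbd default features values are bitmasks over the librbd feature flags, which is why these suites can toggle whole feature sets with one integer. The decompositions used in these files, as a reference sketch (bit values are the standard librbd feature flags):

    # 61 = 1 (layering) + 4 (exclusive-lock) + 8 (object-map)
    #    + 16 (fast-diff) + 32 (deep-flatten)
    # 13 = 1 (layering) + 4 (exclusive-lock) + 8 (object-map)
    #  1 = layering only (the layering.yaml variant below)
    overrides:
      ceph:
        conf:
          client:
            rbd default features: 61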
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/layering.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/layering.yaml
new file mode 100644
index 0000000..5613d01
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/layering.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default features: 1
+
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/rbd_notification_tests.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/rbd_notification_tests.yaml
new file mode 100644
index 0000000..1fb6822
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/rbd_notification_tests.yaml
@@ -0,0 +1,21 @@
+tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/notify_master.sh
+ client.1:
+ - rbd/notify_slave.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd: old librbd -> new librbd"
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/notify_slave.sh
+ client.1:
+ - rbd/notify_master.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd: new librbd -> old librbd"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..bbddfb3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,21 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ mon debug unsafe allow tier with nonempty snaps: true
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - reached quota
+roles:
+- - mon.a
+ - osd.0
+ - osd.1
+ - mgr.x
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml
new file mode 100644
index 0000000..c57e071
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done hammer"
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ osd.0:
+ branch: jewel
+ osd.2:
+ branch: jewel
+- print: "**** done install.upgrade osd.0 and osd.2 to jewel"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..e4f3ee1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,20 @@
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..d86c2d2
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,8 @@
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..50ba808
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,8 @@
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..997f7ba
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,8 @@
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..d1046da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,8 @@
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..1aaeac8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,18 @@
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart do not wait for healthy"
+ - exec:
+ mon.a:
+ - sleep 300 # http://tracker.ceph.com/issues/17808
+ - ceph osd set sortbitwise
+ - ceph osd set require_jewel_osds
+ - ceph.healthy:
+ - print: "**** done ceph.healthy"
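The post-restart exec matters for ordering: sortbitwise and require_jewel_osds may only be set once every OSD runs jewel code, and the sleep 300 works around the monitor lag noted in tracker issue 17808 referenced above. One way a test could confirm the flags actually landed, sketched against the standard osd dump output (an assumption about where such a check would go, not part of this suite):

    - exec:
        mon.a:
        - ceph osd dump | grep -E 'sortbitwise|require_jewel_osds'   # flags line of the osdmap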
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml
new file mode 100644
index 0000000..f2093da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml
@@ -0,0 +1,36 @@
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+    # do we need to use "ceph osd crush tunables hammer"?
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables hammer
+ - print: "**** done ceph osd crush tunables hammer"
+ - ceph.restart:
+ daemons: [mon.b, mon.c]
+ wait-for-healthy: false
+ - sleep:
+ duration: 30
+ - exec:
+ osd.0:
+ - sleep 300 # http://tracker.ceph.com/issues/17808
+ - ceph osd set sortbitwise
+ - ceph osd set require_jewel_osds
+ - ceph.healthy:
+ - sleep:
+ duration: 60
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml
new file mode 100644
index 0000000..60a3cb6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml
@@ -0,0 +1,5 @@
+tasks:
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ client.0:
+ branch: jewel
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml
new file mode 120000
index 0000000..987c18c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml
@@ -0,0 +1 @@
+../../../../releases/jewel.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml
new file mode 100644
index 0000000..ab41db6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml
@@ -0,0 +1,14 @@
+tasks:
+ - install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ client.0:
+ branch: jewel
+ - print: "**** done install.upgrade client.0 to jewel"
+ - install.upgrade:
+ osd.0:
+ osd.2:
+ - print: "**** done install.upgrade daemons to x"
+ - parallel:
+ - workload2
+ - upgrade-sequence2
+ - print: "**** done parallel workload2 and upgrade-sequence2"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..9818541
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml
@@ -0,0 +1,29 @@
+meta:
+- desc: |
+   run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload2:
+ full_sequential:
+ - rados:
+ erasure_code_profile:
+ name: teuthologyprofile2
+ k: 2
+ m: 1
+ crush-failure-domain: osd
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
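The inline erasure_code_profile is what distinguishes this second-round workload from the first-round ec-rados-default.yaml, which relied on the default profile. With k: 2 and m: 1, every object is split into two data chunks plus one coding chunk, and crush-failure-domain: osd places the three chunks on distinct OSDs, so the pool survives the loss of any single OSD. The profile in isolation:

    erasure_code_profile:
      name: teuthologyprofile2
      k: 2                        # data chunks per object
      m: 1                        # coding chunks; tolerates m OSD failures
      crush-failure-domain: osd   # spread chunks across distinct OSDs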
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml
new file mode 100644
index 0000000..088976b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload2:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..30f1307
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload2:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..e21839b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload2:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..cae2c06
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload2:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..356f8ad
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence2:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-by-daemon.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-by-daemon.yaml
new file mode 100644
index 0000000..0a69a7f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-by-daemon.yaml
@@ -0,0 +1,30 @@
+meta:
+- desc: |
+ upgrade the ceph cluster,
+   upgrade in two steps
+   step one ordering: mon.a, mon.b, mon.c, osd.0, osd.1
+   step two ordering: osd.2, osd.3
+   ceph expected to be in a healthy state after each step
+upgrade-sequence2:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables jewel
+ - print: "**** done ceph osd crush tunables jewel"
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..e0b0ba1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done 9-final-workload/rados-snaps-few-objects.yaml"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..b1c6791
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,6 @@
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+  - print: "**** done 9-final-workload/rados_loadgenmix.yaml"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..807afb9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,11 @@
+tasks:
+ - sequential:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+  - print: "**** done rados/test-upgrade-v11.0.0.sh from 9-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..973c438
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+- print: "**** done 9-final-workload/rbd_cls.yaml"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..d8116a9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml
@@ -0,0 +1,8 @@
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh from 9-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml
new file mode 100644
index 0000000..f1cf2de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rgw: [client.1]
+- s3tests:
+ client.1:
+ rgw_server: client.1
+- print: "**** done rgw_server from 9-final-workload"
+overrides:
+ ceph:
+ conf:
+ client:
+ rgw lc debug interval: 10
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/%
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster
new file mode 120000
index 0000000..9bb7a0d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster
@@ -0,0 +1 @@
+../../jewel-x/stress-split/0-cluster \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml
new file mode 100644
index 0000000..212b8ff
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml
@@ -0,0 +1,83 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+- print: '**** done hammer'
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+- install.upgrade:
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+ osd.0:
+ branch: jewel
+ osd.3:
+ branch: jewel
+- print: '**** done install.upgrade osd.0 and osd.3 to jewel'
+- parallel:
+ - workload-h-j
+ - upgrade-sequence-h-j
+- print: '**** done parallel'
+- install.upgrade:
+ client.0:
+ branch: jewel
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+- exec:
+ osd.0:
+ - ceph osd set sortbitwise
+ - ceph osd set require_jewel_osds
+ - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ;
+ done
+- print: '**** done install.upgrade client.0 to jewel'
+upgrade-sequence-h-j:
+ sequential:
+ - ceph.restart:
+ daemons:
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - osd.4
+ - osd.5
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons:
+ - mon.a
+ - mon.b
+ - mon.c
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: '**** done ceph.restart do not wait for healthy'
+ - exec:
+ mon.a:
+ - sleep 300
+ - ceph osd set require_jewel_osds
+ - ceph.healthy: null
+ - print: '**** done ceph.healthy'
+workload-h-j:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade
new file mode 120000
index 0000000..fad7148
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade
@@ -0,0 +1 @@
+../../jewel-x/stress-split/2-partial-upgrade/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash
new file mode 120000
index 0000000..894fdeb
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash
@@ -0,0 +1 @@
+../../jewel-x/stress-split/3-thrash/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload
new file mode 120000
index 0000000..6135fb0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload
@@ -0,0 +1 @@
+../../jewel-x/stress-split/4-workload \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml
new file mode 120000
index 0000000..7d39ac6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../../jewel-x/stress-split/5-finish-upgrade.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload
new file mode 120000
index 0000000..97adf26
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload
@@ -0,0 +1 @@
+../../jewel-x/stress-split/7-final-workload/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
new file mode 100644
index 0000000..9cd743c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+roles:
+- - mon.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
new file mode 100644
index 0000000..7485dce
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
@@ -0,0 +1,13 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+- print: '**** done hammer'
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
new file mode 100644
index 0000000..f0e22bf
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+ client.0:
+ - ceph osd erasure-code-profile set t-profile crush-failure-domain=osd k=2 m=1
+ - ceph osd pool create base-pool 4 4 erasure t-profile
+ - ceph osd pool application enable base-pool rados
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
new file mode 100644
index 0000000..36dc06d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create base-pool 4
+ - ceph osd pool application enable base-pool rados
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
new file mode 100644
index 0000000..d9cc348
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create cache-pool 4
+ - ceph osd tier add base-pool cache-pool
+ - ceph osd tier cache-mode cache-pool writeback
+ - ceph osd tier set-overlay base-pool cache-pool
+ - ceph osd pool set cache-pool hit_set_type bloom
+ - ceph osd pool set cache-pool hit_set_count 8
+ - ceph osd pool set cache-pool hit_set_period 5
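These commands put a writeback cache tier in front of base-pool; the 5-second hit_set_period is deliberately tiny so HitSets roll over constantly during the upgrade. A rough way to sanity-check the tier wiring afterwards, under the same admin-keyring assumption:

    # base-pool should name cache-pool as its tier and read/write overlay,
    # and cache-pool's line should show cache_mode writeback
    ceph osd pool ls detail | grep -E 'base-pool|cache-pool'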
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
new file mode 100644
index 0000000..b2fc171
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
@@ -0,0 +1,52 @@
+tasks:
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+
+workload:
+ sequential:
+ - rados:
+ clients: [client.0]
+ pools: [base-pool]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ - print: "**** done rados"
+
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+ osd.0:
+ branch: jewel
+ osd.2:
+ branch: jewel
+ - print: "*** done install.upgrade osd.0 and osd.2"
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart do not wait for healthy"
+ - exec:
+ mon.a:
+ - sleep 300 # http://tracker.ceph.com/issues/17808
+ - ceph osd set sortbitwise
+ - ceph osd set require_jewel_osds
+ - ceph.healthy:
+ - print: "**** done ceph.healthy"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/% b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml
new file mode 120000
index 0000000..b5973b9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/centos_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml
new file mode 120000
index 0000000..cc5b15b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/ubuntu_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml
new file mode 100644
index 0000000..9adede7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml
@@ -0,0 +1,82 @@
+meta:
+- desc: |
+  Set up a 4-node ceph cluster using ceph-deploy, with the latest
+  stable jewel as the initial release. Upgrade to luminous, setting
+  up mgr nodes after the upgrade, and check that the cluster reaches
+  a healthy state. After the upgrade, run the kernel tar/untar task
+  and the systemd task. This test will detect ceph upgrade issues
+  and systemd issues.
+overrides:
+ ceph-deploy:
+ fs: xfs
+ conf:
+ global:
+ mon pg warn min per osd: 2
+ osd:
+ osd pool default size: 2
+ osd objectstore: filestore
+ osd sloppy crc: true
+ client:
+ rbd default features: 5
+openstack:
+- machine:
+ disk: 100
+- volumes:
+ count: 3
+ size: 30
+# reluctantly :( hard-coded machine type
+# it will override command line args with teuthology-suite
+machine_type: vps
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mgr.y
+- - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+- - osd.6
+ - osd.7
+ - osd.8
+ - client.0
+tasks:
+- ssh-keys:
+- print: "**** done ssh-keys"
+- ceph-deploy:
+ branch:
+ stable: jewel
+ skip-mgr: True
+- print: "**** done initial ceph-deploy"
+- ceph-deploy.upgrade:
+ branch:
+ dev: luminous
+ setup-mgr-node: True
+ check-for-healthy: True
+ roles:
+ - mon.a
+ - mon.b
+ - mon.c
+ - osd.6
+- print: "**** done ceph-deploy upgrade"
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph osd set-require-min-compat-client luminous
+- print: "**** done `ceph osd require-osd-release luminous`"
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
+- print: "**** done kernel_untar_build.sh"
+- systemd:
+- print: "**** done systemd"
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
+- print: "**** done rados/load-gen-mix.sh"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/% b/src/ceph/qa/suites/upgrade/jewel-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..d1f1e10
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,32 @@
+meta:
+- desc: |
+  Run ceph on two nodes, with clients 0,1,2,3
+  on a separate third node and client 4 on a fourth.
+  Use xfs beneath the osds.
+  CephFS tests run on clients 2,3
+roles:
+- - mon.a
+ - mds.a
+ - mgr.x
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+- - client.4
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - \(MDS_FAILED\)
+ - \(OBJECT_
+ - is unresponsive
+ conf:
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..c64b2cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
@@ -0,0 +1,60 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.1:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.2:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.3:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+meta:
+- desc: |
+ install ceph/jewel latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done installing jewel"
+- ceph:
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - is unresponsive
+ conf:
+ global:
+ mon warn on pool no app: false
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade mon.a and mon.b"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml
new file mode 100644
index 0000000..83457c0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml
@@ -0,0 +1,11 @@
+# do not require luminous osds at mkfs time; only set flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets), and verify we are healthy.
+tasks:
+- full_sequential_finally:
+ - ceph.osd_scrub_pgs:
+ cluster: ceph
+ - exec:
+ mon.a:
+ - ceph pg dump -f json-pretty
+ - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..56eedbd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse: [client.2]
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml
new file mode 100644
index 0000000..dfbcbea
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml
@@ -0,0 +1,41 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+workload:
+ full_sequential:
+ - sequential:
+ - exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+ - rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+openstack:
+ - machine:
+ ram: 15000 # MB
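target_max_objects: 250 keeps the cache pool tiny so the tier agent flushes and evicts continuously while the snapshotted rados workload runs, and the zero recency settings let any single read or write promote an object. Each setter above has a matching getter, which helps when a run needs debugging, e.g.:

    # read back two of the tunables set above
    sudo ceph osd pool get cache hit_set_period
    sudo ceph osd pool get cache target_max_objects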
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..fb9d30f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+  run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..348f1ae
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..15d892e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..bb2d3ea
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..6a0f829
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ - ceph.restart:
+ daemons: [mds.a, osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..2d74e9e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,38 @@
+meta:
+- desc: |
+ upgrade the ceph cluster,
+  upgrade in two steps
+  step one ordering: mon.a, osd.0, osd.1, mds.a
+  step two ordering: mon.b, mon.c, osd.2, osd.3
+  ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b, mon.c]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables jewel
+ - print: "**** done ceph osd crush tunables jewel"
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml
new file mode 100644
index 0000000..e57b377
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml
@@ -0,0 +1,23 @@
+# this is the same fragment as ../../../../releases/luminous.yaml
+# but without line "ceph osd set-require-min-compat-client luminous"
+
+tasks:
+- exec:
+ mgr.x:
+ - mkdir -p /var/lib/ceph/mgr/ceph-x
+ - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
+ - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
+- ceph.restart:
+ daemons: [mgr.x]
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+- ceph.healthy:
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on osd down out interval zero: false
+ log-whitelist:
+ - no active mgr
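Because jewel shipped no ceph-mgr, the mgr keyring must be created by hand before mgr.x can start, and only once a mgr is running is it safe to require luminous OSDs. Whether the daemon actually registered can be checked from the admin node (the exact status wording is an assumption about luminous):

    # 'ceph -s' should show an active mgr once mgr.x has come up
    ceph -s | grep mgr
    ceph mgr dump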
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml
new file mode 100644
index 0000000..f7e9de4
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+  run basic import/export cli tests for rbd on the not-yet-upgraded client.4
+ (covers issue http://tracker.ceph.com/issues/21660)
+tasks:
+ - workunit:
+ branch: jewel
+ clients:
+ client.4:
+ - rbd/import_export.sh
+ - print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..5c72153
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous-with-mgr.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml
new file mode 100644
index 0000000..20c0ffd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml
@@ -0,0 +1,8 @@
+tasks:
+- exec:
+ mon.a:
+ - ceph osd set-require-min-compat-client jewel
+ - ceph osd crush set-all-straw-buckets-to-straw2
+ - ceph osd crush weight-set create-compat
+ - ceph osd crush weight-set reweight-compat osd.0 .9
+ - ceph osd crush weight-set reweight-compat osd.1 1.2
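This fragment exercises luminous's compat weight-set while jewel clients are still allowed: straw buckets become straw2 and two OSDs get non-default compat weights. A sketch for inspecting the result, assuming the luminous-era weight-set CLI:

    # list the defined weight-sets and dump the per-osd compat weights
    ceph osd crush weight-set ls
    ceph osd crush weight-set dump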
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d73459e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse: [client.3]
+ - print: "**** done ceph-fuse 5-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..7dd61c5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..b218b92
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..c835a65
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+  librados C and C++ api tests, run while thrashing monitors
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+ - print: "**** done rados/test-upgrade-v11.0.0.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..46bbf76
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..5ae7491
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..780c4ad
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ swift api tests for rgw
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - rgw: [client.1]
+ - print: "**** done rgw 7-final-workload"
+ - swift:
+ client.1:
+ rgw_server: client.1
+ - print: "**** done swift 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml
new file mode 120000
index 0000000..81df389
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml
@@ -0,0 +1 @@
+5-workload.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros b/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/% b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml
new file mode 120000
index 0000000..c79327b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml
@@ -0,0 +1 @@
+../../../../../distros/all/centos_7.3.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml
new file mode 120000
index 0000000..6237042
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml
@@ -0,0 +1 @@
+../../../../../distros/all/ubuntu_14.04.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
new file mode 100644
index 0000000..d68c258
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
@@ -0,0 +1,236 @@
+meta:
+- desc: |
+ Run ceph on two nodes, using one of them as a client,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+ install ceph/jewel v10.2.0 point version
+ run workload and upgrade-sequence in parallel
+ install ceph/jewel latest version
+ run workload and upgrade-sequence in parallel
+  install ceph/-x version (luminous)
+ run workload and upgrade-sequence in parallel
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - scrub
+ - osd_map_max_advance
+ - wrongly marked
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(OSD_
+ - \(PG_
+ - \(CACHE_
+ fs: xfs
+ conf:
+ global:
+ mon warn on pool no app: false
+ mon:
+ mon debug unsafe allow tier with nonempty snaps: true
+ osd:
+ osd map max advance: 1000
+ osd map cache size: 1100
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- print: "**** v10.2.0 about to install"
+- install:
+ tag: v10.2.0
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
+- print: "**** done v10.2.0 install"
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+- print: "**** done ceph xfs"
+- sequential:
+ - workload
+- print: "**** done workload v10.2.0"
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ mon.a:
+ branch: jewel
+ mon.b:
+ branch: jewel
+  # Note that client.1 IS NOT upgraded at this point
+ #client.1:
+ #branch: jewel
+- parallel:
+ - workload_jewel
+ - upgrade-sequence_jewel
+- print: "**** done parallel jewel branch"
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ client.1:
+ branch: jewel
+- print: "**** done branch: jewel install.upgrade on client.1"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
+- parallel:
+ - workload_x
+ - upgrade-sequence_x
+- print: "**** done parallel -x branch"
+- exec:
+ osd.0:
+ - ceph osd set-require-min-compat-client luminous
+# Run librados tests on the -x upgraded cluster
+- install.upgrade:
+ client.1:
+- workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+ - cls
+- print: "**** done final test on -x cluster"
+#######################
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+workload_jewel:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ env:
+ CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
+ - print: "**** done rados/test.sh & cls workload_jewel"
+ - sequential:
+ - rgw: [client.0]
+ - print: "**** done rgw workload_jewel"
+ - s3tests:
+ client.0:
+ force-branch: ceph-jewel
+ rgw_server: client.0
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_jewel"
+upgrade-sequence_jewel:
+ sequential:
+ - print: "**** done branch: jewel install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all jewel branch mds/osd/mon"
+workload_x:
+ sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0-noec.sh
+ - cls
+ env:
+ CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
+ - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x NOT upgraded client"
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rados/test-upgrade-v11.0.0-noec.sh
+ - cls
+ - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x upgraded client"
+ - rgw: [client.1]
+ - print: "**** done rgw workload_x"
+ - s3tests:
+ client.1:
+ force-branch: ceph-jewel
+ rgw_server: client.1
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_x"
+upgrade-sequence_x:
+ sequential:
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart:
+ daemons: [osd.5]
+ wait-for-healthy: false
+      wait-for-osds-up: true
+ - exec:
+ mgr.x:
+ - mkdir -p /var/lib/ceph/mgr/ceph-x
+ - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
+ - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
+ - ceph.restart:
+ daemons: [mgr.x]
+ wait-for-healthy: false
+ - exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph.healthy:
+ - print: "**** done ceph.restart all -x branch mds/osd/mon"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/% b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster
new file mode 120000
index 0000000..3580937
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster
@@ -0,0 +1 @@
+../stress-split/0-cluster/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install
new file mode 120000
index 0000000..3e7cbc3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install
@@ -0,0 +1 @@
+../stress-split/1-jewel-install/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml
new file mode 120000
index 0000000..522db1b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml
@@ -0,0 +1 @@
+../parallel/1.5-final-scrub.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade
new file mode 120000
index 0000000..ab35fc1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade
@@ -0,0 +1 @@
+../stress-split/2-partial-upgrade/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..edae7b3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..c89551e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml
new file mode 120000
index 0000000..a66a7dc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../stress-split/5-finish-upgrade.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml
new file mode 120000
index 0000000..2b99d5c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml
@@ -0,0 +1 @@
+../stress-split/6-luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..a82f11b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
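The header arithmetic: with the default 4096-byte stripe unit and k=3, each chunk rounds up from 4096/3 = 1365.33 to an aligned 1376 bytes (the alignment rule is an assumption about the jerasure plugin), so the pool's stripe_width becomes 1376*3 = 4128. That the non-default width really took effect can be read off the pool listing:

    # stripe_width is printed on the erasure pool's detail line
    ceph osd pool ls detail | grep stripe_width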
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/% b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..4f40219
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,20 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..31ca3e5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: install ceph/jewel latest
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install jewel"
+- ceph:
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+ log-whitelist:
+ - required past_interval bounds are empty
+- print: "**** done ceph"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml
new file mode 120000
index 0000000..522db1b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml
@@ -0,0 +1 @@
+../parallel/1.5-final-scrub.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..442dcf1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+  install the ceph/-x upgrade on one node only
+  (1st half)
+  restart: mon.a,b,c and osd.0,1,2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a,mon.b,mon.c,osd.0, osd.1, osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..b3fddef
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml
new file mode 100644
index 0000000..626ae8e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+- print: "**** done radosbench 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 0000000..92779bc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..693154d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 0000000..64c0e33
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml
new file mode 100644
index 0000000..41e34d6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..f56d0de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml
new file mode 100644
index 0000000..1d528cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install.upgrade:
+ osd.3:
+ client.0:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml
new file mode 120000
index 0000000..02263d1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml
@@ -0,0 +1 @@
+../parallel/6.5-crush-compat.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml
new file mode 100644
index 0000000..56ba21d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml
new file mode 100644
index 0000000..76e5d6f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/kraken-luminous.yaml b/src/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/kraken-luminous.yaml
new file mode 100644
index 0000000..4a55362
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/kraken-luminous.yaml
@@ -0,0 +1,61 @@
+meta:
+- desc: |
+  Set up a 4-node ceph cluster using ceph-deploy, with the latest
+  stable kraken as the initial release. Upgrade to luminous, setting
+  up mgr nodes after the upgrade, and check that the cluster reaches
+  a healthy state. After the upgrade, run the kernel tar/untar task
+  and the systemd task. This test will detect ceph upgrade issues
+  and systemd issues.
+overrides:
+ ceph-deploy:
+ fs: xfs
+ conf:
+ global:
+ mon pg warn min per osd: 2
+ osd:
+ osd pool default size: 2
+ osd objectstore: filestore
+ osd sloppy crc: true
+ client:
+ rbd default features: 5
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mgr.y
+- - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+- - osd.6
+ - osd.7
+ - osd.8
+ - client.0
+tasks:
+- ssh-keys:
+- ceph-deploy:
+ branch:
+ stable: kraken
+ skip-mgr: True
+- ceph-deploy.upgrade:
+ branch:
+ dev: luminous
+ setup-mgr-node: True
+ check-for-healthy: True
+ roles:
+ - mon.a
+ - mon.b
+ - mon.c
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
+- systemd:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/% b/src/ceph/qa/suites/upgrade/kraken-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/+ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..f5a883a
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,33 @@
+meta:
+- desc: |
+  Run ceph on two nodes, with clients 0,1,2,3
+  on a separate third node and client 4 on a fourth.
+  Use xfs beneath the osds.
+  CephFS tests run on clients 2,3
+roles:
+- - mon.a
+ - mgr.x
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+- - client.4
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+      - \(POOL_APP_NOT_ENABLED\)
+ - overall HEALTH_
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/kraken.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/kraken.yaml
new file mode 100644
index 0000000..de0893c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/kraken.yaml
@@ -0,0 +1,39 @@
+meta:
+- desc: |
+ install ceph/kraken latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ branch: kraken
+- print: "**** done installing kraken"
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - Manager daemon
+ conf:
+ global:
+ mon warn on pool no app: false
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade both hosts"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/+ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..021fcc6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..5c5a958
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+  run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..893beec
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..8befdd4
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+ full_sequential:
+ - workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..10f4b05
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..23e653d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..cff3a68
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, mgr.x]
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mds.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..f197de6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,35 @@
+meta:
+- desc: |
+ upgrade the ceph cluster,
+ upgrade in two steps
+ step one ordering: mon.a, osd.0, osd.1, mds.a
+ step two ordering: mon.b, mon.c, osd.2, osd.3
+ ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b, mon.c, mgr.x]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/4-luminous.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/4-luminous.yaml
new file mode 100644
index 0000000..80c2b9d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/4-luminous.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
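This exec step runs on osd.0's node once all daemons have been restarted; as I understand it, `require-osd-release luminous` flips the OSDMap flag that refuses pre-luminous OSDs from booting, which is distinct from the client-side gate other fragments in this patch set. Shown side by side for contrast (sketch; both commands appear elsewhere in this patch):

    ceph osd require-osd-release luminous            # reject OSDs older than luminous
    ceph osd set-require-min-compat-client luminous  # reject clients older than luminous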
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/5-workload.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/5-workload.yaml
new file mode 100644
index 0000000..851c5c8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/5-workload.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd on the non-upgraded client.4
+ (covers issue http://tracker.ceph.com/issues/21660)
+tasks:
+ - workunit:
+ branch: kraken
+ clients:
+ client.4:
+ - rbd/import_export.sh
+ - print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..5c72153
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous-with-mgr.yaml \ No newline at end of file
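Fragments created with mode 120000, like the one above, are git symlinks: the blob content is just the target path, which is also why the hunk ends with "\ No newline at end of file". One way to confirm this locally (hypothetical command, run in a checkout):

    git ls-files -s src/ceph/qa/suites/upgrade/kraken-x/parallel/6-luminous-with-mgr.yaml
    # expected form: "120000 <blob-sha> 0  <path>" -- the blob holds the relative target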
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/+ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d2629c0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 5-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 5-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..d8b3dcb
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..922a9da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..ab6276e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ librados C and C++ api tests
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ branch: kraken
+ clients:
+ client.1:
+ - rados/test.sh
+ - print: "**** done rados/test.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..aaf0a37
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_import_export.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..46e1355
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_import_export.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rgw_swift.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..7a7659f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ swift api tests for rgw
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - rgw: [client.1]
+ - print: "**** done rgw 4-final-workload"
+ - swift:
+ client.1:
+ rgw_server: client.1
+ - print: "**** done swift 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/distros b/src/ceph/qa/suites/upgrade/kraken-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/objectstore b/src/ceph/qa/suites/upgrade/kraken-x/parallel/objectstore
new file mode 120000
index 0000000..016cbf9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/% b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/%
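The empty '%' file is a teuthology convention: a suite directory containing '%' is expanded as the cross product of its facet subdirectories, one fragment from each, while a directory containing '+' has all of its fragments merged into a single job. For this suite the expansion is roughly (illustrative, not scheduler output):

    jobs = 0-cluster x 1-kraken-install x 2-partial-upgrade x 3-thrash
           x 4-ec-workload x 5-finish-upgrade x 6-luminous-with-mgr
           x 7-final-workload x distros x objectstore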
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/0-cluster b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/0-cluster
new file mode 120000
index 0000000..3580937
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/0-cluster
@@ -0,0 +1 @@
+../stress-split/0-cluster/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/1-kraken-install b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/1-kraken-install
new file mode 120000
index 0000000..d4bcb5a
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/1-kraken-install
@@ -0,0 +1 @@
+../stress-split/1-kraken-install/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/2-partial-upgrade b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/2-partial-upgrade
new file mode 120000
index 0000000..ab35fc1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/2-partial-upgrade
@@ -0,0 +1 @@
+../stress-split/2-partial-upgrade/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..edae7b3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/4-ec-workload.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/4-ec-workload.yaml
new file mode 100644
index 0000000..c89551e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/4-ec-workload.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/5-finish-upgrade.yaml
new file mode 120000
index 0000000..a66a7dc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../stress-split/5-finish-upgrade.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..01d44cc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../stress-split/6-luminous-with-mgr.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/7-final-workload.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/7-final-workload.yaml
new file mode 100644
index 0000000..50a1465
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/7-final-workload.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which differs from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
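On the arithmetic in the header comment: with a 4096-byte stripe_width target and k=3 data chunks, 4096/3 ≈ 1365.3 does not divide evenly, so the per-chunk size is rounded up to an aligned value, 1376 here (the exact alignment rule is the plugin's; the rounding step is my reading of the comment, not stated in the patch):

    4096 / 3 = 1365.33...  -> rounded up to 1376 bytes per chunk
    1376 * 3 = 4128        != 4096, and not a multiple of 1024*1024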
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/distros b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/objectstore b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/objectstore
new file mode 120000
index 0000000..016cbf9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/% b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/%
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/+ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..b8a28f9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,27 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/kraken.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/kraken.yaml
new file mode 100644
index 0000000..145c2c8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/kraken.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: install ceph/kraken latest
+tasks:
+- install:
+ branch: kraken
+- print: "**** done install kraken"
+- ceph:
+- print: "**** done ceph"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/firsthalf.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..87fa1d5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ install/upgrade ceph/-x on one node only
+ 1st half
+ restart: mon.a/b/c, mgr.x, osd.0/1/2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..b3fddef
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/+ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/radosbench.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/radosbench.yaml
new file mode 100644
index 0000000..626ae8e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+- print: "**** done radosbench 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-cls.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 0000000..7f4b06b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-import-export.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..b8b6ad3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd_api.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 0000000..a5ae1e5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/readwrite.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/readwrite.yaml
new file mode 100644
index 0000000..41e34d6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..f56d0de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/5-finish-upgrade.yaml
new file mode 100644
index 0000000..1d528cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install.upgrade:
+ osd.3:
+ client.0:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..5c72153
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous-with-mgr.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/+ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rbd-python.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rbd-python.yaml
new file mode 100644
index 0000000..24c2644
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rbd-python.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rgw-swift.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rgw-swift.yaml
new file mode 100644
index 0000000..76e5d6f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rgw-swift.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/snaps-many-objects.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/distros b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/bluestore.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/bluestore.yaml
new file mode 120000
index 0000000..d644598
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../../objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/filestore-xfs.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/filestore-xfs.yaml
new file mode 120000
index 0000000..03750e5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../../../objectstore/filestore-xfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/% b/src/ceph/qa/suites/upgrade/luminous-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/+ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..3684b1e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with clients 0-3 on a separate third node.
+ Use xfs beneath the osds.
+ CephFS tests run on clients 2 and 3
+roles:
+- - mon.a
+ - mgr.x
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+- - client.4
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - \(POOL_APP_NOT_ENABLED\)
+ - overall HEALTH_
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ mon:
+ mon warn on osd down out interval zero: false
+ osd:
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml
new file mode 100644
index 0000000..3d57f79
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml
@@ -0,0 +1,43 @@
+meta:
+- desc: |
+ install ceph/luminous latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ branch: luminous
+- print: "**** done installing luminous"
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - Manager daemon
+ conf:
+ global:
+ mon warn on pool no app: false
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph osd set-require-min-compat-client luminous
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade both hosts"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/+ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..021fcc6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..5c5a958
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..e4cc9f9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..874a8c5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..81563c9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..e17207d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..cff3a68
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, mgr.x]
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mds.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..f197de6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,35 @@
+meta:
+- desc: |
+ upgrade the ceph cluster,
+ upgrade in two steps
+ step one ordering: mon.a, osd.0, osd.1, mds.a
+ step two ordering: mon.b, mon.c, osd.2, osd.3
+ ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b, mon.c, mgr.x]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d2629c0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 5-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 5-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..d8b3dcb
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..922a9da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..a42b7d2
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ librados C and C++ api tests
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ branch: luminous
+ clients:
+ client.1:
+ - rados/test.sh
+ - print: "**** done rados/test.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..aaf0a37
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_no_upgrated.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_no_upgrated.yaml
new file mode 100644
index 0000000..5de8a23
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_no_upgrated.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+ on a non-upgraded client
+tasks:
+ - workunit:
+ branch: luminous
+ clients:
+ client.4:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 4-final-workload on NO upgrated client"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_upgrated.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_upgrated.yaml
new file mode 100644
index 0000000..2c7c484
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_upgrated.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+ on an upgraded client
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 4-final-workload on upgrated client"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..7a7659f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ swift api tests for rgw
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - rgw: [client.1]
+ - print: "**** done rgw 4-final-workload"
+ - swift:
+ client.1:
+ rgw_server: client.1
+ - print: "**** done swift 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/distros b/src/ceph/qa/suites/upgrade/luminous-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/objectstore b/src/ceph/qa/suites/upgrade/luminous-x/parallel/objectstore
new file mode 120000
index 0000000..016cbf9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/% b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/%
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/centos_latest.yaml b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/centos_latest.yaml
new file mode 120000
index 0000000..b5973b9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/centos_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/centos_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/ubuntu_latest.yaml b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/ubuntu_latest.yaml
new file mode 120000
index 0000000..cc5b15b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/ubuntu_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/ubuntu_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/point-to-point-upgrade.yaml b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/point-to-point-upgrade.yaml
new file mode 100644
index 0000000..4c81c34
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/point-to-point-upgrade.yaml
@@ -0,0 +1,225 @@
+meta:
+- desc: |
+ Run ceph on two nodes, using one of them as a client,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+ install ceph/luminous v12.2.2 point version
+ run workload and upgrade-sequence in parallel
+ install ceph/luminous latest version
+ run workload and upgrade-sequence in parallel
+ install ceph/-x version (luminous or master/mimic)
+ run workload and upgrade-sequence in parallel
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - scrub
+ - osd_map_max_advance
+ - wrongly marked
+ fs: xfs
+ conf:
+ mon:
+ mon debug unsafe allow tier with nonempty snaps: true
+ mon warn on pool no app: false
+ osd:
+ osd map max advance: 1000
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ client:
+ rgw_crypt_require_ssl: false
+ rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- print: "**** v12.2.2 about to install"
+- install:
+ tag: v12.2.2
+ # the line below can be removed; it is left over from the jewel test
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
+- print: "**** done v12.2.2 install"
+- ceph:
+ fs: xfs
+ add_osds_to_crush: true
+- print: "**** done ceph xfs"
+- sequential:
+ - workload
+- print: "**** done workload"
+- install.upgrade:
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ mon.a:
+ branch: luminous
+ mon.b:
+ branch: luminous
+ # Note that client.1 is NOT upgraded at this point
+- parallel:
+ - workload_luminous
+ - upgrade-sequence_luminous
+- print: "**** done parallel luminous branch"
+- install.upgrade:
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ client.1:
+ branch: luminous
+- print: "**** done branch: luminous install.upgrade on client.1"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
+- parallel:
+ - workload_x
+ - upgrade-sequence_x
+- print: "**** done parallel -x branch"
+- exec:
+ osd.0:
+ - ceph osd set-require-min-compat-client luminous
+# Run librados tests on the -x upgraded cluster
+- install.upgrade:
+ client.1:
+- workunit:
+ branch: luminous
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+- print: "**** done final test on -x cluster"
+#######################
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+workload_luminous:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload_luminous"
+ - sequential:
+ - rgw: [client.0]
+ - print: "**** done rgw workload_luminous"
+ - s3tests:
+ client.0:
+ force-branch: ceph-luminous
+ rgw_server: client.0
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_luminous"
+upgrade-sequence_luminous:
+ sequential:
+ - print: "**** done branch: luminous install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all luminous branch mds/osd/mon"
+workload_x:
+ sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload_x NOT upgraded client"
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload_x upgraded client"
+ - rgw: [client.1]
+ - print: "**** done rgw workload_x"
+ - s3tests:
+ client.1:
+ force-branch: ceph-luminous
+ rgw_server: client.1
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_x"
+upgrade-sequence_x:
+ sequential:
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart:
+ daemons: [osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mgr.x]
+ wait-for-healthy: false
+ - exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph.healthy:
+ - print: "**** done ceph.restart all -x branch mds/osd/mon"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/% b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/%
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster
new file mode 120000
index 0000000..3580937
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster
@@ -0,0 +1 @@
+../stress-split/0-cluster/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-ceph-install b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-ceph-install
new file mode 120000
index 0000000..0479ac5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-ceph-install
@@ -0,0 +1 @@
+../stress-split/1-ceph-install/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade
new file mode 120000
index 0000000..ab35fc1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade
@@ -0,0 +1 @@
+../stress-split/2-partial-upgrade/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..edae7b3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml
new file mode 100644
index 0000000..c89551e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml
new file mode 120000
index 0000000..a66a7dc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../stress-split/5-finish-upgrade.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml
new file mode 100644
index 0000000..50a1465
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which differs from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/distros b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore
new file mode 120000
index 0000000..016cbf9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/% b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/%
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..e3ad918
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,29 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ mon:
+ mon warn on osd down out interval zero: false
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml
new file mode 100644
index 0000000..2230525
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: install ceph/luminous latest
+tasks:
+- install:
+ branch: luminous
+- print: "**** done install luminous"
+- ceph:
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph osd set-require-min-compat-client luminous
+- print: "**** done ceph "
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on osd down out interval zero: false
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..87fa1d5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ install/upgrade ceph/-x on one node only
+ 1st half
+ restart: mon.a/b/c, mgr.x, osd.0/1/2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..b3fddef
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
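
thrashosds automates repeated cycles of marking osds down/out and reviving them while the workload runs; a rough manual equivalent of a single cycle, for illustration only:

    ceph osd down 0      # mark osd.0 down (it will try to rejoin)
    ceph osd out 0       # remove it from the data distribution
    sleep 60             # let recovery make progress
    ceph osd in 0        # bring it back into the cluster
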
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/+ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml
new file mode 100644
index 0000000..626ae8e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+- print: "**** done radosbench 4-workload"
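
Each radosbench step wraps the rados bench utility; a minimal standalone sketch of the same load (the pool name rbd here is an assumption, the task normally creates its own pool):

    # 150 seconds of 4 MB object writes, keeping objects for later reads
    rados bench -p rbd 150 write --no-cleanup
    # optional read-back phases
    rados bench -p rbd 150 seq
    rados bench -p rbd 150 rand
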
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 0000000..f8cc4d8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 4-workload"
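
The workunit above is a thin wrapper; cls/test_cls_rbd.sh essentially runs the gtest binary for the rbd object classes:

    ceph_test_cls_rbd    # gtest suite exercising cls_rbd methods
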
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..30a677a
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 4-workload"
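
rbd/import_export.sh exercises the rbd import/export CLI round trip; a hedged sketch of the core sequence (image names are illustrative, not from the script):

    rbd create testimg --size 128 --new-format   # RBD_CREATE_ARGS applied here
    rbd export testimg /tmp/testimg.bin
    rbd import --new-format /tmp/testimg.bin testimg2
    rbd rm testimg && rbd rm testimg2
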
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 0000000..9079aa3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 4-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml
new file mode 100644
index 0000000..41e34d6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 4-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..f56d0de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 4-workload"
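
The rados task drives librados directly, but the same snapshot operations are reachable from the CLI; an illustrative sketch (pool name rbd assumed):

    rados -p rbd put obj1 /etc/hosts     # write an object
    rados -p rbd mksnap snap1            # take a pool snapshot
    rados -p rbd put obj1 /etc/passwd    # overwrite the object
    rados -p rbd rollback obj1 snap1     # restore it from the snapshot
    rados -p rbd rmsnap snap1
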
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml
new file mode 100644
index 0000000..1d528cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install.upgrade:
+ osd.3:
+ client.0:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+
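
With wait-for-healthy set to false the restart only waits for the osds to report up; a sketch of the manual checks this corresponds to:

    ceph osd stat     # e.g. "6 osds: 6 up, 6 in"
    ceph health       # may still be HEALTH_WARN while recovery finishes
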
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rbd-python.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rbd-python.yaml
new file mode 100644
index 0000000..92fe658
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rbd-python.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml
new file mode 100644
index 0000000..76e5d6f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 7-final-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 7-final-workload"
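
The swift task runs the Swift API functional tests against the rgw endpoint started above; a minimal smoke check with the swift CLI (the port, account, user, and key below are assumptions, not values from this suite):

    swift -A http://localhost:7480/auth/v1.0 -U test:tester -K secret stat
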
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/distros b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore.yaml
new file mode 120000
index 0000000..d644598
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../../objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml
new file mode 120000
index 0000000..03750e5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../../../objectstore/filestore-xfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file