author     Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-04 13:43:33 +0800
committer  Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-05 11:59:39 +0800
commit     812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch)
tree       04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/qa/suites/upgrade
parent     15280273faafb77777eab341909a3f495cf248d9 (diff)
initial code repo
This patch creates the initial code repo. For Ceph, the Luminous stable release will be used as the base code, and subsequent changes and optimizations for Ceph will be added on top of it. For OpenSDS, any changes can currently be upstreamed into the original OpenSDS repo (https://github.com/opensds/opensds), so stor4nfv will clone the OpenSDS code directly to deploy the stor4nfv environment. The scripts for deployment based on Ceph and OpenSDS will be put into the 'ci' directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
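For illustration only, a minimal sketch of the deployment flow the commit message describes: clone the upstream OpenSDS repo and then run the Ceph/OpenSDS deployment scripts kept under the 'ci' directory. The entry-point name ci/deploy.sh is an assumption for illustration and is not part of this commit.

    # hypothetical sketch; ci/deploy.sh is an assumed entry point, not added by this commit
    git clone https://github.com/opensds/opensds
    bash ci/deploy.sh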
Diffstat (limited to 'src/ceph/qa/suites/upgrade')
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/%0
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml26
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/%0
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml21
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/%0
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/start.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/jewel-client-x.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_api_tests.yaml21
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_cli_import_export.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/%0
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/start.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/jewel-client-x.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/defaults.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/layering.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/rbd_notification_tests.yaml21
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/%0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml21
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml20
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml20
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml36
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml5
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml29
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-by-daemon.yaml30
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml11
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/%0
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml83
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload1
l---------src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros1
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%0
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml5
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml52
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/%0
l---------src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml82
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/%0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml32
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml60
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml41
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml24
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml38
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml23
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml11
l---------src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml13
l---------src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/parallel/distros1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/%0
l---------src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml236
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%0
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml22
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml35
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/%0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml20
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml13
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml40
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml9
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml16
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros1
l---------src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/kraken-luminous.yaml61
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/%0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/start.yaml33
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/kraken.yaml39
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/blogbench.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/ec-rados-default.yaml24
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_loadgenbig.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_python.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-all.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml35
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/4-luminous.yaml4
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/5-workload.yaml11
l---------src/ceph/qa/suites/upgrade/kraken-x/parallel/6-luminous-with-mgr.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/blogbench.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados-snaps-few-objects.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_loadgenmix.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_mon_thrash.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_cls.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_import_export.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rgw_swift.yaml13
l---------src/ceph/qa/suites/upgrade/kraken-x/parallel/distros1
l---------src/ceph/qa/suites/upgrade/kraken-x/parallel/objectstore1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/%0
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/0-cluster1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/1-kraken-install1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/2-partial-upgrade1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/4-ec-workload.yaml22
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/5-finish-upgrade.yaml1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/6-luminous-with-mgr.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/7-final-workload.yaml35
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/distros1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/objectstore1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/%0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/openstack.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/start.yaml27
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/kraken.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/firsthalf.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/radosbench.yaml40
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-cls.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-import-export.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd_api.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/readwrite.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/snaps-few-objects.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/5-finish-upgrade.yaml9
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/6-luminous-with-mgr.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rbd-python.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rgw-swift.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/snaps-many-objects.yaml16
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/distros1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/bluestore.yaml1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/filestore-xfs.yaml1
l---------src/ceph/qa/suites/upgrade/kraken-x/stress-split/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/%0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml40
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml43
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml24
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml35
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_no_upgrated.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_upgrated.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml13
l---------src/ceph/qa/suites/upgrade/luminous-x/parallel/distros1
l---------src/ceph/qa/suites/upgrade/luminous-x/parallel/objectstore1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/%0
l---------src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/centos_latest.yaml1
l---------src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/ubuntu_latest.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/point-to-point-upgrade.yaml225
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/%0
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-ceph-install1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml22
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml35
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/distros1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/%0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml6
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml29
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml25
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml40
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml16
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rbd-python.yaml10
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml16
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split/distros1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore.yaml1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml1
l---------src/ceph/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml1
268 files changed, 3402 insertions, 0 deletions
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/% b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/%
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml
new file mode 100644
index 0000000..ea9c37d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/start.yaml
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+overrides:
+ ceph:
+ log-whitelist:
+ - failed to encode map
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml
new file mode 100644
index 0000000..ffd4194
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/hammer-client-x.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install hammer"
+upgrade_workload:
+ sequential:
+ - install.upgrade:
+ exclude_packages: ['ceph-test-dbg']
+ client.0:
+ - print: "**** done install.upgrade client.0"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml
new file mode 100644
index 0000000..6638d14
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_api_tests.yaml
@@ -0,0 +1,26 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default features: 13
+tasks:
+- exec:
+ client.0:
+ - "cp $(which ceph_test_librbd_api) $TESTDIR/ceph_test_librbd_api"
+- sequential:
+ - upgrade_workload
+- ceph:
+- print: "**** done ceph"
+- exec:
+ client.0:
+ - "cp --force $TESTDIR/ceph_test_librbd_api $(which ceph_test_librbd_api)"
+ - "rm -rf $TESTDIR/ceph_test_librbd_api"
+- print: "**** done reverting to hammer ceph_test_librbd_api"
+- workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd_api.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd/test_librbd_api.sh"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..dfaa0e8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/rbd_cli_import_export.yaml
@@ -0,0 +1,13 @@
+tasks:
+- sequential:
+ - upgrade_workload
+- ceph:
+- print: "**** done ceph"
+- workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --image-feature layering,exclusive-lock,object-map
+- print: "**** done rbd/import_export.sh"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/% b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/%
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml
new file mode 100644
index 0000000..4c9f324
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/start.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+- - client.1
+overrides:
+ ceph:
+ log-whitelist:
+ - failed to encode map
+ fs: xfs
+ conf:
+ client:
+ rbd default features: 1
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml
new file mode 100644
index 0000000..a625642
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/hammer-client-x.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install hammer"
+- install.upgrade:
+ exclude_packages: ['ceph-test-dbg']
+ client.1:
+- print: "**** done install.upgrade client.1"
+- ceph:
+- print: "**** done ceph"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml
new file mode 100644
index 0000000..984dfa0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/rbd_notification_tests.yaml
@@ -0,0 +1,21 @@
+tasks:
+- workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/notify_master.sh
+ client.1:
+ - rbd/notify_slave.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd: old librbd -> new librbd"
+- workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/notify_slave.sh
+ client.1:
+ - rbd/notify_master.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd: new librbd -> old librbd"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/% b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/%
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/start.yaml
new file mode 100644
index 0000000..a4cd754
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/start.yaml
@@ -0,0 +1,13 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - osd.0
+ - osd.1
+ - osd.2
+- - client.0
+overrides:
+ ceph:
+ log-whitelist:
+ - failed to encode map
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/jewel-client-x.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/jewel-client-x.yaml
new file mode 100644
index 0000000..87ea402
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/jewel-client-x.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install jewel"
+upgrade_workload:
+ sequential:
+ - install.upgrade:
+ exclude_packages: ['ceph-test', 'ceph-test-dbg']
+ client.0:
+ - print: "**** done install.upgrade to -x on client.0"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_api_tests.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_api_tests.yaml
new file mode 100644
index 0000000..8939f3a
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_api_tests.yaml
@@ -0,0 +1,21 @@
+tasks:
+- exec:
+ client.0:
+ - "cp $(which ceph_test_librbd_api) $TESTDIR/ceph_test_librbd_api"
+- sequential:
+ - upgrade_workload
+- ceph:
+- print: "**** done ceph"
+- exec:
+ client.0:
+ - "cp --force $TESTDIR/ceph_test_librbd_api $(which ceph_test_librbd_api)"
+ - "rm -rf $TESTDIR/ceph_test_librbd_api"
+- print: "**** done reverting to jewel ceph_test_librbd_api"
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd_api.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd/test_librbd_api.sh"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_cli_import_export.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_cli_import_export.yaml
new file mode 100644
index 0000000..545354f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/rbd_cli_import_export.yaml
@@ -0,0 +1,13 @@
+tasks:
+- sequential:
+ - upgrade_workload
+- ceph:
+- print: "**** done ceph"
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --image-feature layering,exclusive-lock,object-map
+- print: "**** done rbd/import_export.sh"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/% b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/%
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/start.yaml
new file mode 100644
index 0000000..4db664b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/start.yaml
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+- - client.1
+overrides:
+ ceph:
+ log-whitelist:
+ - failed to encode map
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/jewel-client-x.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/jewel-client-x.yaml
new file mode 100644
index 0000000..4ce73a4
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/jewel-client-x.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install jewel"
+- install.upgrade:
+ exclude_packages: ['ceph-test', 'ceph-test-dbg']
+ client.1:
+- print: "**** done install.upgrade to -x on client.0"
+- ceph:
+- print: "**** done ceph task"
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/defaults.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/defaults.yaml
new file mode 100644
index 0000000..dff6623
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/defaults.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default features: 61
+
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/layering.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/layering.yaml
new file mode 100644
index 0000000..5613d01
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/layering.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default features: 1
+
diff --git a/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/rbd_notification_tests.yaml b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/rbd_notification_tests.yaml
new file mode 100644
index 0000000..1fb6822
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/rbd_notification_tests.yaml
@@ -0,0 +1,21 @@
+tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/notify_master.sh
+ client.1:
+ - rbd/notify_slave.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd: old librbd -> new librbd"
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/notify_slave.sh
+ client.1:
+ - rbd/notify_master.sh
+ env:
+ RBD_FEATURES: "13"
+- print: "**** done rbd: new librbd -> old librbd"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..bbddfb3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,21 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ mon debug unsafe allow tier with nonempty snaps: true
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - reached quota
+roles:
+- - mon.a
+ - osd.0
+ - osd.1
+ - mgr.x
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml
new file mode 100644
index 0000000..c57e071
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done hammer"
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ osd.0:
+ branch: jewel
+ osd.2:
+ branch: jewel
+- print: "*** client.0 upgraded packages to jewel"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..e4f3ee1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,20 @@
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..d86c2d2
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,8 @@
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..50ba808
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,8 @@
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..997f7ba
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,8 @@
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..d1046da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,8 @@
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..1aaeac8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,18 @@
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart do not wait for healthy"
+ - exec:
+ mon.a:
+ - sleep 300 # http://tracker.ceph.com/issues/17808
+ - ceph osd set sortbitwise
+ - ceph osd set require_jewel_osds
+ - ceph.healthy:
+ - print: "**** done ceph.healthy"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml
new file mode 100644
index 0000000..f2093da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml
@@ -0,0 +1,36 @@
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+#do we need to use "ceph osd crush tunables hammer" ?
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables hammer
+ - print: "**** done ceph osd crush tunables hammer"
+ - ceph.restart:
+ daemons: [mon.b, mon.c]
+ wait-for-healthy: false
+ - sleep:
+ duration: 30
+ - exec:
+ osd.0:
+ - sleep 300 # http://tracker.ceph.com/issues/17808
+ - ceph osd set sortbitwise
+ - ceph osd set require_jewel_osds
+ - ceph.healthy:
+ - sleep:
+ duration: 60
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml
new file mode 100644
index 0000000..60a3cb6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml
@@ -0,0 +1,5 @@
+tasks:
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ client.0:
+ branch: jewel
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml
new file mode 120000
index 0000000..987c18c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml
@@ -0,0 +1 @@
+../../../../releases/jewel.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml
new file mode 100644
index 0000000..ab41db6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml
@@ -0,0 +1,14 @@
+tasks:
+ - install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ client.0:
+ branch: jewel
+ - print: "**** done install.upgrade client.0 to jewel"
+ - install.upgrade:
+ osd.0:
+ osd.2:
+ - print: "**** done install.upgrade daemons to x"
+ - parallel:
+ - workload2
+ - upgrade-sequence2
+ - print: "**** done parallel workload2 and upgrade-sequence2"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..9818541
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml
@@ -0,0 +1,29 @@
+meta:
+- desc: |
+ run a randomized correctness test for rados operations
+ on an erasure-coded pool
+workload2:
+ full_sequential:
+ - rados:
+ erasure_code_profile:
+ name: teuthologyprofile2
+ k: 2
+ m: 1
+ crush-failure-domain: osd
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml
new file mode 100644
index 0000000..088976b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload2:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..30f1307
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload2:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..e21839b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload2:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..cae2c06
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload2:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..356f8ad
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence2:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-by-daemon.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-by-daemon.yaml
new file mode 100644
index 0000000..0a69a7f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-by-daemon.yaml
@@ -0,0 +1,30 @@
+meta:
+- desc: |
+ upgrade the ceph cluster,
+ upgrade in two steps
+ step one ordering: mon.a, mon.b, mon.c, osd.0, osd.1
+ step two ordering: osd.2, osd.3
+ ceph is expected to be in a healthy state after each step
+upgrade-sequence2:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables jewel
+ - print: "**** done ceph osd crush tunables jewel"
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..e0b0ba1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done 7-final-workload/rados-snaps-few-objects.yaml"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..b1c6791
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,6 @@
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done 7-final-workload/rados_loadgenmix.yaml"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..807afb9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,11 @@
+tasks:
+ - sequential:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+ - print: "**** done rados/test-upgrade-v11.0.0.sh from 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..973c438
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+- print: "**** done 7-final-workload/rbd_cls.yaml"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..d8116a9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml
@@ -0,0 +1,8 @@
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh from 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml
new file mode 100644
index 0000000..f1cf2de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rgw: [client.1]
+- s3tests:
+ client.1:
+ rgw_server: client.1
+- print: "**** done rgw_server from 7-final-workload"
+overrides:
+ ceph:
+ conf:
+ client:
+ rgw lc debug interval: 10
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/%
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster
new file mode 120000
index 0000000..9bb7a0d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster
@@ -0,0 +1 @@
+../../jewel-x/stress-split/0-cluster
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml
new file mode 100644
index 0000000..212b8ff
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml
@@ -0,0 +1,83 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+- print: '**** done hammer'
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+- install.upgrade:
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+ osd.0:
+ branch: jewel
+ osd.3:
+ branch: jewel
+- print: '*** client.0 upgraded packages to jewel'
+- parallel:
+ - workload-h-j
+ - upgrade-sequence-h-j
+- print: '**** done parallel'
+- install.upgrade:
+ client.0:
+ branch: jewel
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+- exec:
+ osd.0:
+ - ceph osd set sortbitwise
+ - ceph osd set require_jewel_osds
+ - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ;
+ done
+- print: '**** done install.upgrade client.0 to jewel'
+upgrade-sequence-h-j:
+ sequential:
+ - ceph.restart:
+ daemons:
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - osd.4
+ - osd.5
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons:
+ - mon.a
+ - mon.b
+ - mon.c
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: '**** done ceph.restart do not wait for healthy'
+ - exec:
+ mon.a:
+ - sleep 300
+ - ceph osd set require_jewel_osds
+ - ceph.healthy: null
+ - print: '**** done ceph.healthy'
+workload-h-j:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade
new file mode 120000
index 0000000..fad7148
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade
@@ -0,0 +1 @@
+../../jewel-x/stress-split/2-partial-upgrade/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash
new file mode 120000
index 0000000..894fdeb
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash
@@ -0,0 +1 @@
+../../jewel-x/stress-split/3-thrash/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload
new file mode 120000
index 0000000..6135fb0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload
@@ -0,0 +1 @@
+../../jewel-x/stress-split/4-workload \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml
new file mode 120000
index 0000000..7d39ac6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../../jewel-x/stress-split/5-finish-upgrade.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload
new file mode 120000
index 0000000..97adf26
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload
@@ -0,0 +1 @@
+../../jewel-x/stress-split/7-final-workload/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
new file mode 100644
index 0000000..9cd743c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+roles:
+- - mon.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
new file mode 100644
index 0000000..7485dce
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
@@ -0,0 +1,13 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+- print: '**** done hammer'
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
new file mode 100644
index 0000000..f0e22bf
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+ client.0:
+ - ceph osd erasure-code-profile set t-profile crush-failure-domain=osd k=2 m=1
+ - ceph osd pool create base-pool 4 4 erasure t-profile
+ - ceph osd pool application enable base-pool rados
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
new file mode 100644
index 0000000..36dc06d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create base-pool 4
+ - ceph osd pool application enable base-pool rados
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
new file mode 100644
index 0000000..d9cc348
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create cache-pool 4
+ - ceph osd tier add base-pool cache-pool
+ - ceph osd tier cache-mode cache-pool writeback
+ - ceph osd tier set-overlay base-pool cache-pool
+ - ceph osd pool set cache-pool hit_set_type bloom
+ - ceph osd pool set cache-pool hit_set_count 8
+ - ceph osd pool set cache-pool hit_set_period 5
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
new file mode 100644
index 0000000..b2fc171
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
@@ -0,0 +1,52 @@
+tasks:
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+
+workload:
+ sequential:
+ - rados:
+ clients: [client.0]
+ pools: [base-pool]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ - print: "**** done rados"
+
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+ osd.0:
+ branch: jewel
+ osd.2:
+ branch: jewel
+ - print: "**** done install.upgrade osd.0 and osd.2"
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart do not wait for healthy"
+ - exec:
+ mon.a:
+ - sleep 300 # http://tracker.ceph.com/issues/17808
+ - ceph osd set sortbitwise
+ - ceph osd set require_jewel_osds
+ - ceph.healthy:
+ - print: "**** done ceph.healthy"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/% b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml
new file mode 120000
index 0000000..b5973b9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/centos_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml
new file mode 120000
index 0000000..cc5b15b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/ubuntu_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml
new file mode 100644
index 0000000..9adede7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml
@@ -0,0 +1,82 @@
+meta:
+- desc: |
+ Set up a 4-node ceph cluster using ceph-deploy, use the latest
+ stable jewel as the initial release, upgrade to luminous and
+ also set up mgr nodes after the upgrade, and check that the
+ cluster reaches a healthy state. After the upgrade, run the kernel
+ tar/untar task and the systemd task. This test will detect any
+ ceph upgrade issues and systemd issues.
+overrides:
+ ceph-deploy:
+ fs: xfs
+ conf:
+ global:
+ mon pg warn min per osd: 2
+ osd:
+ osd pool default size: 2
+ osd objectstore: filestore
+ osd sloppy crc: true
+ client:
+ rbd default features: 5
+openstack:
+- machine:
+ disk: 100
+- volumes:
+ count: 3
+ size: 30
+# reluctantly :( hard-coded machine type
+# it will override command-line args given to teuthology-suite
+machine_type: vps
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mgr.y
+- - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+- - osd.6
+ - osd.7
+ - osd.8
+ - client.0
+tasks:
+- ssh-keys:
+- print: "**** done ssh-keys"
+- ceph-deploy:
+ branch:
+ stable: jewel
+ skip-mgr: True
+- print: "**** done initial ceph-deploy"
+- ceph-deploy.upgrade:
+ branch:
+ dev: luminous
+ setup-mgr-node: True
+ check-for-healthy: True
+ roles:
+ - mon.a
+ - mon.b
+ - mon.c
+ - osd.6
+- print: "**** done ceph-deploy upgrade"
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph osd set-require-min-compat-client luminous
+- print: "**** done `ceph osd require-osd-release luminous`"
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
+- print: "**** done kernel_untar_build.sh"
+- systemd:
+- print: "**** done systemd"
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
+- print: "**** done rados/load-gen-mix.sh"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/% b/src/ceph/qa/suites/upgrade/jewel-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..d1f1e10
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,32 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with clients 0-3 on a separate third node
+ and client 4 on a fourth. Use xfs beneath the osds.
+ CephFS tests run on clients 2 and 3.
+roles:
+- - mon.a
+ - mds.a
+ - mgr.x
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+- - client.4
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - \(MDS_FAILED\)
+ - \(OBJECT_
+ - is unresponsive
+ conf:
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..c64b2cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
@@ -0,0 +1,60 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.1:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.2:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.3:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+meta:
+- desc: |
+ install ceph/jewel latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done installing jewel"
+- ceph:
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - is unresponsive
+ conf:
+ global:
+ mon warn on pool no app: false
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade mon.a and mon.b"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml
new file mode 100644
index 0000000..83457c0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml
@@ -0,0 +1,11 @@
+# do not require luminous osds at mkfs time; only set flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets), and verify we are healthy.
+tasks:
+- full_sequential_finally:
+ - ceph.osd_scrub_pgs:
+ cluster: ceph
+ - exec:
+ mon.a:
+ - ceph pg dump -f json-pretty
+ - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
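For reference, a minimal Python sketch of the same "no legacy snapsets remain" check as the grep above, parsing the JSON from `ceph pg dump sum -f json-pretty` instead of grepping it; the exact nesting of the `num_legacy_snapsets` counter is not assumed here, the structure is simply searched recursively for the key.

    # Sketch only: equivalent of the grep-based check above, done by parsing
    # the JSON emitted by `ceph pg dump sum -f json-pretty`. The nesting of
    # the counter is not assumed; the structure is searched recursively.
    import json
    import subprocess

    def find_key(node, key):
        """Yield every value stored under `key` anywhere in a nested structure."""
        if isinstance(node, dict):
            for k, v in node.items():
                if k == key:
                    yield v
                yield from find_key(v, key)
        elif isinstance(node, list):
            for item in node:
                yield from find_key(item, key)

    dump = json.loads(subprocess.check_output(
        ["ceph", "pg", "dump", "sum", "-f", "json-pretty"]))
    counts = list(find_key(dump, "num_legacy_snapsets"))
    assert counts, "num_legacy_snapsets not reported by this ceph version"
    assert all(c == 0 for c in counts), "legacy snapsets remain: %r" % counts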
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..56eedbd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse: [client.2]
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml
new file mode 100644
index 0000000..dfbcbea
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml
@@ -0,0 +1,41 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+workload:
+ full_sequential:
+ - sequential:
+ - exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+ - rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+openstack:
+ - machine:
+ ram: 15000 # MB
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..fb9d30f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..348f1ae
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..15d892e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..bb2d3ea
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..6a0f829
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ - ceph.restart:
+ daemons: [mds.a, osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..2d74e9e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,38 @@
+meta:
+- desc: |
+ upgrade the ceph cluster,
+ upgrade in two steps
+ step one ordering: mon.a, osd.0, osd.1, mds.a
+ step two ordering: mon.b, mon.c, osd.2, osd.3
+ ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b, mon.c]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables jewel
+ - print: "**** done ceph osd crush tunables jewel"
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml
new file mode 100644
index 0000000..e57b377
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml
@@ -0,0 +1,23 @@
+# this is the same fragment as ../../../../releases/luminous.yaml
+# but without line "ceph osd set-require-min-compat-client luminous"
+
+tasks:
+- exec:
+ mgr.x:
+ - mkdir -p /var/lib/ceph/mgr/ceph-x
+ - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
+ - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
+- ceph.restart:
+ daemons: [mgr.x]
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+- ceph.healthy:
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on osd down out interval zero: false
+ log-whitelist:
+ - no active mgr
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml
new file mode 100644
index 0000000..f7e9de4
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd on the not-yet-upgraded client.4
+ (covers issue http://tracker.ceph.com/issues/21660)
+tasks:
+ - workunit:
+ branch: jewel
+ clients:
+ client.4:
+ - rbd/import_export.sh
+ - print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..5c72153
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous-with-mgr.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml
new file mode 100644
index 0000000..20c0ffd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml
@@ -0,0 +1,8 @@
+tasks:
+- exec:
+ mon.a:
+ - ceph osd set-require-min-compat-client jewel
+ - ceph osd crush set-all-straw-buckets-to-straw2
+ - ceph osd crush weight-set create-compat
+ - ceph osd crush weight-set reweight-compat osd.0 .9
+ - ceph osd crush weight-set reweight-compat osd.1 1.2
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d73459e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse: [client.3]
+ - print: "**** done ceph-fuse 5-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..7dd61c5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..b218b92
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..c835a65
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ librados C and C++ api tests while thrashing monitors
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+ - print: "**** done rados/test-upgrade-v11.0.0.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..46bbf76
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..5ae7491
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..780c4ad
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ swift api tests for rgw
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - rgw: [client.1]
+ - print: "**** done rgw 7-final-workload"
+ - swift:
+ client.1:
+ rgw_server: client.1
+ - print: "**** done swift 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml
new file mode 120000
index 0000000..81df389
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml
@@ -0,0 +1 @@
+5-workload.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros b/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/% b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml
new file mode 120000
index 0000000..c79327b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml
@@ -0,0 +1 @@
+../../../../../distros/all/centos_7.3.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml
new file mode 120000
index 0000000..6237042
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml
@@ -0,0 +1 @@
+../../../../../distros/all/ubuntu_14.04.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
new file mode 100644
index 0000000..d68c258
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
@@ -0,0 +1,236 @@
+meta:
+- desc: |
+ Run ceph on two nodes, using one of them as a client,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+ install ceph/jewel v10.2.0 point version
+ run workload and upgrade-sequence in parallel
+ install ceph/jewel latest version
+ run workload and upgrade-sequence in parallel
+ install ceph/-x version (jewel or kraken)
+ run workload and upgrade-sequence in parallel
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - scrub
+ - osd_map_max_advance
+ - wrongly marked
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(OSD_
+ - \(PG_
+ - \(CACHE_
+ fs: xfs
+ conf:
+ global:
+ mon warn on pool no app: false
+ mon:
+ mon debug unsafe allow tier with nonempty snaps: true
+ osd:
+ osd map max advance: 1000
+ osd map cache size: 1100
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- print: "**** v10.2.0 about to install"
+- install:
+ tag: v10.2.0
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
+- print: "**** done v10.2.0 install"
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+- print: "**** done ceph xfs"
+- sequential:
+ - workload
+- print: "**** done workload v10.2.0"
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ mon.a:
+ branch: jewel
+ mon.b:
+ branch: jewel
+ # Note that client.1 IS NOT upgraded at this point
+ #client.1:
+ #branch: jewel
+- parallel:
+ - workload_jewel
+ - upgrade-sequence_jewel
+- print: "**** done parallel jewel branch"
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ client.1:
+ branch: jewel
+- print: "**** done branch: jewel install.upgrade on client.1"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
+- parallel:
+ - workload_x
+ - upgrade-sequence_x
+- print: "**** done parallel -x branch"
+- exec:
+ osd.0:
+ - ceph osd set-require-min-compat-client luminous
+# Run librados tests on the -x upgraded cluster
+- install.upgrade:
+ client.1:
+- workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+ - cls
+- print: "**** done final test on -x cluster"
+#######################
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+workload_jewel:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ env:
+ CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
+ - print: "**** done rados/test.sh & cls workload_jewel"
+ - sequential:
+ - rgw: [client.0]
+ - print: "**** done rgw workload_jewel"
+ - s3tests:
+ client.0:
+ force-branch: ceph-jewel
+ rgw_server: client.0
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_jewel"
+upgrade-sequence_jewel:
+ sequential:
+ - print: "**** done branch: jewel install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all jewel branch mds/osd/mon"
+workload_x:
+ sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0-noec.sh
+ - cls
+ env:
+ CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
+ - print: "**** done rados/test-upgrade-v11.0.0-noec.sh & cls workload_x NOT upgraded client"
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rados/test-upgrade-v11.0.0-noec.sh
+ - cls
+ - print: "**** done rados/test-upgrade-v11.0.0-noec.sh & cls workload_x upgraded client"
+ - rgw: [client.1]
+ - print: "**** done rgw workload_x"
+ - s3tests:
+ client.1:
+ force-branch: ceph-jewel
+ rgw_server: client.1
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_x"
+upgrade-sequence_x:
+ sequential:
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart:
+ daemons: [osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - exec:
+ mgr.x:
+ - mkdir -p /var/lib/ceph/mgr/ceph-x
+ - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
+ - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
+ - ceph.restart:
+ daemons: [mgr.x]
+ wait-for-healthy: false
+ - exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph.healthy:
+ - print: "**** done ceph.restart all -x branch mds/osd/mon"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/% b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster
new file mode 120000
index 0000000..3580937
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster
@@ -0,0 +1 @@
+../stress-split/0-cluster/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install
new file mode 120000
index 0000000..3e7cbc3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install
@@ -0,0 +1 @@
+../stress-split/1-jewel-install/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml
new file mode 120000
index 0000000..522db1b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml
@@ -0,0 +1 @@
+../parallel/1.5-final-scrub.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade
new file mode 120000
index 0000000..ab35fc1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade
@@ -0,0 +1 @@
+../stress-split/2-partial-upgrade/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..edae7b3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..c89551e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml
new file mode 120000
index 0000000..a66a7dc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../stress-split/5-finish-upgrade.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml
new file mode 120000
index 0000000..2b99d5c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml
@@ -0,0 +1 @@
+../stress-split/6-luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..a82f11b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
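As a quick sanity check of the arithmetic stated in the comment at the top of this fragment, a small sketch follows; the 1376-byte per-chunk size is taken from that comment, not derived here.

    # Sketch only: verify the stripe_width arithmetic stated in the comment
    # above. The 1376-byte chunk size comes from that comment, not from code.
    K = 3
    CHUNK = 1376                       # bytes per chunk, per the comment
    DEFAULT_STRIPE_WIDTH = 4096        # default stripe_width, per the comment
    stripe_width = CHUNK * K
    assert stripe_width == 4128
    assert stripe_width != DEFAULT_STRIPE_WIDTH
    assert stripe_width % (1024 * 1024) != 0   # not a multiple of 1024*1024
    print("stripe_width =", stripe_width)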
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/% b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..4f40219
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,20 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..31ca3e5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: install ceph/jewel latest
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install jewel"
+- ceph:
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+ log-whitelist:
+ - required past_interval bounds are empty
+- print: "**** done ceph"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml
new file mode 120000
index 0000000..522db1b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml
@@ -0,0 +1 @@
+../parallel/1.5-final-scrub.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..442dcf1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ install upgrade of ceph/-x on one node only
+ (1st half)
+ restart: mon.a,b,c and osd.0,1,2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a,mon.b,mon.c,osd.0, osd.1, osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..b3fddef
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml
new file mode 100644
index 0000000..626ae8e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+- print: "**** done radosbench 7-workload"
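The fragment above repeats the same 150-second radosbench stanza eleven times under full_sequential. A minimal sketch (assuming PyYAML is available) of how such a repeated fragment could be generated rather than written out by hand:

    # Sketch only: generate the repeated radosbench stanza above with PyYAML.
    import yaml

    REPEATS = 11   # the fragment lists the same 150-second run eleven times
    fragment = {
        "meta": [{"desc": "generate write load with rados bench"}],
        "stress-tasks": [
            {"full_sequential": [
                {"radosbench": {"clients": ["client.0"], "time": 150}}
                for _ in range(REPEATS)
            ]},
            {"print": "**** done radosbench 7-workload"},
        ],
    }
    print(yaml.safe_dump(fragment, default_flow_style=False))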
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 0000000..92779bc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..693154d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 0000000..64c0e33
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml
new file mode 100644
index 0000000..41e34d6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..f56d0de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml
new file mode 100644
index 0000000..1d528cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install.upgrade:
+ osd.3:
+ client.0:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml
new file mode 120000
index 0000000..02263d1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml
@@ -0,0 +1 @@
+../parallel/6.5-crush-compat.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml
new file mode 100644
index 0000000..56ba21d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml
new file mode 100644
index 0000000..76e5d6f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/kraken-luminous.yaml b/src/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/kraken-luminous.yaml
new file mode 100644
index 0000000..4a55362
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/kraken-luminous.yaml
@@ -0,0 +1,61 @@
+meta:
+- desc: |
+ Set up a 4-node ceph cluster using ceph-deploy, use the latest
+ stable kraken as the initial release, upgrade to luminous and
+ also set up mgr nodes after the upgrade, and check that the
+ cluster reaches a healthy state. After the upgrade, run the kernel
+ tar/untar task and the systemd task. This test will detect any
+ ceph upgrade issues and systemd issues.
+overrides:
+ ceph-deploy:
+ fs: xfs
+ conf:
+ global:
+ mon pg warn min per osd: 2
+ osd:
+ osd pool default size: 2
+ osd objectstore: filestore
+ osd sloppy crc: true
+ client:
+ rbd default features: 5
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mgr.y
+- - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+- - osd.6
+ - osd.7
+ - osd.8
+ - client.0
+tasks:
+- ssh-keys:
+- ceph-deploy:
+ branch:
+ stable: kraken
+ skip-mgr: True
+- ceph-deploy.upgrade:
+ branch:
+ dev: luminous
+ setup-mgr-node: True
+ check-for-healthy: True
+ roles:
+ - mon.a
+ - mon.b
+ - mon.c
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
+- systemd:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/% b/src/ceph/qa/suites/upgrade/kraken-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/+ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..f5a883a
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,33 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with clients 0-3 on a separate third node
+ and client 4 on a fourth. Use xfs beneath the osds.
+ CephFS tests run on clients 2 and 3.
+roles:
+- - mon.a
+ - mgr.x
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+- - client.4
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - (POOL_APP_NOT_ENABLED)
+ - overall HEALTH_
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/kraken.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/kraken.yaml
new file mode 100644
index 0000000..de0893c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/kraken.yaml
@@ -0,0 +1,39 @@
+meta:
+- desc: |
+ install ceph/kraken latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ branch: kraken
+- print: "**** done installing kraken"
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - Manager daemon
+ conf:
+ global:
+ mon warn on pool no app: false
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade both hosts"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/+ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..021fcc6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..5c5a958
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..893beec
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..8befdd4
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+ full_sequential:
+ - workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..10f4b05
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..23e653d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..cff3a68
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, mgr.x]
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mds.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..f197de6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,35 @@
+meta:
+- desc: |
+ upgrade the ceph cluster,
+ upgrade in two steps
+ step one ordering: mon.a, osd.0, osd.1, mds.a
+ step two ordering: mon.b, mon.c, osd.2, osd.3
+ ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b, mon.c, mgr.x]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/4-luminous.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/4-luminous.yaml
new file mode 100644
index 0000000..80c2b9d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/4-luminous.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
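A hedged verification sketch, not in the original fragment: because the exec task runs its commands through a shell, the flag set above could be checked right after it is set, for example:

tasks:
- exec:
    osd.0:
      # assumed check; on luminous 'ceph osd dump' reports require_osd_release
      - ceph osd dump | grep require_osd_release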
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/5-workload.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/5-workload.yaml
new file mode 100644
index 0000000..851c5c8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/5-workload.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd on the non-upgraded client.4
+ (covers issue http://tracker.ceph.com/issues/21660)
+tasks:
+ - workunit:
+ branch: kraken
+ clients:
+ client.4:
+ - rbd/import_export.sh
+ - print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..5c72153
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous-with-mgr.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/+ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d2629c0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 5-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 5-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..d8b3dcb
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..922a9da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..ab6276e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ librados C and C++ api tests
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ branch: kraken
+ clients:
+ client.1:
+ - rados/test.sh
+ - print: "**** done rados/test.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..aaf0a37
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_import_export.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..46e1355
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rbd_import_export.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rgw_swift.yaml b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..7a7659f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ swift api tests for rgw
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - rgw: [client.1]
+ - print: "**** done rgw 4-final-workload"
+ - swift:
+ client.1:
+ rgw_server: client.1
+ - print: "**** done swift 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/distros b/src/ceph/qa/suites/upgrade/kraken-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/parallel/objectstore b/src/ceph/qa/suites/upgrade/kraken-x/parallel/objectstore
new file mode 120000
index 0000000..016cbf9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/parallel/objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/% b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/%
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/0-cluster b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/0-cluster
new file mode 120000
index 0000000..3580937
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/0-cluster
@@ -0,0 +1 @@
+../stress-split/0-cluster/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/1-kraken-install b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/1-kraken-install
new file mode 120000
index 0000000..d4bcb5a
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/1-kraken-install
@@ -0,0 +1 @@
+../stress-split/1-kraken-install/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/2-partial-upgrade b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/2-partial-upgrade
new file mode 120000
index 0000000..ab35fc1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/2-partial-upgrade
@@ -0,0 +1 @@
+../stress-split/2-partial-upgrade/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..edae7b3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/4-ec-workload.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/4-ec-workload.yaml
new file mode 100644
index 0000000..c89551e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/4-ec-workload.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/5-finish-upgrade.yaml
new file mode 120000
index 0000000..a66a7dc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../stress-split/5-finish-upgrade.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..01d44cc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../stress-split/6-luminous-with-mgr.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/7-final-workload.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/7-final-workload.yaml
new file mode 100644
index 0000000..50a1465
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/7-final-workload.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
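For reference only, the same jerasure k=3/m=1 profile could be created by hand with the standard CLI; this is a sketch rather than part of the suite, and it assumes a running cluster reachable from client.0:

tasks:
- exec:
    client.0:
      # assumed illustration of the profile the rados task builds implicitly
      - ceph osd erasure-code-profile set jerasure31profile k=3 m=1 plugin=jerasure technique=reed_sol_van crush-failure-domain=osd
      - ceph osd erasure-code-profile get jerasure31profile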
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/distros b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/objectstore b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/objectstore
new file mode 120000
index 0000000..016cbf9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/% b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/%
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/+ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..b8a28f9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,27 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/kraken.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/kraken.yaml
new file mode 100644
index 0000000..145c2c8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/kraken.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: install ceph/kraken latest
+tasks:
+- install:
+ branch: kraken
+- print: "**** done install kraken"
+- ceph:
+- print: "**** done ceph"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/firsthalf.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..87fa1d5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ install and upgrade ceph/-x on one node only
+ 1st half
+ restart: osd.0, osd.1, osd.2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..b3fddef
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/+ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/radosbench.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/radosbench.yaml
new file mode 100644
index 0000000..626ae8e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+- print: "**** done radosbench 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-cls.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 0000000..7f4b06b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-import-export.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..b8b6ad3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd_api.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 0000000..a5ae1e5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/readwrite.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/readwrite.yaml
new file mode 100644
index 0000000..41e34d6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..f56d0de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/5-finish-upgrade.yaml
new file mode 100644
index 0000000..1d528cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install.upgrade:
+ osd.3:
+ client.0:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..5c72153
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous-with-mgr.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/+ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rbd-python.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rbd-python.yaml
new file mode 100644
index 0000000..24c2644
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rbd-python.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ branch: kraken
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rgw-swift.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rgw-swift.yaml
new file mode 100644
index 0000000..76e5d6f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/rgw-swift.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/snaps-many-objects.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/distros b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/bluestore.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/bluestore.yaml
new file mode 120000
index 0000000..d644598
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../../objectstore/bluestore.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/filestore-xfs.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/filestore-xfs.yaml
new file mode 120000
index 0000000..03750e5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../../../objectstore/filestore-xfs.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/kraken-x/stress-split/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/kraken-x/stress-split/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/% b/src/ceph/qa/suites/upgrade/luminous-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/+ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..3684b1e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate third node for clients 0,1,2,3 and a fourth node for client.4.
+ Use xfs beneath the osds.
+ CephFS tests run on clients 2 and 3.
+roles:
+- - mon.a
+ - mgr.x
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+- - client.4
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - (POOL_APP_NOT_ENABLED)
+ - overall HEALTH_
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ mon:
+ mon warn on osd down out interval zero: false
+ osd:
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml
new file mode 100644
index 0000000..3d57f79
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml
@@ -0,0 +1,43 @@
+meta:
+- desc: |
+ install ceph/luminous latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ branch: luminous
+- print: "**** done installing luminous"
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - Manager daemon
+ conf:
+ global:
+ mon warn on pool no app: false
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph osd set-require-min-compat-client luminous
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade both hosts"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/+ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..021fcc6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..5c5a958
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..e4cc9f9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..874a8c5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..81563c9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..e17207d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..cff3a68
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, mgr.x]
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mds.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..f197de6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,35 @@
+meta:
+- desc: |
+ upgrade the ceph cluster,
+ upgrade in two steps
+ step one ordering: mon.a, osd.0, osd.1, mds.a
+ step two ordering: mon.b, mon.c, osd.2, osd.3
+ ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b, mon.c, mgr.x]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d2629c0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 5-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 5-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..d8b3dcb
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..922a9da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..a42b7d2
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ librados C and C++ api tests
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ branch: luminous
+ clients:
+ client.1:
+ - rados/test.sh
+ - print: "**** done rados/test.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..aaf0a37
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_no_upgrated.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_no_upgrated.yaml
new file mode 100644
index 0000000..5de8a23
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_no_upgrated.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+ on the non-upgraded client
+tasks:
+ - workunit:
+ branch: luminous
+ clients:
+ client.4:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 4-final-workload on NO upgrated client"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_upgrated.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_upgrated.yaml
new file mode 100644
index 0000000..2c7c484
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export_upgrated.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+ on the upgraded client
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 4-final-workload on upgrated client"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..7a7659f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ swift api tests for rgw
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - rgw: [client.1]
+ - print: "**** done rgw 4-final-workload"
+ - swift:
+ client.1:
+ rgw_server: client.1
+ - print: "**** done swift 4-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/distros b/src/ceph/qa/suites/upgrade/luminous-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/parallel/objectstore b/src/ceph/qa/suites/upgrade/luminous-x/parallel/objectstore
new file mode 120000
index 0000000..016cbf9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/parallel/objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/% b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/%
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/centos_latest.yaml b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/centos_latest.yaml
new file mode 120000
index 0000000..b5973b9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/centos_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/centos_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/ubuntu_latest.yaml b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/ubuntu_latest.yaml
new file mode 120000
index 0000000..cc5b15b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/distros/ubuntu_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/ubuntu_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/point-to-point-upgrade.yaml b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/point-to-point-upgrade.yaml
new file mode 100644
index 0000000..4c81c34
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/point-to-point-x/point-to-point-upgrade.yaml
@@ -0,0 +1,225 @@
+meta:
+- desc: |
+ Run ceph on two nodes, using one of them as a client,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+ install ceph/luminous v12.2.2 point version
+ run workload and upgrade-sequence in parallel
+ install ceph/luminous latest version
+ run workload and upgrade-sequence in parallel
+ install ceph/-x version (luminous or master/mimic)
+ run workload and upgrade-sequence in parallel
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - scrub
+ - osd_map_max_advance
+ - wrongly marked
+ fs: xfs
+ conf:
+ mon:
+ mon debug unsafe allow tier with nonempty snaps: true
+ mon warn on pool no app: false
+ osd:
+ osd map max advance: 1000
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version"
+ client:
+ rgw_crypt_require_ssl: false
+ rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- print: "**** v12.2.2 about to install"
+- install:
+ tag: v12.2.2
+ # the line below can be removed; it is left over from the jewel test
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
+- print: "**** done v12.2.2 install"
+- ceph:
+ fs: xfs
+ add_osds_to_crush: true
+- print: "**** done ceph xfs"
+- sequential:
+ - workload
+- print: "**** done workload"
+- install.upgrade:
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ mon.a:
+ branch: luminous
+ mon.b:
+ branch: luminous
+ # Note that client.1 IS NOT upgraded at this point
+- parallel:
+ - workload_luminous
+ - upgrade-sequence_luminous
+- print: "**** done parallel luminous branch"
+- install.upgrade:
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ client.1:
+ branch: luminous
+- print: "**** done branch: luminous install.upgrade on client.1"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
+- parallel:
+ - workload_x
+ - upgrade-sequence_x
+- print: "**** done parallel -x branch"
+- exec:
+ osd.0:
+ - ceph osd set-require-min-compat-client luminous
+# Run librados tests on the -x upgraded cluster
+- install.upgrade:
+ client.1:
+- workunit:
+ branch: luminous
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+- print: "**** done final test on -x cluster"
+#######################
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+workload_luminous:
+ full_sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload_luminous"
+ - sequential:
+ - rgw: [client.0]
+ - print: "**** done rgw workload_luminous"
+ - s3tests:
+ client.0:
+ force-branch: ceph-luminous
+ rgw_server: client.0
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_luminous"
+upgrade-sequence_luminous:
+ sequential:
+ - print: "**** done branch: luminous install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all luminous branch mds/osd/mon"
+workload_x:
+ sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload_x NOT upgraded client"
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload_x upgraded client"
+ - rgw: [client.1]
+ - print: "**** done rgw workload_x"
+ - s3tests:
+ client.1:
+ force-branch: ceph-luminous
+ rgw_server: client.1
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_x"
+upgrade-sequence_x:
+ sequential:
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart:
+ daemons: [osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mgr.x]
+ wait-for-healthy: false
+ - exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph.healthy:
+ - print: "**** done ceph.restart all -x branch mds/osd/mon"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/% b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/%
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster
new file mode 120000
index 0000000..3580937
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster
@@ -0,0 +1 @@
+../stress-split/0-cluster/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-ceph-install b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-ceph-install
new file mode 120000
index 0000000..0479ac5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-ceph-install
@@ -0,0 +1 @@
+../stress-split/1-ceph-install/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade
new file mode 120000
index 0000000..ab35fc1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade
@@ -0,0 +1 @@
+../stress-split/2-partial-upgrade/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..edae7b3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml
new file mode 100644
index 0000000..c89551e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml
new file mode 120000
index 0000000..a66a7dc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../stress-split/5-finish-upgrade.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml
new file mode 100644
index 0000000..50a1465
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096 It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery becomes
+# necessary.
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
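For reference, the erasure code profile defined above is what one could also create by hand with the ceph CLI before running a comparable rados workload; a minimal sketch, assuming an already-running cluster (the pool name "ecpool" and the pg counts are illustrative assumptions, not part of this suite):

    # create the same jerasure k=3/m=1 profile the YAML describes
    ceph osd erasure-code-profile set jerasure31profile \
        plugin=jerasure k=3 m=1 technique=reed_sol_van crush-failure-domain=osd
    ceph osd erasure-code-profile get jerasure31profile
    # hypothetical pool using that profile (name and pg counts are assumptions)
    ceph osd pool create ecpool 32 32 erasure jerasure31profile

With k=3 the 1376-byte chunks give exactly the stripe_width of 1376*3 = 4128 noted in the comment at the top of the file.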
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/distros b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/distros
@@ -0,0 +1 @@
+../../../../distros/supported/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore
new file mode 120000
index 0000000..016cbf9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/% b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/%
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..e3ad918
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,29 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ mon:
+ mon warn on osd down out interval zero: false
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml
new file mode 100644
index 0000000..2230525
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: install ceph/luminous latest
+tasks:
+- install:
+ branch: luminous
+- print: "**** done install luminous"
+- ceph:
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph osd set-require-min-compat-client luminous
+- print: "**** done ceph "
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on osd down out interval zero: false
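The two exec commands above gate the upgrade: require-osd-release refuses to let pre-luminous OSDs join, and set-require-min-compat-client rejects older clients. A minimal sketch of checking those flags by hand after this step, assuming shell access to a monitor node (the grep patterns are illustrative only):

    # both values should report "luminous" once the exec task has run
    ceph osd dump | grep require_osd_release
    ceph osd dump | grep compat_client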
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..87fa1d5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+   install upgrade to ceph/-x on one node only
+   1st half
+   restart: osd.0,1,2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..b3fddef
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/+ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml
new file mode 100644
index 0000000..626ae8e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+- print: "**** done radosbench 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 0000000..f8cc4d8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..30a677a
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 0000000..9079aa3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml
new file mode 100644
index 0000000..41e34d6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..f56d0de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml
new file mode 100644
index 0000000..1d528cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install.upgrade:
+ osd.3:
+ client.0:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rbd-python.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rbd-python.yaml
new file mode 100644
index 0000000..92fe658
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rbd-python.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml
new file mode 100644
index 0000000..76e5d6f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/distros b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore.yaml
new file mode 120000
index 0000000..d644598
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore.yaml
@@ -0,0 +1 @@
+../../../../../objectstore/bluestore.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml
new file mode 120000
index 0000000..03750e5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml
@@ -0,0 +1 @@
+../../../../../objectstore/filestore-xfs.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml
\ No newline at end of file