From 812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 Mon Sep 17 00:00:00 2001
From: Qiaowei Ren
Date: Thu, 4 Jan 2018 13:43:33 +0800
Subject: initial code repo

This patch creates the initial code repo.

For Ceph, the luminous stable release is used as the base code;
subsequent changes and optimizations for Ceph will be added on top of
it. For OpenSDS, any changes can currently be upstreamed into the
original OpenSDS repo (https://github.com/opensds/opensds), so stor4nfv
directly clones the OpenSDS code to deploy the stor4nfv environment.
The deployment scripts based on Ceph and OpenSDS are placed in the 'ci'
directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren
---
 .../qa/suites/upgrade/hammer-jewel-x/parallel/%    |  0
 .../hammer-jewel-x/parallel/0-cluster/start.yaml   | 21 ++++++
 .../1-hammer-jewel-install/hammer-jewel.yaml       | 20 ++++++
 .../upgrade/hammer-jewel-x/parallel/2-workload/+   |  0
 .../parallel/2-workload/ec-rados-default.yaml      | 20 ++++++
 .../parallel/2-workload/rados_api.yaml             |  8 +++
 .../parallel/2-workload/rados_loadgenbig.yaml      |  8 +++
 .../parallel/2-workload/test_rbd_api.yaml          |  8 +++
 .../parallel/2-workload/test_rbd_python.yaml       |  8 +++
 .../parallel/3-upgrade-sequence/upgrade-all.yaml   | 18 +++++
 .../3-upgrade-sequence/upgrade-osd-mds-mon.yaml    | 36 ++++++++++
 .../hammer-jewel-x/parallel/3.5-finish.yaml        |  5 ++
 .../upgrade/hammer-jewel-x/parallel/4-jewel.yaml   |  1 +
 .../5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml   | 14 ++++
 .../upgrade/hammer-jewel-x/parallel/6-workload/+   |  0
 .../parallel/6-workload/ec-rados-default.yaml      | 29 ++++++++
 .../parallel/6-workload/rados_api.yaml             | 11 +++
 .../parallel/6-workload/rados_loadgenbig.yaml      | 11 +++
 .../parallel/6-workload/test_rbd_api.yaml          | 11 +++
 .../parallel/6-workload/test_rbd_python.yaml       | 11 +++
 .../parallel/7-upgrade-sequence/upgrade-all.yaml   | 10 +++
 .../7-upgrade-sequence/upgrade-by-daemon.yaml      | 30 ++++++++
 .../hammer-jewel-x/parallel/8-luminous.yaml        |  1 +
 .../hammer-jewel-x/parallel/9-final-workload/+     |  0
 .../9-final-workload/rados-snaps-few-objects.yaml  | 13 ++++
 .../9-final-workload/rados_loadgenmix.yaml         |  6 ++
 .../9-final-workload/rados_mon_thrash.yaml         | 11 +++
 .../parallel/9-final-workload/rbd_cls.yaml         |  6 ++
 .../9-final-workload/rbd_import_export.yaml        |  8 +++
 .../parallel/9-final-workload/rgw_s3tests.yaml     | 11 +++
 .../suites/upgrade/hammer-jewel-x/parallel/distros |  1 +
 .../qa/suites/upgrade/hammer-jewel-x/stress-split/% |  0
 .../upgrade/hammer-jewel-x/stress-split/0-cluster  |  1 +
 .../hammer-to-jewel.yaml                           | 83 ++++++++++++++++++++++
 .../hammer-jewel-x/stress-split/2-partial-upgrade  |  1 +
 .../upgrade/hammer-jewel-x/stress-split/3-thrash   |  1 +
 .../upgrade/hammer-jewel-x/stress-split/4-workload |  1 +
 .../stress-split/5-finish-upgrade.yaml             |  1 +
 .../hammer-jewel-x/stress-split/6-luminous.yaml    |  1 +
 .../hammer-jewel-x/stress-split/7-final-workload   |  1 +
 .../upgrade/hammer-jewel-x/stress-split/distros    |  1 +
 .../qa/suites/upgrade/hammer-jewel-x/tiering/%     |  0
 .../hammer-jewel-x/tiering/0-cluster/start.yaml    | 17 +++++
 .../hammer-to-jewel.yaml                           | 13 ++++
 .../hammer-jewel-x/tiering/2-setup-cache-tiering/% |  0
 .../0-create-base-tier/create-ec-pool.yaml         |  6 ++
 .../0-create-base-tier/create-replicated-pool.yaml |  5 ++
 .../2-setup-cache-tiering/1-create-cache-tier.yaml | 14 ++++
 .../upgrade/hammer-jewel-x/tiering/3-upgrade.yaml  | 52 ++++++++++++++
 49 files changed, 535 insertions(+)
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/%
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-by-daemon.yaml
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/%
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload
 create mode 120000 src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
 create mode 100644 src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml

(limited to 'src/ceph/qa/suites/upgrade/hammer-jewel-x')
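A note for readers new to ceph-qa on how these suite trees are consumed
(my summary of teuthology's suite conventions, not part of the patch
itself): an empty '%' file marks a directory whose numbered
subdirectories are convolved, i.e. one YAML fragment is chosen from
each facet to build a job; an empty '+' file marks a directory whose
fragments are all concatenated into the same job; and the 120000-mode
entries are symlinks that reuse fragments from other suites. A minimal
sketch with hypothetical fragment names:

    # suite/%                     -> convolve the numbered facets
    # suite/0-cluster/start.yaml  -> only one choice in this facet
    # suite/1-workload/a.yaml     -> two choices here, so two jobs
    # suite/1-workload/b.yaml
    #
    # job 1 = start.yaml merged with a.yaml, roughly:
    roles:
    - [mon.a, osd.0, osd.1, client.0]
    tasks:
    - install:
    - ceph:
    - workunit:
        clients:
          client.0: [rados/load-gen-mix.sh]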
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/%
new file mode 100644
index 0000000..e69de29
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..bbddfb3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,21 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+        mon debug unsafe allow tier with nonempty snaps: true
+    log-whitelist:
+    - but it is still running
+    - wrongly marked me down
+    - reached quota
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - mgr.x
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
+  - client.1
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml
new file mode 100644
index 0000000..c57e071
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/hammer-jewel.yaml
@@ -0,0 +1,20 @@
+tasks:
+- install:
+    branch: hammer
+    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done hammer"
+- ceph:
+    fs: xfs
+    skip_mgr_daemons: true
+    add_osds_to_crush: true
+- install.upgrade:
+    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+    osd.0:
+      branch: jewel
+    osd.2:
+      branch: jewel
+- print: "**** done osd.0 and osd.2 upgraded packages to jewel"
+- parallel:
+  - workload
+  - upgrade-sequence
+- print: "**** done parallel"
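The `parallel` task at the end refers to top-level sections named
`workload` and `upgrade-sequence`, which are contributed by the
2-workload and 3-upgrade-sequence facets; teuthology runs the entries
of the named sections concurrently, so the I/O load keeps running while
daemons are restarted. Schematically (a sketch, not one of the files in
this patch):

    tasks:
    - parallel:
      - workload           # runs while the upgrade proceeds
      - upgrade-sequence
    workload:              # provided by a 2-workload/*.yaml fragment
      full_sequential:
      - rados:
          clients: [client.0]
          ops: 100
          objects: 10
    upgrade-sequence:      # provided by a 3-upgrade-sequence/*.yaml fragment
      sequential:
      - ceph.restart:
          daemons: [osd.0]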
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..e4f3ee1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,20 @@
+workload:
+  full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..d86c2d2
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,8 @@
+workload:
+  full_sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - cls
+  - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..50ba808
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,8 @@
+workload:
+  full_sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rados/load-gen-big.sh
+  - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..997f7ba
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,8 @@
+workload:
+  full_sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rbd/test_librbd.sh
+  - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..d1046da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,8 @@
+workload:
+  full_sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rbd/test_librbd_python.sh
+  - print: "**** done rbd/test_librbd_python.sh 2-workload"
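In these rados fragments, `op_weights` are relative frequencies rather
than counts: each of the 4000 operations is drawn with probability
proportional to its weight (my reading of the qa rados task, so treat
the percentages as illustrative). With the weights used above:

    op_weights:        # total weight = 100+0+100+5*50+2*25 = 500
      read: 100        # ~100/500 = 20% of ops
      write: 0         # plain writes never issued on the EC pool
      append: 100      # ~20%
      delete: 50       # ~10% each for delete, snap_create,
      snap_create: 50  #   snap_remove, rollback and copy_from
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25      # ~5% each for setattr and rmattr
      rmattr: 25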
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..1aaeac8
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,18 @@
+upgrade-sequence:
+  sequential:
+  - ceph.restart:
+      daemons: [osd.0, osd.1, osd.2, osd.3]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - ceph.restart:
+      daemons: [mon.a, mon.b, mon.c]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - print: "**** done ceph.restart do not wait for healthy"
+  - exec:
+      mon.a:
+      - sleep 300 # http://tracker.ceph.com/issues/17808
+      - ceph osd set sortbitwise
+      - ceph osd set require_jewel_osds
+  - ceph.healthy:
+  - print: "**** done ceph.healthy"
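The `sleep 300` works around the monitor-sync race noted in the
referenced tracker ticket; `sortbitwise` and `require_jewel_osds` are
only accepted once every OSD in the map is running jewel, which is why
the flags are set after the restarts rather than before. A quick way to
confirm the flags took effect (an illustrative check, not part of the
suite):

    - exec:
        mon.a:
        - ceph osd dump | grep flags
        # expected to include: flags sortbitwise,require_jewel_osds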
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml
new file mode 100644
index 0000000..f2093da
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/upgrade-osd-mds-mon.yaml
@@ -0,0 +1,36 @@
+upgrade-sequence:
+  sequential:
+  - ceph.restart:
+      daemons: [osd.0, osd.1]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [osd.2, osd.3]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [mon.a]
+      wait-for-healthy: false
+  - sleep:
+      duration: 60
+  - print: "**** running mixed versions of osds and mons"
+#do we need to use "ceph osd crush tunables hammer" ?
+  - exec:
+      mon.b:
+      - sudo ceph osd crush tunables hammer
+  - print: "**** done ceph osd crush tunables hammer"
+  - ceph.restart:
+      daemons: [mon.b, mon.c]
+      wait-for-healthy: false
+  - sleep:
+      duration: 30
+  - exec:
+      osd.0:
+      - sleep 300 # http://tracker.ceph.com/issues/17808
+      - ceph osd set sortbitwise
+      - ceph osd set require_jewel_osds
+  - ceph.healthy:
+  - sleep:
+      duration: 60
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml
new file mode 100644
index 0000000..60a3cb6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3.5-finish.yaml
@@ -0,0 +1,5 @@
+tasks:
+- install.upgrade:
+    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+    client.0:
+      branch: jewel
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml
new file mode 120000
index 0000000..987c18c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/4-jewel.yaml
@@ -0,0 +1 @@
+../../../../releases/jewel.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml
new file mode 100644
index 0000000..ab41db6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml
@@ -0,0 +1,14 @@
+tasks:
+  - install.upgrade:
+      exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+      client.0:
+        branch: jewel
+  - print: "**** done install.upgrade client.0 to jewel"
+  - install.upgrade:
+      osd.0:
+      osd.2:
+  - print: "**** done install.upgrade daemons to x"
+  - parallel:
+    - workload2
+    - upgrade-sequence2
+  - print: "**** done parallel workload2 and upgrade-sequence2"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/+
new file mode 100644
index 0000000..e69de29
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..9818541
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/ec-rados-default.yaml
@@ -0,0 +1,29 @@
+meta:
+- desc: |
+   run a randomized correctness test for rados operations
+   on an erasure-coded pool
+workload2:
+  full_sequential:
+  - rados:
+      erasure_code_profile:
+        name: teuthologyprofile2
+        k: 2
+        m: 1
+        crush-failure-domain: osd
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
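The erasure-code profile used above (k=2, m=1) splits each object into
two data chunks plus one coding chunk, so the pool survives the loss of
any single OSD; `crush-failure-domain: osd` places chunks per OSD
rather than per host, which suits these small test clusters. Created by
hand, the equivalent profile and pool would look roughly like this (a
sketch mirroring the tiering suite's commands later in this patch; the
pool name is hypothetical):

    - exec:
        client.0:
        - ceph osd erasure-code-profile set teuthologyprofile2 k=2 m=1 crush-failure-domain=osd
        - ceph osd pool create ecpool 4 4 erasure teuthologyprofile2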
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml
new file mode 100644
index 0000000..088976b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   object class functional tests
+workload2:
+  full_sequential:
+  - workunit:
+      branch: jewel
+      clients:
+        client.0:
+        - cls
+  - print: "**** done cls 6-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml
new file mode 100644
index 0000000..30f1307
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   generate read/write load with rados objects ranging from 1MB to 25MB
+workload2:
+  full_sequential:
+  - workunit:
+      branch: jewel
+      clients:
+        client.0:
+        - rados/load-gen-big.sh
+  - print: "**** done rados/load-gen-big.sh 6-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..e21839b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   librbd C and C++ api tests
+workload2:
+  full_sequential:
+  - workunit:
+      branch: jewel
+      clients:
+        client.0:
+        - rbd/test_librbd.sh
+  - print: "**** done rbd/test_librbd.sh 6-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..cae2c06
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+   librbd python api tests
+workload2:
+  full_sequential:
+  - workunit:
+      branch: jewel
+      clients:
+        client.0:
+        - rbd/test_librbd_python.sh
+  - print: "**** done rbd/test_librbd_python.sh 6-workload"
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..356f8ad
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+   upgrade the ceph cluster
+upgrade-sequence2:
+  sequential:
+  - ceph.restart:
+      daemons: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - print: "**** done ceph.restart all"
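As with 3-upgrade-sequence, the 7-upgrade-sequence facet provides two
alternative strategies: restart everything at once (above) or restart
in stages (below). The suite-level '%' convolution schedules a job for
every combination of fragments, so a back-of-the-envelope count for
this parallel sub-suite (assuming the distros symlink expands to N
supported distros) is:

    # multi-fragment facets in parallel/:
    #   3-upgrade-sequence: 2  (upgrade-all, upgrade-osd-mds-mon)
    #   7-upgrade-sequence: 2  (upgrade-all, upgrade-by-daemon)
    #   distros:            N
    # jobs scheduled = 2 * 2 * N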
"**** done ceph osd crush tunables jewel" + - ceph.restart: + daemons: [osd.2, osd.3] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml new file mode 120000 index 0000000..5283ac7 --- /dev/null +++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/8-luminous.yaml @@ -0,0 +1 @@ +../../../../releases/luminous.yaml \ No newline at end of file diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/+ new file mode 100644 index 0000000..e69de29 diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml new file mode 100644 index 0000000..e0b0ba1 --- /dev/null +++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados-snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.1] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 +- print: "**** done 7-final-workload/rados-snaps-few-objects.yaml" diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml new file mode 100644 index 0000000..b1c6791 --- /dev/null +++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_loadgenmix.yaml @@ -0,0 +1,6 @@ +tasks: + - workunit: + clients: + client.1: + - rados/load-gen-mix.sh + - print: "**** done 7-final-workload/rados_loadgenmix.yaml" diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml new file mode 100644 index 0000000..807afb9 --- /dev/null +++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rados_mon_thrash.yaml @@ -0,0 +1,11 @@ +tasks: + - sequential: + - mon_thrash: + revive_delay: 20 + thrash_delay: 1 + - workunit: + branch: jewel + clients: + client.1: + - rados/test-upgrade-v11.0.0.sh + - print: "**** done rados/test-upgrade-v11.0.0.sh from 7-final-workload" diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml new file mode 100644 index 0000000..973c438 --- /dev/null +++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_cls.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + client.1: + - cls/test_cls_rbd.sh +- print: "**** done 7-final-workload/rbd_cls.yaml" diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml new file mode 100644 index 0000000..d8116a9 --- /dev/null +++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rbd_import_export.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + clients: + client.1: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh from 7-final-workload" diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml 
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml
new file mode 100644
index 0000000..f1cf2de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/rgw_s3tests.yaml
@@ -0,0 +1,11 @@
+tasks:
+- rgw: [client.1]
+- s3tests:
+    client.1:
+      rgw_server: client.1
+- print: "**** done rgw_server from 9-final-workload"
+overrides:
+  ceph:
+    conf:
+      client:
+        rgw lc debug interval: 10
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/%
new file mode 100644
index 0000000..e69de29
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster
new file mode 120000
index 0000000..9bb7a0d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/0-cluster
@@ -0,0 +1 @@
+../../jewel-x/stress-split/0-cluster
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml
new file mode 100644
index 0000000..212b8ff
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml
@@ -0,0 +1,83 @@
+tasks:
+- install:
+    branch: hammer
+    exclude_packages:
+    - ceph-mgr
+    - libcephfs2
+    - libcephfs-devel
+    - libcephfs-dev
+- print: '**** done hammer'
+- ceph:
+    fs: xfs
+    skip_mgr_daemons: true
+    add_osds_to_crush: true
+- install.upgrade:
+    exclude_packages:
+    - ceph-mgr
+    - libcephfs2
+    - libcephfs-devel
+    - libcephfs-dev
+    osd.0:
+      branch: jewel
+    osd.3:
+      branch: jewel
+- print: '**** done osd.0 and osd.3 upgraded packages to jewel'
+- parallel:
+  - workload-h-j
+  - upgrade-sequence-h-j
+- print: '**** done parallel'
+- install.upgrade:
+    client.0:
+      branch: jewel
+    exclude_packages:
+    - ceph-mgr
+    - libcephfs2
+    - libcephfs-devel
+    - libcephfs-dev
+- exec:
+    osd.0:
+    - ceph osd set sortbitwise
+    - ceph osd set require_jewel_osds
+    - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ;
+      done
+- print: '**** done install.upgrade client.0 to jewel'
+upgrade-sequence-h-j:
+  sequential:
+  - ceph.restart:
+      daemons:
+      - osd.0
+      - osd.1
+      - osd.2
+      - osd.3
+      - osd.4
+      - osd.5
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - ceph.restart:
+      daemons:
+      - mon.a
+      - mon.b
+      - mon.c
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - print: '**** done ceph.restart do not wait for healthy'
+  - exec:
+      mon.a:
+      - sleep 300
+      - ceph osd set require_jewel_osds
+  - ceph.healthy: null
+  - print: '**** done ceph.healthy'
+workload-h-j:
+  full_sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - cls
+  - print: "**** done cls workload-h-j"
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rbd/test_librbd.sh
+  - print: "**** done rbd/test_librbd.sh workload-h-j"
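The pool loop above is a jewel upgrade requirement: jewel switched
hit-set object names to GMT timestamps, and `use_gmt_hitset` has to be
enabled on every pool of a cluster that came from hammer before the
upgrade can be considered complete. Expanded for readability
(equivalent shell, as I read the folded YAML scalar):

    - exec:
        osd.0:
        - |
          for p in `ceph osd pool ls` ; do
            ceph osd pool set $p use_gmt_hitset true
          done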
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade
new file mode 120000
index 0000000..fad7148
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/2-partial-upgrade
@@ -0,0 +1 @@
+../../jewel-x/stress-split/2-partial-upgrade/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash
new file mode 120000
index 0000000..894fdeb
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/3-thrash
@@ -0,0 +1 @@
+../../jewel-x/stress-split/3-thrash/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload
new file mode 120000
index 0000000..6135fb0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/4-workload
@@ -0,0 +1 @@
+../../jewel-x/stress-split/4-workload
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml
new file mode 120000
index 0000000..7d39ac6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../../jewel-x/stress-split/5-finish-upgrade.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/6-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload
new file mode 120000
index 0000000..97adf26
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/7-final-workload
@@ -0,0 +1 @@
+../../jewel-x/stress-split/7-final-workload/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/
\ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
new file mode 100644
index 0000000..e69de29
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
new file mode 100644
index 0000000..9cd743c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
@@ -0,0 +1,17 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+    log-whitelist:
+    - but it is still running
+    - wrongly marked me down
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
new file mode 100644
index 0000000..7485dce
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
@@ -0,0 +1,13 @@
+tasks:
+- install:
+    branch: hammer
+    exclude_packages:
+    - ceph-mgr
+    - libcephfs2
+    - libcephfs-devel
+    - libcephfs-dev
+- print: '**** done hammer'
+- ceph:
+    fs: xfs
+    skip_mgr_daemons: true
+    add_osds_to_crush: true
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
new file mode 100644
index 0000000..e69de29
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
new file mode 100644
index 0000000..f0e22bf
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+    client.0:
+    - ceph osd erasure-code-profile set t-profile crush-failure-domain=osd k=2 m=1
+    - ceph osd pool create base-pool 4 4 erasure t-profile
+    - ceph osd pool application enable base-pool rados
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
new file mode 100644
index 0000000..36dc06d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+    client.0:
+    - ceph osd pool create base-pool 4
+    - ceph osd pool application enable base-pool rados
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
new file mode 100644
index 0000000..d9cc348
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - must scrub before tier agent can activate
+tasks:
+- exec:
+    client.0:
+    - ceph osd pool create cache-pool 4
+    - ceph osd tier add base-pool cache-pool
+    - ceph osd tier cache-mode cache-pool writeback
+    - ceph osd tier set-overlay base-pool cache-pool
+    - ceph osd pool set cache-pool hit_set_type bloom
+    - ceph osd pool set cache-pool hit_set_count 8
+    - ceph osd pool set cache-pool hit_set_period 5
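For context on the cache-tier parameters above: with `hit_set_count 8`
and `hit_set_period 5`, the cache pool keeps eight bloom-filter hit
sets of five seconds each, roughly the last 40 seconds of access
history, deliberately small so the tiering agent has something to flush
and evict during a short test run. One way to eyeball the resulting
tier relationship (illustrative commands, not part of the suite):

    - exec:
        client.0:
        - ceph osd pool ls detail | grep -E 'base-pool|cache-pool'
        - ceph df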
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
new file mode 100644
index 0000000..b2fc171
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
@@ -0,0 +1,52 @@
+tasks:
+- parallel:
+  - workload
+  - upgrade-sequence
+- print: "**** done parallel"
+
+workload:
+  sequential:
+  - rados:
+      clients: [client.0]
+      pools: [base-pool]
+      ops: 4000
+      objects: 500
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        copy_from: 50
+        cache_flush: 50
+        cache_try_flush: 50
+        cache_evict: 50
+  - print: "**** done rados"
+
+upgrade-sequence:
+  sequential:
+  - install.upgrade:
+      exclude_packages:
+      - ceph-mgr
+      - libcephfs2
+      - libcephfs-devel
+      - libcephfs-dev
+      osd.0:
+        branch: jewel
+      osd.2:
+        branch: jewel
+  - print: "**** done install.upgrade osd.0 and osd.2"
+  - ceph.restart:
+      daemons: [osd.0, osd.1, osd.2, osd.3]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - ceph.restart:
+      daemons: [mon.a, mon.b, mon.c]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - print: "**** done ceph.restart do not wait for healthy"
+  - exec:
+      mon.a:
+      - sleep 300 # http://tracker.ceph.com/issues/17808
+      - ceph osd set sortbitwise
+      - ceph osd set require_jewel_osds
+  - ceph.healthy:
+  - print: "**** done ceph.healthy"
--
cgit 1.2.3-korg