author    Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-04 13:43:33 +0800
committer Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-05 11:59:39 +0800
commit    812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch)
tree      04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/qa/suites/upgrade/jewel-x
parent    15280273faafb77777eab341909a3f495cf248d9 (diff)
initial code repo

This patch creates the initial code repo. For ceph, the luminous stable release will be used as the base code, and subsequent changes and optimizations for ceph will be added on top of it. For opensds, any changes can currently be upstreamed into the original opensds repo (https://github.com/opensds/opensds), so stor4nfv will directly clone the opensds code to deploy the stor4nfv environment. The scripts for deployment based on ceph and opensds will be put into the 'ci' directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
Diffstat (limited to 'src/ceph/qa/suites/upgrade/jewel-x')
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/%  0
l---------  src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml  1
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml  82
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/%  0
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+  0
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml  4
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml  32
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml  60
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml  11
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml  14
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml  41
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml  24
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml  11
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml  11
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml  11
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml  12
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml  38
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml  23
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml  11
l---------  src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml  1
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml  8
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+  0
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml  13
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml  17
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml  9
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml  18
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml  9
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml  11
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml  13
l---------  src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/parallel/distros  1
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/%  0
l---------  src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml  1
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml  236
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%  0
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade  1
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml  25
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml  22
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml  1
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml  35
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml  1
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/%  0
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+  0
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml  6
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml  20
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml  13
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml  1
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml  12
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml  25
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+  0
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml  40
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml  10
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml  12
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml  10
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml  16
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml  18
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml  9
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml  1
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+  0
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml  9
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml  11
-rw-r--r--  src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml  16
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros  1
l---------  src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml  1
72 files changed, 1048 insertions, 0 deletions
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/% b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml
new file mode 120000
index 0000000..b5973b9
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/centos_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/centos_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml
new file mode 120000
index 0000000..cc5b15b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/ubuntu_latest.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported/ubuntu_latest.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml
new file mode 100644
index 0000000..9adede7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/jewel-luminous.yaml
@@ -0,0 +1,82 @@
+meta:
+- desc: |
+ Set up a 4-node ceph cluster using ceph-deploy, with the latest
+ stable jewel as the initial release. Upgrade to luminous, also
+ setting up the mgr nodes after the upgrade, and check that the
+ cluster reaches a healthy state. After the upgrade, run the kernel
+ tar/untar task and the systemd task. This test will detect
+ ceph upgrade issues and systemd issues.
+overrides:
+ ceph-deploy:
+ fs: xfs
+ conf:
+ global:
+ mon pg warn min per osd: 2
+ osd:
+ osd pool default size: 2
+ osd objectstore: filestore
+ osd sloppy crc: true
+ client:
+ rbd default features: 5 # bitmask; decoded in the sketch after this hunk
+openstack:
+- machine:
+ disk: 100
+- volumes:
+ count: 3
+ size: 30
+# reluctantly :( hard-coded machine type
+# it will override command-line args passed to teuthology-suite
+machine_type: vps
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mgr.y
+- - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+- - osd.6
+ - osd.7
+ - osd.8
+ - client.0
+tasks:
+- ssh-keys:
+- print: "**** done ssh-keys"
+- ceph-deploy:
+ branch:
+ stable: jewel
+ skip-mgr: True
+- print: "**** done initial ceph-deploy"
+- ceph-deploy.upgrade:
+ branch:
+ dev: luminous
+ setup-mgr-node: True
+ check-for-healthy: True
+ roles:
+ - mon.a
+ - mon.b
+ - mon.c
+ - osd.6
+- print: "**** done ceph-deploy upgrade"
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph osd set-require-min-compat-client luminous
+- print: "**** done `ceph osd require-osd-release luminous`"
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
+- print: "**** done kernel_untar_build.sh"
+- systemd:
+- print: "**** done systemd"
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
+- print: "**** done rados/load-gen-mix.sh"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/% b/src/ceph/qa/suites/upgrade/jewel-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..d1f1e10
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,32 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with clients 0-3 on a separate third node and client 4 on a fourth.
+ Use xfs beneath the osds.
+ CephFS tests run on clients 2 and 3.
+roles:
+- - mon.a
+ - mds.a
+ - mgr.x
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+- - client.4
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - \(MDS_FAILED\)
+ - \(OBJECT_
+ - is unresponsive
+ conf:
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..c64b2cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
@@ -0,0 +1,60 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.1:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.2:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.3:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+meta:
+- desc: |
+ install ceph/jewel latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done installing jewel"
+- ceph:
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - is unresponsive
+ conf:
+ global:
+ mon warn on pool no app: false
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade mon.a and mon.b"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml
new file mode 100644
index 0000000..83457c0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml
@@ -0,0 +1,11 @@
+# do not require luminous osds at mkfs time; only set the flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets) and verify we are healthy. (A JSON-parsing
+# variant of the grep check below is sketched after this hunk.)
+tasks:
+- full_sequential_finally:
+ - ceph.osd_scrub_pgs:
+ cluster: ceph
+ - exec:
+ mon.a:
+ - ceph pg dump -f json-pretty
+ - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..56eedbd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse: [client.2]
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml
new file mode 100644
index 0000000..dfbcbea
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml
@@ -0,0 +1,41 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+workload:
+ full_sequential:
+ - sequential:
+ - exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+ - rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+openstack:
+ - machine:
+ ram: 15000 # MB
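The exec block above is a complete cache-tier setup sequence. As a minimal
sketch, the same setup can be replayed outside teuthology with a plain
script, assuming "ceph" is on PATH and the cluster is reachable:

    import subprocess

    # Cache-tier setup, mirroring the exec commands in the fragment above.
    SETUP = [
        "ceph osd pool create base 4",
        "ceph osd pool create cache 4",
        "ceph osd tier add base cache",
        "ceph osd tier cache-mode cache writeback",
        "ceph osd tier set-overlay base cache",
        "ceph osd pool set cache hit_set_type bloom",
    ]
    for cmd in SETUP:
        subprocess.run(["sudo"] + cmd.split(), check=True)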
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..fb9d30f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+ run a randomized correctness test for rados operations
+ on an erasure-coded pool; the op_weights below are relative
+ frequencies (see the sampling sketch after this hunk)
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..348f1ae
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..15d892e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..bb2d3ea
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..6a0f829
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ - ceph.restart:
+ daemons: [mds.a, osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..2d74e9e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,38 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+ in two steps;
+ step one ordering: mon.a, osd.0, osd.1, mds.a
+ step two ordering: mon.b, mon.c, osd.2, osd.3
+ ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b, mon.c]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables jewel
+ - print: "**** done ceph osd crush tunables jewel"
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml
new file mode 100644
index 0000000..e57b377
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml
@@ -0,0 +1,23 @@
+# this is the same fragment as ../../../../releases/luminous.yaml
+# but without line "ceph osd set-require-min-compat-client luminous"
+
+tasks:
+- exec:
+ mgr.x:
+ - mkdir -p /var/lib/ceph/mgr/ceph-x
+ - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
+ - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
+- ceph.restart:
+ daemons: [mgr.x]
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+- ceph.healthy:
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on osd down out interval zero: false
+ log-whitelist:
+ - no active mgr
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml
new file mode 100644
index 0000000..f7e9de4
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd on the not-yet-upgraded client.4
+ (covers issue http://tracker.ceph.com/issues/21660)
+tasks:
+ - workunit:
+ branch: jewel
+ clients:
+ client.4:
+ - rbd/import_export.sh
+ - print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..5c72153
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous-with-mgr.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml
new file mode 100644
index 0000000..20c0ffd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml
@@ -0,0 +1,8 @@
+tasks:
+- exec:
+ mon.a:
+ - ceph osd set-require-min-compat-client jewel
+ - ceph osd crush set-all-straw-buckets-to-straw2
+ - ceph osd crush weight-set create-compat
+ - ceph osd crush weight-set reweight-compat osd.0 .9
+ - ceph osd crush weight-set reweight-compat osd.1 1.2
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d73459e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse: [client.3]
+ - print: "**** done ceph-fuse 5-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..7dd61c5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..b218b92
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..c835a65
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ librados C and C++ api tests
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+ - print: "**** done rados/test-upgrade-v11.0.0.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..46bbf76
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..5ae7491
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..780c4ad
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ swift api tests for rgw
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - rgw: [client.1]
+ - print: "**** done rgw 7-final-workload"
+ - swift:
+ client.1:
+ rgw_server: client.1
+ - print: "**** done swift 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml
new file mode 120000
index 0000000..81df389
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml
@@ -0,0 +1 @@
+5-workload.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros b/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/% b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml
new file mode 120000
index 0000000..c79327b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml
@@ -0,0 +1 @@
+../../../../../distros/all/centos_7.3.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml
new file mode 120000
index 0000000..6237042
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml
@@ -0,0 +1 @@
+../../../../../distros/all/ubuntu_14.04.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
new file mode 100644
index 0000000..d68c258
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
@@ -0,0 +1,236 @@
+meta:
+- desc: |
+ Run ceph on two nodes, using one of them as a client,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+ install ceph/jewel v10.2.0 point version
+ run workload and upgrade-sequence in parallel
+ install ceph/jewel latest version
+ run workload and upgrade-sequence in parallel
+ install ceph/-x version (jewel or kraken)
+ run workload and upgrade-sequence in parallel
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - scrub
+ - osd_map_max_advance
+ - wrongly marked
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(OSD_
+ - \(PG_
+ - \(CACHE_
+ fs: xfs
+ conf:
+ global:
+ mon warn on pool no app: false
+ mon:
+ mon debug unsafe allow tier with nonempty snaps: true
+ osd:
+ osd map max advance: 1000
+ osd map cache size: 1100
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- print: "**** v10.2.0 about to install"
+- install:
+ tag: v10.2.0
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
+- print: "**** done v10.2.0 install"
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+- print: "**** done ceph xfs"
+- sequential:
+ - workload
+- print: "**** done workload v10.2.0"
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ mon.a:
+ branch: jewel
+ mon.b:
+ branch: jewel
+ # Note that client.1 IS NOT upgraded at this point
+ #client.1:
+ #branch: jewel
+- parallel:
+ - workload_jewel
+ - upgrade-sequence_jewel
+- print: "**** done parallel jewel branch"
+- install.upgrade:
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ client.1:
+ branch: jewel
+- print: "**** done branch: jewel install.upgrade on client.1"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
+- parallel:
+ - workload_x
+ - upgrade-sequence_x
+- print: "**** done parallel -x branch"
+- exec:
+ osd.0:
+ - ceph osd set-require-min-compat-client luminous
+# Run librados tests on the -x upgraded cluster
+- install.upgrade:
+ client.1:
+- workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+ - cls
+- print: "**** done final test on -x cluster"
+#######################
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+workload_jewel:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ env:
+ CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
+ - print: "**** done rados/test.sh & cls workload_jewel"
+ - sequential:
+ - rgw: [client.0]
+ - print: "**** done rgw workload_jewel"
+ - s3tests:
+ client.0:
+ force-branch: ceph-jewel
+ rgw_server: client.0
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_jewel"
+upgrade-sequence_jewel:
+ sequential:
+ - print: "**** done branch: jewel install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all jewel branch mds/osd/mon"
+workload_x:
+ sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0-noec.sh
+ - cls
+ env:
+ CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
+ - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x NOT upgraded client"
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rados/test-upgrade-v11.0.0-noec.sh
+ - cls
+ - print: "**** done rados/test-upgrade-v11.0.0.sh & cls workload_x upgraded client"
+ - rgw: [client.1]
+ - print: "**** done rgw workload_x"
+ - s3tests:
+ client.1:
+ force-branch: ceph-jewel
+ rgw_server: client.1
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_x"
+upgrade-sequence_x:
+ sequential:
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart:
+ daemons: [osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - exec:
+ mgr.x:
+ - mkdir -p /var/lib/ceph/mgr/ceph-x
+ - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
+ - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
+ - ceph.restart:
+ daemons: [mgr.x]
+ wait-for-healthy: false
+ - exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+ - ceph.healthy:
+ - print: "**** done ceph.restart all -x branch mds/osd/mon"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/% b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster
new file mode 120000
index 0000000..3580937
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/0-cluster
@@ -0,0 +1 @@
+../stress-split/0-cluster/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install
new file mode 120000
index 0000000..3e7cbc3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1-jewel-install
@@ -0,0 +1 @@
+../stress-split/1-jewel-install/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml
new file mode 120000
index 0000000..522db1b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.5-final-scrub.yaml
@@ -0,0 +1 @@
+../parallel/1.5-final-scrub.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade
new file mode 120000
index 0000000..ab35fc1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/2-partial-upgrade
@@ -0,0 +1 @@
+../stress-split/2-partial-upgrade/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 0000000..edae7b3
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..c89551e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/ec-rados-default.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml
new file mode 120000
index 0000000..a66a7dc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../stress-split/5-finish-upgrade.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml
new file mode 120000
index 0000000..2b99d5c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/6-luminous.yaml
@@ -0,0 +1 @@
+../stress-split/6-luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 0000000..a82f11b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary. (The arithmetic is worked through in the sketch after
+# this hunk.)
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
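The comment at the top of this file quotes a 1376-byte chunk and a 4128-byte
stripe_width for k=3. A minimal sketch of the arithmetic, assuming the
per-chunk size is the 4096-byte default width divided by k and rounded up to
a 32-byte alignment boundary (the alignment value is an assumption that
reproduces the quoted numbers):

    import math

    DEFAULT_STRIPE_WIDTH = 4096
    ALIGNMENT = 32  # assumed alignment granularity; 1376 = 43 * 32

    k = 3
    stripe_unit = math.ceil(DEFAULT_STRIPE_WIDTH / k / ALIGNMENT) * ALIGNMENT
    stripe_width = stripe_unit * k

    print(stripe_unit)                   # 1376
    print(stripe_width)                  # 4128, != the 4096 default
    print(stripe_width % (1024 * 1024))  # 4128: not a multiple of 1 MiB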
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/% b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 0000000..a0d5c20
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 0000000..4f40219
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,20 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..31ca3e5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: install ceph/jewel latest
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done install jewel"
+- ceph:
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+ log-whitelist:
+ - required past_interval bounds are empty
+- print: "**** done ceph"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml
new file mode 120000
index 0000000..522db1b
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/1.5-final-scrub.yaml
@@ -0,0 +1 @@
+../parallel/1.5-final-scrub.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 0000000..442dcf1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ install upgrade of ceph/-x on one node only,
+ 1st half;
+ restart: mons and osd.0, osd.1, osd.2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
+- print: "**** done ceph.restart 1st half"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 0000000..b3fddef
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,25 @@
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml
new file mode 100644
index 0000000..626ae8e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations;
+ generate write load with rados bench (the repeated stanzas
+ below are also sketched as generated YAML after this hunk)
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+- print: "**** done radosbench 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 0000000..92779bc
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 0000000..693154d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 0000000..64c0e33
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml
new file mode 100644
index 0000000..41e34d6
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml
new file mode 100644
index 0000000..f56d0de
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml
new file mode 100644
index 0000000..1d528cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml
@@ -0,0 +1,9 @@
+tasks:
+- install.upgrade:
+ osd.3:
+ client.0:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml
new file mode 120000
index 0000000..5283ac7
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6-luminous.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml
new file mode 120000
index 0000000..02263d1
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/6.5-crush-compat.yaml
@@ -0,0 +1 @@
+../parallel/6.5-crush-compat.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml
new file mode 100644
index 0000000..56ba21d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rbd-python.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml
new file mode 100644
index 0000000..76e5d6f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/rgw-swift.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml
new file mode 100644
index 0000000..805bf97
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml
new file mode 120000
index 0000000..e0426db
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/stress-split/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../tasks/thrashosds-health.yaml \ No newline at end of file