summaryrefslogtreecommitdiffstats
path: root/src/ceph/qa/suites/upgrade/jewel-x/parallel
diff options
context:
space:
mode:
Diffstat (limited to 'src/ceph/qa/suites/upgrade/jewel-x/parallel')
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/%0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml4
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml32
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml60
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml14
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml41
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml24
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml12
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml38
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml23
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml11
l---------src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml1
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml8
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+0
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml13
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml17
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml18
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml9
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml11
-rw-r--r--src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml13
l---------src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml1
l---------src/ceph/qa/suites/upgrade/jewel-x/parallel/distros1
28 files changed, 404 insertions, 0 deletions
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/% b/src/ceph/qa/suites/upgrade/jewel-x/parallel/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/%
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 0000000..f4d1349
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
new file mode 100644
index 0000000..d1f1e10
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,32 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client 0,1,2 third node.
+ Use xfs beneath the osds.
+ CephFS tests running on client 2,3
+roles:
+- - mon.a
+ - mds.a
+ - mgr.x
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+- - client.4
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - \(MDS_FAILED\)
+ - \(OBJECT_
+ - is unresponsive
+ conf:
+ fs: xfs
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
new file mode 100644
index 0000000..c64b2cd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
@@ -0,0 +1,60 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.1:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.2:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+ client.3:
+ debug ms: 1
+ debug client: 10
+ debug monc: 10
+meta:
+- desc: |
+ install ceph/jewel latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ branch: jewel
+ exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+- print: "**** done installing jewel"
+- ceph:
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - is unresponsive
+ conf:
+ global:
+ mon warn on pool no app: false
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade mon.a and mon.b"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml
new file mode 100644
index 0000000..83457c0
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/1.5-final-scrub.yaml
@@ -0,0 +1,11 @@
+# do not require luminous osds at mkfs time; only set flag at
+# the end of the test run, then do a final scrub (to convert any
+# legacy snapsets), and verify we are healthy.
+tasks:
+- full_sequential_finally:
+ - ceph.osd_scrub_pgs:
+ cluster: ceph
+ - exec:
+ mon.a:
+ - ceph pg dump -f json-pretty
+ - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 0000000..56eedbd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse: [client.2]
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml
new file mode 100644
index 0000000..dfbcbea
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/cache-pool-snaps.yaml
@@ -0,0 +1,41 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+workload:
+ full_sequential:
+ - sequential:
+ - exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+ - rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+openstack:
+ - machine:
+ ram: 15000 # MB
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 0000000..fb9d30f
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 0000000..348f1ae
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 0000000..15d892e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 0000000..bb2d3ea
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 0000000..6a0f829
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ - ceph.restart:
+ daemons: [mds.a, osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 0000000..2d74e9e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,38 @@
+meta:
+- desc: |
+ upgrade the ceph cluster,
+ upgrade in two steps
+ step one ordering: mon.a, osd.0, osd.1, mds.a
+ step two ordering: mon.b, mon.c, osd.2, osd.3
+ ceph expected to be healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b, mon.c]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables jewel
+ - print: "**** done ceph osd crush tunables jewel"
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml
new file mode 100644
index 0000000..e57b377
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/4-luminous.yaml
@@ -0,0 +1,23 @@
+# this is the same fragment as ../../../../releases/luminous.yaml
+# but without line "ceph osd set-require-min-compat-client luminous"
+
+tasks:
+- exec:
+ mgr.x:
+ - mkdir -p /var/lib/ceph/mgr/ceph-x
+ - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
+ - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
+- ceph.restart:
+ daemons: [mgr.x]
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - ceph osd require-osd-release luminous
+- ceph.healthy:
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on osd down out interval zero: false
+ log-whitelist:
+ - no active mgr
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml
new file mode 100644
index 0000000..f7e9de4
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/5-workload.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd on not upgraded client.4
+ (covers issue http://tracker.ceph.com/issues/21660)
+tasks:
+ - workunit:
+ branch: jewel
+ clients:
+ client.4:
+ - rbd/import_export.sh
+ - print: "**** done rbd/import_export.sh 5-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml
new file mode 120000
index 0000000..5c72153
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6-luminous-with-mgr.yaml
@@ -0,0 +1 @@
+../../../../releases/luminous-with-mgr.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml
new file mode 100644
index 0000000..20c0ffd
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/6.5-crush-compat.yaml
@@ -0,0 +1,8 @@
+tasks:
+- exec:
+ mon.a:
+ - ceph osd set-require-min-compat-client jewel
+ - ceph osd crush set-all-straw-buckets-to-straw2
+ - ceph osd crush weight-set create-compat
+ - ceph osd crush weight-set reweight-compat osd.0 .9
+ - ceph osd crush weight-set reweight-compat osd.1 1.2
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/+
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml
new file mode 100644
index 0000000..d73459e
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse: [client.3]
+ - print: "**** done ceph-fuse 5-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 0000000..7dd61c5
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 0000000..b218b92
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 0000000..c835a65
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ librados C and C++ api tests
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+ - print: "**** done rados/test-upgrade-v11.0.0.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml
new file mode 100644
index 0000000..46bbf76
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml
new file mode 100644
index 0000000..5ae7491
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rbd_import_export.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml
new file mode 100644
index 0000000..780c4ad
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/rgw_swift.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+ swift api tests for rgw
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - rgw: [client.1]
+ - print: "**** done rgw 7-final-workload"
+ - swift:
+ client.1:
+ rgw_server: client.1
+ - print: "**** done swift 7-final-workload"
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml b/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml
new file mode 120000
index 0000000..81df389
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/8-jewel-workload.yaml
@@ -0,0 +1 @@
+5-workload.yaml \ No newline at end of file
diff --git a/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros b/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros
new file mode 120000
index 0000000..ca99fee
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/jewel-x/parallel/distros
@@ -0,0 +1 @@
+../../../../distros/supported/ \ No newline at end of file