author    Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-04 13:43:33 +0800
committer Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-05 11:59:39 +0800
commit    812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch)
tree      04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/qa/suites/rados/singleton
parent    15280273faafb77777eab341909a3f495cf248d9 (diff)
initial code repo
This patch creates the initial code repo. For ceph, the luminous stable release will be used as the base code, and subsequent changes and optimizations for ceph will be added on top of it. For opensds, any changes can currently be upstreamed into the original opensds repo (https://github.com/opensds/opensds), so stor4nfv will directly clone the opensds code to deploy the stor4nfv environment. The scripts for deployment based on ceph and opensds will be put into the 'ci' directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
Diffstat (limited to 'src/ceph/qa/suites/rados/singleton')
-rw-r--r--  src/ceph/qa/suites/rados/singleton/% | 0
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml | 29
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml | 29
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml | 24
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml | 14
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml | 20
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/peer.yaml | 25
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml | 34
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/radostool.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/random-eio.yaml | 41
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml | 51
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/reg11184.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/rest-api.yaml | 35
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml | 44
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash-rados/+ | 0
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml | 27
l---------  src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml | 70
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml | 32
l---------  src/ceph/qa/suites/rados/singleton/msgr | 1
-rw-r--r--  src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml | 5
-rw-r--r--  src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml | 7
l---------  src/ceph/qa/suites/rados/singleton/objectstore | 1
l---------  src/ceph/qa/suites/rados/singleton/rados.yaml | 1
39 files changed, 921 insertions, 0 deletions
diff --git a/src/ceph/qa/suites/rados/singleton/% b/src/ceph/qa/suites/rados/singleton/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/%
diff --git a/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml b/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml
new file mode 100644
index 0000000..13af813
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - client.a
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+- admin_socket:
+ osd.0:
+ version:
+ git_version:
+ help:
+ config show:
+ config help:
+ config set filestore_dump_file /tmp/foo:
+ perf dump:
+ perf schema:
+ get_heap_property tcmalloc.max_total_thread_cache_bytes:
+ set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864:
+ set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432:
diff --git a/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml b/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml
new file mode 100644
index 0000000..604a9e4
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+ conf:
+ osd:
+ debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors:
diff --git a/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml b/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml
new file mode 100644
index 0000000..e2f0245
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+ conf:
+ osd:
+ debug osd: 5
+
+tasks:
+- install:
+- ceph:
+- divergent_priors2:
diff --git a/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml b/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml
new file mode 100644
index 0000000..59085ff
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml
@@ -0,0 +1,19 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- dump_stuck:
diff --git a/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
new file mode 100644
index 0000000..68644c8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
@@ -0,0 +1,24 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+- ec_lost_unfound:
diff --git a/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml b/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
new file mode 100644
index 0000000..e8201ee
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - erasure-code/encode-decode-non-regression.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
new file mode 100644
index 0000000..bcaef78
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
@@ -0,0 +1,23 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+- rep_lost_unfound_delete:
diff --git a/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml b/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml
new file mode 100644
index 0000000..a4a309d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml
@@ -0,0 +1,23 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+- lost_unfound:
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
new file mode 100644
index 0000000..accdd96
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd : 2
+ osd max pg per osd hard ratio : 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: True
+ pg_num: 2
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
new file mode 100644
index 0000000..1c48ada
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd : 1
+ osd max pg per osd hard ratio : 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: True
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
new file mode 100644
index 0000000..0cf37fd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd : 1
+ osd max pg per osd hard ratio : 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: False
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml
new file mode 100644
index 0000000..318af5e
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml
@@ -0,0 +1,14 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/auth_caps.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml
new file mode 100644
index 0000000..7bb4f65
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/test_mon_config_key.py
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml
new file mode 100644
index 0000000..815c518
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+ osd pool default min size : 1
+ osd:
+ debug monc: 1
+ debug ms: 1
+ log-whitelist:
+ - overall HEALTH
+ - Manager daemon
+ - \(MGR_DOWN\)
+- mon_seesaw:
+- ceph_manager.create_pool:
+ kwargs:
+ pool_name: test
+ pg_num: 1
+- ceph_manager.wait_for_clean:
+ kwargs:
+ timeout: 60
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml
new file mode 100644
index 0000000..5b37407
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+- osd_backfill:
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
new file mode 100644
index 0000000..ed5b216
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery.test_incomplete_pgs:
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml
new file mode 100644
index 0000000..634e884
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(SLOW_OPS\)
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery:
diff --git a/src/ceph/qa/suites/rados/singleton/all/peer.yaml b/src/ceph/qa/suites/rados/singleton/all/peer.yaml
new file mode 100644
index 0000000..645034a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/peer.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+ osd pool default min size : 1
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- peer:
diff --git a/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
new file mode 100644
index 0000000..10f18e2
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- exec:
+ client.0:
+ - sudo ceph osd pool create foo 128 128
+ - sudo ceph osd pool application enable foo rados
+ - sleep 5
+ - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
+ - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
+- ceph.wait_for_failure: [osd.0]
+- exec:
+ client.0:
+ - sudo ceph osd down 0
+- ceph.restart: [osd.0]
+- ceph.healthy:
diff --git a/src/ceph/qa/suites/rados/singleton/all/radostool.yaml b/src/ceph/qa/suites/rados/singleton/all/radostool.yaml
new file mode 100644
index 0000000..1827795
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/radostool.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - had wrong cluster addr
+ - reached quota
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_rados_tool.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml b/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml
new file mode 100644
index 0000000..a2ad997
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml
@@ -0,0 +1,41 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+- full_sequential:
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33
+ - sudo ceph osd pool create test 16 16
+ - sudo ceph osd pool set test size 3
+ - sudo ceph pg dump pgs --format=json-pretty
+ - radosbench:
+ clients: [client.0]
+ time: 360
+ type: rand
+ objectsize: 1048576
+ pool: test
+ create_pool: false
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0
diff --git a/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml
new file mode 100644
index 0000000..78d77c8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml
@@ -0,0 +1,31 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - no reply from
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 30
+ - rebuild_mondb:
+ - radosbench:
+ clients: [client.0]
+ time: 30
diff --git a/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml b/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
new file mode 100644
index 0000000..7507bf6
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
@@ -0,0 +1,51 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 20 # GB
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+ osd recovery sleep: .1
+ osd min pg log entries: 100
+ osd max pg log entries: 1000
+ log-whitelist:
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(PG_
+ - overall HEALTH
+- exec:
+ osd.0:
+ - ceph osd pool create foo 128
+ - ceph osd pool application enable foo foo
+ - rados -p foo bench 30 write -b 4096 --no-cleanup
+ - ceph osd out 0
+ - sleep 5
+ - ceph osd set noup
+- ceph.restart:
+ daemons: [osd.1]
+ wait-for-up: false
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - rados -p foo bench 3 write -b 4096 --no-cleanup
+ - ceph osd unset noup
+ - sleep 10
+ - ceph tell osd.* config set osd_recovery_sleep 0
+ - ceph tell osd.* config set osd_recovery_max_active 20
+- ceph.healthy:
+- exec:
+ osd.0:
+ - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log
diff --git a/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml b/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml
new file mode 100644
index 0000000..f3c8575
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ conf:
+ osd:
+ debug osd: 5
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - \(OBJECT_
+tasks:
+- install:
+- ceph:
+- reg11184:
diff --git a/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
new file mode 100644
index 0000000..3eddce8
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
@@ -0,0 +1,17 @@
+roles:
+- [mon.a, mgr.x]
+- [osd.0, osd.1, osd.2, client.0]
+
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+- resolve_stuck_peering:
+
diff --git a/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml b/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml
new file mode 100644
index 0000000..d988d1a
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml
@@ -0,0 +1,35 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - mds.a
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ conf:
+ client.rest0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+- rest-api: [client.0]
+- workunit:
+ clients:
+ all:
+ - rest/test.py
diff --git a/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
new file mode 100644
index 0000000..42c8ae3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ fs: ext4
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_envlibrados_for_rocksdb.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml
new file mode 100644
index 0000000..cac3cb3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ mon:
+ osd pool default size: 3
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(REQUEST_SLOW\)
+ - \(PG_
+ - \(OBJECT_MISPLACED\)
+ - \(OSD_
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+ random_eio: .33
+ min_live: 5
+ min_in: 5
+- radosbench:
+ clients: [client.0]
+ time: 720
+ type: rand
+ objectsize: 1048576
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
new file mode 100644
index 0000000..37be8df
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix-small.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
new file mode 120000
index 0000000..0b1d7b0
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
@@ -0,0 +1 @@
+../../../../../tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
new file mode 100644
index 0000000..82c9b2d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
@@ -0,0 +1,70 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+ - \(CACHE_POOL_
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 500
+- background_exec:
+ mon.a:
+ - while true
+ - do sleep 30
+ - echo proxy
+ - sudo ceph osd tier cache-mode cache proxy
+ - sleep 10
+ - sudo ceph osd pool set cache cache_target_full_ratio .001
+ - echo cache-try-flush-evict-all
+ - rados -p cache cache-try-flush-evict-all
+ - sleep 5
+ - echo cache-flush-evict-all
+ - rados -p cache cache-flush-evict-all
+ - sleep 5
+ - echo remove overlay
+ - sudo ceph osd tier remove-overlay base
+ - sleep 20
+ - echo add writeback overlay
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd pool set cache cache_target_full_ratio .8
+ - sudo ceph osd tier set-overlay base cache
+ - sleep 30
+ - sudo ceph osd tier cache-mode cache readproxy
+ - done
+- rados:
+ clients: [client.0]
+ pools: [base]
+ max_seconds: 600
+ ops: 400000
+ objects: 10000
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
diff --git a/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
new file mode 100644
index 0000000..48ef78f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+ osd pool default min size : 1
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+- watch_notify_same_primary:
+ clients: [client.0]
diff --git a/src/ceph/qa/suites/rados/singleton/msgr b/src/ceph/qa/suites/rados/singleton/msgr
new file mode 120000
index 0000000..b29ecdd
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/msgr
@@ -0,0 +1 @@
+../basic/msgr
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml b/src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml
new file mode 100644
index 0000000..0de320d
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
diff --git a/src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml b/src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml
new file mode 100644
index 0000000..3b495f9
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
+ mgr:
+ debug monc: 10
diff --git a/src/ceph/qa/suites/rados/singleton/objectstore b/src/ceph/qa/suites/rados/singleton/objectstore
new file mode 120000
index 0000000..4c8ebad
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/objectstore
@@ -0,0 +1 @@
+../../../objectstore
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/rados.yaml b/src/ceph/qa/suites/rados/singleton/rados.yaml
new file mode 120000
index 0000000..b756e57
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton/rados.yaml
@@ -0,0 +1 @@
+../../../config/rados.yaml
\ No newline at end of file