Diffstat (limited to 'src/ceph/qa/suites/rados/singleton/all')
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml | 29
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml | 29
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml | 24
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml | 23
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml | 14
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml | 20
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/peer.yaml | 25
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml | 34
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/radostool.yaml | 26
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/random-eio.yaml | 41
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml | 31
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml | 51
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/reg11184.yaml | 28
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml | 17
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/rest-api.yaml | 35
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml | 19
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml | 44
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash-rados/+ | 0
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml | 27
l---------  src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml | 1
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml | 70
-rw-r--r--  src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml | 32
33 files changed, 0 insertions, 906 deletions
diff --git a/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml b/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml
deleted file mode 100644
index 13af813..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/admin-socket.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - client.a
-openstack:
- - volumes: # attached to each instance
- count: 2
- size: 10 # GB
-tasks:
-- install:
-- ceph:
-- admin_socket:
- osd.0:
- version:
- git_version:
- help:
- config show:
- config help:
- config set filestore_dump_file /tmp/foo:
- perf dump:
- perf schema:
 - get_heap_property tcmalloc.max_total_thread_cache_bytes:
- set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864:
- set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432:
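
Each key under the admin_socket task above is a command sent to the OSD's admin socket. Outside teuthology the same checks can be run by hand with the ceph CLI; a minimal sketch, assuming a live cluster with osd.0 (this exec form is illustrative, not part of the original suite):

tasks:
- exec:
    osd.0:
      # CLI equivalents of a few of the admin_socket commands above
      - sudo ceph daemon osd.0 version
      - sudo ceph daemon osd.0 perf dump
      - sudo ceph daemon osd.0 config show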
diff --git a/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml b/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml
deleted file mode 100644
index 604a9e4..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-
-overrides:
- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_DEGRADED\)
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- osd:
- debug osd: 5
-
-tasks:
-- install:
-- ceph:
-- divergent_priors:
diff --git a/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml b/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml
deleted file mode 100644
index e2f0245..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-
-overrides:
- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_DEGRADED\)
- - \(POOL_APP_NOT_ENABLED\)
- conf:
- osd:
- debug osd: 5
-
-tasks:
-- install:
-- ceph:
-- divergent_priors2:
diff --git a/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml b/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml
deleted file mode 100644
index 59085ff..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
-openstack:
- - volumes: # attached to each instance
- count: 2
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
-- dump_stuck:
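
The log-whitelist entries in these suites are regular expressions matched against cluster log lines, which is why health codes are written with escaped parentheses and why a prefix such as \(PG_ is deliberately left unclosed so it matches any PG_* code. A sketch of the convention (entries illustrative):

overrides:
  ceph:
    log-whitelist:
    - \(OSDMAP_FLAGS\)  # matches the literal health code "(OSDMAP_FLAGS)"
    - \(PG_             # open prefix: matches (PG_AVAILABILITY), (PG_DEGRADED), ...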
diff --git a/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
deleted file mode 100644
index 68644c8..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-openstack:
- - volumes: # attached to each instance
- count: 4
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - objects unfound and apparently lost
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_
-- ec_lost_unfound:
diff --git a/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml b/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
deleted file mode 100644
index e8201ee..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- workunit:
- clients:
- all:
- - erasure-code/encode-decode-non-regression.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
deleted file mode 100644
index bcaef78..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - objects unfound and apparently lost
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_
-- rep_lost_unfound_delete:
diff --git a/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml b/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml
deleted file mode 100644
index a4a309d..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - objects unfound and apparently lost
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_
-- lost_unfound:
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
deleted file mode 100644
index accdd96..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
-openstack:
- - volumes: # attached to each instance
- count: 2
- size: 10 # GB
-overrides:
- ceph:
- create_rbd_pool: False
- conf:
- mon:
- osd pool default size: 2
- osd:
- mon max pg per osd : 2
- osd max pg per osd hard ratio : 1
- log-whitelist:
- - \(TOO_FEW_PGS\)
-tasks:
-- install:
-- ceph:
-- osd_max_pg_per_osd:
- test_create_from_mon: True
- pg_num: 2
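
The settings above cap placement groups per OSD: with osd pool default size: 2 and mon max pg per osd: 2 on a two-OSD cluster, the monitor should refuse any pool creation that pushes the average past the cap, which is what test_create_from_mon exercises. A hedged sketch of probing the limit by hand (pool names made up; the leading "!" marks a command expected to fail):

tasks:
- exec:
    client.0:
      - sudo ceph osd pool create small 2 2          # 2 PGs x size 2 / 2 OSDs = 2 PGs per OSD, at the cap
      - '! sudo ceph osd pool create toomany 16 16'  # should be rejected by the mon as exceeding the cap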
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
deleted file mode 100644
index 1c48ada..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-openstack:
- - volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- create_rbd_pool: False
- conf:
- mon:
- osd pool default size: 2
- osd:
- mon max pg per osd : 1
- osd max pg per osd hard ratio : 1
- log-whitelist:
- - \(TOO_FEW_PGS\)
- - \(PG_
-tasks:
-- install:
-- ceph:
-- osd_max_pg_per_osd:
- test_create_from_mon: False
- pg_num: 1
- pool_size: 2
- from_primary: True
diff --git a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
deleted file mode 100644
index 0cf37fd..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-openstack:
- - volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- create_rbd_pool: False
- conf:
- mon:
- osd pool default size: 2
- osd:
- mon max pg per osd : 1
- osd max pg per osd hard ratio : 1
- log-whitelist:
- - \(TOO_FEW_PGS\)
- - \(PG_
-tasks:
-- install:
-- ceph:
-- osd_max_pg_per_osd:
- test_create_from_mon: False
- pg_num: 1
- pool_size: 2
- from_primary: False
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml
deleted file mode 100644
index 318af5e..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - mon/auth_caps.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml
deleted file mode 100644
index 7bb4f65..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/mon-config-keys.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - mon/test_mon_config_key.py
diff --git a/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml b/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml
deleted file mode 100644
index 815c518..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/mon-seesaw.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- config:
- global:
- osd pool default min size : 1
- osd:
- debug monc: 1
- debug ms: 1
- log-whitelist:
- - overall HEALTH
- - Manager daemon
- - \(MGR_DOWN\)
-- mon_seesaw:
-- ceph_manager.create_pool:
- kwargs:
- pool_name: test
- pg_num: 1
-- ceph_manager.wait_for_clean:
- kwargs:
- timeout: 60
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml
deleted file mode 100644
index 5b37407..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_
- conf:
- osd:
- osd min pg log entries: 5
-- osd_backfill:
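
Setting osd min pg log entries: 5 keeps each PG's log extremely short, so an OSD that misses more than a handful of writes can no longer catch up via log-based recovery and must be backfilled, which is the path this test exercises. A sketch of the knob pair (the max value is an assumption added for illustration; the original file sets only the minimum):

overrides:
  ceph:
    conf:
      osd:
        osd min pg log entries: 5   # trim the PG log aggressively...
        osd max pg log entries: 10  # ...assumed cap, forcing backfill instead of log recovery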
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
deleted file mode 100644
index ed5b216..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-openstack:
- - volumes: # attached to each instance
- count: 4
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_
- conf:
- osd:
- osd min pg log entries: 5
- osd_fast_fail_on_connection_refused: false
-- osd_recovery.test_incomplete_pgs:
diff --git a/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml b/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml
deleted file mode 100644
index 634e884..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_DEGRADED\)
- - \(SLOW_OPS\)
- conf:
- osd:
- osd min pg log entries: 5
- osd_fast_fail_on_connection_refused: false
-- osd_recovery:
diff --git a/src/ceph/qa/suites/rados/singleton/all/peer.yaml b/src/ceph/qa/suites/rados/singleton/all/peer.yaml
deleted file mode 100644
index 645034a..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/peer.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- config:
- global:
- osd pool default min size : 1
- log-whitelist:
- - objects unfound and apparently lost
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
-- peer:
diff --git a/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
deleted file mode 100644
index 10f18e2..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
- - slow request
- - overall HEALTH_
 - \(OSDMAP_FLAGS\)
 - \(OSD_
 - \(PG_
-- exec:
- client.0:
- - sudo ceph osd pool create foo 128 128
- - sudo ceph osd pool application enable foo rados
- - sleep 5
- - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
- - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
-- ceph.wait_for_failure: [osd.0]
-- exec:
- client.0:
- - sudo ceph osd down 0
-- ceph.restart: [osd.0]
-- ceph.healthy:
diff --git a/src/ceph/qa/suites/rados/singleton/all/radostool.yaml b/src/ceph/qa/suites/rados/singleton/all/radostool.yaml
deleted file mode 100644
index 1827795..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/radostool.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 2
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
- - had wrong client addr
- - had wrong cluster addr
- - reached quota
- - overall HEALTH_
- - \(POOL_FULL\)
- - \(POOL_APP_NOT_ENABLED\)
-- workunit:
- clients:
- all:
- - rados/test_rados_tool.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml b/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml
deleted file mode 100644
index a2ad997..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/random-eio.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - missing primary copy of
- - objects unfound and apparently lost
- - overall HEALTH_
 - \(POOL_APP_NOT_ENABLED\)
-- full_sequential:
- - exec:
- client.0:
- - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33
- - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33
- - sudo ceph osd pool create test 16 16
- - sudo ceph osd pool set test size 3
- - sudo ceph pg dump pgs --format=json-pretty
- - radosbench:
- clients: [client.0]
- time: 360
- type: rand
- objectsize: 1048576
- pool: test
- create_pool: false
- - exec:
- client.0:
- - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0
- - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0
diff --git a/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml
deleted file mode 100644
index 78d77c8..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - no reply from
- - overall HEALTH_
- - \(MON_DOWN\)
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 30
- - rebuild_mondb:
- - radosbench:
- clients: [client.0]
- time: 30
diff --git a/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml b/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
deleted file mode 100644
index 7507bf6..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 20 # GB
-tasks:
-- install:
-- ceph:
- conf:
- osd:
- osd recovery sleep: .1
- osd min pg log entries: 100
- osd max pg log entries: 1000
- log-whitelist:
- - \(POOL_APP_NOT_ENABLED\)
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(OBJECT_
- - \(PG_
- - overall HEALTH
-- exec:
- osd.0:
- - ceph osd pool create foo 128
- - ceph osd pool application enable foo foo
- - rados -p foo bench 30 write -b 4096 --no-cleanup
- - ceph osd out 0
- - sleep 5
- - ceph osd set noup
-- ceph.restart:
- daemons: [osd.1]
- wait-for-up: false
- wait-for-healthy: false
-- exec:
- osd.0:
- - rados -p foo bench 3 write -b 4096 --no-cleanup
- - ceph osd unset noup
- - sleep 10
- - ceph tell osd.* config set osd_recovery_sleep 0
- - ceph tell osd.* config set osd_recovery_max_active 20
-- ceph.healthy:
-- exec:
- osd.0:
- - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log
diff --git a/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml b/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml
deleted file mode 100644
index f3c8575..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/reg11184.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-
-overrides:
- ceph:
- conf:
- osd:
- debug osd: 5
- log-whitelist:
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(SMALLER_PGP_NUM\)
- - \(OBJECT_
-tasks:
-- install:
-- ceph:
-- reg11184:
diff --git a/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
deleted file mode 100644
index 3eddce8..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-roles:
-- [mon.a, mgr.x]
-- [osd.0, osd.1, osd.2, client.0]
-
-tasks:
-- install:
-- ceph:
- fs: xfs
- log-whitelist:
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_DEGRADED\)
- - \(POOL_APP_NOT_ENABLED\)
-- resolve_stuck_peering:
-
diff --git a/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml b/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml
deleted file mode 100644
index d988d1a..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/rest-api.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - mds.a
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
- - had wrong client addr
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_DEGRADED\)
- conf:
- client.rest0:
- debug ms: 1
- debug objecter: 20
- debug rados: 20
-- rest-api: [client.0]
-- workunit:
- clients:
- all:
- - rest/test.py
diff --git a/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
deleted file mode 100644
index 42c8ae3..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-overrides:
- ceph:
- fs: ext4
- conf:
- global:
- osd max object name len: 460
- osd max object namespace len: 64
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(POOL_APP_NOT_ENABLED\)
-- workunit:
- clients:
- all:
- - rados/test_envlibrados_for_rocksdb.sh
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml
deleted file mode 100644
index cac3cb3..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
overrides:
 ceph:
 conf:
 mon:
 osd pool default size: 3
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
- - missing primary copy of
- - objects unfound and apparently lost
- - overall HEALTH_
 - \(OSDMAP_FLAGS\)
 - \(REQUEST_SLOW\)
 - \(PG_
 - \(OBJECT_MISPLACED\)
 - \(OSD_
-- thrashosds:
- op_delay: 30
- clean_interval: 120
- chance_down: .5
- random_eio: .33
- min_live: 5
- min_in: 5
-- radosbench:
- clients: [client.0]
- time: 720
- type: rand
- objectsize: 1048576
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+ b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+
deleted file mode 100644
index e69de29..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/+
+++ /dev/null
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
deleted file mode 100644
index 37be8df..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
-- thrashosds:
- op_delay: 30
- clean_interval: 120
- chance_down: .5
-- workunit:
- clients:
- all:
- - rados/load-gen-mix-small.sh
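
The thrashosds knobs above control how aggressively OSDs are cycled; roughly (semantics per the teuthology thrashosds task, values illustrative):

- thrashosds:
    op_delay: 30         # seconds to wait between thrash operations
    clean_interval: 120  # periodically pause thrashing and let the cluster go clean
    chance_down: .5      # probability a chosen OSD is taken down rather than brought back up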
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
deleted file mode 120000
index 0b1d7b0..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
deleted file mode 100644
index 82c9b2d..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - but it is still running
- - slow request
- - overall HEALTH_
 - \(CACHE_POOL_
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 60
- - sudo ceph osd pool set cache target_max_objects 500
-- background_exec:
- mon.a:
- - while true
- - do sleep 30
- - echo proxy
- - sudo ceph osd tier cache-mode cache proxy
- - sleep 10
- - sudo ceph osd pool set cache cache_target_full_ratio .001
- - echo cache-try-flush-evict-all
- - rados -p cache cache-try-flush-evict-all
- - sleep 5
- - echo cache-flush-evict-all
- - rados -p cache cache-flush-evict-all
- - sleep 5
- - echo remove overlay
- - sudo ceph osd tier remove-overlay base
- - sleep 20
- - echo add writeback overlay
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd pool set cache cache_target_full_ratio .8
- - sudo ceph osd tier set-overlay base cache
- - sleep 30
- - sudo ceph osd tier cache-mode cache readproxy
- - done
-- rados:
- clients: [client.0]
- pools: [base]
- max_seconds: 600
- ops: 400000
- objects: 10000
- size: 1024
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
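
For reference, the base/cache tier built in the exec step above is normally torn down with the inverse sequence; a sketch using standard ceph CLI commands (not part of the original test, which leaves teardown to the harness):

- exec:
    client.0:
      - rados -p cache cache-flush-evict-all    # flush/evict any remaining cached objects
      - sudo ceph osd tier remove-overlay base  # detach the overlay from the base pool
      - sudo ceph osd tier remove base cache    # unlink the cache tier from the base pool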
diff --git a/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
deleted file mode 100644
index 48ef78f..0000000
--- a/src/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- config:
- global:
- osd pool default min size : 1
- client:
- debug ms: 1
- debug objecter: 20
- debug rados: 20
- log-whitelist:
- - objects unfound and apparently lost
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(OBJECT_DEGRADED\)
-- watch_notify_same_primary:
- clients: [client.0]