Diffstat (limited to 'src/ceph/qa/suites/rados/singleton-nomsgr/all')
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml   | 20
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml        | 48
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml        |  8
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml    | 34
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml          | 34
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml       | 20
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml                  | 21
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml | 44
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml           |  9
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml        | 29
10 files changed, 0 insertions, 267 deletions
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
deleted file mode 100644
index bbf330b..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-roles:
-- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
-overrides:
- ceph:
- log-whitelist:
- - MDS in read-only mode
- - force file system read-only
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_FULL\)
- - \(MDS_READ_ONLY\)
- - \(POOL_FULL\)
-tasks:
-- install:
-- ceph:
-- rgw:
- - client.0
-- exec:
- client.0:
- - ceph_test_admin_socket_output --all
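Note: ceph_test_admin_socket_output iterates over the daemons' admin sockets and sanity-checks the output of each registered command. A minimal sketch of poking one socket by hand, assuming default socket paths on a running cluster:

    sudo ceph daemon osd.0 help       # list the commands the socket registers
    sudo ceph daemon osd.0 perf dump  # dump perf counters over the same socket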
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
deleted file mode 100644
index 5864803..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-roles:
-- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(CACHE_POOL_NO_HIT_SET\)
- conf:
- global:
- osd max object name len: 460
- osd max object namespace len: 64
- debug client: 20
- debug mds: 20
- debug ms: 1
-- exec:
- client.0:
- - ceph osd pool create data_cache 4
- - ceph osd tier add cephfs_data data_cache
- - ceph osd tier cache-mode data_cache writeback
- - ceph osd tier set-overlay cephfs_data data_cache
- - ceph osd pool set data_cache hit_set_type bloom
- - ceph osd pool set data_cache hit_set_count 8
- - ceph osd pool set data_cache hit_set_period 3600
- - ceph osd pool set data_cache min_read_recency_for_promote 0
-- ceph-fuse:
-- exec:
- client.0:
- - sudo chmod 777 $TESTDIR/mnt.0/
- - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
- - ls -al $TESTDIR/mnt.0/foo
- - truncate --size 0 $TESTDIR/mnt.0/foo
- - ls -al $TESTDIR/mnt.0/foo
- - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
- - ls -al $TESTDIR/mnt.0/foo
- - cp $TESTDIR/mnt.0/foo /tmp/foo
- - sync
- - rados -p data_cache ls -
- - sleep 10
- - rados -p data_cache ls -
- - rados -p data_cache cache-flush-evict-all
- - rados -p data_cache ls -
- - sleep 1
-- exec:
- client.1:
- - hexdump -C /tmp/foo | head
- - hexdump -C $TESTDIR/mnt.1/foo | head
- - cmp $TESTDIR/mnt.1/foo /tmp/foo
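Note: the exec steps above stack a writeback cache pool over cephfs_data, write and truncate a file through ceph-fuse, then flush-evict the tier and confirm client.1 still reads identical bytes. For reference, the inverse teardown uses the standard tier commands (not part of this test):

    rados -p data_cache cache-flush-evict-all
    ceph osd tier remove-overlay cephfs_data
    ceph osd tier remove cephfs_data data_cache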
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
deleted file mode 100644
index 8634362..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
-tasks:
-- install:
-- workunit:
- clients:
- all:
- - post-file.sh
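Note: post-file.sh exercises the ceph-post-file helper, which uploads a file to a drop point where Ceph developers can fetch it. Typical manual usage, with an illustrative log path:

    ceph-post-file -d 'short description of the issue' /var/log/ceph/ceph-osd.0.log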
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
deleted file mode 100644
index e766bdc..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(CACHE_POOL_NO_HIT_SET\)
- conf:
- global:
- osd max object name len: 460
- osd max object namespace len: 64
-- exec:
- client.0:
- - ceph osd pool create base-pool 4
- - ceph osd pool application enable base-pool rados
- - ceph osd pool create cache-pool 4
- - ceph osd tier add base-pool cache-pool
- - ceph osd tier cache-mode cache-pool writeback
- - ceph osd tier set-overlay base-pool cache-pool
- - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
- - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
- - rbd snap create base-pool/bar@snap
- - rados -p base-pool cache-flush-evict-all
- - rbd export base-pool/bar $TESTDIR/bar
- - rbd export base-pool/bar@snap $TESTDIR/snap
- - cmp $TESTDIR/foo $TESTDIR/bar
- - cmp $TESTDIR/foo $TESTDIR/snap
- - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
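Note: this test imports an image through the writeback tier, snapshots it, evicts the cache, and verifies that both the head and the snapshot export byte-identical data from the base pool. An equivalent spot check with checksums instead of cmp:

    md5sum $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap   # all three digests should match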
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
deleted file mode 100644
index b245866..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# verify #13098 fix
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
-overrides:
- ceph:
- log-whitelist:
- - is full
- - overall HEALTH_
- - \(POOL_FULL\)
- - \(POOL_NEAR_FULL\)
- - \(CACHE_POOL_NO_HIT_SET\)
- - \(CACHE_POOL_NEAR_FULL\)
-tasks:
-- install:
-- ceph:
- conf:
- global:
- osd max object name len: 460
- osd max object namespace len: 64
-- exec:
- client.0:
- - ceph osd pool create ec-ca 1 1
- - ceph osd pool create ec 1 1 erasure default
- - ceph osd pool application enable ec rados
- - ceph osd tier add ec ec-ca
- - ceph osd tier cache-mode ec-ca readproxy
- - ceph osd tier set-overlay ec ec-ca
- - ceph osd pool set ec-ca hit_set_type bloom
- - ceph osd pool set-quota ec-ca max_bytes 20480000
- - ceph osd pool set-quota ec max_bytes 20480000
- - ceph osd pool set ec-ca target_max_bytes 20480000
- - timeout 30 rados -p ec-ca bench 30 write || true
- - ceph osd pool set-quota ec-ca max_bytes 0
- - ceph osd pool set-quota ec max_bytes 0
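Note: per the header comment this reproduces tracker #13098, driving a small readproxy cache tier to its byte quota while rados bench races eviction; the whitelisted POOL_FULL and CACHE_POOL_NEAR_FULL warnings are the expected outcome. Quota state can be watched while the bench runs:

    ceph osd pool get-quota ec-ca
    ceph df detail   # per-pool usage against quotas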
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
deleted file mode 100644
index a28582f..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0]
-tasks:
-- install:
-- ceph:
- conf:
- osd:
-# we may land on ext4
- osd max object name len: 400
- osd max object namespace len: 64
- log-whitelist:
- - but it is still running
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
-- workunit:
- clients:
- all:
- - rados/test_health_warnings.sh
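Note: rados/test_health_warnings.sh drives the ten OSDs into the whitelisted warning states and asserts that the corresponding HEALTH_ codes surface; the shortened object-name limits are there in case the OSDs land on ext4. The codes can be expanded by hand:

    ceph health detail          # long form of the OSD_/PG_/OSDMAP_FLAGS warnings
    ceph osd dump | grep flags  # where OSDMAP_FLAGS comes from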
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
deleted file mode 100644
index 98b5095..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-tasks:
-- install:
-- exec:
- client.0:
- - ceph_test_async_driver
- - ceph_test_msgr
-openstack:
- - machine:
- disk: 40 # GB
- ram: 15000 # MB
- cpus: 1
- volumes: # attached to each instance
- count: 0
- size: 1 # GB
-overrides:
- ceph:
- conf:
- client:
- debug ms: 20
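Note: ceph_test_async_driver and ceph_test_msgr are standalone gtest binaries for the messenger layer, which is why this suite installs packages but never starts a cluster. Standard gtest switches apply when running them by hand; the filter pattern below is illustrative:

    ceph_test_msgr --gtest_list_tests
    ceph_test_msgr --gtest_filter='*Simple*'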
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
deleted file mode 100644
index f480dbb..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - client.0
-- - osd.3
- - osd.4
- - osd.5
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(PG_
- - \(OSD_
- - \(OBJECT_
- conf:
- osd:
- osd debug reject backfill probability: .3
- osd min pg log entries: 25
- osd max pg log entries: 100
- osd max object name len: 460
- osd max object namespace len: 64
-- exec:
- client.0:
- - sudo ceph osd pool create foo 64
- - sudo ceph osd pool application enable foo rados
- - rados -p foo bench 60 write -b 1024 --no-cleanup
- - sudo ceph osd pool set foo size 3
- - sudo ceph osd out 0 1
-- sleep:
- duration: 60
-- exec:
- client.0:
- - sudo ceph osd in 0 1
-- sleep:
- duration: 60
-- exec:
- client.0:
- - sudo ceph osd pool set foo size 2
-- sleep:
- duration: 300
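Note: with osd debug reject backfill probability at .3, roughly a third of backfill reservation requests are refused, so the out/in and pool-size changes above force repeated backfill retries under the whitelisted PG_/OSD_/OBJECT_ warnings. Progress during the sleeps can be followed with:

    ceph pg dump pgs_brief | grep backfill   # PGs still in a backfill state
    ceph -s                                  # overall recovery summary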
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
deleted file mode 100644
index d49a597..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - rados/test_pool_access.sh
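Note: rados/test_pool_access.sh verifies that per-pool OSD caps are enforced, i.e. a client keyed to one pool cannot read or write another. The cap grammar it depends on, with illustrative client and pool names:

    ceph auth get-or-create client.alice mon 'allow r' osd 'allow rw pool=foo'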
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
deleted file mode 100644
index b0d5de3..0000000
--- a/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
-os_type: centos
-
-overrides:
- install:
- ceph:
- flavor: notcmalloc
- debuginfo: true
- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(PG_
- conf:
- global:
- osd heartbeat grace: 40
- debug deliberately leak memory: true
- osd max object name len: 460
- osd max object namespace len: 64
- mon:
- mon osd crush smoke test: false
- valgrind:
- mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- osd: [--tool=memcheck]
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
- expect_valgrind_errors: true
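Note: expect_valgrind_errors: true inverts the usual pass criterion: the run fails unless valgrind actually reports the leaks planted by debug deliberately leak memory (see the two tracker issues in the header comment). Outside teuthology, the equivalent memcheck invocation looks roughly like this, with an illustrative daemon id:

    valgrind --tool=memcheck --leak-check=full --show-reachable=yes \
        ceph-mon -f -i a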