author    Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-04 13:43:33 +0800
committer Qiaowei Ren <qiaowei.ren@intel.com>  2018-01-05 11:59:39 +0800
commit    812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch)
tree      04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/qa/suites/rados/singleton-nomsgr/all
parent    15280273faafb77777eab341909a3f495cf248d9 (diff)
initial code repo

This patch creates the initial code repo. For Ceph, the luminous stable release is used as the base code, and subsequent changes and optimizations for Ceph will be added on top of it. For OpenSDS, any changes can currently be upstreamed into the original opensds repo (https://github.com/opensds/opensds), so stor4nfv clones the opensds code directly to deploy the stor4nfv environment. The deployment scripts based on Ceph and OpenSDS are placed in the 'ci' directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
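As a rough illustration of the deployment flow described above (the script name below is hypothetical; the commit message only states that the deployment scripts live under the 'ci' directory):

    # clone the upstream opensds code that stor4nfv deploys directly
    git clone https://github.com/opensds/opensds.git
    # run the ceph/opensds deployment scripts kept under ci/ (illustrative entry point)
    cd stor4nfv/ci && ./deploy.sh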
Diffstat (limited to 'src/ceph/qa/suites/rados/singleton-nomsgr/all')
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml    20
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml         48
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml          8
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml     34
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml           34
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml        20
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml                   21
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml  44
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml             9
-rw-r--r--  src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml         29
10 files changed, 267 insertions, 0 deletions
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
new file mode 100644
index 0000000..bbf330b
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+ ceph:
+ log-whitelist:
+ - MDS in read-only mode
+ - force file system read-only
+ - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_FULL\)
+      - \(MDS_READ_ONLY\)
+      - \(POOL_FULL\)
+tasks:
+- install:
+- ceph:
+- rgw:
+ - client.0
+- exec:
+ client.0:
+ - ceph_test_admin_socket_output --all
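For context, ceph_test_admin_socket_output drives the daemons' admin sockets; the same interface can be poked by hand, e.g. (a sketch, the daemon names follow the roles above):

    # query an OSD and the MDS through their admin sockets
    ceph daemon osd.0 perf dump | head
    ceph daemon mds.a status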
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
new file mode 100644
index 0000000..5864803
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -0,0 +1,48 @@
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+ debug client: 20
+ debug mds: 20
+ debug ms: 1
+- exec:
+ client.0:
+ - ceph osd pool create data_cache 4
+ - ceph osd tier add cephfs_data data_cache
+ - ceph osd tier cache-mode data_cache writeback
+ - ceph osd tier set-overlay cephfs_data data_cache
+ - ceph osd pool set data_cache hit_set_type bloom
+ - ceph osd pool set data_cache hit_set_count 8
+ - ceph osd pool set data_cache hit_set_period 3600
+ - ceph osd pool set data_cache min_read_recency_for_promote 0
+- ceph-fuse:
+- exec:
+ client.0:
+ - sudo chmod 777 $TESTDIR/mnt.0/
+ - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+ - ls -al $TESTDIR/mnt.0/foo
+ - truncate --size 0 $TESTDIR/mnt.0/foo
+ - ls -al $TESTDIR/mnt.0/foo
+ - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+ - ls -al $TESTDIR/mnt.0/foo
+ - cp $TESTDIR/mnt.0/foo /tmp/foo
+ - sync
+ - rados -p data_cache ls -
+ - sleep 10
+ - rados -p data_cache ls -
+ - rados -p data_cache cache-flush-evict-all
+ - rados -p data_cache ls -
+ - sleep 1
+- exec:
+ client.1:
+ - hexdump -C /tmp/foo | head
+ - hexdump -C $TESTDIR/mnt.1/foo | head
+ - cmp $TESTDIR/mnt.1/foo /tmp/foo
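To confirm by hand that the flush/evict above actually emptied the cache tier, something like the following would do (a sketch using the pool names from this fragment):

    # after cache-flush-evict-all, the cache pool should be (nearly) empty
    rados -p data_cache ls
    ceph df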
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
new file mode 100644
index 0000000..8634362
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - post-file.sh
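The post-file.sh workunit exercises the ceph-post-file helper; a typical manual invocation looks roughly like this (a sketch, the file path and description are illustrative):

    # upload a log file to the Ceph developers' drop point with a short description
    ceph-post-file -d "teuthology test upload" /var/log/ceph/ceph-mon.a.log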
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
new file mode 100644
index 0000000..e766bdc
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+- exec:
+ client.0:
+ - ceph osd pool create base-pool 4
+ - ceph osd pool application enable base-pool rados
+ - ceph osd pool create cache-pool 4
+ - ceph osd tier add base-pool cache-pool
+ - ceph osd tier cache-mode cache-pool writeback
+ - ceph osd tier set-overlay base-pool cache-pool
+ - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
+ - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
+ - rbd snap create base-pool/bar@snap
+ - rados -p base-pool cache-flush-evict-all
+ - rbd export base-pool/bar $TESTDIR/bar
+ - rbd export base-pool/bar@snap $TESTDIR/snap
+ - cmp $TESTDIR/foo $TESTDIR/bar
+ - cmp $TESTDIR/foo $TESTDIR/snap
+ - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
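Should the tier need to be torn down after a run like this, the usual sequence is roughly the following (a sketch; not part of this fragment):

    # flush remaining objects, then detach the cache tier from the base pool
    rados -p cache-pool cache-flush-evict-all
    ceph osd tier remove-overlay base-pool
    ceph osd tier remove base-pool cache-pool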
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
new file mode 100644
index 0000000..b245866
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -0,0 +1,34 @@
+# verify #13098 fix
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+ ceph:
+ log-whitelist:
+ - is full
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(CACHE_POOL_NEAR_FULL\)
+tasks:
+- install:
+- ceph:
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+- exec:
+ client.0:
+ - ceph osd pool create ec-ca 1 1
+ - ceph osd pool create ec 1 1 erasure default
+ - ceph osd pool application enable ec rados
+ - ceph osd tier add ec ec-ca
+ - ceph osd tier cache-mode ec-ca readproxy
+ - ceph osd tier set-overlay ec ec-ca
+ - ceph osd pool set ec-ca hit_set_type bloom
+ - ceph osd pool set-quota ec-ca max_bytes 20480000
+ - ceph osd pool set-quota ec max_bytes 20480000
+ - ceph osd pool set ec-ca target_max_bytes 20480000
+ - timeout 30 rados -p ec-ca bench 30 write || true
+ - ceph osd pool set-quota ec-ca max_bytes 0
+ - ceph osd pool set-quota ec max_bytes 0
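To see the quota actually biting during the bench write above, the pool quotas and usage can be checked directly, e.g. (a sketch):

    # both pools are capped at roughly 20 MB by the set-quota calls above
    ceph osd pool get-quota ec-ca
    ceph df detail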
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
new file mode 100644
index 0000000..a28582f
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+# we may land on ext4
+ osd max object name len: 400
+ osd max object namespace len: 64
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- workunit:
+ clients:
+ all:
+ - rados/test_health_warnings.sh
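The rados/test_health_warnings.sh workunit provokes warnings such as the whitelisted OSDMAP_FLAGS; they can be inspected manually along these lines (a sketch):

    # show current warnings, and set/clear an osdmap flag to trigger one
    ceph health detail
    ceph osd set noup && ceph health detail && ceph osd unset noup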
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
new file mode 100644
index 0000000..98b5095
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+ client.0:
+ - ceph_test_async_driver
+ - ceph_test_msgr
+openstack:
+ - machine:
+ disk: 40 # GB
+ ram: 15000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 0
+ size: 1 # GB
+overrides:
+ ceph:
+ conf:
+ client:
+ debug ms: 20
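ceph_test_msgr and ceph_test_async_driver are standalone gtest binaries in upstream Ceph, so individual cases can be run directly on the client node, e.g. (a sketch; the filter pattern is illustrative):

    # list the available messenger tests, then run a matching subset
    ceph_test_msgr --gtest_list_tests
    ceph_test_msgr --gtest_filter='*Messenger*'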
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
new file mode 100644
index 0000000..f480dbb
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+- - osd.3
+ - osd.4
+ - osd.5
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd min pg log entries: 25
+ osd max pg log entries: 100
+ osd max object name len: 460
+ osd max object namespace len: 64
+- exec:
+ client.0:
+ - sudo ceph osd pool create foo 64
+ - sudo ceph osd pool application enable foo rados
+ - rados -p foo bench 60 write -b 1024 --no-cleanup
+ - sudo ceph osd pool set foo size 3
+ - sudo ceph osd out 0 1
+- sleep:
+ duration: 60
+- exec:
+ client.0:
+ - sudo ceph osd in 0 1
+- sleep:
+ duration: 60
+- exec:
+ client.0:
+ - sudo ceph osd pool set foo size 2
+- sleep:
+ duration: 300
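While the sleeps above run, the rejected and retried backfills can be watched from the client, e.g. (a sketch):

    # watch PG states move through backfill_wait/backfilling as OSDs go out and back in
    ceph -s
    ceph pg stat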
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
new file mode 100644
index 0000000..d49a597
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/test_pool_access.sh
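rados/test_pool_access.sh checks that pool-scoped caps are enforced; a cap restricted to a single pool is created along these lines (a sketch, the client and pool names are illustrative):

    # a key that can only read/write objects in pool 'foo'
    ceph auth get-or-create client.pooluser mon 'allow r' osd 'allow rw pool=foo'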
diff --git a/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml b/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
new file mode 100644
index 0000000..b0d5de3
--- /dev/null
+++ b/src/ceph/qa/suites/rados/singleton-nomsgr/all/valgrind-leaks.yaml
@@ -0,0 +1,29 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ debuginfo: true
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(PG_
+ conf:
+ global:
+ osd heartbeat grace: 40
+ debug deliberately leak memory: true
+ osd max object name len: 460
+ osd max object namespace len: 64
+ mon:
+ mon osd crush smoke test: false
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ expect_valgrind_errors: true
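Outside teuthology, the same memcheck options from the overrides above can be applied to a daemon by hand, e.g. (a sketch, assuming a locally built ceph-mon and an existing mon.a data directory):

    # run the monitor in the foreground under valgrind with full leak checking
    valgrind --tool=memcheck --leak-check=full --show-reachable=yes \
        ceph-mon -f -i a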