From 812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 Mon Sep 17 00:00:00 2001
From: Qiaowei Ren
Date: Thu, 4 Jan 2018 13:43:33 +0800
Subject: initial code repo

This patch creates the initial code repo.

For ceph, the luminous stable release is used as the base code; subsequent
changes and optimizations for ceph will be added on top of it. For opensds,
any changes can currently be upstreamed into the original opensds repo
(https://github.com/opensds/opensds), so stor4nfv will clone the opensds code
directly when deploying the stor4nfv environment. The deployment scripts based
on ceph and opensds will be placed in the 'ci' directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren
---
 .../fs/basic_functional/tasks/alternate-pool.yaml  | 20 ++++++++++++++
 .../fs/basic_functional/tasks/asok_dump_tree.yaml  |  4 +++
 .../fs/basic_functional/tasks/auto-repair.yaml     | 13 +++++++++
 .../fs/basic_functional/tasks/backtrace.yaml       |  5 ++++
 .../fs/basic_functional/tasks/cap-flush.yaml       |  5 ++++
 .../basic_functional/tasks/cephfs_scrub_tests.yaml | 16 +++++++++++
 .../tasks/cfuse_workunit_quota.yaml                |  6 ++++
 .../fs/basic_functional/tasks/client-limits.yaml   | 19 +++++++++++++
 .../fs/basic_functional/tasks/client-readahad.yaml |  4 +++
 .../fs/basic_functional/tasks/client-recovery.yaml | 14 ++++++++++
 .../fs/basic_functional/tasks/config-commands.yaml | 11 ++++++++
 .../suites/fs/basic_functional/tasks/damage.yaml   | 25 +++++++++++++++++
 .../fs/basic_functional/tasks/data-scan.yaml       | 19 +++++++++++++
 .../fs/basic_functional/tasks/forward-scrub.yaml   | 14 ++++++++++
 .../suites/fs/basic_functional/tasks/fragment.yaml |  5 ++++
 .../fs/basic_functional/tasks/journal-repair.yaml  | 14 ++++++++++
 .../fs/basic_functional/tasks/libcephfs_java.yaml  | 14 ++++++++++
 .../basic_functional/tasks/libcephfs_python.yaml   | 10 +++++++
 .../fs/basic_functional/tasks/mds-flush.yaml       |  5 ++++
 .../suites/fs/basic_functional/tasks/mds-full.yaml | 32 ++++++++++++++++++++++
 .../basic_functional/tasks/mds_creation_retry.yaml |  6 ++++
 .../fs/basic_functional/tasks/pool-perm.yaml       |  5 ++++
 .../qa/suites/fs/basic_functional/tasks/quota.yaml |  5 ++++
 .../fs/basic_functional/tasks/sessionmap.yaml      | 13 +++++++++
 .../suites/fs/basic_functional/tasks/strays.yaml   |  5 ++++
 .../tasks/test_journal_migration.yaml              |  5 ++++
 .../fs/basic_functional/tasks/volume-client.yaml   | 11 ++++++++
 27 files changed, 305 insertions(+)
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/backtrace.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/cap-flush.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/client-readahad.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/config-commands.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/fragment.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_java.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/mds-flush.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/pool-perm.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/quota.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/sessionmap.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/strays.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml
 create mode 100644 src/ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml

diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml
new file mode 100644
index 0000000..94d5cc6
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml
@@ -0,0 +1,20 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace
+      - object missing on disk
+      - error reading table object
+      - error reading sessionmap
+      - unmatched fragstat
+      - unmatched rstat
+      - was unreadable, recreating it now
+      - Scrub error on inode
+      - Metadata damage detected
+      - MDS_FAILED
+      - MDS_DAMAGE
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_recovery_pool
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml
new file mode 100644
index 0000000..7fa5614
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml
@@ -0,0 +1,4 @@
+tasks:
+- cephfs_test_runner:
+    modules:
+      - tasks.cephfs.test_dump_tree
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml
new file mode 100644
index 0000000..90d0e7b
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - force file system read-only
+      - bad backtrace
+      - MDS in read-only mode
+      - \(MDS_READ_ONLY\)
+
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_auto_repair
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/backtrace.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/backtrace.yaml
new file mode 100644
index 0000000..d740a5f
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/backtrace.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_backtrace
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/cap-flush.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/cap-flush.yaml
new file mode 100644
index 0000000..0d26dc9
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/cap-flush.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_cap_flush
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml
new file mode 100644
index 0000000..30b3a96
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - Scrub error on inode
+      - Behind on trimming
+      - Metadata damage detected
+      - overall HEALTH_
+      - (MDS_TRIM)
+    conf:
+      mds:
+        mds log max segments: 1
+        mds cache max size: 1000
+tasks:
+- cephfs_test_runner:
+    modules:
+      - tasks.cephfs.test_scrub_checks
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml
new file mode 100644
index 0000000..8801454
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    timeout: 6h
+    clients:
+      all:
+        - fs/quota
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml
new file mode 100644
index 0000000..635d0b6
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml
@@ -0,0 +1,19 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - responding to mclientcaps\(revoke\)
+      - not advance its oldest_client_tid
+      - failing to advance its oldest client/flush tid
+      - Too many inodes in cache
+      - failing to respond to cache pressure
+      - slow requests are blocked
+      - failing to respond to capability release
+      - MDS cache is too large
+      - \(MDS_CLIENT_OLDEST_TID\)
+      - \(MDS_CACHE_OVERSIZED\)
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_client_limits
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/client-readahad.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/client-readahad.yaml
new file mode 100644
index 0000000..1d178e5
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/client-readahad.yaml
@@ -0,0 +1,4 @@
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_readahead
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml
new file mode 100644
index 0000000..f5e9a0b
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml
@@ -0,0 +1,14 @@
+
+# The task interferes with the network, so we need
+# to permit OSDs to complain about that.
+overrides:
+  ceph:
+    log-whitelist:
+      - evicting unresponsive client
+      - but it is still running
+      - slow request
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_client_recovery
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/config-commands.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/config-commands.yaml
new file mode 100644
index 0000000..2f51801
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/config-commands.yaml
@@ -0,0 +1,11 @@
+
+overrides:
+  ceph:
+    conf:
+      global:
+        lockdep: true
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_config_commands
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml
new file mode 100644
index 0000000..3f4aac9
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml
@@ -0,0 +1,25 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace
+      - object missing on disk
+      - error reading table object
+      - error reading sessionmap
+      - Error loading MDS rank
+      - missing journal object
+      - Error recovering journal
+      - error decoding table object
+      - failed to read JournalPointer
+      - Corrupt directory entry
+      - Corrupt fnode header
+      - corrupt sessionmap header
+      - Corrupt dentry
+      - Scrub error on inode
+      - Metadata damage detected
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_damage
+
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml
new file mode 100644
index 0000000..64c8a23
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml
@@ -0,0 +1,19 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace
+      - object missing on disk
+      - error reading table object
+      - error reading sessionmap
+      - unmatched fragstat
+      - unmatched rstat
+      - was unreadable, recreating it now
+      - Scrub error on inode
+      - Metadata damage detected
+      - inconsistent rstat on inode
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_data_scan
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml
new file mode 100644
index 0000000..b92cf10
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - inode wrongly marked free
+      - bad backtrace on inode
+      - inode table repaired for inode
+      - Scrub error on inode
+      - Metadata damage detected
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_forward_scrub
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/fragment.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/fragment.yaml
new file mode 100644
index 0000000..482caad
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/fragment.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_fragment
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml
new file mode 100644
index 0000000..66f819d
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace on directory inode
+      - error reading table object
+      - Metadata damage detected
+      - slow requests are blocked
+      - Behind on trimming
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_journal_repair
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_java.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_java.yaml
new file mode 100644
index 0000000..aaffa03
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_java.yaml
@@ -0,0 +1,14 @@
+
+os_type: ubuntu
+os_version: "14.04"
+
+overrides:
+  ceph-fuse:
+    disabled: true
+  kclient:
+    disabled: true
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - libcephfs-java/test.sh
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml
new file mode 100644
index 0000000..e5cbb14
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph-fuse:
+    disabled: true
+  kclient:
+    disabled: true
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - fs/test_python.sh
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/mds-flush.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/mds-flush.yaml
new file mode 100644
index 0000000..d59a8ad
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/mds-flush.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_flush
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml
new file mode 100644
index 0000000..5373500
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml
@@ -0,0 +1,32 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - OSD full dropping all updates
+      - OSD near full
+      - failsafe engaged, dropping updates
+      - failsafe disengaged, no longer dropping
+      - is full \(reached quota
+    conf:
+      mon:
+        mon osd nearfull ratio: 0.6
+        mon osd backfillfull ratio: 0.6
+        mon osd full ratio: 0.7
+      osd:
+        osd mon report interval max: 5
+        osd objectstore: memstore
+        osd failsafe full ratio: 1.0
+        memstore device bytes: 200000000
+      client.0:
+        debug client: 20
+        debug objecter: 20
+        debug objectcacher: 20
+      client.1:
+        debug client: 20
+        debug objecter: 20
+        debug objectcacher: 20
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_full
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml
new file mode 100644
index 0000000..fd23aa8
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml
@@ -0,0 +1,6 @@
+tasks:
+-mds_creation_failure:
+- workunit:
+    clients:
+      all: [fs/misc/trivial_sync.sh]
+
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/pool-perm.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/pool-perm.yaml
new file mode 100644
index 0000000..f220626
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/pool-perm.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_pool_perm
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/quota.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/quota.yaml
new file mode 100644
index 0000000..89b10ce
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/quota.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_quota
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/sessionmap.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/sessionmap.yaml
new file mode 100644
index 0000000..054fdb7
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/sessionmap.yaml
@@ -0,0 +1,13 @@
+
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: simple
+    log-whitelist:
+      - client session with invalid root
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_sessionmap
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/strays.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/strays.yaml
new file mode 100644
index 0000000..2809fc1
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/strays.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_strays
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml
new file mode 100644
index 0000000..183ef38
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+- cephfs_test_runner:
+    modules:
+      - tasks.cephfs.test_journal_migration
diff --git a/src/ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml b/src/ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml
new file mode 100644
index 0000000..e8c850a
--- /dev/null
+++ b/src/ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml
@@ -0,0 +1,11 @@
+
+overrides:
+  ceph:
+    conf:
+      global:
+        ms type: simple
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_volume_client
--
cgit 1.2.3-korg
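
Note (not part of the patch itself): each YAML file above is a teuthology suite
fragment; the framework merges one fragment with the other facets of the
fs/basic_functional suite (cluster roles, install/ceph/ceph-fuse tasks) to form
a runnable job. The sketch below is only an illustration of such a merged job;
the roles line and the install/ceph/ceph-fuse entries are assumptions about how
the wider suite composes jobs, not something introduced by this change.

# illustrative merged teuthology job (assumptions marked in comments)
roles:
- [mon.a, mgr.x, mds.a, osd.0, osd.1, client.0]   # assumed cluster layout from other suite facets
tasks:
- install:            # install packages (from the suite's base facets, assumed)
- ceph:               # bring up the cluster
- ceph-fuse:          # mount CephFS for client.0
- cephfs_test_runner:
    modules:
      - tasks.cephfs.test_backtrace   # taken from backtrace.yaml in this patch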