author     José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/md
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources are
taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, one patch collided: "Force tick interrupt and get rid
of softirq magic" (I70131fb85). The colliding hunks were dropped because
their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/md')
-rw-r--r--  kernel/drivers/md/Kconfig | 19
-rw-r--r--  kernel/drivers/md/Makefile | 4
-rw-r--r--  kernel/drivers/md/bcache/bcache.h | 18
-rw-r--r--  kernel/drivers/md/bcache/btree.c | 15
-rw-r--r--  kernel/drivers/md/bcache/closure.c | 4
-rw-r--r--  kernel/drivers/md/bcache/closure.h | 5
-rw-r--r--  kernel/drivers/md/bcache/io.c | 100
-rw-r--r--  kernel/drivers/md/bcache/journal.c | 16
-rw-r--r--  kernel/drivers/md/bcache/movinggc.c | 8
-rw-r--r--  kernel/drivers/md/bcache/request.c | 69
-rw-r--r--  kernel/drivers/md/bcache/super.c | 74
-rw-r--r--  kernel/drivers/md/bcache/util.h | 15
-rw-r--r--  kernel/drivers/md/bcache/writeback.c | 51
-rw-r--r--  kernel/drivers/md/bcache/writeback.h | 3
-rw-r--r--  kernel/drivers/md/bitmap.c | 34
-rw-r--r--  kernel/drivers/md/bitmap.h | 4
-rw-r--r--  kernel/drivers/md/dm-bio-prison.c | 32
-rw-r--r--  kernel/drivers/md/dm-bio-prison.h | 13
-rw-r--r--  kernel/drivers/md/dm-bufio.c | 44
-rw-r--r--  kernel/drivers/md/dm-cache-metadata.c | 141
-rw-r--r--  kernel/drivers/md/dm-cache-metadata.h | 10
-rw-r--r--  kernel/drivers/md/dm-cache-policy-cleaner.c | 7
-rw-r--r--  kernel/drivers/md/dm-cache-policy-internal.h | 47
-rw-r--r--  kernel/drivers/md/dm-cache-policy-mq.c | 56
-rw-r--r--  kernel/drivers/md/dm-cache-policy-smq.c | 1761
-rw-r--r--  kernel/drivers/md/dm-cache-policy.h | 15
-rw-r--r--  kernel/drivers/md/dm-cache-target.c | 870
-rw-r--r--  kernel/drivers/md/dm-crypt.c | 111
-rw-r--r--  kernel/drivers/md/dm-delay.c | 23
-rw-r--r--  kernel/drivers/md/dm-era-target.c | 30
-rw-r--r--  kernel/drivers/md/dm-exception-store.c | 8
-rw-r--r--  kernel/drivers/md/dm-exception-store.h | 7
-rw-r--r--  kernel/drivers/md/dm-flakey.c | 40
-rw-r--r--  kernel/drivers/md/dm-io.c | 11
-rw-r--r--  kernel/drivers/md/dm-ioctl.c | 4
-rw-r--r--  kernel/drivers/md/dm-kcopyd.c | 2
-rw-r--r--  kernel/drivers/md/dm-linear.c | 43
-rw-r--r--  kernel/drivers/md/dm-log-userspace-base.c | 3
-rw-r--r--  kernel/drivers/md/dm-log-writes.c | 55
-rw-r--r--  kernel/drivers/md/dm-mpath.c | 85
-rw-r--r--  kernel/drivers/md/dm-raid.c | 209
-rw-r--r--  kernel/drivers/md/dm-raid1.c | 107
-rw-r--r--  kernel/drivers/md/dm-region-hash.c | 6
-rw-r--r--  kernel/drivers/md/dm-snap-persistent.c | 39
-rw-r--r--  kernel/drivers/md/dm-snap-transient.c | 7
-rw-r--r--  kernel/drivers/md/dm-snap.c | 70
-rw-r--r--  kernel/drivers/md/dm-stats.c | 353
-rw-r--r--  kernel/drivers/md/dm-stats.h | 4
-rw-r--r--  kernel/drivers/md/dm-stripe.c | 35
-rw-r--r--  kernel/drivers/md/dm-switch.c | 25
-rw-r--r--  kernel/drivers/md/dm-table.c | 109
-rw-r--r--  kernel/drivers/md/dm-thin-metadata.c | 164
-rw-r--r--  kernel/drivers/md/dm-thin-metadata.h | 11
-rw-r--r--  kernel/drivers/md/dm-thin.c | 664
-rw-r--r--  kernel/drivers/md/dm-verity.c | 57
-rw-r--r--  kernel/drivers/md/dm-zero.c | 2
-rw-r--r--  kernel/drivers/md/dm.c | 471
-rw-r--r--  kernel/drivers/md/dm.h | 3
-rw-r--r--  kernel/drivers/md/faulty.c | 4
-rw-r--r--  kernel/drivers/md/linear.c | 45
-rw-r--r--  kernel/drivers/md/md-cluster.c | 375
-rw-r--r--  kernel/drivers/md/md-cluster.h | 14
-rw-r--r--  kernel/drivers/md/md.c | 869
-rw-r--r--  kernel/drivers/md/md.h | 40
-rw-r--r--  kernel/drivers/md/multipath.c | 40
-rw-r--r--  kernel/drivers/md/persistent-data/dm-array.c | 4
-rw-r--r--  kernel/drivers/md/persistent-data/dm-block-manager.c | 18
-rw-r--r--  kernel/drivers/md/persistent-data/dm-block-manager.h | 3
-rw-r--r--  kernel/drivers/md/persistent-data/dm-btree-internal.h | 8
-rw-r--r--  kernel/drivers/md/persistent-data/dm-btree-remove.c | 217
-rw-r--r--  kernel/drivers/md/persistent-data/dm-btree-spine.c | 57
-rw-r--r--  kernel/drivers/md/persistent-data/dm-btree.c | 120
-rw-r--r--  kernel/drivers/md/persistent-data/dm-btree.h | 17
-rw-r--r--  kernel/drivers/md/persistent-data/dm-space-map-common.c | 32
-rw-r--r--  kernel/drivers/md/persistent-data/dm-space-map-metadata.c | 29
-rw-r--r--  kernel/drivers/md/persistent-data/dm-transaction-manager.c | 4
-rw-r--r--  kernel/drivers/md/persistent-data/dm-transaction-manager.h | 2
-rw-r--r--  kernel/drivers/md/raid0.c | 133
-rw-r--r--  kernel/drivers/md/raid0.h | 2
-rw-r--r--  kernel/drivers/md/raid1.c | 221
-rw-r--r--  kernel/drivers/md/raid1.h | 12
-rw-r--r--  kernel/drivers/md/raid10.c | 291
-rw-r--r--  kernel/drivers/md/raid10.h | 6
-rw-r--r--  kernel/drivers/md/raid5-cache.c | 1191
-rw-r--r--  kernel/drivers/md/raid5.c | 543
-rw-r--r--  kernel/drivers/md/raid5.h | 32
86 files changed, 7596 insertions, 2926 deletions
diff --git a/kernel/drivers/md/Kconfig b/kernel/drivers/md/Kconfig
index edcf4ab66..7913fdcfc 100644
--- a/kernel/drivers/md/Kconfig
+++ b/kernel/drivers/md/Kconfig
@@ -123,6 +123,7 @@ config MD_RAID456
tristate "RAID-4/RAID-5/RAID-6 mode"
depends on BLK_DEV_MD
select RAID6_PQ
+ select LIBCRC32C
select ASYNC_MEMCPY
select ASYNC_XOR
select ASYNC_PQ
@@ -259,7 +260,7 @@ config DM_CRYPT
the ciphers you're going to use in the cryptoapi configuration.
For further information on dm-crypt and userspace tools see:
- <http://code.google.com/p/cryptsetup/wiki/DMCrypt>
+ <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
To compile this code as a module, choose M here: the module will
be called dm-crypt.
@@ -304,6 +305,18 @@ config DM_CACHE_MQ
This is meant to be a general purpose policy. It prioritises
reads over writes.
+config DM_CACHE_SMQ
+ tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)"
+ depends on DM_CACHE
+ default y
+ ---help---
+ A cache policy that uses a multiqueue ordered by recent hits
+ to select which blocks should be promoted and demoted.
+ This is meant to be a general purpose policy. It prioritises
+ reads over writes. This SMQ policy (vs MQ) offers the promise
+ of less memory utilization, improved performance and increased
+ adaptability in the face of changing workloads.
+
config DM_CACHE_CLEANER
tristate "Cleaner Cache Policy (EXPERIMENTAL)"
depends on DM_CACHE
@@ -381,7 +394,7 @@ config DM_MULTIPATH
# of SCSI_DH if the latter isn't defined but if
# it is, DM_MULTIPATH must depend on it. We get a build
# error if SCSI_DH=m and DM_MULTIPATH=y
- depends on SCSI_DH || !SCSI_DH
+ depends on !SCSI_DH || SCSI
---help---
Allow volume managers to support multipath hardware.
@@ -466,7 +479,7 @@ config DM_LOG_WRITES
This device-mapper target takes two devices, one device to use
normally, one to log all write operations done to the first device.
This is for use by file system developers wishing to verify that
- their fs is writing a consitent file system at all times by allowing
+ their fs is writing a consistent file system at all times by allowing
them to replay the log in a variety of ways and to check the
contents.
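
The new DM_CACHE_SMQ help text above describes a policy built on a multiqueue ordered by recent hits. As a rough illustration of that idea only (this is not code from dm-cache-policy-smq.c; the names and level count are invented), a hit bumps an entry one level and promotion candidates are drawn from the top levels while demotion candidates come from the bottom:

#include <linux/list.h>

/* Illustrative sketch only -- not part of this patch. */
#define NR_MQ_LEVELS 8                          /* invented value */

struct mq_entry {
        struct list_head list;
        unsigned level;                         /* 0 = coldest */
};

struct multiqueue {
        struct list_head levels[NR_MQ_LEVELS];
};

static void mq_hit(struct multiqueue *mq, struct mq_entry *e)
{
        /* a hit makes the block one level hotter and moves it to the
         * tail of that level's LRU list */
        if (e->level + 1 < NR_MQ_LEVELS)
                e->level++;
        list_move_tail(&e->list, &mq->levels[e->level]);
}
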
diff --git a/kernel/drivers/md/Makefile b/kernel/drivers/md/Makefile
index dba4db598..f34979cd1 100644
--- a/kernel/drivers/md/Makefile
+++ b/kernel/drivers/md/Makefile
@@ -13,10 +13,11 @@ dm-log-userspace-y \
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
dm-cache-mq-y += dm-cache-policy-mq.o
+dm-cache-smq-y += dm-cache-policy-smq.o
dm-cache-cleaner-y += dm-cache-policy-cleaner.o
dm-era-y += dm-era-target.o
md-mod-y += md.o bitmap.o
-raid456-y += raid5.o
+raid456-y += raid5.o raid5-cache.o
# Note: link order is important. All raid personalities
# and must come before md.o, as they each initialise
@@ -54,6 +55,7 @@ obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
+obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
diff --git a/kernel/drivers/md/bcache/bcache.h b/kernel/drivers/md/bcache/bcache.h
index 04f7bc28e..6b420a55c 100644
--- a/kernel/drivers/md/bcache/bcache.h
+++ b/kernel/drivers/md/bcache/bcache.h
@@ -243,19 +243,6 @@ struct keybuf {
DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};
-struct bio_split_pool {
- struct bio_set *bio_split;
- mempool_t *bio_split_hook;
-};
-
-struct bio_split_hook {
- struct closure cl;
- struct bio_split_pool *p;
- struct bio *bio;
- bio_end_io_t *bi_end_io;
- void *bi_private;
-};
-
struct bcache_device {
struct closure cl;
@@ -288,8 +275,6 @@ struct bcache_device {
int (*cache_miss)(struct btree *, struct search *,
struct bio *, unsigned);
int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
-
- struct bio_split_pool bio_split_hook;
};
struct io {
@@ -454,8 +439,6 @@ struct cache {
atomic_long_t meta_sectors_written;
atomic_long_t btree_sectors_written;
atomic_long_t sectors_written;
-
- struct bio_split_pool bio_split_hook;
};
struct gc_stat {
@@ -873,7 +856,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);
-void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
diff --git a/kernel/drivers/md/bcache/btree.c b/kernel/drivers/md/bcache/btree.c
index 00cde40db..22b9e34ce 100644
--- a/kernel/drivers/md/bcache/btree.c
+++ b/kernel/drivers/md/bcache/btree.c
@@ -278,7 +278,7 @@ err:
goto out;
}
-static void btree_node_read_endio(struct bio *bio, int error)
+static void btree_node_read_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
closure_put(cl);
@@ -305,7 +305,7 @@ static void bch_btree_node_read(struct btree *b)
bch_submit_bbio(bio, b->c, &b->key, 0);
closure_sync(&cl);
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (bio->bi_error)
set_btree_node_io_error(b);
bch_bbio_free(bio, b->c);
@@ -371,15 +371,15 @@ static void btree_node_write_done(struct closure *cl)
__btree_node_write_done(cl);
}
-static void btree_node_write_endio(struct bio *bio, int error)
+static void btree_node_write_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
struct btree *b = container_of(cl, struct btree, io);
- if (error)
+ if (bio->bi_error)
set_btree_node_io_error(b);
- bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
+ bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
closure_put(cl);
}
@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
do {
ret = btree_root(gc_root, c, &op, &writes, &stats);
closure_sync(&writes);
+ cond_resched();
if (ret && ret != -EAGAIN)
pr_warn("gc failed!");
@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
rw_lock(true, b, b->level);
if (b->key.ptr[0] != btree_ptr ||
- b->seq != seq + 1)
+ b->seq != seq + 1) {
+ op->lock = b->level;
goto out;
+ }
}
SET_KEY_PTRS(check_key, 1);
diff --git a/kernel/drivers/md/bcache/closure.c b/kernel/drivers/md/bcache/closure.c
index 7a228de95..9eaf1d6e8 100644
--- a/kernel/drivers/md/bcache/closure.c
+++ b/kernel/drivers/md/bcache/closure.c
@@ -167,8 +167,6 @@ EXPORT_SYMBOL(closure_debug_destroy);
static struct dentry *debug;
-#define work_data_bits(work) ((unsigned long *)(&(work)->data))
-
static int debug_seq_show(struct seq_file *f, void *data)
{
struct closure *cl;
@@ -182,7 +180,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
r & CLOSURE_REMAINING_MASK);
seq_printf(f, "%s%s%s%s\n",
- test_bit(WORK_STRUCT_PENDING,
+ test_bit(WORK_STRUCT_PENDING_BIT,
work_data_bits(&cl->work)) ? "Q" : "",
r & CLOSURE_RUNNING ? "R" : "",
r & CLOSURE_STACK ? "S" : "",
diff --git a/kernel/drivers/md/bcache/closure.h b/kernel/drivers/md/bcache/closure.h
index a08e3eeac..782cc2c8a 100644
--- a/kernel/drivers/md/bcache/closure.h
+++ b/kernel/drivers/md/bcache/closure.h
@@ -38,7 +38,7 @@
* they are running owned by the thread that is running them. Otherwise, suppose
* you submit some bios and wish to have a function run when they all complete:
*
- * foo_endio(struct bio *bio, int error)
+ * foo_endio(struct bio *bio)
* {
* closure_put(cl);
* }
@@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list)
do { \
set_closure_fn(_cl, _fn, _wq); \
closure_sub(_cl, CLOSURE_RUNNING + 1); \
- return; \
} while (0)
/**
@@ -349,7 +348,6 @@ do { \
do { \
set_closure_fn(_cl, _fn, _wq); \
closure_queue(_cl); \
- return; \
} while (0)
/**
@@ -365,7 +363,6 @@ do { \
do { \
set_closure_fn(_cl, _destructor, NULL); \
closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
- return; \
} while (0)
/**
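
The closure.h hunks above drop the implicit "return" that continue_at(), closure_queue_async() and closure_return_with_destructor() used to hide, which is why other hunks in this patch add explicit returns after those calls. A minimal sketch of the resulting calling convention (illustrative only; need_more_work() is an invented predicate):

static void example_closure_step(struct closure *cl)
{
        if (need_more_work(cl)) {                       /* invented predicate */
                continue_at(cl, example_closure_step, system_wq);
                return;         /* no longer implied by the macro itself */
        }

        closure_return(cl);     /* safe as the last statement of the function */
}
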
diff --git a/kernel/drivers/md/bcache/io.c b/kernel/drivers/md/bcache/io.c
index fa028fa82..86a0bb871 100644
--- a/kernel/drivers/md/bcache/io.c
+++ b/kernel/drivers/md/bcache/io.c
@@ -11,104 +11,6 @@
#include <linux/blkdev.h>
-static unsigned bch_bio_max_sectors(struct bio *bio)
-{
- struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- struct bio_vec bv;
- struct bvec_iter iter;
- unsigned ret = 0, seg = 0;
-
- if (bio->bi_rw & REQ_DISCARD)
- return min(bio_sectors(bio), q->limits.max_discard_sectors);
-
- bio_for_each_segment(bv, bio, iter) {
- struct bvec_merge_data bvm = {
- .bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_iter.bi_sector,
- .bi_size = ret << 9,
- .bi_rw = bio->bi_rw,
- };
-
- if (seg == min_t(unsigned, BIO_MAX_PAGES,
- queue_max_segments(q)))
- break;
-
- if (q->merge_bvec_fn &&
- q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
- break;
-
- seg++;
- ret += bv.bv_len >> 9;
- }
-
- ret = min(ret, queue_max_sectors(q));
-
- WARN_ON(!ret);
- ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
-
- return ret;
-}
-
-static void bch_bio_submit_split_done(struct closure *cl)
-{
- struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-
- s->bio->bi_end_io = s->bi_end_io;
- s->bio->bi_private = s->bi_private;
- bio_endio_nodec(s->bio, 0);
-
- closure_debug_destroy(&s->cl);
- mempool_free(s, s->p->bio_split_hook);
-}
-
-static void bch_bio_submit_split_endio(struct bio *bio, int error)
-{
- struct closure *cl = bio->bi_private;
- struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-
- if (error)
- clear_bit(BIO_UPTODATE, &s->bio->bi_flags);
-
- bio_put(bio);
- closure_put(cl);
-}
-
-void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
-{
- struct bio_split_hook *s;
- struct bio *n;
-
- if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
- goto submit;
-
- if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
- goto submit;
-
- s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
- closure_init(&s->cl, NULL);
-
- s->bio = bio;
- s->p = p;
- s->bi_end_io = bio->bi_end_io;
- s->bi_private = bio->bi_private;
- bio_get(bio);
-
- do {
- n = bio_next_split(bio, bch_bio_max_sectors(bio),
- GFP_NOIO, s->p->bio_split);
-
- n->bi_end_io = bch_bio_submit_split_endio;
- n->bi_private = &s->cl;
-
- closure_get(&s->cl);
- generic_make_request(n);
- } while (n != bio);
-
- continue_at(&s->cl, bch_bio_submit_split_done, NULL);
-submit:
- generic_make_request(bio);
-}
-
/* Bios with headers */
void bch_bbio_free(struct bio *bio, struct cache_set *c)
@@ -138,7 +40,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
b->submit_time_us = local_clock_us();
- closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
+ closure_bio_submit(bio, bio->bi_private);
}
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
diff --git a/kernel/drivers/md/bcache/journal.c b/kernel/drivers/md/bcache/journal.c
index fe080ad0e..29eba7219 100644
--- a/kernel/drivers/md/bcache/journal.c
+++ b/kernel/drivers/md/bcache/journal.c
@@ -24,7 +24,7 @@
* bit.
*/
-static void journal_read_endio(struct bio *bio, int error)
+static void journal_read_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
closure_put(cl);
@@ -61,7 +61,7 @@ reread: left = ca->sb.bucket_size - offset;
bio->bi_private = &cl;
bch_bio_map(bio, data);
- closure_bio_submit(bio, &cl, ca);
+ closure_bio_submit(bio, &cl);
closure_sync(&cl);
/* This function could be simpler now since we no longer write
@@ -157,7 +157,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
- unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
+ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
unsigned i, l, r, m;
uint64_t seq;
@@ -401,7 +401,7 @@ retry:
#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
-static void journal_discard_endio(struct bio *bio, int error)
+static void journal_discard_endio(struct bio *bio)
{
struct journal_device *ja =
container_of(bio, struct journal_device, discard_bio);
@@ -547,11 +547,11 @@ void bch_journal_next(struct journal *j)
pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
-static void journal_write_endio(struct bio *bio, int error)
+static void journal_write_endio(struct bio *bio)
{
struct journal_write *w = bio->bi_private;
- cache_set_err_on(error, w->c, "journal io error");
+ cache_set_err_on(bio->bi_error, w->c, "journal io error");
closure_put(&w->c->journal.io);
}
@@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl)
if (!w->need_write) {
closure_return_with_destructor(cl, journal_write_unlock);
+ return;
} else if (journal_full(&c->journal)) {
journal_reclaim(c);
spin_unlock(&c->journal.lock);
btree_flush_write(c);
continue_at(cl, journal_write, system_wq);
+ return;
}
c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
@@ -646,7 +648,7 @@ static void journal_write_unlocked(struct closure *cl)
spin_unlock(&c->journal.lock);
while ((bio = bio_list_pop(&list)))
- closure_bio_submit(bio, cl, c->cache[0]);
+ closure_bio_submit(bio, cl);
continue_at(cl, journal_write_done, NULL);
}
diff --git a/kernel/drivers/md/bcache/movinggc.c b/kernel/drivers/md/bcache/movinggc.c
index cd7490311..b929fc944 100644
--- a/kernel/drivers/md/bcache/movinggc.c
+++ b/kernel/drivers/md/bcache/movinggc.c
@@ -60,20 +60,20 @@ static void write_moving_finish(struct closure *cl)
closure_return_with_destructor(cl, moving_io_destructor);
}
-static void read_moving_endio(struct bio *bio, int error)
+static void read_moving_endio(struct bio *bio)
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct moving_io *io = container_of(bio->bi_private,
struct moving_io, cl);
- if (error)
- io->op.error = error;
+ if (bio->bi_error)
+ io->op.error = bio->bi_error;
else if (!KEY_DIRTY(&b->key) &&
ptr_stale(io->op.c, &b->key, 0)) {
io->op.error = -EINTR;
}
- bch_bbio_endio(io->op.c, bio, error, "reading data to move");
+ bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
}
static void moving_init(struct moving_io *io)
diff --git a/kernel/drivers/md/bcache/request.c b/kernel/drivers/md/bcache/request.c
index ab43faddb..25fa8445b 100644
--- a/kernel/drivers/md/bcache/request.c
+++ b/kernel/drivers/md/bcache/request.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
+#include <linux/backing-dev.h>
#include <trace/events/bcache.h>
@@ -87,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl)
if (journal_ref)
atomic_dec_bug(journal_ref);
- if (!op->insert_data_done)
+ if (!op->insert_data_done) {
continue_at(cl, bch_data_insert_start, op->wq);
+ return;
+ }
bch_keylist_free(&op->insert_keys);
closure_return(cl);
@@ -170,22 +173,22 @@ static void bch_data_insert_error(struct closure *cl)
bch_data_insert_keys(cl);
}
-static void bch_data_insert_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
- if (error) {
+ if (bio->bi_error) {
/* TODO: We could try to recover from this. */
if (op->writeback)
- op->error = error;
+ op->error = bio->bi_error;
else if (!op->replace)
set_closure_fn(cl, bch_data_insert_error, op->wq);
else
set_closure_fn(cl, NULL, NULL);
}
- bch_bbio_endio(op->c, bio, error, "writing data to cache");
+ bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
@@ -215,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl)
/* 1 for the device pointer and 1 for the chksum */
if (bch_keylist_realloc(&op->insert_keys,
3 + (op->csum ? 1 : 0),
- op->c))
+ op->c)) {
continue_at(cl, bch_data_insert_keys, op->wq);
+ return;
+ }
k = op->insert_keys.top;
bkey_init(k);
@@ -254,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl)
op->insert_data_done = true;
continue_at(cl, bch_data_insert_keys, op->wq);
+ return;
err:
/* bch_alloc_sectors() blocks if s->writeback = true */
BUG_ON(op->writeback);
@@ -471,7 +477,7 @@ struct search {
struct data_insert_op iop;
};
-static void bch_cache_read_endio(struct bio *bio, int error)
+static void bch_cache_read_endio(struct bio *bio)
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct closure *cl = bio->bi_private;
@@ -484,15 +490,15 @@ static void bch_cache_read_endio(struct bio *bio, int error)
* from the backing device.
*/
- if (error)
- s->iop.error = error;
+ if (bio->bi_error)
+ s->iop.error = bio->bi_error;
else if (!KEY_DIRTY(&b->key) &&
ptr_stale(s->iop.c, &b->key, 0)) {
atomic_long_inc(&s->iop.c->cache_read_races);
s->iop.error = -EINTR;
}
- bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+ bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
}
/*
@@ -575,21 +581,23 @@ static void cache_lookup(struct closure *cl)
ret = bch_btree_map_keys(&s->op, s->iop.c,
&KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
cache_lookup_fn, MAP_END_KEY);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN) {
continue_at(cl, cache_lookup, bcache_wq);
+ return;
+ }
closure_return(cl);
}
/* Common code for the make_request functions */
-static void request_endio(struct bio *bio, int error)
+static void request_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
- if (error) {
+ if (bio->bi_error) {
struct search *s = container_of(cl, struct search, cl);
- s->iop.error = error;
+ s->iop.error = bio->bi_error;
/* Only cache read errors are recoverable */
s->recoverable = false;
}
@@ -605,7 +613,8 @@ static void bio_complete(struct search *s)
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
- bio_endio(s->orig_bio, s->iop.error);
+ s->orig_bio->bi_error = s->iop.error;
+ bio_endio(s->orig_bio);
s->orig_bio = NULL;
}
}
@@ -619,7 +628,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
bio->bi_end_io = request_endio;
bio->bi_private = &s->cl;
- atomic_set(&bio->bi_cnt, 3);
+ bio_cnt_set(bio, 3);
}
static void search_free(struct closure *cl)
@@ -710,7 +719,7 @@ static void cached_dev_read_error(struct closure *cl)
/* XXX: invalidate cache */
- closure_bio_submit(bio, cl, s->d);
+ closure_bio_submit(bio, cl);
}
continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -833,7 +842,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->cache_miss = miss;
s->iop.bio = cache_bio;
bio_get(cache_bio);
- closure_bio_submit(cache_bio, &s->cl, s->d);
+ closure_bio_submit(cache_bio, &s->cl);
return ret;
out_put:
@@ -841,7 +850,7 @@ out_put:
out_submit:
miss->bi_end_io = request_endio;
miss->bi_private = &s->cl;
- closure_bio_submit(miss, &s->cl, s->d);
+ closure_bio_submit(miss, &s->cl);
return ret;
}
@@ -906,7 +915,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
if (!(bio->bi_rw & REQ_DISCARD) ||
blk_queue_discard(bdev_get_queue(dc->bdev)))
- closure_bio_submit(bio, cl, s->d);
+ closure_bio_submit(bio, cl);
} else if (s->iop.writeback) {
bch_writeback_add(dc);
s->iop.bio = bio;
@@ -921,12 +930,12 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
flush->bi_end_io = request_endio;
flush->bi_private = cl;
- closure_bio_submit(flush, cl, s->d);
+ closure_bio_submit(flush, cl);
}
} else {
s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
- closure_bio_submit(bio, cl, s->d);
+ closure_bio_submit(bio, cl);
}
closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
@@ -942,14 +951,15 @@ static void cached_dev_nodata(struct closure *cl)
bch_journal_meta(s->iop.c, cl);
/* If it's a flush, we send the flush to the backing device too */
- closure_bio_submit(bio, cl, s->d);
+ closure_bio_submit(bio, cl);
continue_at(cl, cached_dev_bio_complete, NULL);
}
/* Cached devices - read & write stuff */
-static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t cached_dev_make_request(struct request_queue *q,
+ struct bio *bio)
{
struct search *s;
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
@@ -984,10 +994,12 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
} else {
if ((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(dc->bdev)))
- bio_endio(bio, 0);
+ bio_endio(bio);
else
- bch_generic_make_request(bio, &d->bio_split_hook);
+ generic_make_request(bio);
}
+
+ return BLK_QC_T_NONE;
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
@@ -1061,7 +1073,8 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}
-static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t flash_dev_make_request(struct request_queue *q,
+ struct bio *bio)
{
struct search *s;
struct closure *cl;
@@ -1084,6 +1097,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
bcache_wq);
+ return BLK_QC_T_NONE;
} else if (rw) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1099,6 +1113,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
}
continue_at(cl, search_free, NULL);
+ return BLK_QC_T_NONE;
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
diff --git a/kernel/drivers/md/bcache/super.c b/kernel/drivers/md/bcache/super.c
index 4dd2bb716..8d0ead98e 100644
--- a/kernel/drivers/md/bcache/super.c
+++ b/kernel/drivers/md/bcache/super.c
@@ -59,29 +59,6 @@ struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
-static void bio_split_pool_free(struct bio_split_pool *p)
-{
- if (p->bio_split_hook)
- mempool_destroy(p->bio_split_hook);
-
- if (p->bio_split)
- bioset_free(p->bio_split);
-}
-
-static int bio_split_pool_init(struct bio_split_pool *p)
-{
- p->bio_split = bioset_create(4, 0);
- if (!p->bio_split)
- return -ENOMEM;
-
- p->bio_split_hook = mempool_create_kmalloc_pool(4,
- sizeof(struct bio_split_hook));
- if (!p->bio_split_hook)
- return -ENOMEM;
-
- return 0;
-}
-
/* Superblock */
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
@@ -221,7 +198,7 @@ err:
return err;
}
-static void write_bdev_super_endio(struct bio *bio, int error)
+static void write_bdev_super_endio(struct bio *bio)
{
struct cached_dev *dc = bio->bi_private;
/* XXX: error checking */
@@ -290,11 +267,11 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
-static void write_super_endio(struct bio *bio, int error)
+static void write_super_endio(struct bio *bio)
{
struct cache *ca = bio->bi_private;
- bch_count_io_errors(ca, error, "writing superblock");
+ bch_count_io_errors(ca, bio->bi_error, "writing superblock");
closure_put(&ca->set->sb_write);
}
@@ -339,12 +316,12 @@ void bcache_write_super(struct cache_set *c)
/* UUID io */
-static void uuid_endio(struct bio *bio, int error)
+static void uuid_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
- cache_set_err_on(error, c, "accessing uuids");
+ cache_set_err_on(bio->bi_error, c, "accessing uuids");
bch_bbio_free(bio, c);
closure_put(cl);
}
@@ -512,11 +489,11 @@ static struct uuid_entry *uuid_find_empty(struct cache_set *c)
* disk.
*/
-static void prio_endio(struct bio *bio, int error)
+static void prio_endio(struct bio *bio)
{
struct cache *ca = bio->bi_private;
- cache_set_err_on(error, ca->set, "accessing priorities");
+ cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
bch_bbio_free(bio, ca->set);
closure_put(&ca->prio);
}
@@ -537,7 +514,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
bio->bi_private = ca;
bch_bio_map(bio, ca->disk_buckets);
- closure_bio_submit(bio, &ca->prio, ca);
+ closure_bio_submit(bio, &ca->prio);
closure_sync(cl);
}
@@ -708,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
sysfs_create_link(&c->kobj, &d->kobj, d->name),
"Couldn't create device <-> cache set symlinks");
+
+ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}
static void bcache_device_detach(struct bcache_device *d)
@@ -757,17 +736,10 @@ static void bcache_device_free(struct bcache_device *d)
put_disk(d->disk);
}
- bio_split_pool_free(&d->bio_split_hook);
if (d->bio_split)
bioset_free(d->bio_split);
- if (is_vmalloc_addr(d->full_dirty_stripes))
- vfree(d->full_dirty_stripes);
- else
- kfree(d->full_dirty_stripes);
- if (is_vmalloc_addr(d->stripe_sectors_dirty))
- vfree(d->stripe_sectors_dirty);
- else
- kfree(d->stripe_sectors_dirty);
+ kvfree(d->full_dirty_stripes);
+ kvfree(d->stripe_sectors_dirty);
closure_debug_destroy(&d->cl);
}
@@ -810,7 +782,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
return minor;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- bio_split_pool_init(&d->bio_split_hook) ||
!(d->disk = alloc_disk(1))) {
ida_simple_remove(&bcache_minor, minor);
return -ENOMEM;
@@ -836,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
q->limits.max_segments = BIO_MAX_PAGES;
- q->limits.max_discard_sectors = UINT_MAX;
+ blk_queue_max_discard_sectors(q, UINT_MAX);
q->limits.discard_granularity = 512;
q->limits.io_min = block_size;
q->limits.logical_block_size = block_size;
@@ -878,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
buf[SB_LABEL_SIZE] = '\0';
env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
- if (atomic_xchg(&dc->running, 1))
+ if (atomic_xchg(&dc->running, 1)) {
+ kfree(env[1]);
+ kfree(env[2]);
return;
+ }
if (!d->c &&
BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
@@ -1799,8 +1773,6 @@ void bch_cache_release(struct kobject *kobj)
ca->set->cache[ca->sb.nr_this_dev] = NULL;
}
- bio_split_pool_free(&ca->bio_split_hook);
-
free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
kfree(ca->prio_buckets);
vfree(ca->buckets);
@@ -1845,8 +1817,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
ca->sb.nbuckets)) ||
!(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
2, GFP_KERNEL)) ||
- !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
- bio_split_pool_init(&ca->bio_split_hook))
+ !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
return -ENOMEM;
ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
@@ -1967,6 +1938,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
else
err = "device busy";
mutex_unlock(&bch_register_lock);
+ if (attr == &ksysfs_register_quiet)
+ goto out;
}
goto err;
}
@@ -2005,8 +1978,7 @@ out:
err_close:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
- if (attr != &ksysfs_register_quiet)
- pr_info("error opening %s: %s", path, err);
+ pr_info("error opening %s: %s", path, err);
ret = -EINVAL;
goto out;
}
@@ -2100,8 +2072,10 @@ static int __init bcache_init(void)
closure_debug_init();
bcache_major = register_blkdev(0, "bcache");
- if (bcache_major < 0)
+ if (bcache_major < 0) {
+ unregister_reboot_notifier(&reboot);
return bcache_major;
+ }
if (!(bcache_wq = create_workqueue("bcache")) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
diff --git a/kernel/drivers/md/bcache/util.h b/kernel/drivers/md/bcache/util.h
index 98df7572b..cf2cbc211 100644
--- a/kernel/drivers/md/bcache/util.h
+++ b/kernel/drivers/md/bcache/util.h
@@ -4,6 +4,7 @@
#include <linux/blkdev.h>
#include <linux/errno.h>
+#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/ratelimit.h>
@@ -52,10 +53,7 @@ struct closure;
#define free_heap(heap) \
do { \
- if (is_vmalloc_addr((heap)->data)) \
- vfree((heap)->data); \
- else \
- kfree((heap)->data); \
+ kvfree((heap)->data); \
(heap)->data = NULL; \
} while (0)
@@ -163,10 +161,7 @@ do { \
#define free_fifo(fifo) \
do { \
- if (is_vmalloc_addr((fifo)->data)) \
- vfree((fifo)->data); \
- else \
- kfree((fifo)->data); \
+ kvfree((fifo)->data); \
(fifo)->data = NULL; \
} while (0)
@@ -576,10 +571,10 @@ static inline sector_t bdev_sectors(struct block_device *bdev)
return bdev->bd_inode->i_size >> 9;
}
-#define closure_bio_submit(bio, cl, dev) \
+#define closure_bio_submit(bio, cl) \
do { \
closure_get(cl); \
- bch_generic_make_request(bio, &(dev)->bio_split_hook); \
+ generic_make_request(bio); \
} while (0)
uint64_t bch_crc64_update(uint64_t, const void *, size_t);
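
The closure_bio_submit() change above, together with the removal of bch_generic_make_request() and the bio_split_pool machinery earlier in this patch, appears to track the block layer learning to split arbitrarily sized bios itself, so bcache no longer needs its own splitting hook. A minimal sketch of what the simplified macro now amounts to (illustrative only):

/* Illustrative only: what the new closure_bio_submit() expands to. */
static void example_submit(struct bio *bio, struct closure *cl)
{
        closure_get(cl);                /* dropped again by the bio's endio */
        generic_make_request(bio);      /* block layer splits the bio if needed */
}
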
diff --git a/kernel/drivers/md/bcache/writeback.c b/kernel/drivers/md/bcache/writeback.c
index f1986bcd1..b9346cd9c 100644
--- a/kernel/drivers/md/bcache/writeback.c
+++ b/kernel/drivers/md/bcache/writeback.c
@@ -166,12 +166,12 @@ static void write_dirty_finish(struct closure *cl)
closure_return_with_destructor(cl, dirty_io_destructor);
}
-static void dirty_endio(struct bio *bio, int error)
+static void dirty_endio(struct bio *bio)
{
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
- if (error)
+ if (bio->bi_error)
SET_KEY_DIRTY(&w->key, false);
closure_put(&io->cl);
@@ -188,27 +188,27 @@ static void write_dirty(struct closure *cl)
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
- closure_bio_submit(&io->bio, cl, &io->dc->disk);
+ closure_bio_submit(&io->bio, cl);
continue_at(cl, write_dirty_finish, system_wq);
}
-static void read_dirty_endio(struct bio *bio, int error)
+static void read_dirty_endio(struct bio *bio)
{
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
- error, "reading dirty data from cache");
+ bio->bi_error, "reading dirty data from cache");
- dirty_endio(bio, error);
+ dirty_endio(bio);
}
static void read_dirty_submit(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
- closure_bio_submit(&io->bio, cl, &io->dc->disk);
+ closure_bio_submit(&io->bio, cl);
continue_at(cl, write_dirty, system_wq);
}
@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
+ struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
+
+ BUG_ON(KEY_INODE(k) != dc->disk.id);
+
return KEY_DIRTY(k);
}
@@ -372,11 +376,24 @@ next:
}
}
+/*
+ * Returns true if we scanned the entire disk
+ */
static bool refill_dirty(struct cached_dev *dc)
{
struct keybuf *buf = &dc->writeback_keys;
+ struct bkey start = KEY(dc->disk.id, 0, 0);
struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
- bool searched_from_start = false;
+ struct bkey start_pos;
+
+ /*
+ * make sure keybuf pos is inside the range for this disk - at bringup
+ * we might not be attached yet so this disk's inode nr isn't
+ * initialized then
+ */
+ if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
+ bkey_cmp(&buf->last_scanned, &end) > 0)
+ buf->last_scanned = start;
if (dc->partial_stripes_expensive) {
refill_full_stripes(dc);
@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
return false;
}
- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
- buf->last_scanned = KEY(dc->disk.id, 0, 0);
- searched_from_start = true;
- }
-
+ start_pos = buf->last_scanned;
bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
- return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+ if (bkey_cmp(&buf->last_scanned, &end) < 0)
+ return false;
+
+ /*
+ * If we get to the end start scanning again from the beginning, and
+ * only scan up to where we initially started scanning from:
+ */
+ buf->last_scanned = start;
+ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
+
+ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
static int bch_writeback_thread(void *arg)
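
The reworked refill_dirty() above scans from the keybuf's last position to the end of the device and, if it reaches the end before the keybuf fills, wraps around and rescans from the start of the device up to the position where it began. A simplified sketch of that two-pass shape (illustrative only; scan_range() is an invented stand-in for bch_refill_keybuf()):

/* Illustrative only: returns true once the whole device has been scanned. */
static bool example_refill(struct keybuf *buf, struct bkey *start,
                           struct bkey *end)
{
        struct bkey resume_at = buf->last_scanned;

        scan_range(buf, end);                   /* last_scanned -> end */
        if (bkey_cmp(&buf->last_scanned, end) < 0)
                return false;                   /* keybuf filled up early */

        buf->last_scanned = *start;
        scan_range(buf, &resume_at);            /* wrap: start -> old position */

        return bkey_cmp(&buf->last_scanned, &resume_at) >= 0;
}
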
diff --git a/kernel/drivers/md/bcache/writeback.h b/kernel/drivers/md/bcache/writeback.h
index 0a9dab187..073a042ae 100644
--- a/kernel/drivers/md/bcache/writeback.h
+++ b/kernel/drivers/md/bcache/writeback.h
@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
static inline void bch_writeback_queue(struct cached_dev *dc)
{
- wake_up_process(dc->writeback_thread);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread))
+ wake_up_process(dc->writeback_thread);
}
static inline void bch_writeback_add(struct cached_dev *dc)
diff --git a/kernel/drivers/md/bitmap.c b/kernel/drivers/md/bitmap.c
index c90118e90..4f22e9197 100644
--- a/kernel/drivers/md/bitmap.c
+++ b/kernel/drivers/md/bitmap.c
@@ -559,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
unsigned long sectors_reserved = 0;
int err = -EINVAL;
struct page *sb_page;
+ loff_t offset = bitmap->mddev->bitmap_info.offset;
if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
chunksize = 128 * 1024 * 1024;
@@ -585,9 +586,9 @@ re_read:
bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
/* to 4k blocks */
bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
- bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+ offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
- bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+ bitmap->cluster_slot, offset);
}
if (bitmap->storage.file) {
@@ -598,7 +599,7 @@ re_read:
bitmap, bytes, sb_page);
} else {
err = read_sb_page(bitmap->mddev,
- bitmap->mddev->bitmap_info.offset,
+ offset,
sb_page,
0, sizeof(bitmap_super_t));
}
@@ -612,12 +613,10 @@ re_read:
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
- /* XXX: This is a hack to ensure that we don't use clustering
- * in case:
- * - dm-raid is in use and
- * - the nodes written in bitmap_sb is erroneous.
+ /* Setup nodes/clustername only if bitmap version is
+ * cluster-compatible
*/
- if (!bitmap->mddev->sync_super) {
+ if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
nodes = le32_to_cpu(sb->nodes);
strlcpy(bitmap->mddev->bitmap_info.cluster_name,
sb->cluster_name, 64);
@@ -627,7 +626,7 @@ re_read:
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
reason = "bad magic";
else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
- le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
+ le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
reason = "unrecognized superblock version";
else if (chunksize < 512)
reason = "bitmap chunksize too small";
@@ -680,7 +679,7 @@ out:
kunmap_atomic(sb);
/* Assiging chunksize is required for "re_read" */
bitmap->mddev->bitmap_info.chunksize = chunksize;
- if (nodes && (bitmap->cluster_slot < 0)) {
+ if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
pr_err("%s: Could not setup cluster service (%d)\n",
@@ -848,7 +847,7 @@ static void bitmap_file_kick(struct bitmap *bitmap)
if (bitmap->storage.file) {
path = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (path)
- ptr = d_path(&bitmap->storage.file->f_path,
+ ptr = file_path(bitmap->storage.file,
path, PAGE_SIZE);
printk(KERN_ALERT
@@ -1571,7 +1570,7 @@ void bitmap_close_sync(struct bitmap *bitmap)
}
EXPORT_SYMBOL(bitmap_close_sync);
-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
{
sector_t s = 0;
sector_t blocks;
@@ -1582,7 +1581,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
bitmap->last_end_sync = jiffies;
return;
}
- if (time_before(jiffies, (bitmap->last_end_sync
+ if (!force && time_before(jiffies, (bitmap->last_end_sync
+ bitmap->mddev->bitmap_info.daemon_sleep)))
return;
wait_event(bitmap->mddev->recovery_wait,
@@ -1875,10 +1874,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
if (IS_ERR(bitmap))
return PTR_ERR(bitmap);
- rv = bitmap_read_sb(bitmap);
- if (rv)
- goto err;
-
rv = bitmap_init_from_disk(bitmap, 0);
if (rv)
goto err;
@@ -1936,7 +1931,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
chunk_kb ? "KB" : "B");
if (bitmap->storage.file) {
seq_printf(seq, ", file: ");
- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
+ seq_file_path(seq, bitmap->storage.file, " \t\n");
}
seq_printf(seq, "\n");
@@ -2000,7 +1995,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
ret = bitmap_storage_alloc(&store, chunks,
!bitmap->mddev->bitmap_info.external,
- bitmap->cluster_slot);
+ mddev_is_clustered(bitmap->mddev)
+ ? bitmap->cluster_slot : 0);
if (ret)
goto err;
diff --git a/kernel/drivers/md/bitmap.h b/kernel/drivers/md/bitmap.h
index f1f4dd010..7d5c3a610 100644
--- a/kernel/drivers/md/bitmap.h
+++ b/kernel/drivers/md/bitmap.h
@@ -9,8 +9,10 @@
#define BITMAP_MAJOR_LO 3
/* version 4 insists the bitmap is in little-endian order
* with version 3, it is host-endian which is non-portable
+ * Version 5 is currently set only for clustered devices
*/
#define BITMAP_MAJOR_HI 4
+#define BITMAP_MAJOR_CLUSTERED 5
#define BITMAP_MAJOR_HOSTENDIAN 3
/*
@@ -255,7 +257,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
void bitmap_close_sync(struct bitmap *bitmap);
-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
void bitmap_unplug(struct bitmap *bitmap);
void bitmap_daemon_work(struct mddev *mddev);
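
The new BITMAP_MAJOR_CLUSTERED value above pairs with the bitmap.c hunk that only reads the nodes/cluster_name fields when the on-disk superblock declares version 5. A tiny sketch of that gate (illustrative only; the helper name is invented):

/* Illustrative only: cluster fields are trusted only for version-5 bitmaps. */
static bool example_bitmap_is_clustered(bitmap_super_t *sb)
{
        return sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED);
}
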
diff --git a/kernel/drivers/md/dm-bio-prison.c b/kernel/drivers/md/dm-bio-prison.c
index be065300e..03af17448 100644
--- a/kernel/drivers/md/dm-bio-prison.c
+++ b/kernel/drivers/md/dm-bio-prison.c
@@ -236,8 +236,10 @@ void dm_cell_error(struct dm_bio_prison *prison,
bio_list_init(&bios);
dm_cell_release(prison, cell, &bios);
- while ((bio = bio_list_pop(&bios)))
- bio_endio(bio, error);
+ while ((bio = bio_list_pop(&bios))) {
+ bio->bi_error = error;
+ bio_endio(bio);
+ }
}
EXPORT_SYMBOL_GPL(dm_cell_error);
@@ -255,6 +257,32 @@ void dm_cell_visit_release(struct dm_bio_prison *prison,
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);
+static int __promote_or_release(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell)
+{
+ if (bio_list_empty(&cell->bios)) {
+ rb_erase(&cell->node, &prison->cells);
+ return 1;
+ }
+
+ cell->holder = bio_list_pop(&cell->bios);
+ return 0;
+}
+
+int dm_cell_promote_or_release(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prison->lock, flags);
+ r = __promote_or_release(prison, cell);
+ spin_unlock_irqrestore(&prison->lock, flags);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
+
/*----------------------------------------------------------------*/
#define DEFERRED_SET_SIZE 64
diff --git a/kernel/drivers/md/dm-bio-prison.h b/kernel/drivers/md/dm-bio-prison.h
index 74cf01144..54352f009 100644
--- a/kernel/drivers/md/dm-bio-prison.h
+++ b/kernel/drivers/md/dm-bio-prison.h
@@ -101,6 +101,19 @@ void dm_cell_visit_release(struct dm_bio_prison *prison,
void (*visit_fn)(void *, struct dm_bio_prison_cell *),
void *context, struct dm_bio_prison_cell *cell);
+/*
+ * Rather than always releasing the prisoners in a cell, the client may
+ * want to promote one of them to be the new holder. There is a race here
+ * though between releasing an empty cell, and other threads adding new
+ * inmates. So this function makes the decision with its lock held.
+ *
+ * This function can have two outcomes:
+ * i) An inmate is promoted to be the holder of the cell (return value of 0).
+ * ii) The cell has no inmate for promotion and is released (return value of 1).
+ */
+int dm_cell_promote_or_release(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell);
+
/*----------------------------------------------------------------*/
/*
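
The comment above gives dm_cell_promote_or_release() exactly two outcomes, which leads to a caller pattern like the following sketch (illustrative only; free_prison_cell() and issue_deferred_bio() are invented stand-ins for whatever the client target does in each case):

static void example_finish_cell(struct dm_bio_prison *prison,
                                struct dm_bio_prison_cell *cell)
{
        if (dm_cell_promote_or_release(prison, cell)) {
                /* return 1: the cell was empty and has been released */
                free_prison_cell(cell);                 /* invented helper */
        } else {
                /* return 0: cell->holder is now the next bio to service */
                issue_deferred_bio(cell->holder);       /* invented helper */
        }
}
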
diff --git a/kernel/drivers/md/dm-bufio.c b/kernel/drivers/md/dm-bufio.c
index 86dbbc737..2dd33085b 100644
--- a/kernel/drivers/md/dm-bufio.c
+++ b/kernel/drivers/md/dm-bufio.c
@@ -545,7 +545,8 @@ static void dmio_complete(unsigned long error, void *context)
{
struct dm_buffer *b = context;
- b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
+ b->bio.bi_error = error ? -EIO : 0;
+ b->bio.bi_end_io(&b->bio);
}
static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
@@ -575,13 +576,16 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
b->bio.bi_end_io = end_io;
r = dm_io(&io_req, 1, &region, NULL);
- if (r)
- end_io(&b->bio, r);
+ if (r) {
+ b->bio.bi_error = r;
+ end_io(&b->bio);
+ }
}
-static void inline_endio(struct bio *bio, int error)
+static void inline_endio(struct bio *bio)
{
bio_end_io_t *end_fn = bio->bi_private;
+ int error = bio->bi_error;
/*
* Reset the bio to free any attached resources
@@ -589,7 +593,8 @@ static void inline_endio(struct bio *bio, int error)
*/
bio_reset(bio);
- end_fn(bio, error);
+ bio->bi_error = error;
+ end_fn(bio);
}
static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
@@ -661,13 +666,14 @@ static void submit_io(struct dm_buffer *b, int rw, sector_t block,
* Set the error, clear B_WRITING bit and wake anyone who was waiting on
* it.
*/
-static void write_endio(struct bio *bio, int error)
+static void write_endio(struct bio *bio)
{
struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
- b->write_error = error;
- if (unlikely(error)) {
+ b->write_error = bio->bi_error;
+ if (unlikely(bio->bi_error)) {
struct dm_bufio_client *c = b->c;
+ int error = bio->bi_error;
(void)cmpxchg(&c->async_write_error, 0, error);
}
@@ -1026,11 +1032,11 @@ found_buffer:
* The endio routine for reading: set the error, clear the bit and wake up
* anyone waiting on the buffer.
*/
-static void read_endio(struct bio *bio, int error)
+static void read_endio(struct bio *bio)
{
struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
- b->read_error = error;
+ b->read_error = bio->bi_error;
BUG_ON(!test_bit(B_READING, &b->state));
@@ -1592,11 +1598,11 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->bdev = bdev;
c->block_size = block_size;
- c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
- c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
- ffs(block_size) - 1 - PAGE_SHIFT : 0;
- c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
- PAGE_SHIFT - (ffs(block_size) - 1) : 0);
+ c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
+ c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
+ __ffs(block_size) - PAGE_SHIFT : 0;
+ c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
+ PAGE_SHIFT - __ffs(block_size) : 0);
c->aux_size = aux_size;
c->alloc_callback = alloc_callback;
@@ -1855,12 +1861,8 @@ static void __exit dm_bufio_exit(void)
cancel_delayed_work_sync(&dm_bufio_work);
destroy_workqueue(dm_bufio_wq);
- for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
- struct kmem_cache *kc = dm_bufio_caches[i];
-
- if (kc)
- kmem_cache_destroy(kc);
- }
+ for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
+ kmem_cache_destroy(dm_bufio_caches[i]);
for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
kfree(dm_bufio_cache_names[i]);
diff --git a/kernel/drivers/md/dm-cache-metadata.c b/kernel/drivers/md/dm-cache-metadata.c
index c1c010498..f6543f3a9 100644
--- a/kernel/drivers/md/dm-cache-metadata.c
+++ b/kernel/drivers/md/dm-cache-metadata.c
@@ -39,6 +39,8 @@
enum superblock_flag_bits {
/* for spotting crashes that would invalidate the dirty bitset */
CLEAN_SHUTDOWN,
+ /* metadata must be checked using the tools */
+ NEEDS_CHECK,
};
/*
@@ -107,6 +109,7 @@ struct dm_cache_metadata {
struct dm_disk_bitset discard_info;
struct rw_semaphore root_lock;
+ unsigned long flags;
dm_block_t root;
dm_block_t hint_root;
dm_block_t discard_root;
@@ -129,6 +132,14 @@ struct dm_cache_metadata {
* buffer before the superblock is locked and updated.
*/
__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+
+ /*
+ * Set if a transaction has to be aborted but the attempt to roll
+ * back to the previous (good) transaction failed. The only
+ * metadata operation permissible in this state is the closing of
+ * the device.
+ */
+ bool fail_io:1;
};
/*-------------------------------------------------------------------
@@ -249,7 +260,9 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
}
}
- return dm_bm_unlock(b);
+ dm_bm_unlock(b);
+
+ return 0;
}
static void __setup_mapping_info(struct dm_cache_metadata *cmd)
@@ -454,7 +467,9 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
sb_flags = le32_to_cpu(disk_super->flags);
cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
- return dm_bm_unlock(sblock);
+ dm_bm_unlock(sblock);
+
+ return 0;
bad:
dm_bm_unlock(sblock);
@@ -527,6 +542,7 @@ static unsigned long clear_clean_shutdown(unsigned long flags)
static void read_superblock_fields(struct dm_cache_metadata *cmd,
struct cache_disk_superblock *disk_super)
{
+ cmd->flags = le32_to_cpu(disk_super->flags);
cmd->root = le64_to_cpu(disk_super->mapping_root);
cmd->hint_root = le64_to_cpu(disk_super->hint_root);
cmd->discard_root = le64_to_cpu(disk_super->discard_root);
@@ -622,6 +638,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
disk_super = dm_block_data(sblock);
+ disk_super->flags = cpu_to_le32(cmd->flags);
if (mutator)
update_flags(disk_super, mutator);
@@ -693,6 +710,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
cmd->cache_blocks = 0;
cmd->policy_hint_size = policy_hint_size;
cmd->changed = true;
+ cmd->fail_io = false;
r = __create_persistent_data_objects(cmd, may_format_device);
if (r) {
@@ -796,7 +814,8 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
list_del(&cmd->list);
mutex_unlock(&table_lock);
- __destroy_persistent_data_objects(cmd);
+ if (!cmd->fail_io)
+ __destroy_persistent_data_objects(cmd);
kfree(cmd);
}
}
@@ -848,13 +867,26 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
+#define WRITE_LOCK(cmd) \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+ return -EINVAL; \
+ down_write(&cmd->root_lock)
+
+#define WRITE_LOCK_VOID(cmd) \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+ return; \
+ down_write(&cmd->root_lock)
+
+#define WRITE_UNLOCK(cmd) \
+ up_write(&cmd->root_lock)
+
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
int r;
bool clean;
__le64 null_mapping = pack_value(0, 0);
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
__dm_bless_for_disk(&null_mapping);
if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
@@ -880,7 +912,7 @@ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
cmd->changed = true;
out:
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
@@ -891,7 +923,7 @@ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
{
int r;
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
r = dm_bitset_resize(&cmd->discard_info,
cmd->discard_root,
from_dblock(cmd->discard_nr_blocks),
@@ -903,7 +935,7 @@ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
}
cmd->changed = true;
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
@@ -946,9 +978,9 @@ int dm_cache_set_discard(struct dm_cache_metadata *cmd,
{
int r;
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
r = __discard(cmd, dblock, discard);
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
@@ -1020,9 +1052,9 @@ int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
{
int r;
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
r = __remove(cmd, cblock);
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
@@ -1048,9 +1080,9 @@ int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
{
int r;
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
r = __insert(cmd, cblock, oblock);
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
@@ -1234,9 +1266,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
{
int r;
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
r = __dirty(cmd, cblock, dirty);
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
@@ -1252,9 +1284,9 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
- down_write(&cmd->root_lock);
+ WRITE_LOCK_VOID(cmd);
cmd->stats = *stats;
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
}
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
@@ -1263,7 +1295,7 @@ int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
clear_clean_shutdown);
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
r = __commit_transaction(cmd, mutator);
if (r)
goto out;
@@ -1271,7 +1303,7 @@ int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
r = __begin_transaction(cmd);
out:
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
@@ -1376,9 +1408,9 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
{
int r;
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
r = write_hints(cmd, policy);
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
@@ -1387,3 +1419,70 @@ int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
}
+
+void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
+{
+ WRITE_LOCK_VOID(cmd);
+ dm_bm_set_read_only(cmd->bm);
+ WRITE_UNLOCK(cmd);
+}
+
+void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
+{
+ WRITE_LOCK_VOID(cmd);
+ dm_bm_set_read_write(cmd->bm);
+ WRITE_UNLOCK(cmd);
+}
+
+int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct dm_block *sblock;
+ struct cache_disk_superblock *disk_super;
+
+ /*
+ * We ignore fail_io for this function.
+ */
+ down_write(&cmd->root_lock);
+ set_bit(NEEDS_CHECK, &cmd->flags);
+
+ r = superblock_lock(cmd, &sblock);
+ if (r) {
+ DMERR("couldn't read superblock");
+ goto out;
+ }
+
+ disk_super = dm_block_data(sblock);
+ disk_super->flags = cpu_to_le32(cmd->flags);
+
+ dm_bm_unlock(sblock);
+
+out:
+ up_write(&cmd->root_lock);
+ return r;
+}
+
+bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
+{
+ bool needs_check;
+
+ down_read(&cmd->root_lock);
+ needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
+ up_read(&cmd->root_lock);
+
+ return needs_check;
+}
+
+int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
+{
+ int r;
+
+ WRITE_LOCK(cmd);
+ __destroy_persistent_data_objects(cmd);
+ r = __create_persistent_data_objects(cmd, false);
+ if (r)
+ cmd->fail_io = true;
+ WRITE_UNLOCK(cmd);
+
+ return r;
+}
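
The fail_io flag and the WRITE_LOCK()/WRITE_UNLOCK() macros introduced above give every mutating metadata entry point the same shape: refuse the operation outright once the metadata is read-only or a failed abort has poisoned the device. A minimal sketch of that pattern (illustrative only; the placeholder comment marks where a real update would go):

/* Illustrative only: the guard pattern used by the metadata mutators above. */
static int example_metadata_update(struct dm_cache_metadata *cmd)
{
        int r;

        WRITE_LOCK(cmd);        /* returns -EINVAL if fail_io or read-only */
        r = 0;                  /* ... real callers update btrees/bitsets here ... */
        WRITE_UNLOCK(cmd);

        return r;
}
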
diff --git a/kernel/drivers/md/dm-cache-metadata.h b/kernel/drivers/md/dm-cache-metadata.h
index 4ecc403be..2ffee21f3 100644
--- a/kernel/drivers/md/dm-cache-metadata.h
+++ b/kernel/drivers/md/dm-cache-metadata.h
@@ -102,6 +102,10 @@ struct dm_cache_statistics {
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats);
+
+/*
+ * 'void' because it's no big deal if it fails.
+ */
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats);
@@ -133,6 +137,12 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
*/
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
+bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
+int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
+void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
+void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
+int dm_cache_metadata_abort(struct dm_cache_metadata *cmd);
+
/*----------------------------------------------------------------*/
#endif /* DM_CACHE_METADATA_H */
diff --git a/kernel/drivers/md/dm-cache-policy-cleaner.c b/kernel/drivers/md/dm-cache-policy-cleaner.c
index 004e463c9..14aaaf059 100644
--- a/kernel/drivers/md/dm-cache-policy-cleaner.c
+++ b/kernel/drivers/md/dm-cache-policy-cleaner.c
@@ -83,7 +83,7 @@ static struct list_head *list_pop(struct list_head *q)
static int alloc_hash(struct hash *hash, unsigned elts)
{
hash->nr_buckets = next_power(elts >> 4, 16);
- hash->hash_bits = ffs(hash->nr_buckets) - 1;
+ hash->hash_bits = __ffs(hash->nr_buckets);
hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
return hash->table ? 0 : -ENOMEM;
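
Both ffs() - 1 and __ffs() give log2 of the bucket count here because next_power() only ever returns a power of two; __ffs() simply avoids the off-by-one. A minimal userspace sketch of that equivalence (GCC's __builtin_ctzl stands in for the kernel's __ffs, which is an assumption, not the kernel implementation):

#include <assert.h>
#include <stdio.h>
#include <strings.h>   /* ffs() */

/* Userspace stand-in for the kernel's __ffs(): index of the lowest set bit. */
static unsigned my_ffs0(unsigned long x)
{
    return (unsigned)__builtin_ctzl(x);
}

int main(void)
{
    /* For power-of-two bucket counts, __ffs(n) == ffs(n) - 1 == log2(n). */
    for (unsigned long n = 16; n <= 65536; n <<= 1) {
        unsigned hash_bits = my_ffs0(n);
        assert(hash_bits == (unsigned)(ffs((int)n) - 1));
        printf("nr_buckets=%6lu -> hash_bits=%u\n", n, hash_bits);
    }
    return 0;
}
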
@@ -359,7 +359,8 @@ static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
static int wb_writeback_work(struct dm_cache_policy *pe,
dm_oblock_t *oblock,
- dm_cblock_t *cblock)
+ dm_cblock_t *cblock,
+ bool critical_only)
{
int r = -ENOENT;
struct policy *p = to_policy(pe);
@@ -435,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
static struct dm_cache_policy_type wb_policy_type = {
.name = "cleaner",
.version = {1, 0, 0},
- .hint_size = 0,
+ .hint_size = 4,
.owner = THIS_MODULE,
.create = wb_create
};
diff --git a/kernel/drivers/md/dm-cache-policy-internal.h b/kernel/drivers/md/dm-cache-policy-internal.h
index c198e6def..2816018fa 100644
--- a/kernel/drivers/md/dm-cache-policy-internal.h
+++ b/kernel/drivers/md/dm-cache-policy-internal.h
@@ -7,6 +7,7 @@
#ifndef DM_CACHE_POLICY_INTERNAL_H
#define DM_CACHE_POLICY_INTERNAL_H
+#include <linux/vmalloc.h>
#include "dm-cache-policy.h"
/*----------------------------------------------------------------*/
@@ -55,9 +56,10 @@ static inline int policy_walk_mappings(struct dm_cache_policy *p,
static inline int policy_writeback_work(struct dm_cache_policy *p,
dm_oblock_t *oblock,
- dm_cblock_t *cblock)
+ dm_cblock_t *cblock,
+ bool critical_only)
{
- return p->writeback_work ? p->writeback_work(p, oblock, cblock) : -ENOENT;
+ return p->writeback_work ? p->writeback_work(p, oblock, cblock, critical_only) : -ENOENT;
}
static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
@@ -81,19 +83,21 @@ static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
return p->residency(p);
}
-static inline void policy_tick(struct dm_cache_policy *p)
+static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
{
if (p->tick)
- return p->tick(p);
+ return p->tick(p, can_block);
}
-static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
+ unsigned maxlen, ssize_t *sz_ptr)
{
- ssize_t sz = 0;
+ ssize_t sz = *sz_ptr;
if (p->emit_config_values)
- return p->emit_config_values(p, result, maxlen);
+ return p->emit_config_values(p, result, maxlen, sz_ptr);
- DMEMIT("0");
+ DMEMIT("0 ");
+ *sz_ptr = sz;
return 0;
}
@@ -106,6 +110,33 @@ static inline int policy_set_config_value(struct dm_cache_policy *p,
/*----------------------------------------------------------------*/
/*
+ * Some utility functions commonly used by policies and the core target.
+ */
+static inline size_t bitset_size_in_bytes(unsigned nr_entries)
+{
+ return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+}
+
+static inline unsigned long *alloc_bitset(unsigned nr_entries)
+{
+ size_t s = bitset_size_in_bytes(nr_entries);
+ return vzalloc(s);
+}
+
+static inline void clear_bitset(void *bitset, unsigned nr_entries)
+{
+ size_t s = bitset_size_in_bytes(nr_entries);
+ memset(bitset, 0, s);
+}
+
+static inline void free_bitset(unsigned long *bits)
+{
+ vfree(bits);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
* Creates a new cache policy given a policy name, a cache size, an origin size and the block size.
*/
struct dm_cache_policy *dm_cache_policy_create(const char *name, dm_cblock_t cache_size,
diff --git a/kernel/drivers/md/dm-cache-policy-mq.c b/kernel/drivers/md/dm-cache-policy-mq.c
index 515d44bf2..ddb26980c 100644
--- a/kernel/drivers/md/dm-cache-policy-mq.c
+++ b/kernel/drivers/md/dm-cache-policy-mq.c
@@ -1236,7 +1236,7 @@ static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
}
static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
- dm_cblock_t *cblock)
+ dm_cblock_t *cblock, bool critical_only)
{
int r;
struct mq_policy *mq = to_mq_policy(p);
@@ -1283,7 +1283,7 @@ static dm_cblock_t mq_residency(struct dm_cache_policy *p)
return r;
}
-static void mq_tick(struct dm_cache_policy *p)
+static void mq_tick(struct dm_cache_policy *p, bool can_block)
{
struct mq_policy *mq = to_mq_policy(p);
unsigned long flags;
@@ -1291,6 +1291,12 @@ static void mq_tick(struct dm_cache_policy *p)
spin_lock_irqsave(&mq->tick_lock, flags);
mq->tick_protected++;
spin_unlock_irqrestore(&mq->tick_lock, flags);
+
+ if (can_block) {
+ mutex_lock(&mq->lock);
+ copy_tick(mq);
+ mutex_unlock(&mq->lock);
+ }
}
static int mq_set_config_value(struct dm_cache_policy *p,
@@ -1323,22 +1329,24 @@ static int mq_set_config_value(struct dm_cache_policy *p,
return 0;
}
-static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
+ unsigned maxlen, ssize_t *sz_ptr)
{
- ssize_t sz = 0;
+ ssize_t sz = *sz_ptr;
struct mq_policy *mq = to_mq_policy(p);
DMEMIT("10 random_threshold %u "
"sequential_threshold %u "
"discard_promote_adjustment %u "
"read_promote_adjustment %u "
- "write_promote_adjustment %u",
+ "write_promote_adjustment %u ",
mq->tracker.thresholds[PATTERN_RANDOM],
mq->tracker.thresholds[PATTERN_SEQUENTIAL],
mq->discard_promote_adjustment,
mq->read_promote_adjustment,
mq->write_promote_adjustment);
+ *sz_ptr = sz;
return 0;
}
@@ -1402,7 +1410,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
- mq->hash_bits = ffs(mq->nr_buckets) - 1;
+ mq->hash_bits = __ffs(mq->nr_buckets);
mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
if (!mq->table)
goto bad_alloc_table;
@@ -1423,21 +1431,12 @@ bad_pre_cache_init:
static struct dm_cache_policy_type mq_policy_type = {
.name = "mq",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = mq_create
};
-static struct dm_cache_policy_type default_policy_type = {
- .name = "default",
- .version = {1, 3, 0},
- .hint_size = 4,
- .owner = THIS_MODULE,
- .create = mq_create,
- .real = &mq_policy_type
-};
-
static int __init mq_init(void)
{
int r;
@@ -1447,36 +1446,21 @@ static int __init mq_init(void)
__alignof__(struct entry),
0, NULL);
if (!mq_entry_cache)
- goto bad;
+ return -ENOMEM;
r = dm_cache_policy_register(&mq_policy_type);
if (r) {
DMERR("register failed %d", r);
- goto bad_register_mq;
- }
-
- r = dm_cache_policy_register(&default_policy_type);
- if (!r) {
- DMINFO("version %u.%u.%u loaded",
- mq_policy_type.version[0],
- mq_policy_type.version[1],
- mq_policy_type.version[2]);
- return 0;
+ kmem_cache_destroy(mq_entry_cache);
+ return -ENOMEM;
}
- DMERR("register failed (as default) %d", r);
-
- dm_cache_policy_unregister(&mq_policy_type);
-bad_register_mq:
- kmem_cache_destroy(mq_entry_cache);
-bad:
- return -ENOMEM;
+ return 0;
}
static void __exit mq_exit(void)
{
dm_cache_policy_unregister(&mq_policy_type);
- dm_cache_policy_unregister(&default_policy_type);
kmem_cache_destroy(mq_entry_cache);
}
@@ -1487,5 +1471,3 @@ module_exit(mq_exit);
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");
-
-MODULE_ALIAS("dm-cache-default");
diff --git a/kernel/drivers/md/dm-cache-policy-smq.c b/kernel/drivers/md/dm-cache-policy-smq.c
new file mode 100644
index 000000000..28d458674
--- /dev/null
+++ b/kernel/drivers/md/dm-cache-policy-smq.c
@@ -0,0 +1,1761 @@
+/*
+ * Copyright (C) 2015 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy.h"
+#include "dm-cache-policy-internal.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <linux/math64.h>
+
+#define DM_MSG_PREFIX "cache-policy-smq"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Safe division functions that return zero on divide by zero.
+ */
+static unsigned safe_div(unsigned n, unsigned d)
+{
+ return d ? n / d : 0u;
+}
+
+static unsigned safe_mod(unsigned n, unsigned d)
+{
+ return d ? n % d : 0u;
+}
+
+/*----------------------------------------------------------------*/
+
+struct entry {
+ unsigned hash_next:28;
+ unsigned prev:28;
+ unsigned next:28;
+ unsigned level:7;
+ bool dirty:1;
+ bool allocated:1;
+ bool sentinel:1;
+
+ dm_oblock_t oblock;
+};
+
+/*----------------------------------------------------------------*/
+
+#define INDEXER_NULL ((1u << 28u) - 1u)
+
+/*
+ * An entry_space manages a set of entries that we use for the queues.
+ * The clean and dirty queues share entries, so this object is separate
+ * from the queue itself.
+ */
+struct entry_space {
+ struct entry *begin;
+ struct entry *end;
+};
+
+static int space_init(struct entry_space *es, unsigned nr_entries)
+{
+ if (!nr_entries) {
+ es->begin = es->end = NULL;
+ return 0;
+ }
+
+ es->begin = vzalloc(sizeof(struct entry) * nr_entries);
+ if (!es->begin)
+ return -ENOMEM;
+
+ es->end = es->begin + nr_entries;
+ return 0;
+}
+
+static void space_exit(struct entry_space *es)
+{
+ vfree(es->begin);
+}
+
+static struct entry *__get_entry(struct entry_space *es, unsigned block)
+{
+ struct entry *e;
+
+ e = es->begin + block;
+ BUG_ON(e >= es->end);
+
+ return e;
+}
+
+static unsigned to_index(struct entry_space *es, struct entry *e)
+{
+ BUG_ON(e < es->begin || e >= es->end);
+ return e - es->begin;
+}
+
+static struct entry *to_entry(struct entry_space *es, unsigned block)
+{
+ if (block == INDEXER_NULL)
+ return NULL;
+
+ return __get_entry(es, block);
+}
+
+/*----------------------------------------------------------------*/
+
+struct ilist {
+ unsigned nr_elts; /* excluding sentinel entries */
+ unsigned head, tail;
+};
+
+static void l_init(struct ilist *l)
+{
+ l->nr_elts = 0;
+ l->head = l->tail = INDEXER_NULL;
+}
+
+static struct entry *l_head(struct entry_space *es, struct ilist *l)
+{
+ return to_entry(es, l->head);
+}
+
+static struct entry *l_tail(struct entry_space *es, struct ilist *l)
+{
+ return to_entry(es, l->tail);
+}
+
+static struct entry *l_next(struct entry_space *es, struct entry *e)
+{
+ return to_entry(es, e->next);
+}
+
+static struct entry *l_prev(struct entry_space *es, struct entry *e)
+{
+ return to_entry(es, e->prev);
+}
+
+static bool l_empty(struct ilist *l)
+{
+ return l->head == INDEXER_NULL;
+}
+
+static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
+{
+ struct entry *head = l_head(es, l);
+
+ e->next = l->head;
+ e->prev = INDEXER_NULL;
+
+ if (head)
+ head->prev = l->head = to_index(es, e);
+ else
+ l->head = l->tail = to_index(es, e);
+
+ if (!e->sentinel)
+ l->nr_elts++;
+}
+
+static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
+{
+ struct entry *tail = l_tail(es, l);
+
+ e->next = INDEXER_NULL;
+ e->prev = l->tail;
+
+ if (tail)
+ tail->next = l->tail = to_index(es, e);
+ else
+ l->head = l->tail = to_index(es, e);
+
+ if (!e->sentinel)
+ l->nr_elts++;
+}
+
+static void l_add_before(struct entry_space *es, struct ilist *l,
+ struct entry *old, struct entry *e)
+{
+ struct entry *prev = l_prev(es, old);
+
+ if (!prev)
+ l_add_head(es, l, e);
+
+ else {
+ e->prev = old->prev;
+ e->next = to_index(es, old);
+ prev->next = old->prev = to_index(es, e);
+
+ if (!e->sentinel)
+ l->nr_elts++;
+ }
+}
+
+static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
+{
+ struct entry *prev = l_prev(es, e);
+ struct entry *next = l_next(es, e);
+
+ if (prev)
+ prev->next = e->next;
+ else
+ l->head = e->next;
+
+ if (next)
+ next->prev = e->prev;
+ else
+ l->tail = e->prev;
+
+ if (!e->sentinel)
+ l->nr_elts--;
+}
+
+static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
+{
+ struct entry *e;
+
+ for (e = l_tail(es, l); e; e = l_prev(es, e))
+ if (!e->sentinel) {
+ l_del(es, l, e);
+ return e;
+ }
+
+ return NULL;
+}
+
+/*----------------------------------------------------------------*/
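
The lists above link entries by 28-bit index into the entry_space rather than by pointer, with INDEXER_NULL as the end marker, and sentinel entries can sit on a list without being counted in nr_elts. A compact userspace sketch of that index-linked list idea (array-backed, add-to-tail only, a plain unsigned as the null index; not the kernel structures):

#include <assert.h>
#include <stdio.h>

#define NIL ((unsigned)-1)            /* plays the role of INDEXER_NULL */

struct node {
    unsigned prev, next;
    int sentinel;                     /* sentinels never count toward nr_elts */
};

struct list {
    unsigned head, tail, nr_elts;
};

static struct node pool[8];           /* the "entry space": links are indices into this array */

static void list_init(struct list *l)
{
    l->head = l->tail = NIL;
    l->nr_elts = 0;
}

static void add_tail(struct list *l, unsigned i)
{
    pool[i].next = NIL;
    pool[i].prev = l->tail;
    if (l->tail != NIL)
        pool[l->tail].next = i;
    else
        l->head = i;
    l->tail = i;
    if (!pool[i].sentinel)
        l->nr_elts++;
}

int main(void)
{
    struct list l;

    list_init(&l);
    pool[0].sentinel = 1;             /* one sentinel, two real entries */
    add_tail(&l, 0);
    add_tail(&l, 1);
    add_tail(&l, 2);

    assert(l.nr_elts == 2);           /* the sentinel is excluded, as in ilist */
    for (unsigned i = l.head; i != NIL; i = pool[i].next)
        printf("index %u%s\n", i, pool[i].sentinel ? " (sentinel)" : "");
    return 0;
}
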
+
+/*
+ * The stochastic-multi-queue is a set of lru lists stacked into levels.
+ * Entries are moved up levels when they are used, which loosely orders the
+ * most accessed entries in the top levels and least in the bottom. This
+ * structure is *much* better than a single lru list.
+ */
+#define MAX_LEVELS 64u
+
+struct queue {
+ struct entry_space *es;
+
+ unsigned nr_elts;
+ unsigned nr_levels;
+ struct ilist qs[MAX_LEVELS];
+
+ /*
+ * We maintain a count of the number of entries we would like in each
+ * level.
+ */
+ unsigned last_target_nr_elts;
+ unsigned nr_top_levels;
+ unsigned nr_in_top_levels;
+ unsigned target_count[MAX_LEVELS];
+};
+
+static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
+{
+ unsigned i;
+
+ q->es = es;
+ q->nr_elts = 0;
+ q->nr_levels = nr_levels;
+
+ for (i = 0; i < q->nr_levels; i++) {
+ l_init(q->qs + i);
+ q->target_count[i] = 0u;
+ }
+
+ q->last_target_nr_elts = 0u;
+ q->nr_top_levels = 0u;
+ q->nr_in_top_levels = 0u;
+}
+
+static unsigned q_size(struct queue *q)
+{
+ return q->nr_elts;
+}
+
+/*
+ * Insert an entry to the back of the given level.
+ */
+static void q_push(struct queue *q, struct entry *e)
+{
+ if (!e->sentinel)
+ q->nr_elts++;
+
+ l_add_tail(q->es, q->qs + e->level, e);
+}
+
+static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
+{
+ if (!e->sentinel)
+ q->nr_elts++;
+
+ l_add_before(q->es, q->qs + e->level, old, e);
+}
+
+static void q_del(struct queue *q, struct entry *e)
+{
+ l_del(q->es, q->qs + e->level, e);
+ if (!e->sentinel)
+ q->nr_elts--;
+}
+
+/*
+ * Return the oldest entry of the lowest populated level.
+ */
+static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
+{
+ unsigned level;
+ struct entry *e;
+
+ max_level = min(max_level, q->nr_levels);
+
+ for (level = 0; level < max_level; level++)
+ for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
+ if (e->sentinel) {
+ if (can_cross_sentinel)
+ continue;
+ else
+ break;
+ }
+
+ return e;
+ }
+
+ return NULL;
+}
+
+static struct entry *q_pop(struct queue *q)
+{
+ struct entry *e = q_peek(q, q->nr_levels, true);
+
+ if (e)
+ q_del(q, e);
+
+ return e;
+}
+
+/*
+ * Pops an entry from a level that is not past a sentinel.
+ */
+static struct entry *q_pop_old(struct queue *q, unsigned max_level)
+{
+ struct entry *e = q_peek(q, max_level, false);
+
+ if (e)
+ q_del(q, e);
+
+ return e;
+}
+
+/*
+ * This function assumes there is a non-sentinel entry to pop. It's only
+ * used by redistribute, so we know this is true. It also doesn't adjust
+ * the q->nr_elts count.
+ */
+static struct entry *__redist_pop_from(struct queue *q, unsigned level)
+{
+ struct entry *e;
+
+ for (; level < q->nr_levels; level++)
+ for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
+ if (!e->sentinel) {
+ l_del(q->es, q->qs + e->level, e);
+ return e;
+ }
+
+ return NULL;
+}
+
+static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
+{
+ unsigned level, nr_levels, entries_per_level, remainder;
+
+ BUG_ON(lbegin > lend);
+ BUG_ON(lend > q->nr_levels);
+ nr_levels = lend - lbegin;
+ entries_per_level = safe_div(nr_elts, nr_levels);
+ remainder = safe_mod(nr_elts, nr_levels);
+
+ for (level = lbegin; level < lend; level++)
+ q->target_count[level] =
+ (level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
+}
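
q_set_targets_subrange_() spreads nr_elts across the levels [lbegin, lend): every level gets the integer quotient and the first 'remainder' levels get one extra, so the targets sum to nr_elts exactly. A small standalone example of the same arithmetic (illustrative values only):

#include <stdio.h>

static unsigned safe_div(unsigned n, unsigned d) { return d ? n / d : 0u; }
static unsigned safe_mod(unsigned n, unsigned d) { return d ? n % d : 0u; }

int main(void)
{
    /* Spread 11 entries across levels 2..6, as q_set_targets_subrange_ does. */
    unsigned nr_elts = 11, lbegin = 2, lend = 7;
    unsigned nr_levels = lend - lbegin;
    unsigned per_level = safe_div(nr_elts, nr_levels);   /* 2 */
    unsigned remainder = safe_mod(nr_elts, nr_levels);   /* 1 */

    for (unsigned level = lbegin; level < lend; level++) {
        unsigned target = (level < lbegin + remainder) ? per_level + 1u : per_level;
        printf("level %u: target %u\n", level, target);  /* 3, 2, 2, 2, 2 */
    }
    return 0;
}
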
+
+/*
+ * Typically we have fewer elements in the top few levels which allows us
+ * to adjust the promote threshold nicely.
+ */
+static void q_set_targets(struct queue *q)
+{
+ if (q->last_target_nr_elts == q->nr_elts)
+ return;
+
+ q->last_target_nr_elts = q->nr_elts;
+
+ if (q->nr_top_levels > q->nr_levels)
+ q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);
+
+ else {
+ q_set_targets_subrange_(q, q->nr_in_top_levels,
+ q->nr_levels - q->nr_top_levels, q->nr_levels);
+
+ if (q->nr_in_top_levels < q->nr_elts)
+ q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
+ 0, q->nr_levels - q->nr_top_levels);
+ else
+ q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
+ }
+}
+
+static void q_redistribute(struct queue *q)
+{
+ unsigned target, level;
+ struct ilist *l, *l_above;
+ struct entry *e;
+
+ q_set_targets(q);
+
+ for (level = 0u; level < q->nr_levels - 1u; level++) {
+ l = q->qs + level;
+ target = q->target_count[level];
+
+ /*
+ * Pull down some entries from the level above.
+ */
+ while (l->nr_elts < target) {
+ e = __redist_pop_from(q, level + 1u);
+ if (!e) {
+ /* bug in nr_elts */
+ break;
+ }
+
+ e->level = level;
+ l_add_tail(q->es, l, e);
+ }
+
+ /*
+ * Push some entries up.
+ */
+ l_above = q->qs + level + 1u;
+ while (l->nr_elts > target) {
+ e = l_pop_tail(q->es, l);
+
+ if (!e)
+ /* bug in nr_elts */
+ break;
+
+ e->level = level + 1u;
+ l_add_head(q->es, l_above, e);
+ }
+ }
+}
+
+static void q_requeue_before(struct queue *q, struct entry *dest, struct entry *e, unsigned extra_levels)
+{
+ struct entry *de;
+ unsigned new_level;
+
+ q_del(q, e);
+
+ if (extra_levels && (e->level < q->nr_levels - 1u)) {
+ new_level = min(q->nr_levels - 1u, e->level + extra_levels);
+ for (de = l_head(q->es, q->qs + new_level); de; de = l_next(q->es, de)) {
+ if (de->sentinel)
+ continue;
+
+ q_del(q, de);
+ de->level = e->level;
+
+ if (dest)
+ q_push_before(q, dest, de);
+ else
+ q_push(q, de);
+ break;
+ }
+
+ e->level = new_level;
+ }
+
+ q_push(q, e);
+}
+
+static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels)
+{
+ q_requeue_before(q, NULL, e, extra_levels);
+}
+
+/*----------------------------------------------------------------*/
+
+#define FP_SHIFT 8
+#define SIXTEENTH (1u << (FP_SHIFT - 4u))
+#define EIGHTH (1u << (FP_SHIFT - 3u))
+
+struct stats {
+ unsigned hit_threshold;
+ unsigned hits;
+ unsigned misses;
+};
+
+enum performance {
+ Q_POOR,
+ Q_FAIR,
+ Q_WELL
+};
+
+static void stats_init(struct stats *s, unsigned nr_levels)
+{
+ s->hit_threshold = (nr_levels * 3u) / 4u;
+ s->hits = 0u;
+ s->misses = 0u;
+}
+
+static void stats_reset(struct stats *s)
+{
+ s->hits = s->misses = 0u;
+}
+
+static void stats_level_accessed(struct stats *s, unsigned level)
+{
+ if (level >= s->hit_threshold)
+ s->hits++;
+ else
+ s->misses++;
+}
+
+static void stats_miss(struct stats *s)
+{
+ s->misses++;
+}
+
+/*
+ * There are times when we don't have any confidence in the hotspot queue,
+ * such as when a fresh cache is created and the blocks have been spread
+ * out across the levels, or if an io load changes. We detect this by
+ * seeing how often a lookup is in the top levels of the hotspot queue.
+ */
+static enum performance stats_assess(struct stats *s)
+{
+ unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
+
+ if (confidence < SIXTEENTH)
+ return Q_POOR;
+
+ else if (confidence < EIGHTH)
+ return Q_FAIR;
+
+ else
+ return Q_WELL;
+}
+
+/*----------------------------------------------------------------*/
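
stats_assess() works in 8-bit fixed point: the hit fraction is scaled by 2^FP_SHIFT, so SIXTEENTH and EIGHTH correspond to hit rates of 1/16 and 1/8. A standalone sketch of the same calculation (the example counts are made up):

#include <stdio.h>

#define FP_SHIFT 8
#define SIXTEENTH (1u << (FP_SHIFT - 4u))   /* 16/256 = 1/16 */
#define EIGHTH    (1u << (FP_SHIFT - 3u))   /* 32/256 = 1/8  */

static unsigned safe_div(unsigned n, unsigned d) { return d ? n / d : 0u; }

static const char *assess(unsigned hits, unsigned misses)
{
    /* 8-bit fixed point: confidence = hits / (hits + misses), scaled by 256. */
    unsigned confidence = safe_div(hits << FP_SHIFT, hits + misses);

    if (confidence < SIXTEENTH)
        return "poor";
    else if (confidence < EIGHTH)
        return "fair";
    else
        return "well";
}

int main(void)
{
    printf("%u/%u -> %s\n", 3u, 103u, assess(3, 100));    /* ~3%  -> poor */
    printf("%u/%u -> %s\n", 10u, 110u, assess(10, 100));  /* ~9%  -> fair */
    printf("%u/%u -> %s\n", 50u, 150u, assess(50, 100));  /* ~33% -> well */
    return 0;
}
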
+
+struct hash_table {
+ struct entry_space *es;
+ unsigned long long hash_bits;
+ unsigned *buckets;
+};
+
+/*
+ * All cache entries are stored in a chained hash table. To save space we
+ * use indexing again, and only store indexes to the next entry.
+ */
+static int h_init(struct hash_table *ht, struct entry_space *es, unsigned nr_entries)
+{
+ unsigned i, nr_buckets;
+
+ ht->es = es;
+ nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
+ ht->hash_bits = __ffs(nr_buckets);
+
+ ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
+ if (!ht->buckets)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_buckets; i++)
+ ht->buckets[i] = INDEXER_NULL;
+
+ return 0;
+}
+
+static void h_exit(struct hash_table *ht)
+{
+ vfree(ht->buckets);
+}
+
+static struct entry *h_head(struct hash_table *ht, unsigned bucket)
+{
+ return to_entry(ht->es, ht->buckets[bucket]);
+}
+
+static struct entry *h_next(struct hash_table *ht, struct entry *e)
+{
+ return to_entry(ht->es, e->hash_next);
+}
+
+static void __h_insert(struct hash_table *ht, unsigned bucket, struct entry *e)
+{
+ e->hash_next = ht->buckets[bucket];
+ ht->buckets[bucket] = to_index(ht->es, e);
+}
+
+static void h_insert(struct hash_table *ht, struct entry *e)
+{
+ unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ __h_insert(ht, h, e);
+}
+
+static struct entry *__h_lookup(struct hash_table *ht, unsigned h, dm_oblock_t oblock,
+ struct entry **prev)
+{
+ struct entry *e;
+
+ *prev = NULL;
+ for (e = h_head(ht, h); e; e = h_next(ht, e)) {
+ if (e->oblock == oblock)
+ return e;
+
+ *prev = e;
+ }
+
+ return NULL;
+}
+
+static void __h_unlink(struct hash_table *ht, unsigned h,
+ struct entry *e, struct entry *prev)
+{
+ if (prev)
+ prev->hash_next = e->hash_next;
+ else
+ ht->buckets[h] = e->hash_next;
+}
+
+/*
+ * Also moves each entry to the front of the bucket.
+ */
+static struct entry *h_lookup(struct hash_table *ht, dm_oblock_t oblock)
+{
+ struct entry *e, *prev;
+ unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);
+
+ e = __h_lookup(ht, h, oblock, &prev);
+ if (e && prev) {
+ /*
+ * Move to the front because this entry is likely
+ * to be hit again.
+ */
+ __h_unlink(ht, h, e, prev);
+ __h_insert(ht, h, e);
+ }
+
+ return e;
+}
+
+static void h_remove(struct hash_table *ht, struct entry *e)
+{
+ unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ struct entry *prev;
+
+ /*
+ * The down side of using a singly linked list is we have to
+ * iterate the bucket to remove an item.
+ */
+ e = __h_lookup(ht, h, e->oblock, &prev);
+ if (e)
+ __h_unlink(ht, h, e, prev);
+}
+
+/*----------------------------------------------------------------*/
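
Buckets and hash_next hold entry indices, and h_lookup() re-links a hit to the front of its bucket on the assumption it will be looked up again soon. A condensed userspace sketch of that move-to-front behaviour (pointer chains instead of indices and a fixed power-of-two bucket count; not the kernel code):

#include <stdio.h>

#define NR_BUCKETS 8u                 /* power of two, so key & (NR_BUCKETS - 1) works */

struct entry {
    unsigned long key;
    struct entry *hash_next;
};

static struct entry *buckets[NR_BUCKETS];

static unsigned hash(unsigned long key) { return key & (NR_BUCKETS - 1u); }

static void insert(struct entry *e)
{
    unsigned h = hash(e->key);
    e->hash_next = buckets[h];        /* push on the front of the chain */
    buckets[h] = e;
}

/* Lookup that, like h_lookup(), moves a hit to the front of its bucket. */
static struct entry *lookup(unsigned long key)
{
    unsigned h = hash(key);
    struct entry *e, *prev = NULL;

    for (e = buckets[h]; e; prev = e, e = e->hash_next)
        if (e->key == key) {
            if (prev) {               /* unlink and reinsert at the head */
                prev->hash_next = e->hash_next;
                e->hash_next = buckets[h];
                buckets[h] = e;
            }
            return e;
        }

    return NULL;
}

int main(void)
{
    static struct entry es[3] = { { 8, NULL }, { 16, NULL }, { 24, NULL } };

    for (int i = 0; i < 3; i++)
        insert(&es[i]);               /* all three collide in bucket 0 */

    lookup(8);                        /* 8 was deepest; move-to-front promotes it */
    printf("bucket 0 head: %lu\n", buckets[0]->key);   /* prints 8 */
    return 0;
}
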
+
+struct entry_alloc {
+ struct entry_space *es;
+ unsigned begin;
+
+ unsigned nr_allocated;
+ struct ilist free;
+};
+
+static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
+ unsigned begin, unsigned end)
+{
+ unsigned i;
+
+ ea->es = es;
+ ea->nr_allocated = 0u;
+ ea->begin = begin;
+
+ l_init(&ea->free);
+ for (i = begin; i != end; i++)
+ l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
+}
+
+static void init_entry(struct entry *e)
+{
+ /*
+ * We can't memset because that would clear the hotspot and
+ * sentinel bits which remain constant.
+ */
+ e->hash_next = INDEXER_NULL;
+ e->next = INDEXER_NULL;
+ e->prev = INDEXER_NULL;
+ e->level = 0u;
+ e->allocated = true;
+}
+
+static struct entry *alloc_entry(struct entry_alloc *ea)
+{
+ struct entry *e;
+
+ if (l_empty(&ea->free))
+ return NULL;
+
+ e = l_pop_tail(ea->es, &ea->free);
+ init_entry(e);
+ ea->nr_allocated++;
+
+ return e;
+}
+
+/*
+ * This assumes the cblock hasn't already been allocated.
+ */
+static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
+{
+ struct entry *e = __get_entry(ea->es, ea->begin + i);
+
+ BUG_ON(e->allocated);
+
+ l_del(ea->es, &ea->free, e);
+ init_entry(e);
+ ea->nr_allocated++;
+
+ return e;
+}
+
+static void free_entry(struct entry_alloc *ea, struct entry *e)
+{
+ BUG_ON(!ea->nr_allocated);
+ BUG_ON(!e->allocated);
+
+ ea->nr_allocated--;
+ e->allocated = false;
+ l_add_tail(ea->es, &ea->free, e);
+}
+
+static bool allocator_empty(struct entry_alloc *ea)
+{
+ return l_empty(&ea->free);
+}
+
+static unsigned get_index(struct entry_alloc *ea, struct entry *e)
+{
+ return to_index(ea->es, e) - ea->begin;
+}
+
+static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
+{
+ return __get_entry(ea->es, ea->begin + index);
+}
+
+/*----------------------------------------------------------------*/
+
+#define NR_HOTSPOT_LEVELS 64u
+#define NR_CACHE_LEVELS 64u
+
+#define WRITEBACK_PERIOD (10 * HZ)
+#define DEMOTE_PERIOD (60 * HZ)
+
+#define HOTSPOT_UPDATE_PERIOD (HZ)
+#define CACHE_UPDATE_PERIOD (10u * HZ)
+
+struct smq_policy {
+ struct dm_cache_policy policy;
+
+ /* protects everything */
+ spinlock_t lock;
+ dm_cblock_t cache_size;
+ sector_t cache_block_size;
+
+ sector_t hotspot_block_size;
+ unsigned nr_hotspot_blocks;
+ unsigned cache_blocks_per_hotspot_block;
+ unsigned hotspot_level_jump;
+
+ struct entry_space es;
+ struct entry_alloc writeback_sentinel_alloc;
+ struct entry_alloc demote_sentinel_alloc;
+ struct entry_alloc hotspot_alloc;
+ struct entry_alloc cache_alloc;
+
+ unsigned long *hotspot_hit_bits;
+ unsigned long *cache_hit_bits;
+
+ /*
+ * We maintain three queues of entries. The cache proper,
+ * consisting of a clean and dirty queue, contains the currently
+ * active mappings. The hotspot queue uses a larger block size to
+ * track blocks that are being hit frequently and potential
+ * candidates for promotion to the cache.
+ */
+ struct queue hotspot;
+ struct queue clean;
+ struct queue dirty;
+
+ struct stats hotspot_stats;
+ struct stats cache_stats;
+
+ /*
+ * Keeps track of time, incremented by the core. We use this to
+ * avoid attributing multiple hits within the same tick.
+ */
+ unsigned tick;
+
+ /*
+ * The hash tables allow us to quickly find an entry by origin
+ * block.
+ */
+ struct hash_table table;
+ struct hash_table hotspot_table;
+
+ bool current_writeback_sentinels;
+ unsigned long next_writeback_period;
+
+ bool current_demote_sentinels;
+ unsigned long next_demote_period;
+
+ unsigned write_promote_level;
+ unsigned read_promote_level;
+
+ unsigned long next_hotspot_period;
+ unsigned long next_cache_period;
+};
+
+/*----------------------------------------------------------------*/
+
+static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
+{
+ return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
+}
+
+static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
+{
+ return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
+}
+
+static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
+{
+ return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
+}
+
+static void __update_writeback_sentinels(struct smq_policy *mq)
+{
+ unsigned level;
+ struct queue *q = &mq->dirty;
+ struct entry *sentinel;
+
+ for (level = 0; level < q->nr_levels; level++) {
+ sentinel = writeback_sentinel(mq, level);
+ q_del(q, sentinel);
+ q_push(q, sentinel);
+ }
+}
+
+static void __update_demote_sentinels(struct smq_policy *mq)
+{
+ unsigned level;
+ struct queue *q = &mq->clean;
+ struct entry *sentinel;
+
+ for (level = 0; level < q->nr_levels; level++) {
+ sentinel = demote_sentinel(mq, level);
+ q_del(q, sentinel);
+ q_push(q, sentinel);
+ }
+}
+
+static void update_sentinels(struct smq_policy *mq)
+{
+ if (time_after(jiffies, mq->next_writeback_period)) {
+ __update_writeback_sentinels(mq);
+ mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
+ mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
+ }
+
+ if (time_after(jiffies, mq->next_demote_period)) {
+ __update_demote_sentinels(mq);
+ mq->next_demote_period = jiffies + DEMOTE_PERIOD;
+ mq->current_demote_sentinels = !mq->current_demote_sentinels;
+ }
+}
+
+static void __sentinels_init(struct smq_policy *mq)
+{
+ unsigned level;
+ struct entry *sentinel;
+
+ for (level = 0; level < NR_CACHE_LEVELS; level++) {
+ sentinel = writeback_sentinel(mq, level);
+ sentinel->level = level;
+ q_push(&mq->dirty, sentinel);
+
+ sentinel = demote_sentinel(mq, level);
+ sentinel->level = level;
+ q_push(&mq->clean, sentinel);
+ }
+}
+
+static void sentinels_init(struct smq_policy *mq)
+{
+ mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
+ mq->next_demote_period = jiffies + DEMOTE_PERIOD;
+
+ mq->current_writeback_sentinels = false;
+ mq->current_demote_sentinels = false;
+ __sentinels_init(mq);
+
+ mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
+ mq->current_demote_sentinels = !mq->current_demote_sentinels;
+ __sentinels_init(mq);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * These methods tie together the dirty queue, clean queue and hash table.
+ */
+static void push_new(struct smq_policy *mq, struct entry *e)
+{
+ struct queue *q = e->dirty ? &mq->dirty : &mq->clean;
+ h_insert(&mq->table, e);
+ q_push(q, e);
+}
+
+static void push(struct smq_policy *mq, struct entry *e)
+{
+ struct entry *sentinel;
+
+ h_insert(&mq->table, e);
+
+ /*
+ * Punch this into the queue just in front of the sentinel, to
+ * ensure it's cleaned straight away.
+ */
+ if (e->dirty) {
+ sentinel = writeback_sentinel(mq, e->level);
+ q_push_before(&mq->dirty, sentinel, e);
+ } else {
+ sentinel = demote_sentinel(mq, e->level);
+ q_push_before(&mq->clean, sentinel, e);
+ }
+}
+
+/*
+ * Removes an entry from cache. Removes from the hash table.
+ */
+static void __del(struct smq_policy *mq, struct queue *q, struct entry *e)
+{
+ q_del(q, e);
+ h_remove(&mq->table, e);
+}
+
+static void del(struct smq_policy *mq, struct entry *e)
+{
+ __del(mq, e->dirty ? &mq->dirty : &mq->clean, e);
+}
+
+static struct entry *pop_old(struct smq_policy *mq, struct queue *q, unsigned max_level)
+{
+ struct entry *e = q_pop_old(q, max_level);
+ if (e)
+ h_remove(&mq->table, e);
+ return e;
+}
+
+static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
+{
+ return to_cblock(get_index(&mq->cache_alloc, e));
+}
+
+static void requeue(struct smq_policy *mq, struct entry *e)
+{
+ struct entry *sentinel;
+
+ if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
+ if (e->dirty) {
+ sentinel = writeback_sentinel(mq, e->level);
+ q_requeue_before(&mq->dirty, sentinel, e, 1u);
+ } else {
+ sentinel = demote_sentinel(mq, e->level);
+ q_requeue_before(&mq->clean, sentinel, e, 1u);
+ }
+ }
+}
+
+static unsigned default_promote_level(struct smq_policy *mq)
+{
+ /*
+ * The promote level depends on the current performance of the
+ * cache.
+ *
+ * If the cache is performing badly, then we can't afford
+ * to promote much without causing performance to drop below that
+ * of the origin device.
+ *
+ * If the cache is performing well, then we don't need to promote
+ * much. If it isn't broken, don't fix it.
+ *
+ * If the cache is middling then we promote more.
+ *
+ * This scheme reminds me of a graph of entropy vs probability of a
+ * binary variable.
+ */
+ static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};
+
+ unsigned hits = mq->cache_stats.hits;
+ unsigned misses = mq->cache_stats.misses;
+ unsigned index = safe_div(hits << 4u, hits + misses);
+ return table[index];
+}
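
default_promote_level() scales the hit ratio to a 0..16 index into a hump-shaped table, so middling hit rates promote the most and very good or very bad caches promote little. A worked standalone example (the sample counts are chosen purely for illustration):

#include <stdio.h>

static unsigned safe_div(unsigned n, unsigned d) { return d ? n / d : 0u; }

int main(void)
{
    /* Same shape as the table in default_promote_level(): low promotion at the
     * extremes of the hit ratio, more in the middle. */
    static const unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};

    unsigned hits = 300, misses = 700;

    /* Scale the hit ratio to a 0..16 index: (hits * 16) / (hits + misses). */
    unsigned index = safe_div(hits << 4u, hits + misses);

    printf("hit ratio %u/%u -> index %u -> promote level %u\n",
           hits, hits + misses, index, table[index]);   /* index 4, level 4 */
    return 0;
}
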
+
+static void update_promote_levels(struct smq_policy *mq)
+{
+ /*
+ * If there are unused cache entries then we want to be really
+ * eager to promote.
+ */
+ unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
+ default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
+
+ /*
+ * If the hotspot queue is performing badly then we have little
+ * confidence that we know which blocks to promote. So we cut down
+ * the amount of promotions.
+ */
+ switch (stats_assess(&mq->hotspot_stats)) {
+ case Q_POOR:
+ threshold_level /= 4u;
+ break;
+
+ case Q_FAIR:
+ threshold_level /= 2u;
+ break;
+
+ case Q_WELL:
+ break;
+ }
+
+ mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
+ mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level) + 2u;
+}
+
+/*
+ * If the hotspot queue is performing badly, then we try and move entries
+ * around more quickly.
+ */
+static void update_level_jump(struct smq_policy *mq)
+{
+ switch (stats_assess(&mq->hotspot_stats)) {
+ case Q_POOR:
+ mq->hotspot_level_jump = 4u;
+ break;
+
+ case Q_FAIR:
+ mq->hotspot_level_jump = 2u;
+ break;
+
+ case Q_WELL:
+ mq->hotspot_level_jump = 1u;
+ break;
+ }
+}
+
+static void end_hotspot_period(struct smq_policy *mq)
+{
+ clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
+ update_promote_levels(mq);
+
+ if (time_after(jiffies, mq->next_hotspot_period)) {
+ update_level_jump(mq);
+ q_redistribute(&mq->hotspot);
+ stats_reset(&mq->hotspot_stats);
+ mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
+ }
+}
+
+static void end_cache_period(struct smq_policy *mq)
+{
+ if (time_after(jiffies, mq->next_cache_period)) {
+ clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
+
+ q_redistribute(&mq->dirty);
+ q_redistribute(&mq->clean);
+ stats_reset(&mq->cache_stats);
+
+ mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
+ }
+}
+
+static int demote_cblock(struct smq_policy *mq,
+ struct policy_locker *locker,
+ dm_oblock_t *oblock)
+{
+ struct entry *demoted = q_peek(&mq->clean, mq->clean.nr_levels, false);
+ if (!demoted)
+ /*
+ * We could get a block from mq->dirty, but that
+ * would add extra latency to the triggering bio as it
+ * waits for the writeback. Better to not promote this
+ * time and hope there's a clean block next time this block
+ * is hit.
+ */
+ return -ENOSPC;
+
+ if (locker->fn(locker, demoted->oblock))
+ /*
+ * We couldn't lock this block.
+ */
+ return -EBUSY;
+
+ del(mq, demoted);
+ *oblock = demoted->oblock;
+ free_entry(&mq->cache_alloc, demoted);
+
+ return 0;
+}
+
+enum promote_result {
+ PROMOTE_NOT,
+ PROMOTE_TEMPORARY,
+ PROMOTE_PERMANENT
+};
+
+/*
+ * Converts a boolean into a promote result.
+ */
+static enum promote_result maybe_promote(bool promote)
+{
+ return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
+}
+
+static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e, struct bio *bio,
+ bool fast_promote)
+{
+ if (bio_data_dir(bio) == WRITE) {
+ if (!allocator_empty(&mq->cache_alloc) && fast_promote)
+ return PROMOTE_TEMPORARY;
+
+ else
+ return maybe_promote(hs_e->level >= mq->write_promote_level);
+ } else
+ return maybe_promote(hs_e->level >= mq->read_promote_level);
+}
+
+static void insert_in_cache(struct smq_policy *mq, dm_oblock_t oblock,
+ struct policy_locker *locker,
+ struct policy_result *result, enum promote_result pr)
+{
+ int r;
+ struct entry *e;
+
+ if (allocator_empty(&mq->cache_alloc)) {
+ result->op = POLICY_REPLACE;
+ r = demote_cblock(mq, locker, &result->old_oblock);
+ if (r) {
+ result->op = POLICY_MISS;
+ return;
+ }
+
+ } else
+ result->op = POLICY_NEW;
+
+ e = alloc_entry(&mq->cache_alloc);
+ BUG_ON(!e);
+ e->oblock = oblock;
+
+ if (pr == PROMOTE_TEMPORARY)
+ push(mq, e);
+ else
+ push_new(mq, e);
+
+ result->cblock = infer_cblock(mq, e);
+}
+
+static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
+{
+ sector_t r = from_oblock(b);
+ (void) sector_div(r, mq->cache_blocks_per_hotspot_block);
+ return to_oblock(r);
+}
+
+static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b, struct bio *bio)
+{
+ unsigned hi;
+ dm_oblock_t hb = to_hblock(mq, b);
+ struct entry *e = h_lookup(&mq->hotspot_table, hb);
+
+ if (e) {
+ stats_level_accessed(&mq->hotspot_stats, e->level);
+
+ hi = get_index(&mq->hotspot_alloc, e);
+ q_requeue(&mq->hotspot, e,
+ test_and_set_bit(hi, mq->hotspot_hit_bits) ?
+ 0u : mq->hotspot_level_jump);
+
+ } else {
+ stats_miss(&mq->hotspot_stats);
+
+ e = alloc_entry(&mq->hotspot_alloc);
+ if (!e) {
+ e = q_pop(&mq->hotspot);
+ if (e) {
+ h_remove(&mq->hotspot_table, e);
+ hi = get_index(&mq->hotspot_alloc, e);
+ clear_bit(hi, mq->hotspot_hit_bits);
+ }
+
+ }
+
+ if (e) {
+ e->oblock = hb;
+ q_push(&mq->hotspot, e);
+ h_insert(&mq->hotspot_table, e);
+ }
+ }
+
+ return e;
+}
+
+/*
+ * Looks the oblock up in the hash table, then decides whether to put in
+ * pre_cache, or cache etc.
+ */
+static int map(struct smq_policy *mq, struct bio *bio, dm_oblock_t oblock,
+ bool can_migrate, bool fast_promote,
+ struct policy_locker *locker, struct policy_result *result)
+{
+ struct entry *e, *hs_e;
+ enum promote_result pr;
+
+ hs_e = update_hotspot_queue(mq, oblock, bio);
+
+ e = h_lookup(&mq->table, oblock);
+ if (e) {
+ stats_level_accessed(&mq->cache_stats, e->level);
+
+ requeue(mq, e);
+ result->op = POLICY_HIT;
+ result->cblock = infer_cblock(mq, e);
+
+ } else {
+ stats_miss(&mq->cache_stats);
+
+ pr = should_promote(mq, hs_e, bio, fast_promote);
+ if (pr == PROMOTE_NOT)
+ result->op = POLICY_MISS;
+
+ else {
+ if (!can_migrate) {
+ result->op = POLICY_MISS;
+ return -EWOULDBLOCK;
+ }
+
+ insert_in_cache(mq, oblock, locker, result, pr);
+ }
+ }
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Public interface, via the policy struct. See dm-cache-policy.h for a
+ * description of these.
+ */
+
+static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
+{
+ return container_of(p, struct smq_policy, policy);
+}
+
+static void smq_destroy(struct dm_cache_policy *p)
+{
+ struct smq_policy *mq = to_smq_policy(p);
+
+ h_exit(&mq->hotspot_table);
+ h_exit(&mq->table);
+ free_bitset(mq->hotspot_hit_bits);
+ free_bitset(mq->cache_hit_bits);
+ space_exit(&mq->es);
+ kfree(mq);
+}
+
+static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool fast_promote,
+ struct bio *bio, struct policy_locker *locker,
+ struct policy_result *result)
+{
+ int r;
+ unsigned long flags;
+ struct smq_policy *mq = to_smq_policy(p);
+
+ result->op = POLICY_MISS;
+
+ spin_lock_irqsave(&mq->lock, flags);
+ r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ return r;
+}
+
+static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ int r;
+ unsigned long flags;
+ struct smq_policy *mq = to_smq_policy(p);
+ struct entry *e;
+
+ spin_lock_irqsave(&mq->lock, flags);
+ e = h_lookup(&mq->table, oblock);
+ if (e) {
+ *cblock = infer_cblock(mq, e);
+ r = 0;
+ } else
+ r = -ENOENT;
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ return r;
+}
+
+static void __smq_set_clear_dirty(struct smq_policy *mq, dm_oblock_t oblock, bool set)
+{
+ struct entry *e;
+
+ e = h_lookup(&mq->table, oblock);
+ BUG_ON(!e);
+
+ del(mq, e);
+ e->dirty = set;
+ push(mq, e);
+}
+
+static void smq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ unsigned long flags;
+ struct smq_policy *mq = to_smq_policy(p);
+
+ spin_lock_irqsave(&mq->lock, flags);
+ __smq_set_clear_dirty(mq, oblock, true);
+ spin_unlock_irqrestore(&mq->lock, flags);
+}
+
+static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ struct smq_policy *mq = to_smq_policy(p);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->lock, flags);
+ __smq_set_clear_dirty(mq, oblock, false);
+ spin_unlock_irqrestore(&mq->lock, flags);
+}
+
+static int smq_load_mapping(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ struct smq_policy *mq = to_smq_policy(p);
+ struct entry *e;
+
+ e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
+ e->oblock = oblock;
+ e->dirty = false; /* this gets corrected in a minute */
+ e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : 1;
+ push(mq, e);
+
+ return 0;
+}
+
+static int smq_save_hints(struct smq_policy *mq, struct queue *q,
+ policy_walk_fn fn, void *context)
+{
+ int r;
+ unsigned level;
+ struct entry *e;
+
+ for (level = 0; level < q->nr_levels; level++)
+ for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
+ if (!e->sentinel) {
+ r = fn(context, infer_cblock(mq, e),
+ e->oblock, e->level);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context)
+{
+ struct smq_policy *mq = to_smq_policy(p);
+ int r = 0;
+
+ /*
+ * We don't need to lock here since this method is only called once
+ * the IO has stopped.
+ */
+ r = smq_save_hints(mq, &mq->clean, fn, context);
+ if (!r)
+ r = smq_save_hints(mq, &mq->dirty, fn, context);
+
+ return r;
+}
+
+static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock)
+{
+ struct entry *e;
+
+ e = h_lookup(&mq->table, oblock);
+ BUG_ON(!e);
+
+ del(mq, e);
+ free_entry(&mq->cache_alloc, e);
+}
+
+static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ struct smq_policy *mq = to_smq_policy(p);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->lock, flags);
+ __remove_mapping(mq, oblock);
+ spin_unlock_irqrestore(&mq->lock, flags);
+}
+
+static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
+{
+ struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
+
+ if (!e || !e->allocated)
+ return -ENODATA;
+
+ del(mq, e);
+ free_entry(&mq->cache_alloc, e);
+
+ return 0;
+}
+
+static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
+{
+ int r;
+ unsigned long flags;
+ struct smq_policy *mq = to_smq_policy(p);
+
+ spin_lock_irqsave(&mq->lock, flags);
+ r = __remove_cblock(mq, cblock);
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ return r;
+}
+
+
+#define CLEAN_TARGET_CRITICAL 5u /* percent */
+
+static bool clean_target_met(struct smq_policy *mq, bool critical)
+{
+ if (critical) {
+ /*
+ * Cache entries may not be populated. So we cannot rely on the
+ * size of the clean queue.
+ */
+ unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+ unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_CRITICAL / 100u;
+
+ return nr_clean >= target;
+ } else
+ return !q_size(&mq->dirty);
+}
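
In critical mode the check is purely arithmetic: the number of clean blocks (cache size minus the dirty-queue size) must be at least CLEAN_TARGET_CRITICAL percent of the cache. A small standalone sketch of that check (simplified signature and illustrative numbers):

#include <stdbool.h>
#include <stdio.h>

#define CLEAN_TARGET_CRITICAL 5u   /* percent */

/* Roughly what clean_target_met() checks in critical mode: at least 5% of the
 * cache must be clean (cache_size minus the dirty-queue size). */
static bool critical_clean_target_met(unsigned cache_size, unsigned nr_dirty)
{
    unsigned nr_clean = cache_size - nr_dirty;
    unsigned target = cache_size * CLEAN_TARGET_CRITICAL / 100u;

    return nr_clean >= target;
}

int main(void)
{
    printf("%d\n", critical_clean_target_met(1000, 940));   /* 60 clean >= 50: 1 */
    printf("%d\n", critical_clean_target_met(1000, 970));   /* 30 clean <  50: 0 */
    return 0;
}
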
+
+static int __smq_writeback_work(struct smq_policy *mq, dm_oblock_t *oblock,
+ dm_cblock_t *cblock, bool critical_only)
+{
+ struct entry *e = NULL;
+ bool target_met = clean_target_met(mq, critical_only);
+
+ if (critical_only)
+ /*
+ * Always try and keep the bottom level clean.
+ */
+ e = pop_old(mq, &mq->dirty, target_met ? 1u : mq->dirty.nr_levels);
+
+ else
+ e = pop_old(mq, &mq->dirty, mq->dirty.nr_levels);
+
+ if (!e)
+ return -ENODATA;
+
+ *oblock = e->oblock;
+ *cblock = infer_cblock(mq, e);
+ e->dirty = false;
+ push_new(mq, e);
+
+ return 0;
+}
+
+static int smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
+ dm_cblock_t *cblock, bool critical_only)
+{
+ int r;
+ unsigned long flags;
+ struct smq_policy *mq = to_smq_policy(p);
+
+ spin_lock_irqsave(&mq->lock, flags);
+ r = __smq_writeback_work(mq, oblock, cblock, critical_only);
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ return r;
+}
+
+static void __force_mapping(struct smq_policy *mq,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ struct entry *e = h_lookup(&mq->table, current_oblock);
+
+ if (e) {
+ del(mq, e);
+ e->oblock = new_oblock;
+ e->dirty = true;
+ push(mq, e);
+ }
+}
+
+static void smq_force_mapping(struct dm_cache_policy *p,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ unsigned long flags;
+ struct smq_policy *mq = to_smq_policy(p);
+
+ spin_lock_irqsave(&mq->lock, flags);
+ __force_mapping(mq, current_oblock, new_oblock);
+ spin_unlock_irqrestore(&mq->lock, flags);
+}
+
+static dm_cblock_t smq_residency(struct dm_cache_policy *p)
+{
+ dm_cblock_t r;
+ unsigned long flags;
+ struct smq_policy *mq = to_smq_policy(p);
+
+ spin_lock_irqsave(&mq->lock, flags);
+ r = to_cblock(mq->cache_alloc.nr_allocated);
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ return r;
+}
+
+static void smq_tick(struct dm_cache_policy *p, bool can_block)
+{
+ struct smq_policy *mq = to_smq_policy(p);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->lock, flags);
+ mq->tick++;
+ update_sentinels(mq);
+ end_hotspot_period(mq);
+ end_cache_period(mq);
+ spin_unlock_irqrestore(&mq->lock, flags);
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct smq_policy *mq)
+{
+ mq->policy.destroy = smq_destroy;
+ mq->policy.map = smq_map;
+ mq->policy.lookup = smq_lookup;
+ mq->policy.set_dirty = smq_set_dirty;
+ mq->policy.clear_dirty = smq_clear_dirty;
+ mq->policy.load_mapping = smq_load_mapping;
+ mq->policy.walk_mappings = smq_walk_mappings;
+ mq->policy.remove_mapping = smq_remove_mapping;
+ mq->policy.remove_cblock = smq_remove_cblock;
+ mq->policy.writeback_work = smq_writeback_work;
+ mq->policy.force_mapping = smq_force_mapping;
+ mq->policy.residency = smq_residency;
+ mq->policy.tick = smq_tick;
+}
+
+static bool too_many_hotspot_blocks(sector_t origin_size,
+ sector_t hotspot_block_size,
+ unsigned nr_hotspot_blocks)
+{
+ return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
+}
+
+static void calc_hotspot_params(sector_t origin_size,
+ sector_t cache_block_size,
+ unsigned nr_cache_blocks,
+ sector_t *hotspot_block_size,
+ unsigned *nr_hotspot_blocks)
+{
+ *hotspot_block_size = cache_block_size * 16u;
+ *nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
+
+ while ((*hotspot_block_size > cache_block_size) &&
+ too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
+ *hotspot_block_size /= 2u;
+}
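
calc_hotspot_params() starts with hotspot blocks sixteen cache blocks wide and at least 1024 of them, then halves the hotspot block size until the hotspot region fits within the origin. A standalone sketch with made-up device sizes (sector_t is approximated with unsigned long long and max() with a tiny helper):

#include <stdio.h>

typedef unsigned long long sector_t;   /* stand-in for the kernel type */

static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

static int too_many_hotspot_blocks(sector_t origin_size,
                                   sector_t hotspot_block_size,
                                   unsigned nr_hotspot_blocks)
{
    return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
}

static void calc_hotspot_params(sector_t origin_size,
                                sector_t cache_block_size,
                                unsigned nr_cache_blocks,
                                sector_t *hotspot_block_size,
                                unsigned *nr_hotspot_blocks)
{
    /* Start with hotspot blocks 16x the cache block size and at least 1024 of
     * them, then halve the block size until they fit within the origin. */
    *hotspot_block_size = cache_block_size * 16u;
    *nr_hotspot_blocks = max_u(nr_cache_blocks / 4u, 1024u);

    while ((*hotspot_block_size > cache_block_size) &&
           too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
        *hotspot_block_size /= 2u;
}

int main(void)
{
    sector_t hbs;
    unsigned nr;

    /* ~488 MiB origin in 512-byte sectors, 64 KiB (128-sector) cache blocks. */
    calc_hotspot_params(1000000ull, 128u, 4096u, &hbs, &nr);
    printf("hotspot block = %llu sectors, %u hotspot blocks\n", hbs, nr);
    return 0;   /* prints: hotspot block = 512 sectors, 1024 hotspot blocks */
}
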
+
+static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ unsigned i;
+ unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
+ unsigned total_sentinels = 2u * nr_sentinels_per_queue;
+ struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+
+ if (!mq)
+ return NULL;
+
+ init_policy_functions(mq);
+ mq->cache_size = cache_size;
+ mq->cache_block_size = cache_block_size;
+
+ calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
+ &mq->hotspot_block_size, &mq->nr_hotspot_blocks);
+
+ mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
+ mq->hotspot_level_jump = 1u;
+ if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
+ DMERR("couldn't initialize entry space");
+ goto bad_pool_init;
+ }
+
+ init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
+ for (i = 0; i < nr_sentinels_per_queue; i++)
+ get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;
+
+ init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
+ for (i = 0; i < nr_sentinels_per_queue; i++)
+ get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;
+
+ init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
+ total_sentinels + mq->nr_hotspot_blocks);
+
+ init_allocator(&mq->cache_alloc, &mq->es,
+ total_sentinels + mq->nr_hotspot_blocks,
+ total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));
+
+ mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
+ if (!mq->hotspot_hit_bits) {
+ DMERR("couldn't allocate hotspot hit bitset");
+ goto bad_hotspot_hit_bits;
+ }
+ clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
+
+ if (from_cblock(cache_size)) {
+ mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
+ if (!mq->cache_hit_bits) {
+ DMERR("couldn't allocate cache hit bitset");
+ goto bad_cache_hit_bits;
+ }
+ clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
+ } else
+ mq->cache_hit_bits = NULL;
+
+ mq->tick = 0;
+ spin_lock_init(&mq->lock);
+
+ q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
+ mq->hotspot.nr_top_levels = 8;
+ mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
+ from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);
+
+ q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
+ q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);
+
+ stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
+ stats_init(&mq->cache_stats, NR_CACHE_LEVELS);
+
+ if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
+ goto bad_alloc_table;
+
+ if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
+ goto bad_alloc_hotspot_table;
+
+ sentinels_init(mq);
+ mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;
+
+ mq->next_hotspot_period = jiffies;
+ mq->next_cache_period = jiffies;
+
+ return &mq->policy;
+
+bad_alloc_hotspot_table:
+ h_exit(&mq->table);
+bad_alloc_table:
+ free_bitset(mq->cache_hit_bits);
+bad_cache_hit_bits:
+ free_bitset(mq->hotspot_hit_bits);
+bad_hotspot_hit_bits:
+ space_exit(&mq->es);
+bad_pool_init:
+ kfree(mq);
+
+ return NULL;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_cache_policy_type smq_policy_type = {
+ .name = "smq",
+ .version = {1, 0, 0},
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = smq_create
+};
+
+static struct dm_cache_policy_type default_policy_type = {
+ .name = "default",
+ .version = {1, 4, 0},
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = smq_create,
+ .real = &smq_policy_type
+};
+
+static int __init smq_init(void)
+{
+ int r;
+
+ r = dm_cache_policy_register(&smq_policy_type);
+ if (r) {
+ DMERR("register failed %d", r);
+ return -ENOMEM;
+ }
+
+ r = dm_cache_policy_register(&default_policy_type);
+ if (r) {
+ DMERR("register failed (as default) %d", r);
+ dm_cache_policy_unregister(&smq_policy_type);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit smq_exit(void)
+{
+ dm_cache_policy_unregister(&smq_policy_type);
+ dm_cache_policy_unregister(&default_policy_type);
+}
+
+module_init(smq_init);
+module_exit(smq_exit);
+
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("smq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
diff --git a/kernel/drivers/md/dm-cache-policy.h b/kernel/drivers/md/dm-cache-policy.h
index 5524e21e4..05db56eed 100644
--- a/kernel/drivers/md/dm-cache-policy.h
+++ b/kernel/drivers/md/dm-cache-policy.h
@@ -178,7 +178,9 @@ struct dm_cache_policy {
int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock);
/*
- * Provide a dirty block to be written back by the core target.
+ * Provide a dirty block to be written back by the core target. If
+ * critical_only is set then the policy should only provide work if
+ * it urgently needs it.
*
* Returns:
*
@@ -186,7 +188,8 @@ struct dm_cache_policy {
*
* -ENODATA: no dirty blocks available
*/
- int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+ int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock,
+ bool critical_only);
/*
* How full is the cache?
@@ -197,16 +200,16 @@ struct dm_cache_policy {
* Because of where we sit in the block layer, we can be asked to
* map a lot of little bios that are all in the same block (no
* queue merging has occurred). To stop the policy being fooled by
- * these the core target sends regular tick() calls to the policy.
+ * these, the core target sends regular tick() calls to the policy.
* The policy should only count an entry as hit once per tick.
*/
- void (*tick)(struct dm_cache_policy *p);
+ void (*tick)(struct dm_cache_policy *p, bool can_block);
/*
* Configuration.
*/
- int (*emit_config_values)(struct dm_cache_policy *p,
- char *result, unsigned maxlen);
+ int (*emit_config_values)(struct dm_cache_policy *p, char *result,
+ unsigned maxlen, ssize_t *sz_ptr);
int (*set_config_value)(struct dm_cache_policy *p,
const char *key, const char *value);
diff --git a/kernel/drivers/md/dm-cache-target.c b/kernel/drivers/md/dm-cache-target.c
index e049becaa..2fd4c8296 100644
--- a/kernel/drivers/md/dm-cache-target.c
+++ b/kernel/drivers/md/dm-cache-target.c
@@ -25,44 +25,93 @@ DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
/*----------------------------------------------------------------*/
-/*
- * Glossary:
- *
- * oblock: index of an origin block
- * cblock: index of a cache block
- * promotion: movement of a block from origin to cache
- * demotion: movement of a block from cache to origin
- * migration: movement of a block between the origin and cache device,
- * either direction
- */
+#define IOT_RESOLUTION 4
-/*----------------------------------------------------------------*/
+struct io_tracker {
+ spinlock_t lock;
+
+ /*
+ * Sectors of in-flight IO.
+ */
+ sector_t in_flight;
+
+ /*
+ * The time, in jiffies, when this device became idle (if it is
+ * indeed idle).
+ */
+ unsigned long idle_time;
+ unsigned long last_update_time;
+};
+
+static void iot_init(struct io_tracker *iot)
+{
+ spin_lock_init(&iot->lock);
+ iot->in_flight = 0ul;
+ iot->idle_time = 0ul;
+ iot->last_update_time = jiffies;
+}
+
+static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
+{
+ if (iot->in_flight)
+ return false;
+
+ return time_after(jiffies, iot->idle_time + jifs);
+}
-static size_t bitset_size_in_bytes(unsigned nr_entries)
+static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
- return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+ bool r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iot->lock, flags);
+ r = __iot_idle_for(iot, jifs);
+ spin_unlock_irqrestore(&iot->lock, flags);
+
+ return r;
}
-static unsigned long *alloc_bitset(unsigned nr_entries)
+static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
- size_t s = bitset_size_in_bytes(nr_entries);
- return vzalloc(s);
+ unsigned long flags;
+
+ spin_lock_irqsave(&iot->lock, flags);
+ iot->in_flight += len;
+ spin_unlock_irqrestore(&iot->lock, flags);
}
-static void clear_bitset(void *bitset, unsigned nr_entries)
+static void __iot_io_end(struct io_tracker *iot, sector_t len)
{
- size_t s = bitset_size_in_bytes(nr_entries);
- memset(bitset, 0, s);
+ iot->in_flight -= len;
+ if (!iot->in_flight)
+ iot->idle_time = jiffies;
}
-static void free_bitset(unsigned long *bits)
+static void iot_io_end(struct io_tracker *iot, sector_t len)
{
- vfree(bits);
+ unsigned long flags;
+
+ spin_lock_irqsave(&iot->lock, flags);
+ __iot_io_end(iot, len);
+ spin_unlock_irqrestore(&iot->lock, flags);
}
/*----------------------------------------------------------------*/
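
The io_tracker counts in-flight sectors and records when the count last dropped to zero, so iot_idle_for() can tell whether the origin has been idle for a given interval. A single-threaded userspace sketch of the same idea (wall-clock seconds instead of jiffies and no spinlock; a simplification, not the kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Single-threaded sketch of the io_tracker idea: count in-flight sectors and
 * remember when the count last dropped to zero (seconds here, not jiffies). */
struct io_tracker {
    unsigned long long in_flight;
    time_t idle_time;
};

static void iot_init(struct io_tracker *iot)
{
    iot->in_flight = 0;
    iot->idle_time = time(NULL);
}

static void iot_io_begin(struct io_tracker *iot, unsigned long long len)
{
    iot->in_flight += len;
}

static void iot_io_end(struct io_tracker *iot, unsigned long long len)
{
    iot->in_flight -= len;
    if (!iot->in_flight)
        iot->idle_time = time(NULL);
}

static bool iot_idle_for(struct io_tracker *iot, time_t secs)
{
    if (iot->in_flight)
        return false;              /* IO outstanding: never idle */
    return time(NULL) >= iot->idle_time + secs;
}

int main(void)
{
    struct io_tracker iot;

    iot_init(&iot);
    iot_io_begin(&iot, 8);                       /* 8 sectors in flight */
    printf("idle: %d\n", iot_idle_for(&iot, 0)); /* 0 - still busy */
    iot_io_end(&iot, 8);
    printf("idle: %d\n", iot_idle_for(&iot, 0)); /* 1 - idle since just now */
    return 0;
}
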
/*
+ * Glossary:
+ *
+ * oblock: index of an origin block
+ * cblock: index of a cache block
+ * promotion: movement of a block from origin to cache
+ * demotion: movement of a block from cache to origin
+ * migration: movement of a block between the origin and cache device,
+ * either direction
+ */
+
+/*----------------------------------------------------------------*/
+
+/*
* There are a couple of places where we let a bio run, but want to do some
* work before calling its endio function. We do this by temporarily
* changing the endio fn.
@@ -86,12 +135,6 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
bio->bi_end_io = h->bi_end_io;
bio->bi_private = h->bi_private;
-
- /*
- * Must bump bi_remaining to allow bio to complete with
- * restored bi_end_io.
- */
- atomic_inc(&bio->bi_remaining);
}
/*----------------------------------------------------------------*/
@@ -107,12 +150,10 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
-/*
- * FIXME: the cache is read/write for the time being.
- */
enum cache_metadata_mode {
CM_WRITE, /* metadata may be changed */
CM_READ_ONLY, /* metadata may not be changed */
+ CM_FAIL
};
enum cache_io_mode {
@@ -214,6 +255,7 @@ struct cache {
int sectors_per_block_shift;
spinlock_t lock;
+ struct list_head deferred_cells;
struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
struct bio_list deferred_writethrough_bios;
@@ -288,6 +330,8 @@ struct cache {
*/
spinlock_t invalidation_lock;
struct list_head invalidation_requests;
+
+ struct io_tracker origin_tracker;
};
struct per_bio_data {
@@ -295,6 +339,7 @@ struct per_bio_data {
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;
struct dm_hook_info hook_info;
+ sector_t len;
/*
* writethrough fields. These MUST remain at the end of this
@@ -338,6 +383,8 @@ struct prealloc {
struct dm_bio_prison_cell *cell2;
};
+static enum cache_metadata_mode get_cache_mode(struct cache *cache);
+
static void wake_worker(struct cache *cache)
{
queue_work(cache->wq, &cache->worker);
@@ -371,10 +418,12 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
static void free_migration(struct dm_cache_migration *mg)
{
- if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
- wake_up(&mg->cache->migration_wait);
+ struct cache *cache = mg->cache;
+
+ if (atomic_dec_and_test(&cache->nr_allocated_migrations))
+ wake_up(&cache->migration_wait);
- mempool_free(mg, mg->cache->migration_pool);
+ mempool_free(mg, cache->migration_pool);
}
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -649,6 +698,9 @@ static void save_stats(struct cache *cache)
{
struct dm_cache_statistics stats;
+ if (get_cache_mode(cache) >= CM_READ_ONLY)
+ return;
+
stats.read_hits = atomic_read(&cache->stats.read_hit);
stats.read_misses = atomic_read(&cache->stats.read_miss);
stats.write_hits = atomic_read(&cache->stats.write_hit);
@@ -701,6 +753,7 @@ static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
pb->tick = false;
pb->req_nr = dm_bio_get_target_bio_nr(bio);
pb->all_io_entry = NULL;
+ pb->len = 0;
return pb;
}
@@ -798,12 +851,43 @@ static void inc_ds(struct cache *cache, struct bio *bio,
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
}
+static bool accountable_bio(struct cache *cache, struct bio *bio)
+{
+ return ((bio->bi_bdev == cache->origin_dev->bdev) &&
+ !(bio->bi_rw & REQ_DISCARD));
+}
+
+static void accounted_begin(struct cache *cache, struct bio *bio)
+{
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+
+ if (accountable_bio(cache, bio)) {
+ pb->len = bio_sectors(bio);
+ iot_io_begin(&cache->origin_tracker, pb->len);
+ }
+}
+
+static void accounted_complete(struct cache *cache, struct bio *bio)
+{
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+
+ iot_io_end(&cache->origin_tracker, pb->len);
+}
+
+static void accounted_request(struct cache *cache, struct bio *bio)
+{
+ accounted_begin(cache, bio);
+ generic_make_request(bio);
+}
+
static void issue(struct cache *cache, struct bio *bio)
{
unsigned long flags;
if (!bio_triggers_commit(cache, bio)) {
- generic_make_request(bio);
+ accounted_request(cache, bio);
return;
}
@@ -834,14 +918,14 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
wake_worker(cache);
}
-static void writethrough_endio(struct bio *bio, int err)
+static void writethrough_endio(struct bio *bio)
{
struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
dm_unhook_bio(&pb->hook_info, bio);
- if (err) {
- bio_endio(bio, err);
+ if (bio->bi_error) {
+ bio_endio(bio);
return;
}
@@ -876,6 +960,94 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
}
/*----------------------------------------------------------------
+ * Failure modes
+ *--------------------------------------------------------------*/
+static enum cache_metadata_mode get_cache_mode(struct cache *cache)
+{
+ return cache->features.mode;
+}
+
+static const char *cache_device_name(struct cache *cache)
+{
+ return dm_device_name(dm_table_get_md(cache->ti->table));
+}
+
+static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
+{
+ const char *descs[] = {
+ "write",
+ "read-only",
+ "fail"
+ };
+
+ dm_table_event(cache->ti->table);
+ DMINFO("%s: switching cache to %s mode",
+ cache_device_name(cache), descs[(int)mode]);
+}
+
+static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
+{
+ bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
+ enum cache_metadata_mode old_mode = get_cache_mode(cache);
+
+ if (new_mode == CM_WRITE && needs_check) {
+ DMERR("%s: unable to switch cache to write mode until repaired.",
+ cache_device_name(cache));
+ if (old_mode != new_mode)
+ new_mode = old_mode;
+ else
+ new_mode = CM_READ_ONLY;
+ }
+
+ /* Never move out of fail mode */
+ if (old_mode == CM_FAIL)
+ new_mode = CM_FAIL;
+
+ switch (new_mode) {
+ case CM_FAIL:
+ case CM_READ_ONLY:
+ dm_cache_metadata_set_read_only(cache->cmd);
+ break;
+
+ case CM_WRITE:
+ dm_cache_metadata_set_read_write(cache->cmd);
+ break;
+ }
+
+ cache->features.mode = new_mode;
+
+ if (new_mode != old_mode)
+ notify_mode_switch(cache, new_mode);
+}
+
+static void abort_transaction(struct cache *cache)
+{
+ const char *dev_name = cache_device_name(cache);
+
+ if (get_cache_mode(cache) >= CM_READ_ONLY)
+ return;
+
+ if (dm_cache_metadata_set_needs_check(cache->cmd)) {
+ DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
+ set_cache_mode(cache, CM_FAIL);
+ }
+
+ DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
+ if (dm_cache_metadata_abort(cache->cmd)) {
+ DMERR("%s: failed to abort metadata transaction", dev_name);
+ set_cache_mode(cache, CM_FAIL);
+ }
+}
+
+static void metadata_operation_failed(struct cache *cache, const char *op, int r)
+{
+ DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
+ cache_device_name(cache), op, r);
+ abort_transaction(cache);
+ set_cache_mode(cache, CM_READ_ONLY);
+}
+
+/*----------------------------------------------------------------
* Migration processing
*
* Migration covers moving data from the origin device to the cache, or
@@ -891,50 +1063,82 @@ static void dec_io_migrations(struct cache *cache)
atomic_dec(&cache->nr_io_migrations);
}
-static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
- bool holder)
+static bool discard_or_flush(struct bio *bio)
{
- (holder ? dm_cell_release : dm_cell_release_no_holder)
- (cache->prison, cell, &cache->deferred_bios);
- free_prison_cell(cache, cell);
+ return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
+}
+
+static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
+{
+ if (discard_or_flush(cell->holder)) {
+ /*
+ * We have to handle these bios individually.
+ */
+ dm_cell_release(cache->prison, cell, &cache->deferred_bios);
+ free_prison_cell(cache, cell);
+ } else
+ list_add_tail(&cell->user_list, &cache->deferred_cells);
}
-static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
- bool holder)
+static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder)
{
unsigned long flags;
+ if (!holder && dm_cell_promote_or_release(cache->prison, cell)) {
+ /*
+ * There was no prisoner to promote to holder, the
+ * cell has been released.
+ */
+ free_prison_cell(cache, cell);
+ return;
+ }
+
spin_lock_irqsave(&cache->lock, flags);
- __cell_defer(cache, cell, holder);
+ __cell_defer(cache, cell);
spin_unlock_irqrestore(&cache->lock, flags);
wake_worker(cache);
}
+static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
+{
+ dm_cell_error(cache->prison, cell, err);
+ free_prison_cell(cache, cell);
+}
+
+static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
+{
+ cell_error_with_code(cache, cell, DM_ENDIO_REQUEUE);
+}
+
static void free_io_migration(struct dm_cache_migration *mg)
{
- dec_io_migrations(mg->cache);
+ struct cache *cache = mg->cache;
+
+ dec_io_migrations(cache);
free_migration(mg);
+ wake_worker(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
{
struct cache *cache = mg->cache;
+ const char *dev_name = cache_device_name(cache);
if (mg->writeback) {
- DMWARN_LIMIT("writeback failed; couldn't copy block");
+ DMERR_LIMIT("%s: writeback failed; couldn't copy block", dev_name);
set_dirty(cache, mg->old_oblock, mg->cblock);
cell_defer(cache, mg->old_ocell, false);
} else if (mg->demote) {
- DMWARN_LIMIT("demotion failed; couldn't copy block");
+ DMERR_LIMIT("%s: demotion failed; couldn't copy block", dev_name);
policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
if (mg->promote)
cell_defer(cache, mg->new_ocell, true);
} else {
- DMWARN_LIMIT("promotion failed; couldn't copy block");
+ DMERR_LIMIT("%s: promotion failed; couldn't copy block", dev_name);
policy_remove_mapping(cache->policy, mg->new_oblock);
cell_defer(cache, mg->new_ocell, true);
}
@@ -944,6 +1148,7 @@ static void migration_failure(struct dm_cache_migration *mg)
static void migration_success_pre_commit(struct dm_cache_migration *mg)
{
+ int r;
unsigned long flags;
struct cache *cache = mg->cache;
@@ -954,8 +1159,11 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
return;
} else if (mg->demote) {
- if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
- DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
+ r = dm_cache_remove_mapping(cache->cmd, mg->cblock);
+ if (r) {
+ DMERR_LIMIT("%s: demotion failed; couldn't update on disk metadata",
+ cache_device_name(cache));
+ metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
policy_force_mapping(cache->policy, mg->new_oblock,
mg->old_oblock);
if (mg->promote)
@@ -964,8 +1172,11 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
return;
}
} else {
- if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
- DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
+ r = dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock);
+ if (r) {
+ DMERR_LIMIT("%s: promotion failed; couldn't update on disk metadata",
+ cache_device_name(cache));
+ metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
policy_remove_mapping(cache->policy, mg->new_oblock);
free_io_migration(mg);
return;
@@ -984,7 +1195,8 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
struct cache *cache = mg->cache;
if (mg->writeback) {
- DMWARN("writeback unexpectedly triggered commit");
+ DMWARN_LIMIT("%s: writeback unexpectedly triggered commit",
+ cache_device_name(cache));
return;
} else if (mg->demote) {
@@ -1012,7 +1224,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
* The block was promoted via an overwrite, so it's dirty.
*/
set_dirty(cache, mg->new_oblock, mg->cblock);
- bio_endio(mg->new_ocell->holder, 0);
+ bio_endio(mg->new_ocell->holder);
cell_defer(cache, mg->new_ocell, false);
}
free_io_migration(mg);
@@ -1060,12 +1272,12 @@ static void issue_copy(struct dm_cache_migration *mg)
}
if (r < 0) {
- DMERR_LIMIT("issuing migration failed");
+ DMERR_LIMIT("%s: issuing migration failed", cache_device_name(cache));
migration_failure(mg);
}
}
-static void overwrite_endio(struct bio *bio, int err)
+static void overwrite_endio(struct bio *bio)
{
struct dm_cache_migration *mg = bio->bi_private;
struct cache *cache = mg->cache;
@@ -1075,7 +1287,7 @@ static void overwrite_endio(struct bio *bio, int err)
dm_unhook_bio(&pb->hook_info, bio);
- if (err)
+ if (bio->bi_error)
mg->err = true;
mg->requeue_holder = false;
@@ -1099,7 +1311,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
* No need to inc_ds() here, since the cell will be held for the
* duration of the io.
*/
- generic_make_request(bio);
+ accounted_request(mg->cache, bio);
}
static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
@@ -1132,16 +1344,18 @@ static void issue_discard(struct dm_cache_migration *mg)
{
dm_dblock_t b, e;
struct bio *bio = mg->new_ocell->holder;
+ struct cache *cache = mg->cache;
- calc_discard_block_range(mg->cache, bio, &b, &e);
+ calc_discard_block_range(cache, bio, &b, &e);
while (b != e) {
- set_discard(mg->cache, b);
+ set_discard(cache, b);
b = to_dblock(from_dblock(b) + 1);
}
- bio_endio(bio, 0);
- cell_defer(mg->cache, mg->new_ocell, false);
+ bio_endio(bio);
+ cell_defer(cache, mg->new_ocell, false);
free_migration(mg);
+ wake_worker(cache);
}
static void issue_copy_or_discard(struct dm_cache_migration *mg)
@@ -1412,7 +1626,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
calc_discard_block_range(cache, bio, &b, &e);
if (b == e) {
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
}
@@ -1447,6 +1661,111 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
/*----------------------------------------------------------------*/
+struct inc_detail {
+ struct cache *cache;
+ struct bio_list bios_for_issue;
+ struct bio_list unhandled_bios;
+ bool any_writes;
+};
+
+static void inc_fn(void *context, struct dm_bio_prison_cell *cell)
+{
+ struct bio *bio;
+ struct inc_detail *detail = context;
+ struct cache *cache = detail->cache;
+
+ inc_ds(cache, cell->holder, cell);
+ if (bio_data_dir(cell->holder) == WRITE)
+ detail->any_writes = true;
+
+ while ((bio = bio_list_pop(&cell->bios))) {
+ if (discard_or_flush(bio)) {
+ bio_list_add(&detail->unhandled_bios, bio);
+ continue;
+ }
+
+ if (bio_data_dir(bio) == WRITE)
+ detail->any_writes = true;
+
+ bio_list_add(&detail->bios_for_issue, bio);
+ inc_ds(cache, bio, cell);
+ }
+}
+
+// FIXME: refactor these two
+static void remap_cell_to_origin_clear_discard(struct cache *cache,
+ struct dm_bio_prison_cell *cell,
+ dm_oblock_t oblock, bool issue_holder)
+{
+ struct bio *bio;
+ unsigned long flags;
+ struct inc_detail detail;
+
+ detail.cache = cache;
+ bio_list_init(&detail.bios_for_issue);
+ bio_list_init(&detail.unhandled_bios);
+ detail.any_writes = false;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
+ bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ remap_to_origin(cache, cell->holder);
+ if (issue_holder)
+ issue(cache, cell->holder);
+ else
+ accounted_begin(cache, cell->holder);
+
+ if (detail.any_writes)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
+
+ while ((bio = bio_list_pop(&detail.bios_for_issue))) {
+ remap_to_origin(cache, bio);
+ issue(cache, bio);
+ }
+
+ free_prison_cell(cache, cell);
+}
+
+static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
+ dm_oblock_t oblock, dm_cblock_t cblock, bool issue_holder)
+{
+ struct bio *bio;
+ unsigned long flags;
+ struct inc_detail detail;
+
+ detail.cache = cache;
+ bio_list_init(&detail.bios_for_issue);
+ bio_list_init(&detail.unhandled_bios);
+ detail.any_writes = false;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
+ bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ remap_to_cache(cache, cell->holder, cblock);
+ if (issue_holder)
+ issue(cache, cell->holder);
+ else
+ accounted_begin(cache, cell->holder);
+
+ if (detail.any_writes) {
+ set_dirty(cache, oblock, cblock);
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
+ }
+
+ while ((bio = bio_list_pop(&detail.bios_for_issue))) {
+ remap_to_cache(cache, bio, cblock);
+ issue(cache, bio);
+ }
+
+ free_prison_cell(cache, cell);
+}
+
+/*----------------------------------------------------------------*/
+
struct old_oblock_lock {
struct policy_locker locker;
struct cache *cache;
@@ -1471,36 +1790,26 @@ static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
l->structs, &l->cell);
}
-static void process_bio(struct cache *cache, struct prealloc *structs,
- struct bio *bio)
+static void process_cell(struct cache *cache, struct prealloc *structs,
+ struct dm_bio_prison_cell *new_ocell)
{
int r;
bool release_cell = true;
+ struct bio *bio = new_ocell->holder;
dm_oblock_t block = get_bio_block(cache, bio);
- struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
struct policy_result lookup_result;
bool passthrough = passthrough_mode(&cache->features);
- bool discarded_block, can_migrate;
+ bool fast_promotion, can_migrate;
struct old_oblock_lock ool;
- /*
- * Check to see if that block is currently migrating.
- */
- cell_prealloc = prealloc_get_cell(structs);
- r = bio_detain(cache, block, bio, cell_prealloc,
- (cell_free_fn) prealloc_put_cell,
- structs, &new_ocell);
- if (r > 0)
- return;
-
- discarded_block = is_discarded_oblock(cache, block);
- can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
+ fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
+ can_migrate = !passthrough && (fast_promotion || spare_migration_bandwidth(cache));
ool.locker.fn = cell_locker;
ool.cache = cache;
ool.structs = structs;
ool.cell = NULL;
- r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
+ r = policy_map(cache->policy, block, true, can_migrate, fast_promotion,
bio, &ool.locker, &lookup_result);
if (r == -EWOULDBLOCK)
@@ -1537,9 +1846,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
inc_and_issue(cache, bio, new_ocell);
- } else {
- remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
- inc_and_issue(cache, bio, new_ocell);
+ } else {
+ remap_cell_to_cache_dirty(cache, new_ocell, block, lookup_result.cblock, true);
+ release_cell = false;
}
}
@@ -1547,8 +1856,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
case POLICY_MISS:
inc_miss_counter(cache, bio);
- remap_to_origin_clear_discard(cache, bio, block);
- inc_and_issue(cache, bio, new_ocell);
+ remap_cell_to_origin_clear_discard(cache, new_ocell, block, true);
+ release_cell = false;
break;
case POLICY_NEW:
@@ -1567,7 +1876,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
break;
default:
- DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
+ DMERR_LIMIT("%s: %s: erroring bio, unknown policy op: %u",
+ cache_device_name(cache), __func__,
(unsigned) lookup_result.op);
bio_io_error(bio);
}
@@ -1576,10 +1886,48 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
cell_defer(cache, new_ocell, false);
}
+static void process_bio(struct cache *cache, struct prealloc *structs,
+ struct bio *bio)
+{
+ int r;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
+
+ /*
+ * Check to see if that block is currently migrating.
+ */
+ cell_prealloc = prealloc_get_cell(structs);
+ r = bio_detain(cache, block, bio, cell_prealloc,
+ (cell_free_fn) prealloc_put_cell,
+ structs, &new_ocell);
+ if (r > 0)
+ return;
+
+ process_cell(cache, structs, new_ocell);
+}
+
static int need_commit_due_to_time(struct cache *cache)
{
- return !time_in_range(jiffies, cache->last_commit_jiffies,
- cache->last_commit_jiffies + COMMIT_PERIOD);
+ return jiffies < cache->last_commit_jiffies ||
+ jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
+}
+
+/*
+ * A non-zero return indicates read_only or fail_io mode.
+ */
+static int commit(struct cache *cache, bool clean_shutdown)
+{
+ int r;
+
+ if (get_cache_mode(cache) >= CM_READ_ONLY)
+ return -EINVAL;
+
+ atomic_inc(&cache->stats.commit_count);
+ r = dm_cache_commit(cache->cmd, clean_shutdown);
+ if (r)
+ metadata_operation_failed(cache, "dm_cache_commit", r);
+
+ return r;
}
static int commit_if_needed(struct cache *cache)
@@ -1588,9 +1936,8 @@ static int commit_if_needed(struct cache *cache)
if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
dm_cache_changed_this_transaction(cache->cmd)) {
- atomic_inc(&cache->stats.commit_count);
+ r = commit(cache, false);
cache->commit_requested = false;
- r = dm_cache_commit(cache->cmd, false);
cache->last_commit_jiffies = jiffies;
}
@@ -1599,6 +1946,7 @@ static int commit_if_needed(struct cache *cache)
static void process_deferred_bios(struct cache *cache)
{
+ bool prealloc_used = false;
unsigned long flags;
struct bio_list bios;
struct bio *bio;
@@ -1618,6 +1966,7 @@ static void process_deferred_bios(struct cache *cache)
* this bio might require one, we pause until there are some
* prepared mappings to process.
*/
+ prealloc_used = true;
if (prealloc_data_structs(cache, &structs)) {
spin_lock_irqsave(&cache->lock, flags);
bio_list_merge(&cache->deferred_bios, &bios);
@@ -1635,7 +1984,45 @@ static void process_deferred_bios(struct cache *cache)
process_bio(cache, &structs, bio);
}
- prealloc_free_structs(cache, &structs);
+ if (prealloc_used)
+ prealloc_free_structs(cache, &structs);
+}
+
+static void process_deferred_cells(struct cache *cache)
+{
+ bool prealloc_used = false;
+ unsigned long flags;
+ struct dm_bio_prison_cell *cell, *tmp;
+ struct list_head cells;
+ struct prealloc structs;
+
+ memset(&structs, 0, sizeof(structs));
+
+ INIT_LIST_HEAD(&cells);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_splice_init(&cache->deferred_cells, &cells);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ list_for_each_entry_safe(cell, tmp, &cells, user_list) {
+ /*
+ * If we've got no free migration structs, and processing
+ * this bio might require one, we pause until there are some
+ * prepared mappings to process.
+ */
+ prealloc_used = true;
+ if (prealloc_data_structs(cache, &structs)) {
+ spin_lock_irqsave(&cache->lock, flags);
+ list_splice(&cells, &cache->deferred_cells);
+ spin_unlock_irqrestore(&cache->lock, flags);
+ break;
+ }
+
+ process_cell(cache, &structs, cell);
+ }
+
+ if (prealloc_used)
+ prealloc_free_structs(cache, &structs);
}
static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
@@ -1655,7 +2042,7 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
* These bios have already been through inc_ds()
*/
while ((bio = bio_list_pop(&bios)))
- submit_bios ? generic_make_request(bio) : bio_io_error(bio);
+ submit_bios ? accounted_request(cache, bio) : bio_io_error(bio);
}
static void process_deferred_writethrough_bios(struct cache *cache)
@@ -1675,29 +2062,27 @@ static void process_deferred_writethrough_bios(struct cache *cache)
* These bios have already been through inc_ds()
*/
while ((bio = bio_list_pop(&bios)))
- generic_make_request(bio);
+ accounted_request(cache, bio);
}
static void writeback_some_dirty_blocks(struct cache *cache)
{
- int r = 0;
+ bool prealloc_used = false;
dm_oblock_t oblock;
dm_cblock_t cblock;
struct prealloc structs;
struct dm_bio_prison_cell *old_ocell;
+ bool busy = !iot_idle_for(&cache->origin_tracker, HZ);
memset(&structs, 0, sizeof(structs));
while (spare_migration_bandwidth(cache)) {
- if (prealloc_data_structs(cache, &structs))
- break;
-
- r = policy_writeback_work(cache->policy, &oblock, &cblock);
- if (r)
- break;
+ if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
+ break; /* no work to do */
- r = get_cell(cache, oblock, &structs, &old_ocell);
- if (r) {
+ prealloc_used = true;
+ if (prealloc_data_structs(cache, &structs) ||
+ get_cell(cache, oblock, &structs, &old_ocell)) {
policy_set_dirty(cache->policy, oblock);
break;
}
@@ -1705,7 +2090,8 @@ static void writeback_some_dirty_blocks(struct cache *cache)
writeback(cache, &structs, oblock, cblock, old_ocell);
}
- prealloc_free_structs(cache, &structs);
+ if (prealloc_used)
+ prealloc_free_structs(cache, &structs);
}
/*----------------------------------------------------------------
@@ -1723,15 +2109,17 @@ static void process_invalidation_request(struct cache *cache, struct invalidatio
r = policy_remove_cblock(cache->policy, to_cblock(begin));
if (!r) {
r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
- if (r)
+ if (r) {
+ metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
break;
+ }
} else if (r == -ENODATA) {
/* harmless, already unmapped */
r = 0;
} else {
- DMERR("policy_remove_cblock failed");
+ DMERR("%s: policy_remove_cblock failed", cache_device_name(cache));
break;
}
@@ -1804,7 +2192,22 @@ static void stop_worker(struct cache *cache)
flush_workqueue(cache->wq);
}
-static void requeue_deferred_io(struct cache *cache)
+static void requeue_deferred_cells(struct cache *cache)
+{
+ unsigned long flags;
+ struct list_head cells;
+ struct dm_bio_prison_cell *cell, *tmp;
+
+ INIT_LIST_HEAD(&cells);
+ spin_lock_irqsave(&cache->lock, flags);
+ list_splice_init(&cache->deferred_cells, &cells);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ list_for_each_entry_safe(cell, tmp, &cells, user_list)
+ cell_requeue(cache, cell);
+}
+
+static void requeue_deferred_bios(struct cache *cache)
{
struct bio *bio;
struct bio_list bios;
@@ -1813,8 +2216,10 @@ static void requeue_deferred_io(struct cache *cache)
bio_list_merge(&bios, &cache->deferred_bios);
bio_list_init(&cache->deferred_bios);
- while ((bio = bio_list_pop(&bios)))
- bio_endio(bio, DM_ENDIO_REQUEUE);
+ while ((bio = bio_list_pop(&bios))) {
+ bio->bi_error = DM_ENDIO_REQUEUE;
+ bio_endio(bio);
+ }
}
static int more_work(struct cache *cache)
@@ -1825,6 +2230,7 @@ static int more_work(struct cache *cache)
!list_empty(&cache->need_commit_migrations);
else
return !bio_list_empty(&cache->deferred_bios) ||
+ !list_empty(&cache->deferred_cells) ||
!bio_list_empty(&cache->deferred_flush_bios) ||
!bio_list_empty(&cache->deferred_writethrough_bios) ||
!list_empty(&cache->quiesced_migrations) ||
@@ -1842,6 +2248,7 @@ static void do_worker(struct work_struct *ws)
writeback_some_dirty_blocks(cache);
process_deferred_writethrough_bios(cache);
process_deferred_bios(cache);
+ process_deferred_cells(cache);
process_invalidation_requests(cache);
}
@@ -1851,11 +2258,6 @@ static void do_worker(struct work_struct *ws)
if (commit_if_needed(cache)) {
process_deferred_flush_bios(cache, false);
process_migrations(cache, &cache->need_commit_migrations, migration_failure);
-
- /*
- * FIXME: rollback metadata or just go into a
- * failure mode and error everything
- */
} else {
process_deferred_flush_bios(cache, true);
process_migrations(cache, &cache->need_commit_migrations,
@@ -1874,7 +2276,7 @@ static void do_worker(struct work_struct *ws)
static void do_waker(struct work_struct *ws)
{
struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
- policy_tick(cache->policy);
+ policy_tick(cache->policy, true);
wake_worker(cache);
queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}
@@ -1907,8 +2309,7 @@ static void destroy(struct cache *cache)
{
unsigned i;
- if (cache->migration_pool)
- mempool_destroy(cache->migration_pool);
+ mempool_destroy(cache->migration_pool);
if (cache->all_io_ds)
dm_deferred_set_destroy(cache->all_io_ds);
@@ -2428,6 +2829,12 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
cache->cmd = cmd;
+ set_cache_mode(cache, CM_WRITE);
+ if (get_cache_mode(cache) != CM_WRITE) {
+ *error = "Unable to get write access to metadata, please check/repair metadata.";
+ r = -EINVAL;
+ goto bad;
+ }
if (passthrough_mode(&cache->features)) {
bool all_clean;
@@ -2446,6 +2853,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
spin_lock_init(&cache->lock);
+ INIT_LIST_HEAD(&cache->deferred_cells);
bio_list_init(&cache->deferred_bios);
bio_list_init(&cache->deferred_flush_bios);
bio_list_init(&cache->deferred_writethrough_bios);
@@ -2535,6 +2943,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
spin_lock_init(&cache->invalidation_lock);
INIT_LIST_HEAD(&cache->invalidation_requests);
+ iot_init(&cache->origin_tracker);
+
*result = cache;
return 0;
@@ -2601,13 +3011,18 @@ out:
return r;
}
-static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell)
+/*----------------------------------------------------------------*/
+
+static int cache_map(struct dm_target *ti, struct bio *bio)
{
+ struct cache *cache = ti->private;
+
int r;
+ struct dm_bio_prison_cell *cell = NULL;
dm_oblock_t block = get_bio_block(cache, bio);
size_t pb_data_size = get_per_bio_data_size(cache);
bool can_migrate = false;
- bool discarded_block;
+ bool fast_promotion;
struct policy_result lookup_result;
struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
struct old_oblock_lock ool;
@@ -2621,10 +3036,11 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
* Just remap to the origin and carry on.
*/
remap_to_origin(cache, bio);
+ accounted_begin(cache, bio);
return DM_MAPIO_REMAPPED;
}
- if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+ if (discard_or_flush(bio)) {
defer_bio(cache, bio);
return DM_MAPIO_SUBMITTED;
}
@@ -2632,15 +3048,15 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
/*
* Check to see if that block is currently migrating.
*/
- *cell = alloc_prison_cell(cache);
- if (!*cell) {
+ cell = alloc_prison_cell(cache);
+ if (!cell) {
defer_bio(cache, bio);
return DM_MAPIO_SUBMITTED;
}
- r = bio_detain(cache, block, bio, *cell,
+ r = bio_detain(cache, block, bio, cell,
(cell_free_fn) free_prison_cell,
- cache, cell);
+ cache, &cell);
if (r) {
if (r < 0)
defer_bio(cache, bio);
@@ -2648,17 +3064,18 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
return DM_MAPIO_SUBMITTED;
}
- discarded_block = is_discarded_oblock(cache, block);
+ fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
- r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
+ r = policy_map(cache->policy, block, false, can_migrate, fast_promotion,
bio, &ool.locker, &lookup_result);
if (r == -EWOULDBLOCK) {
- cell_defer(cache, *cell, true);
+ cell_defer(cache, cell, true);
return DM_MAPIO_SUBMITTED;
} else if (r) {
- DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
- cell_defer(cache, *cell, false);
+ DMERR_LIMIT("%s: Unexpected return from cache replacement policy: %d",
+ cache_device_name(cache), r);
+ cell_defer(cache, cell, false);
bio_io_error(bio);
return DM_MAPIO_SUBMITTED;
}
@@ -2672,21 +3089,30 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
* We need to invalidate this block, so
* defer for the worker thread.
*/
- cell_defer(cache, *cell, true);
+ cell_defer(cache, cell, true);
r = DM_MAPIO_SUBMITTED;
} else {
inc_miss_counter(cache, bio);
remap_to_origin_clear_discard(cache, bio, block);
+ accounted_begin(cache, bio);
+ inc_ds(cache, bio, cell);
+ // FIXME: we want to remap hits or misses straight
+ // away rather than passing over to the worker.
+ cell_defer(cache, cell, false);
}
} else {
inc_hit_counter(cache, bio);
if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
- !is_dirty(cache, lookup_result.cblock))
+ !is_dirty(cache, lookup_result.cblock)) {
remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
- else
- remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ accounted_begin(cache, bio);
+ inc_ds(cache, bio, cell);
+ cell_defer(cache, cell, false);
+
+ } else
+ remap_cell_to_cache_dirty(cache, cell, block, lookup_result.cblock, false);
}
break;
@@ -2697,19 +3123,20 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
* This is a duplicate writethrough io that is no
* longer needed because the block has been demoted.
*/
- bio_endio(bio, 0);
- cell_defer(cache, *cell, false);
+ bio_endio(bio);
+ // FIXME: remap everything as a miss
+ cell_defer(cache, cell, false);
r = DM_MAPIO_SUBMITTED;
} else
- remap_to_origin_clear_discard(cache, bio, block);
-
+ remap_cell_to_origin_clear_discard(cache, cell, block, false);
break;
default:
- DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
+ DMERR_LIMIT("%s: %s: erroring bio: unknown policy op: %u",
+ cache_device_name(cache), __func__,
(unsigned) lookup_result.op);
- cell_defer(cache, *cell, false);
+ cell_defer(cache, cell, false);
bio_io_error(bio);
r = DM_MAPIO_SUBMITTED;
}
@@ -2717,21 +3144,6 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
return r;
}
-static int cache_map(struct dm_target *ti, struct bio *bio)
-{
- int r;
- struct dm_bio_prison_cell *cell = NULL;
- struct cache *cache = ti->private;
-
- r = __cache_map(cache, bio, &cell);
- if (r == DM_MAPIO_REMAPPED && cell) {
- inc_ds(cache, bio, cell);
- cell_defer(cache, cell, false);
- }
-
- return r;
-}
-
static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct cache *cache = ti->private;
@@ -2740,7 +3152,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
if (pb->tick) {
- policy_tick(cache->policy);
+ policy_tick(cache->policy, false);
spin_lock_irqsave(&cache->lock, flags);
cache->need_tick_bio = true;
@@ -2748,6 +3160,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
}
check_for_quiesced_migrations(cache, pb);
+ accounted_complete(cache, bio);
return 0;
}
@@ -2756,11 +3169,16 @@ static int write_dirty_bitset(struct cache *cache)
{
unsigned i, r;
+ if (get_cache_mode(cache) >= CM_READ_ONLY)
+ return -EINVAL;
+
for (i = 0; i < from_cblock(cache->cache_size); i++) {
r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
is_dirty(cache, to_cblock(i)));
- if (r)
+ if (r) {
+ metadata_operation_failed(cache, "dm_cache_set_dirty", r);
return r;
+ }
}
return 0;
@@ -2770,18 +3188,40 @@ static int write_discard_bitset(struct cache *cache)
{
unsigned i, r;
+ if (get_cache_mode(cache) >= CM_READ_ONLY)
+ return -EINVAL;
+
r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
cache->discard_nr_blocks);
if (r) {
- DMERR("could not resize on-disk discard bitset");
+ DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
+ metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
return r;
}
for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
r = dm_cache_set_discard(cache->cmd, to_dblock(i),
is_discarded(cache, to_dblock(i)));
- if (r)
+ if (r) {
+ metadata_operation_failed(cache, "dm_cache_set_discard", r);
return r;
+ }
+ }
+
+ return 0;
+}
+
+static int write_hints(struct cache *cache)
+{
+ int r;
+
+ if (get_cache_mode(cache) >= CM_READ_ONLY)
+ return -EINVAL;
+
+ r = dm_cache_write_hints(cache->cmd, cache->policy);
+ if (r) {
+ metadata_operation_failed(cache, "dm_cache_write_hints", r);
+ return r;
}
return 0;
@@ -2796,26 +3236,26 @@ static bool sync_metadata(struct cache *cache)
r1 = write_dirty_bitset(cache);
if (r1)
- DMERR("could not write dirty bitset");
+ DMERR("%s: could not write dirty bitset", cache_device_name(cache));
r2 = write_discard_bitset(cache);
if (r2)
- DMERR("could not write discard bitset");
+ DMERR("%s: could not write discard bitset", cache_device_name(cache));
save_stats(cache);
- r3 = dm_cache_write_hints(cache->cmd, cache->policy);
+ r3 = write_hints(cache);
if (r3)
- DMERR("could not write hints");
+ DMERR("%s: could not write hints", cache_device_name(cache));
/*
* If writing the above metadata failed, we still commit, but don't
* set the clean shutdown flag. This will effectively force every
* dirty bit to be set on reload.
*/
- r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
+ r4 = commit(cache, !r1 && !r2 && !r3);
if (r4)
- DMERR("could not write cache metadata. Data loss may occur.");
+ DMERR("%s: could not write cache metadata", cache_device_name(cache));
return !r1 && !r2 && !r3 && !r4;
}
@@ -2827,10 +3267,12 @@ static void cache_postsuspend(struct dm_target *ti)
start_quiescing(cache);
wait_for_migrations(cache);
stop_worker(cache);
- requeue_deferred_io(cache);
+ requeue_deferred_bios(cache);
+ requeue_deferred_cells(cache);
stop_quiescing(cache);
- (void) sync_metadata(cache);
+ if (get_cache_mode(cache) == CM_WRITE)
+ (void) sync_metadata(cache);
}
static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
@@ -2953,7 +3395,8 @@ static bool can_resize(struct cache *cache, dm_cblock_t new_size)
while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
new_size = to_cblock(from_cblock(new_size) + 1);
if (is_dirty(cache, new_size)) {
- DMERR("unable to shrink cache; cache block %llu is dirty",
+ DMERR("%s: unable to shrink cache; cache block %llu is dirty",
+ cache_device_name(cache),
(unsigned long long) from_cblock(new_size));
return false;
}
@@ -2968,7 +3411,8 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
r = dm_cache_resize(cache->cmd, new_size);
if (r) {
- DMERR("could not resize cache metadata");
+ DMERR("%s: could not resize cache metadata", cache_device_name(cache));
+ metadata_operation_failed(cache, "dm_cache_resize", r);
return r;
}
@@ -3006,7 +3450,8 @@ static int cache_preresume(struct dm_target *ti)
r = dm_cache_load_mappings(cache->cmd, cache->policy,
load_mapping, cache);
if (r) {
- DMERR("could not load cache mappings");
+ DMERR("%s: could not load cache mappings", cache_device_name(cache));
+ metadata_operation_failed(cache, "dm_cache_load_mappings", r);
return r;
}
@@ -3026,7 +3471,8 @@ static int cache_preresume(struct dm_target *ti)
discard_load_info_init(cache, &li);
r = dm_cache_load_discards(cache->cmd, load_discard, &li);
if (r) {
- DMERR("could not load origin discards");
+ DMERR("%s: could not load origin discards", cache_device_name(cache));
+ metadata_operation_failed(cache, "dm_cache_load_discards", r);
return r;
}
set_discard_range(&li);
@@ -3054,7 +3500,7 @@ static void cache_resume(struct dm_target *ti)
* <#demotions> <#promotions> <#dirty>
* <#features> <features>*
* <#core args> <core args>
- * <policy name> <#policy args> <policy args>*
+ * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
*/
static void cache_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -3070,23 +3516,26 @@ static void cache_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- /* Commit to ensure statistics aren't out-of-date */
- if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
- r = dm_cache_commit(cache->cmd, false);
- if (r)
- DMERR("could not commit metadata for accurate status");
+ if (get_cache_mode(cache) == CM_FAIL) {
+ DMEMIT("Fail");
+ break;
}
- r = dm_cache_get_free_metadata_block_count(cache->cmd,
- &nr_free_blocks_metadata);
+ /* Commit to ensure statistics aren't out-of-date */
+ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
+ (void) commit(cache, false);
+
+ r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
if (r) {
- DMERR("could not get metadata free block count");
+ DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
+ cache_device_name(cache), r);
goto err;
}
r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
if (r) {
- DMERR("could not get metadata device size");
+ DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
+ cache_device_name(cache), r);
goto err;
}
@@ -3117,7 +3566,8 @@ static void cache_status(struct dm_target *ti, status_type_t type,
DMEMIT("1 writeback ");
else {
- DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
+ DMERR("%s: internal error: unknown io mode: %d",
+ cache_device_name(cache), (int) cache->features.io_mode);
goto err;
}
@@ -3125,11 +3575,22 @@ static void cache_status(struct dm_target *ti, status_type_t type,
DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
if (sz < maxlen) {
- r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
+ r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
if (r)
- DMERR("policy_emit_config_values returned %d", r);
+ DMERR("%s: policy_emit_config_values returned %d",
+ cache_device_name(cache), r);
}
+ if (get_cache_mode(cache) == CM_READ_ONLY)
+ DMEMIT("ro ");
+ else
+ DMEMIT("rw ");
+
+ if (dm_cache_metadata_needs_check(cache->cmd))
+ DMEMIT("needs_check ");
+ else
+ DMEMIT("- ");
+
break;
case STATUSTYPE_TABLE:
@@ -3191,7 +3652,7 @@ static int parse_cblock_range(struct cache *cache, const char *str,
return 0;
}
- DMERR("invalid cblock range '%s'", str);
+ DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
return -EINVAL;
}
@@ -3202,17 +3663,20 @@ static int validate_cblock_range(struct cache *cache, struct cblock_range *range
uint64_t n = from_cblock(cache->cache_size);
if (b >= n) {
- DMERR("begin cblock out of range: %llu >= %llu", b, n);
+ DMERR("%s: begin cblock out of range: %llu >= %llu",
+ cache_device_name(cache), b, n);
return -EINVAL;
}
if (e > n) {
- DMERR("end cblock out of range: %llu > %llu", e, n);
+ DMERR("%s: end cblock out of range: %llu > %llu",
+ cache_device_name(cache), e, n);
return -EINVAL;
}
if (b >= e) {
- DMERR("invalid cblock range: %llu >= %llu", b, e);
+ DMERR("%s: invalid cblock range: %llu >= %llu",
+ cache_device_name(cache), b, e);
return -EINVAL;
}
@@ -3246,7 +3710,8 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
struct cblock_range range;
if (!passthrough_mode(&cache->features)) {
- DMERR("cache has to be in passthrough mode for invalidation");
+ DMERR("%s: cache has to be in passthrough mode for invalidation",
+ cache_device_name(cache));
return -EPERM;
}
@@ -3285,6 +3750,12 @@ static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
if (!argc)
return -EINVAL;
+ if (get_cache_mode(cache) >= CM_READ_ONLY) {
+ DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
+ cache_device_name(cache));
+ return -EOPNOTSUPP;
+ }
+
if (!strcasecmp(argv[0], "invalidate_cblocks"))
return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
@@ -3307,26 +3778,6 @@ static int cache_iterate_devices(struct dm_target *ti,
return r;
}
-/*
- * We assume I/O is going to the origin (which is the volume
- * more likely to have restrictions e.g. by being striped).
- * (Looking up the exact location of the data would be expensive
- * and could always be out of date by the time the bio is submitted.)
- */
-static int cache_bvec_merge(struct dm_target *ti,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct cache *cache = ti->private;
- struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = cache->origin_dev->bdev;
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
/*
@@ -3358,7 +3809,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 6, 0},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
@@ -3370,7 +3821,6 @@ static struct target_type cache_target = {
.status = cache_status,
.message = cache_message,
.iterate_devices = cache_iterate_devices,
- .merge = cache_bvec_merge,
.io_hints = cache_io_hints,
};
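
The failure-mode handling added to dm-cache-target.c above reduces to a small state machine: write -> read-only -> fail, with fail being terminal and write unreachable while the metadata still needs repair. The stand-alone C sketch below models only those transition rules; next_mode() and the needs_check flag are illustrative stand-ins for set_cache_mode() and dm_cache_metadata_needs_check(), not driver code.

/*
 * User-space model of the cache metadata mode transitions above.
 * Not driver code: needs_check replaces dm_cache_metadata_needs_check().
 */
#include <stdbool.h>
#include <stdio.h>

enum cache_metadata_mode { CM_WRITE, CM_READ_ONLY, CM_FAIL };

static enum cache_metadata_mode next_mode(enum cache_metadata_mode old,
					  enum cache_metadata_mode requested,
					  bool needs_check)
{
	/* Cannot re-enter write mode until the metadata has been repaired. */
	if (requested == CM_WRITE && needs_check)
		requested = (old != CM_WRITE) ? old : CM_READ_ONLY;

	/* Fail mode is terminal; never move out of it. */
	if (old == CM_FAIL)
		requested = CM_FAIL;

	return requested;
}

int main(void)
{
	/* A failed commit demotes write mode to read-only... */
	printf("%d\n", next_mode(CM_WRITE, CM_READ_ONLY, false)); /* 1 */
	/* ...and once in fail mode the cache stays there. */
	printf("%d\n", next_mode(CM_FAIL, CM_WRITE, false));      /* 2 */
	return 0;
}
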
diff --git a/kernel/drivers/md/dm-crypt.c b/kernel/drivers/md/dm-crypt.c
index 5503e43e5..3147c8d09 100644
--- a/kernel/drivers/md/dm-crypt.c
+++ b/kernel/drivers/md/dm-crypt.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2003 Jana Saout <jana@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
*
* This file is released under the GPL.
@@ -112,7 +112,8 @@ struct iv_tcw_private {
* and encrypts / decrypts at the same time.
*/
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
+ DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
+ DM_CRYPT_EXIT_THREAD};
/*
* The fields in here must be read only after initialization.
@@ -891,6 +892,11 @@ static void crypt_alloc_req(struct crypt_config *cc,
ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+
+ /*
+ * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
+ * requests if driver request queue is full.
+ */
ablkcipher_request_set_callback(ctx->req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
kcryptd_async_done, dmreq_of_req(cc, ctx->req));
@@ -924,24 +930,32 @@ static int crypt_convert(struct crypt_config *cc,
r = crypt_convert_block(cc, ctx, ctx->req);
switch (r) {
- /* async */
+ /*
+ * The request was queued by a crypto driver
+ * but the driver request queue is full, let's wait.
+ */
case -EBUSY:
wait_for_completion(&ctx->restart);
reinit_completion(&ctx->restart);
- /* fall through*/
+ /* fall through */
+ /*
+ * The request is queued and processed asynchronously,
+ * completion function kcryptd_async_done() will be called.
+ */
case -EINPROGRESS:
ctx->req = NULL;
ctx->cc_sector++;
continue;
-
- /* sync */
+ /*
+ * The request was already processed (synchronously).
+ */
case 0:
atomic_dec(&ctx->cc_pending);
ctx->cc_sector++;
cond_resched();
continue;
- /* error */
+ /* There was an error while processing the request. */
default:
atomic_dec(&ctx->cc_pending);
return r;
@@ -955,7 +969,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
/*
* Generate a new unfragmented bio with the given size
- * This should never violate the device limitations
+ * This should never violate the device limitations (but only because
+ * max_segment_size is being constrained to PAGE_SIZE).
*
* This function may be called concurrently. If we allocate from the mempool
* concurrently, there is a possibility of deadlock. For example, if we have
@@ -980,7 +995,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
struct bio_vec *bvec;
retry:
- if (unlikely(gfp_mask & __GFP_WAIT))
+ if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_lock(&cc->bio_alloc_lock);
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
@@ -996,7 +1011,7 @@ retry:
if (!page) {
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
- gfp_mask |= __GFP_WAIT;
+ gfp_mask |= __GFP_DIRECT_RECLAIM;
goto retry;
}
@@ -1013,7 +1028,7 @@ retry:
}
return_clone:
- if (unlikely(gfp_mask & __GFP_WAIT))
+ if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_unlock(&cc->bio_alloc_lock);
return clone;
@@ -1063,7 +1078,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (io->ctx.req)
crypt_free_req(cc, io->ctx.req, base_bio);
- bio_endio(base_bio, error);
+ base_bio->bi_error = error;
+ bio_endio(base_bio);
}
/*
@@ -1083,14 +1099,12 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
* The work is done per CPU global for all dm-crypt instances.
* They should not depend on each other and do not block.
*/
-static void crypt_endio(struct bio *clone, int error)
+static void crypt_endio(struct bio *clone)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
unsigned rw = bio_data_dir(clone);
-
- if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
- error = -EIO;
+ int error;
/*
* free the processed pages
@@ -1098,6 +1112,7 @@ static void crypt_endio(struct bio *clone, int error)
if (rw == WRITE)
crypt_free_buffer_pages(cc, clone);
+ error = clone->bi_error;
bio_put(clone);
if (rw == READ && !error) {
@@ -1189,20 +1204,18 @@ continue_locked:
if (!RB_EMPTY_ROOT(&cc->write_tree))
goto pop_from_list;
+ if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
+ spin_unlock_irq(&cc->write_thread_wait.lock);
+ break;
+ }
+
__set_current_state(TASK_INTERRUPTIBLE);
__add_wait_queue(&cc->write_thread_wait, &wait);
spin_unlock_irq(&cc->write_thread_wait.lock);
- if (unlikely(kthread_should_stop())) {
- set_task_state(current, TASK_RUNNING);
- remove_wait_queue(&cc->write_thread_wait, &wait);
- break;
- }
-
schedule();
- set_task_state(current, TASK_RUNNING);
spin_lock_irq(&cc->write_thread_wait.lock);
__remove_wait_queue(&cc->write_thread_wait, &wait);
goto continue_locked;
@@ -1346,6 +1359,11 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
struct crypt_config *cc = io->cc;
+ /*
+ * A request from crypto driver backlog is going to be processed now,
+ * finish the completion and continue in crypt_convert().
+ * (Callback will be called for the second time for this request.)
+ */
if (error == -EINPROGRESS) {
complete(&ctx->restart);
return;
@@ -1512,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti)
if (!cc)
return;
- if (cc->write_thread)
+ if (cc->write_thread) {
+ spin_lock_irq(&cc->write_thread_wait.lock);
+ set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
+ wake_up_locked(&cc->write_thread_wait);
+ spin_unlock_irq(&cc->write_thread_wait.lock);
kthread_stop(cc->write_thread);
+ }
if (cc->io_queue)
destroy_workqueue(cc->io_queue);
@@ -1525,10 +1548,8 @@ static void crypt_dtr(struct dm_target *ti)
if (cc->bs)
bioset_free(cc->bs);
- if (cc->page_pool)
- mempool_destroy(cc->page_pool);
- if (cc->req_pool)
- mempool_destroy(cc->req_pool);
+ mempool_destroy(cc->page_pool);
+ mempool_destroy(cc->req_pool);
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
@@ -1793,11 +1814,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
cc->iv_offset = tmpll;
- if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
+ ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
+ if (ret) {
ti->error = "Device lookup failed";
goto bad;
}
+ ret = -EINVAL;
if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector";
goto bad;
@@ -2017,21 +2040,6 @@ error:
return -EINVAL;
}
-static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct crypt_config *cc = ti->private;
- struct request_queue *q = bdev_get_queue(cc->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = cc->dev->bdev;
- bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int crypt_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -2040,9 +2048,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
return fn(ti, cc->dev, cc->start, ti->len, data);
}
+static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ /*
+ * Unfortunate constraint that is required to avoid the potential
+ * for exceeding underlying device's max_segments limits -- due to
+ * crypt_alloc_buffer() possibly allocating pages for the encryption
+ * bio that are not as physically contiguous as the original bio.
+ */
+ limits->max_segment_size = PAGE_SIZE;
+}
+
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 14, 0},
+ .version = {1, 14, 1},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
@@ -2052,8 +2071,8 @@ static struct target_type crypt_target = {
.preresume = crypt_preresume,
.resume = crypt_resume,
.message = crypt_message,
- .merge = crypt_merge,
.iterate_devices = crypt_iterate_devices,
+ .io_hints = crypt_io_hints,
};
static int __init dm_crypt_init(void)
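
The dm-crypt write-thread shutdown above replaces kthread_should_stop() with a DM_CRYPT_EXIT_THREAD flag that is set, and the sleeper woken, while holding the wait-queue lock, so the request cannot be lost between the emptiness check and the sleep. The user-space sketch below models that protocol with a pthread mutex and condition variable; all names (write_thread, queued, exit_thread) are illustrative, not the driver's own.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool exit_thread;
static int queued;

static void *write_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (!queued && !exit_thread)
			pthread_cond_wait(&wake, &lock); /* releases the lock atomically while sleeping */
		if (!queued && exit_thread)
			break;
		queued--;                                /* "issue" one deferred write */
		printf("issued one deferred write\n");
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, write_thread, NULL);

	pthread_mutex_lock(&lock);
	queued = 2;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);

	/* Destructor path: flag and wakeup happen under the same lock, */
	/* so the worker cannot miss the request and sleep forever.     */
	pthread_mutex_lock(&lock);
	exit_thread = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	return 0;
}
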
diff --git a/kernel/drivers/md/dm-delay.c b/kernel/drivers/md/dm-delay.c
index 57b6a1901..b4c356a21 100644
--- a/kernel/drivers/md/dm-delay.c
+++ b/kernel/drivers/md/dm-delay.c
@@ -122,6 +122,7 @@ static void flush_expired_bios(struct work_struct *work)
* <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
*
* With separate write parameters, the first set is only used for reads.
+ * Offsets are specified in sectors.
* Delays are specified in milliseconds.
*/
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
@@ -129,9 +130,10 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct delay_c *dc;
unsigned long long tmpll;
char dummy;
+ int ret;
if (argc != 3 && argc != 6) {
- ti->error = "requires exactly 3 or 6 arguments";
+ ti->error = "Requires exactly 3 or 6 arguments";
return -EINVAL;
}
@@ -143,6 +145,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
dc->reads = dc->writes = 0;
+ ret = -EINVAL;
if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector";
goto bad;
@@ -154,12 +157,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
- &dc->dev_read)) {
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+ &dc->dev_read);
+ if (ret) {
ti->error = "Device lookup failed";
goto bad;
}
+ ret = -EINVAL;
dc->dev_write = NULL;
if (argc == 3)
goto out;
@@ -175,13 +180,15 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_dev_read;
}
- if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
- &dc->dev_write)) {
+ ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+ &dc->dev_write);
+ if (ret) {
ti->error = "Write device lookup failed";
goto bad_dev_read;
}
out:
+ ret = -EINVAL;
dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!dc->kdelayd_wq) {
DMERR("Couldn't start kdelayd");
@@ -208,7 +215,7 @@ bad_dev_read:
dm_put_device(ti, dc->dev_read);
bad:
kfree(dc);
- return -EINVAL;
+ return ret;
}
static void delay_dtr(struct dm_target *ti)
@@ -231,7 +238,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
unsigned long expires = 0;
if (!delay || !atomic_read(&dc->may_delay))
- return 1;
+ return DM_MAPIO_REMAPPED;
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
@@ -251,7 +258,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
queue_timeout(dc, expires);
- return 0;
+ return DM_MAPIO_SUBMITTED;
}
static void delay_presuspend(struct dm_target *ti)
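
A recurring cleanup in delay_ctr() above (also visible in crypt_ctr() earlier and flakey_ctr() later): constructors now keep the return code from dm_get_device() instead of collapsing every failure to -EINVAL, resetting ret to -EINVAL only ahead of the argument-parsing steps. A minimal sketch of that pattern, with lookup_device() and parse_sector() as hypothetical stand-ins:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for dm_get_device() and sscanf()-based parsing. */
static int lookup_device(const char *name)
{
	return strcmp(name, "/dev/exists") ? -ENODEV : 0;
}

static int parse_sector(const char *arg, unsigned long long *out)
{
	return sscanf(arg, "%llu", out) == 1 ? 0 : -1;
}

static int example_ctr(const char *dev, const char *sector_arg)
{
	unsigned long long sector;
	int ret;

	ret = -EINVAL;                       /* default for argument errors */
	if (parse_sector(sector_arg, &sector))
		goto bad;

	ret = lookup_device(dev);            /* propagate the lookup's own error */
	if (ret)
		goto bad;

	return 0;
bad:
	return ret;
}

int main(void)
{
	printf("%d\n", example_ctr("/dev/missing", "128"));  /* -ENODEV (-19) */
	printf("%d\n", example_ctr("/dev/exists", "bogus")); /* -EINVAL (-22) */
	return 0;
}
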
diff --git a/kernel/drivers/md/dm-era-target.c b/kernel/drivers/md/dm-era-target.c
index ad913cd4a..665bf3285 100644
--- a/kernel/drivers/md/dm-era-target.c
+++ b/kernel/drivers/md/dm-era-target.c
@@ -343,7 +343,9 @@ static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
}
}
- return dm_bm_unlock(b);
+ dm_bm_unlock(b);
+
+ return 0;
}
/*----------------------------------------------------------------*/
@@ -582,7 +584,9 @@ static int open_metadata(struct era_metadata *md)
md->metadata_snap = le64_to_cpu(disk->metadata_snap);
md->archived_writesets = true;
- return dm_bm_unlock(sblock);
+ dm_bm_unlock(sblock);
+
+ return 0;
bad:
dm_bm_unlock(sblock);
@@ -1046,12 +1050,7 @@ static int metadata_take_snap(struct era_metadata *md)
md->metadata_snap = dm_block_location(clone);
- r = dm_tm_unlock(md->tm, clone);
- if (r) {
- DMERR("%s: couldn't unlock clone", __func__);
- md->metadata_snap = SUPERBLOCK_LOCATION;
- return r;
- }
+ dm_tm_unlock(md->tm, clone);
return 0;
}
@@ -1673,20 +1672,6 @@ static int era_iterate_devices(struct dm_target *ti,
return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}
-static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct era *era = ti->private;
- struct request_queue *q = bdev_get_queue(era->origin_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = era->origin_dev->bdev;
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct era *era = ti->private;
@@ -1717,7 +1702,6 @@ static struct target_type era_target = {
.status = era_status,
.message = era_message,
.iterate_devices = era_iterate_devices,
- .merge = era_merge,
.io_hints = era_io_hints
};
diff --git a/kernel/drivers/md/dm-exception-store.c b/kernel/drivers/md/dm-exception-store.c
index ebaa4f803..3997f34cf 100644
--- a/kernel/drivers/md/dm-exception-store.c
+++ b/kernel/drivers/md/dm-exception-store.c
@@ -183,7 +183,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
store->chunk_size = chunk_size;
store->chunk_mask = chunk_size - 1;
- store->chunk_shift = ffs(chunk_size) - 1;
+ store->chunk_shift = __ffs(chunk_size);
return 0;
}
@@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
return -EINVAL;
}
- tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL);
+ tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL);
if (!tmp_store) {
ti->error = "Exception store allocation failed";
return -ENOMEM;
@@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
else if (persistent == 'N')
type = get_type("N");
else {
- ti->error = "Persistent flag is not P or N";
+ ti->error = "Exception store type is not P or N";
r = -EINVAL;
goto bad_type;
}
@@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
if (r)
goto bad;
- r = type->ctr(tmp_store, 0, NULL);
+ r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL));
if (r) {
ti->error = "Exception store type constructor failed";
goto bad;
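
The switch above from ffs(chunk_size) - 1 to __ffs(chunk_size) works because exception-store chunk sizes are required to be powers of two (in sectors): the index of the lowest set bit is then log2 of the size, and sector-to-chunk translation is a shift plus a mask against chunk_size - 1. A small user-space illustration, with __builtin_ctzll standing in for the kernel's __ffs():

/*
 * Sector-to-chunk addressing with a power-of-two chunk size.
 * __builtin_ctzll() matches __ffs() for non-zero inputs.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk_size  = 16;                        /* sectors, power of two */
	uint64_t chunk_mask  = chunk_size - 1;
	unsigned chunk_shift = __builtin_ctzll(chunk_size);

	uint64_t sector = 1234567;
	printf("chunk %llu, offset %llu sectors\n",
	       (unsigned long long)(sector >> chunk_shift),
	       (unsigned long long)(sector & chunk_mask));
	return 0;
}
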
diff --git a/kernel/drivers/md/dm-exception-store.h b/kernel/drivers/md/dm-exception-store.h
index 0b2536247..12b5216c2 100644
--- a/kernel/drivers/md/dm-exception-store.h
+++ b/kernel/drivers/md/dm-exception-store.h
@@ -42,8 +42,7 @@ struct dm_exception_store_type {
const char *name;
struct module *module;
- int (*ctr) (struct dm_exception_store *store,
- unsigned argc, char **argv);
+ int (*ctr) (struct dm_exception_store *store, char *options);
/*
* Destroys this object when you've finished with it.
@@ -70,7 +69,7 @@ struct dm_exception_store_type {
* Update the metadata with this exception.
*/
void (*commit_exception) (struct dm_exception_store *store,
- struct dm_exception *e,
+ struct dm_exception *e, int valid,
void (*callback) (void *, int success),
void *callback_context);
@@ -123,6 +122,8 @@ struct dm_exception_store {
unsigned chunk_shift;
void *context;
+
+ bool userspace_supports_overflow;
};
/*
diff --git a/kernel/drivers/md/dm-flakey.c b/kernel/drivers/md/dm-flakey.c
index b257e4687..09e2afcaf 100644
--- a/kernel/drivers/md/dm-flakey.c
+++ b/kernel/drivers/md/dm-flakey.c
@@ -183,6 +183,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
devname = dm_shift_arg(&as);
+ r = -EINVAL;
if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector";
goto bad;
@@ -211,7 +212,8 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
- if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
+ r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
+ if (r) {
ti->error = "Device lookup failed";
goto bad;
}
@@ -224,7 +226,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad:
kfree(fc);
- return -EINVAL;
+ return r;
}
static void flakey_dtr(struct dm_target *ti)
@@ -296,7 +298,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
* Drop writes?
*/
if (test_bit(DROP_WRITES, &fc->flags)) {
- bio_endio(bio, 0);
+ bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
@@ -371,35 +373,20 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
}
}
-static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+static int flakey_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct flakey_c *fc = ti->private;
- struct dm_dev *dev = fc->dev;
- int r = 0;
+
+ *bdev = fc->dev->bdev;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
if (fc->start ||
- ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
-}
-
-static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct flakey_c *fc = ti->private;
- struct request_queue *q = bdev_get_queue(fc->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = fc->dev->bdev;
- bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+ ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ return 1;
+ return 0;
}
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
@@ -418,8 +405,7 @@ static struct target_type flakey_target = {
.map = flakey_map,
.end_io = flakey_end_io,
.status = flakey_status,
- .ioctl = flakey_ioctl,
- .merge = flakey_merge,
+ .prepare_ioctl = flakey_prepare_ioctl,
.iterate_devices = flakey_iterate_devices,
};
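
flakey (and, further down, linear, log-writes and multipath) stops implementing .ioctl itself and instead exposes a .prepare_ioctl hook that only reports the underlying block device; returning 1 appears to signal that the target is just a window onto the device, so the caller must vet the command rather than pass it straight through. A schematic user-space sketch of that contract, with invented types that are not the kernel structures:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the kernel structures involved. */
struct fake_bdev { uint64_t size_sectors; };
struct fake_target { uint64_t start, len; struct fake_bdev *dev; };

/*
 * Mirrors the shape of the new prepare_ioctl hooks: hand back the device,
 * return 0 if ioctls may be forwarded as-is, or 1 if the caller must verify
 * the command first because the target does not cover the whole device.
 */
static int prepare_ioctl(struct fake_target *ti, struct fake_bdev **bdev)
{
    *bdev = ti->dev;
    if (ti->start || ti->len != ti->dev->size_sectors)
        return 1;
    return 0;
}

int main(void)
{
    struct fake_bdev whole = { .size_sectors = 2048 };
    struct fake_target full = { 0, 2048, &whole };
    struct fake_target partial = { 128, 1024, &whole };
    struct fake_bdev *b;

    printf("full device: %d\n", prepare_ioctl(&full, &b));     /* 0 */
    printf("partial map: %d\n", prepare_ioctl(&partial, &b));  /* 1 */
    return 0;
}
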
diff --git a/kernel/drivers/md/dm-io.c b/kernel/drivers/md/dm-io.c
index 74adcd2c9..81c5e1a1f 100644
--- a/kernel/drivers/md/dm-io.c
+++ b/kernel/drivers/md/dm-io.c
@@ -65,8 +65,7 @@ struct dm_io_client *dm_io_client_create(void)
return client;
bad:
- if (client->pool)
- mempool_destroy(client->pool);
+ mempool_destroy(client->pool);
kfree(client);
return ERR_PTR(-ENOMEM);
}
@@ -134,12 +133,13 @@ static void dec_count(struct io *io, unsigned int region, int error)
complete_io(io);
}
-static void endio(struct bio *bio, int error)
+static void endio(struct bio *bio)
{
struct io *io;
unsigned region;
+ int error;
- if (error && bio_data_dir(bio) == READ)
+ if (bio->bi_error && bio_data_dir(bio) == READ)
zero_fill_bio(bio);
/*
@@ -147,6 +147,7 @@ static void endio(struct bio *bio, int error)
*/
retrieve_io_and_region_from_bio(bio, &io, &region);
+ error = bio->bi_error;
bio_put(bio);
dec_count(io, region, error);
@@ -314,7 +315,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
num_bvecs = 1;
else
- num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+ num_bvecs = min_t(int, BIO_MAX_PAGES,
dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
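
dm-io's completion handler follows the block-layer change visible throughout this patch: bio_endio() loses its error argument and the status travels inside the bio as bi_error instead. A user-space sketch of the same idea with a made-up completion type (not the real struct bio):

#include <stdio.h>

/* Toy model of a request whose completion status lives inside the object
 * rather than being passed as a separate callback argument. */
struct toy_bio {
    int bi_error;                       /* 0 on success, -errno on failure */
    void (*bi_end_io)(struct toy_bio *);
};

static void toy_endio(struct toy_bio *bio)
{
    /* The callback now reads the status from the object itself. */
    if (bio->bi_error)
        printf("I/O failed: error=%d\n", bio->bi_error);
    else
        printf("I/O completed OK\n");
}

static void toy_bio_endio(struct toy_bio *bio, int error)
{
    bio->bi_error = error;   /* producer stores the status... */
    bio->bi_end_io(bio);     /* ...then signals completion with one argument */
}

int main(void)
{
    struct toy_bio ok  = { 0, toy_endio };
    struct toy_bio bad = { 0, toy_endio };

    toy_bio_endio(&ok, 0);
    toy_bio_endio(&bad, -5 /* -EIO */);
    return 0;
}
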
diff --git a/kernel/drivers/md/dm-ioctl.c b/kernel/drivers/md/dm-ioctl.c
index 720ceeb7f..80a439543 100644
--- a/kernel/drivers/md/dm-ioctl.c
+++ b/kernel/drivers/md/dm-ioctl.c
@@ -1919,9 +1919,7 @@ int __init dm_interface_init(void)
void dm_interface_exit(void)
{
- if (misc_deregister(&_dm_misc) < 0)
- DMERR("misc_deregister failed for control device");
-
+ misc_deregister(&_dm_misc);
dm_hash_exit();
}
diff --git a/kernel/drivers/md/dm-kcopyd.c b/kernel/drivers/md/dm-kcopyd.c
index 3a7cade5e..1452ed9aa 100644
--- a/kernel/drivers/md/dm-kcopyd.c
+++ b/kernel/drivers/md/dm-kcopyd.c
@@ -244,7 +244,7 @@ static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
*pages = NULL;
do {
- pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
+ pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
if (unlikely(!pl)) {
/* Use reserved pages */
pl = kc->pages;
diff --git a/kernel/drivers/md/dm-linear.c b/kernel/drivers/md/dm-linear.c
index 53e848c10..05c35aacb 100644
--- a/kernel/drivers/md/dm-linear.c
+++ b/kernel/drivers/md/dm-linear.c
@@ -30,6 +30,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct linear_c *lc;
unsigned long long tmp;
char dummy;
+ int ret;
if (argc != 2) {
ti->error = "Invalid argument count";
@@ -38,18 +39,20 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
lc = kmalloc(sizeof(*lc), GFP_KERNEL);
if (lc == NULL) {
- ti->error = "dm-linear: Cannot allocate linear context";
+ ti->error = "Cannot allocate linear context";
return -ENOMEM;
}
+ ret = -EINVAL;
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
- ti->error = "dm-linear: Invalid device sector";
+ ti->error = "Invalid device sector";
goto bad;
}
lc->start = tmp;
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev)) {
- ti->error = "dm-linear: Device lookup failed";
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
+ if (ret) {
+ ti->error = "Device lookup failed";
goto bad;
}
@@ -61,7 +64,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad:
kfree(lc);
- return -EINVAL;
+ return ret;
}
static void linear_dtr(struct dm_target *ti)
@@ -113,36 +116,21 @@ static void linear_status(struct dm_target *ti, status_type_t type,
}
}
-static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
- unsigned long arg)
+static int linear_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct linear_c *lc = (struct linear_c *) ti->private;
struct dm_dev *dev = lc->dev;
- int r = 0;
+
+ *bdev = dev->bdev;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
if (lc->start ||
ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
-}
-
-static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct linear_c *lc = ti->private;
- struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = lc->dev->bdev;
- bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+ return 1;
+ return 0;
}
static int linear_iterate_devices(struct dm_target *ti,
@@ -161,8 +149,7 @@ static struct target_type linear_target = {
.dtr = linear_dtr,
.map = linear_map,
.status = linear_status,
- .ioctl = linear_ioctl,
- .merge = linear_merge,
+ .prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
};
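
Both flakey_ctr above and linear_ctr here stop returning a hard-coded -EINVAL on every failure path: the constructors seed a return variable and propagate whatever dm_get_device() and the argument parser actually reported. A small user-space sketch of the same shape, using hypothetical helpers rather than the kernel functions:

#include <stdio.h>
#include <errno.h>

/* Hypothetical helpers standing in for sscanf-style parsing and
 * dm_get_device(): each reports its own errno-style code. */
static int parse_sector(const char *arg, unsigned long long *out)
{
    return (sscanf(arg, "%llu", out) == 1) ? 0 : -EINVAL;
}

static int get_device(const char *name)
{
    return (name && name[0]) ? 0 : -ENODEV;   /* pretend lookup */
}

static int toy_ctr(const char *devname, const char *sector_arg)
{
    unsigned long long start;
    int ret;

    ret = parse_sector(sector_arg, &start);
    if (ret)
        goto bad;                /* keep the parser's -EINVAL */

    ret = get_device(devname);
    if (ret)
        goto bad;                /* propagate -ENODEV, not a blanket -EINVAL */

    return 0;
bad:
    return ret;                  /* caller sees the real cause */
}

int main(void)
{
    printf("ok:          %d\n", toy_ctr("sda", "42"));
    printf("bad sector:  %d\n", toy_ctr("sda", "x"));
    printf("bad device:  %d\n", toy_ctr("", "42"));
    return 0;
}
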
diff --git a/kernel/drivers/md/dm-log-userspace-base.c b/kernel/drivers/md/dm-log-userspace-base.c
index 058256d2e..53b7b06d0 100644
--- a/kernel/drivers/md/dm-log-userspace-base.c
+++ b/kernel/drivers/md/dm-log-userspace-base.c
@@ -313,8 +313,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
out:
kfree(devices_rdata);
if (r) {
- if (lc->flush_entry_pool)
- mempool_destroy(lc->flush_entry_pool);
+ mempool_destroy(lc->flush_entry_pool);
kfree(lc);
kfree(ctr_str);
} else {
diff --git a/kernel/drivers/md/dm-log-writes.c b/kernel/drivers/md/dm-log-writes.c
index 93e08446a..624589d51 100644
--- a/kernel/drivers/md/dm-log-writes.c
+++ b/kernel/drivers/md/dm-log-writes.c
@@ -55,8 +55,8 @@
#define LOG_DISCARD_FLAG (1 << 2)
#define LOG_MARK_FLAG (1 << 3)
-#define WRITE_LOG_VERSION 1
-#define WRITE_LOG_MAGIC 0x6a736677736872
+#define WRITE_LOG_VERSION 1ULL
+#define WRITE_LOG_MAGIC 0x6a736677736872ULL
/*
* The disk format for this is braindead simple.
@@ -146,16 +146,16 @@ static void put_io_block(struct log_writes_c *lc)
}
}
-static void log_end_io(struct bio *bio, int err)
+static void log_end_io(struct bio *bio)
{
struct log_writes_c *lc = bio->bi_private;
struct bio_vec *bvec;
int i;
- if (err) {
+ if (bio->bi_error) {
unsigned long flags;
- DMERR("Error writing log block, error=%d", err);
+ DMERR("Error writing log block, error=%d", bio->bi_error);
spin_lock_irqsave(&lc->blocks_lock, flags);
lc->logging_enabled = false;
spin_unlock_irqrestore(&lc->blocks_lock, flags);
@@ -205,7 +205,6 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- set_bit(BIO_UPTODATE, &bio->bi_flags);
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -270,7 +269,6 @@ static int log_one_block(struct log_writes_c *lc,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- set_bit(BIO_UPTODATE, &bio->bi_flags);
for (i = 0; i < block->vec_cnt; i++) {
/*
@@ -292,7 +290,6 @@ static int log_one_block(struct log_writes_c *lc,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- set_bit(BIO_UPTODATE, &bio->bi_flags);
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
@@ -420,6 +417,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct log_writes_c *lc;
struct dm_arg_set as;
const char *devname, *logdevname;
+ int ret;
as.argc = argc;
as.argv = argv;
@@ -443,18 +441,22 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
atomic_set(&lc->pending_blocks, 0);
devname = dm_shift_arg(&as);
- if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev)) {
+ ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
+ if (ret) {
ti->error = "Device lookup failed";
goto bad;
}
logdevname = dm_shift_arg(&as);
- if (dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), &lc->logdev)) {
+ ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
+ &lc->logdev);
+ if (ret) {
ti->error = "Log device lookup failed";
dm_put_device(ti, lc->dev);
goto bad;
}
+ ret = -EINVAL;
lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
if (!lc->log_kthread) {
ti->error = "Couldn't alloc kthread";
@@ -479,7 +481,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad:
kfree(lc);
- return -EINVAL;
+ return ret;
}
static int log_mark(struct log_writes_c *lc, char *data)
@@ -606,7 +608,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
WARN_ON(flush_bio || fua_bio);
if (lc->device_supports_discard)
goto map_bio;
- bio_endio(bio, 0);
+ bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
@@ -712,35 +714,19 @@ static void log_writes_status(struct dm_target *ti, status_type_t type,
}
}
-static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
- unsigned long arg)
+static int log_writes_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct log_writes_c *lc = ti->private;
struct dm_dev *dev = lc->dev;
- int r = 0;
+ *bdev = dev->bdev;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
-}
-
-static int log_writes_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct log_writes_c *lc = ti->private;
- struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = lc->dev->bdev;
- bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+ return 1;
+ return 0;
}
static int log_writes_iterate_devices(struct dm_target *ti,
@@ -795,8 +781,7 @@ static struct target_type log_writes_target = {
.map = log_writes_map,
.end_io = normal_end_io,
.status = log_writes_status,
- .ioctl = log_writes_ioctl,
- .merge = log_writes_merge,
+ .prepare_ioctl = log_writes_prepare_ioctl,
.message = log_writes_message,
.iterate_devices = log_writes_iterate_devices,
.io_hints = log_writes_io_hints,
diff --git a/kernel/drivers/md/dm-mpath.c b/kernel/drivers/md/dm-mpath.c
index eff7bdd77..cfa29f574 100644
--- a/kernel/drivers/md/dm-mpath.c
+++ b/kernel/drivers/md/dm-mpath.c
@@ -159,12 +159,9 @@ static struct priority_group *alloc_priority_group(void)
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
struct pgpath *pgpath, *tmp;
- struct multipath *m = ti->private;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
- if (m->hw_handler_name)
- scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
free_pgpath(pgpath);
}
@@ -580,6 +577,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
q = bdev_get_queue(p->path.dev->bdev);
if (m->retain_attached_hw_handler) {
+retain:
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name) {
/*
@@ -599,20 +597,14 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
}
if (m->hw_handler_name) {
- /*
- * Increments scsi_dh reference, even when using an
- * already-attached handler.
- */
r = scsi_dh_attach(q, m->hw_handler_name);
if (r == -EBUSY) {
- /*
- * Already attached to different hw_handler:
- * try to reattach with correct one.
- */
- scsi_dh_detach(q);
- r = scsi_dh_attach(q, m->hw_handler_name);
- }
+ char b[BDEVNAME_SIZE];
+ printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
+ bdevname(p->path.dev->bdev, b));
+ goto retain;
+ }
if (r < 0) {
ti->error = "error attaching hardware handler";
dm_put_device(ti, p->path.dev);
@@ -624,7 +616,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
if (r < 0) {
ti->error = "unable to set hardware "
"handler parameters";
- scsi_dh_detach(q);
dm_put_device(ti, p->path.dev);
goto bad;
}
@@ -734,12 +725,6 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
return 0;
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
- if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
- "scsi_dh_%s", m->hw_handler_name)) {
- ti->error = "unknown hardware handler type";
- ret = -EINVAL;
- goto fail;
- }
if (hw_argc > 1) {
char *p;
@@ -1548,49 +1533,38 @@ out:
return r;
}
-static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
- unsigned long arg)
+static int multipath_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct multipath *m = ti->private;
- struct pgpath *pgpath;
- struct block_device *bdev;
- fmode_t mode;
unsigned long flags;
int r;
- bdev = NULL;
- mode = 0;
- r = 0;
-
spin_lock_irqsave(&m->lock, flags);
if (!m->current_pgpath)
__choose_pgpath(m, 0);
- pgpath = m->current_pgpath;
-
- if (pgpath) {
- bdev = pgpath->path.dev->bdev;
- mode = pgpath->path.dev->mode;
+ if (m->current_pgpath) {
+ if (!m->queue_io) {
+ *bdev = m->current_pgpath->path.dev->bdev;
+ *mode = m->current_pgpath->path.dev->mode;
+ r = 0;
+ } else {
+ /* pg_init has not started or completed */
+ r = -ENOTCONN;
+ }
+ } else {
+ /* No path is available */
+ if (m->queue_if_no_path)
+ r = -ENOTCONN;
+ else
+ r = -EIO;
}
- if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
- r = -ENOTCONN;
- else if (!bdev)
- r = -EIO;
-
spin_unlock_irqrestore(&m->lock, flags);
- /*
- * Only pass ioctls through if the device sizes match exactly.
- */
- if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
- int err = scsi_verify_blk_ioctl(NULL, cmd);
- if (err)
- r = err;
- }
-
- if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+ if (r == -ENOTCONN) {
spin_lock_irqsave(&m->lock, flags);
if (!m->current_pg) {
/* Path status changed, redo selection */
@@ -1602,7 +1576,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
dm_table_run_md_queue_async(m->ti->table);
}
- return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ return 1;
+ return r;
}
static int multipath_iterate_devices(struct dm_target *ti,
@@ -1705,7 +1684,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
@@ -1718,7 +1697,7 @@ static struct target_type multipath_target = {
.resume = multipath_resume,
.status = multipath_status,
.message = multipath_message,
- .ioctl = multipath_ioctl,
+ .prepare_ioctl = multipath_prepare_ioctl,
.iterate_devices = multipath_iterate_devices,
.busy = multipath_busy,
};
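
multipath_prepare_ioctl condenses the old ioctl forwarding into one decision: hand back the current path's device when it is usable, report -ENOTCONN while pg_init is pending or queue_if_no_path is set (the code then kicks path-group initialisation so a retry can succeed), -EIO when no path exists, and a positive 1 when the sizes differ so the command still needs checking. A user-space sketch of just that decision table, with invented types:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

/* Invented stand-in for the multipath state consulted under m->lock. */
struct toy_mpath {
    bool has_path;          /* m->current_pgpath != NULL */
    bool queue_io;          /* pg_init not started or not finished */
    bool queue_if_no_path;  /* queue I/O instead of failing */
    bool size_matches;      /* target length == device length */
};

static int toy_prepare_ioctl(const struct toy_mpath *m)
{
    int r;

    if (m->has_path)
        r = m->queue_io ? -ENOTCONN : 0;
    else
        r = m->queue_if_no_path ? -ENOTCONN : -EIO;

    /* Only pass ioctls straight through if the sizes match exactly. */
    if (!r && !m->size_matches)
        return 1;
    return r;
}

int main(void)
{
    struct toy_mpath ready    = { true,  false, false, true  };
    struct toy_mpath initing  = { true,  true,  false, true  };
    struct toy_mpath no_path  = { false, false, false, true  };
    struct toy_mpath queued   = { false, false, true,  true  };
    struct toy_mpath mismatch = { true,  false, false, false };

    printf("ready=%d pg_init=%d no_path=%d queued=%d mismatch=%d\n",
           toy_prepare_ioctl(&ready), toy_prepare_ioctl(&initing),
           toy_prepare_ioctl(&no_path), toy_prepare_ioctl(&queued),
           toy_prepare_ioctl(&mismatch));
    return 0;
}
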
diff --git a/kernel/drivers/md/dm-raid.c b/kernel/drivers/md/dm-raid.c
index 88e4c7f24..a0901214a 100644
--- a/kernel/drivers/md/dm-raid.c
+++ b/kernel/drivers/md/dm-raid.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2014 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -17,6 +17,7 @@
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "raid"
+#define MAX_RAID_DEVICES 253 /* raid4/5/6 limit */
static bool devices_handle_discard_safely = false;
@@ -45,25 +46,25 @@ struct raid_dev {
};
/*
- * Flags for rs->print_flags field.
+ * Flags for rs->ctr_flags field.
*/
-#define DMPF_SYNC 0x1
-#define DMPF_NOSYNC 0x2
-#define DMPF_REBUILD 0x4
-#define DMPF_DAEMON_SLEEP 0x8
-#define DMPF_MIN_RECOVERY_RATE 0x10
-#define DMPF_MAX_RECOVERY_RATE 0x20
-#define DMPF_MAX_WRITE_BEHIND 0x40
-#define DMPF_STRIPE_CACHE 0x80
-#define DMPF_REGION_SIZE 0x100
-#define DMPF_RAID10_COPIES 0x200
-#define DMPF_RAID10_FORMAT 0x400
+#define CTR_FLAG_SYNC 0x1
+#define CTR_FLAG_NOSYNC 0x2
+#define CTR_FLAG_REBUILD 0x4
+#define CTR_FLAG_DAEMON_SLEEP 0x8
+#define CTR_FLAG_MIN_RECOVERY_RATE 0x10
+#define CTR_FLAG_MAX_RECOVERY_RATE 0x20
+#define CTR_FLAG_MAX_WRITE_BEHIND 0x40
+#define CTR_FLAG_STRIPE_CACHE 0x80
+#define CTR_FLAG_REGION_SIZE 0x100
+#define CTR_FLAG_RAID10_COPIES 0x200
+#define CTR_FLAG_RAID10_FORMAT 0x400
struct raid_set {
struct dm_target *ti;
uint32_t bitmap_loaded;
- uint32_t print_flags;
+ uint32_t ctr_flags;
struct mddev md;
struct raid_type *raid_type;
@@ -81,6 +82,7 @@ static struct raid_type {
const unsigned level; /* RAID level. */
const unsigned algorithm; /* RAID algorithm. */
} raid_types[] = {
+ {"raid0", "RAID0 (striping)", 0, 2, 0, 0 /* NONE */},
{"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */},
{"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */},
{"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
@@ -119,15 +121,15 @@ static int raid10_format_to_md_layout(char *format, unsigned copies)
{
unsigned n = 1, f = 1;
- if (!strcmp("near", format))
+ if (!strcasecmp("near", format))
n = copies;
else
f = copies;
- if (!strcmp("offset", format))
+ if (!strcasecmp("offset", format))
return 0x30000 | (f << 8) | n;
- if (!strcmp("far", format))
+ if (!strcasecmp("far", format))
return 0x20000 | (f << 8) | n;
return (f << 8) | n;
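
raid10_format_to_md_layout packs the format and copy count into MD's layout word: "near" puts the copies in the low byte, "far" and "offset" put them in bits 8..15 and additionally set bit 17 or bits 16+17. The comparisons also become case-insensitive. A standalone user-space restatement of that encoding for experimentation (the function body mirrors the hunk above; strcasecmp is POSIX):

#include <stdio.h>
#include <strings.h>   /* strcasecmp (POSIX) */

/* User-space restatement of the encoding in the diff above, for illustration. */
static int raid10_format_to_md_layout(const char *format, unsigned copies)
{
    unsigned n = 1, f = 1;

    if (!strcasecmp("near", format))
        n = copies;
    else
        f = copies;

    if (!strcasecmp("offset", format))
        return 0x30000 | (f << 8) | n;

    if (!strcasecmp("far", format))
        return 0x20000 | (f << 8) | n;

    return (f << 8) | n;
}

int main(void)
{
    printf("near/2   -> 0x%x\n", raid10_format_to_md_layout("near", 2));
    printf("far/2    -> 0x%x\n", raid10_format_to_md_layout("far", 2));
    printf("offset/2 -> 0x%x\n", raid10_format_to_md_layout("OFFSET", 2));
    return 0;
}
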
@@ -327,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
*/
if (min_region_size > (1 << 13)) {
/* If not a power of 2, make it the next power of 2 */
- if (min_region_size & (min_region_size - 1))
- region_size = 1 << fls(region_size);
+ region_size = roundup_pow_of_two(min_region_size);
DMINFO("Choosing default region size of %lu sectors",
region_size);
} else {
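
The old default-region-size path only rounded when min_region_size was not already a power of two, and it applied the fls() trick to region_size rather than min_region_size; roundup_pow_of_two(min_region_size) handles both cases in one step. A user-space stand-in for the rounding (the kernel helper is assumed to behave like this for the values involved; a plain loop is used instead of the kernel's bit tricks):

#include <stdio.h>

/* Next power of two >= x, written as a simple loop for clarity. */
static unsigned long roundup_pow_of_two_ul(unsigned long x)
{
    unsigned long r = 1;

    while (r < x)
        r <<= 1;
    return r;
}

int main(void)
{
    /* A power of two stays put; anything else is rounded up. */
    printf("%lu -> %lu\n", 8192UL,  roundup_pow_of_two_ul(8192UL));   /* 8192 */
    printf("%lu -> %lu\n", 10000UL, roundup_pow_of_two_ul(10000UL));  /* 16384 */
    return 0;
}
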
@@ -477,8 +478,6 @@ too_many:
* will form the "stripe"
* [[no]sync] Force or prevent recovery of the
* entire array
- * [devices_handle_discard_safely] Allow discards on RAID4/5/6; useful if RAID
- * member device(s) properly support TRIM/UNMAP
* [rebuild <idx>] Rebuild the drive indicated by the index
* [daemon_sleep <ms>] Time between bitmap daemon work to
* clear bits
@@ -555,12 +554,12 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
for (i = 0; i < num_raid_params; i++) {
if (!strcasecmp(argv[i], "nosync")) {
rs->md.recovery_cp = MaxSector;
- rs->print_flags |= DMPF_NOSYNC;
+ rs->ctr_flags |= CTR_FLAG_NOSYNC;
continue;
}
if (!strcasecmp(argv[i], "sync")) {
rs->md.recovery_cp = 0;
- rs->print_flags |= DMPF_SYNC;
+ rs->ctr_flags |= CTR_FLAG_SYNC;
continue;
}
@@ -585,7 +584,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
raid10_format = argv[i];
- rs->print_flags |= DMPF_RAID10_FORMAT;
+ rs->ctr_flags |= CTR_FLAG_RAID10_FORMAT;
continue;
}
@@ -602,7 +601,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
clear_bit(In_sync, &rs->dev[value].rdev.flags);
rs->dev[value].rdev.recovery_offset = 0;
- rs->print_flags |= DMPF_REBUILD;
+ rs->ctr_flags |= CTR_FLAG_REBUILD;
} else if (!strcasecmp(key, "write_mostly")) {
if (rs->raid_type->level != 1) {
rs->ti->error = "write_mostly option is only valid for RAID1";
@@ -618,7 +617,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "max_write_behind option is only valid for RAID1";
return -EINVAL;
}
- rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
+ rs->ctr_flags |= CTR_FLAG_MAX_WRITE_BEHIND;
/*
* In device-mapper, we specify things in sectors, but
@@ -631,14 +630,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
rs->md.bitmap_info.max_write_behind = value;
} else if (!strcasecmp(key, "daemon_sleep")) {
- rs->print_flags |= DMPF_DAEMON_SLEEP;
+ rs->ctr_flags |= CTR_FLAG_DAEMON_SLEEP;
if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
rs->ti->error = "daemon sleep period out of range";
return -EINVAL;
}
rs->md.bitmap_info.daemon_sleep = value;
} else if (!strcasecmp(key, "stripe_cache")) {
- rs->print_flags |= DMPF_STRIPE_CACHE;
+ rs->ctr_flags |= CTR_FLAG_STRIPE_CACHE;
/*
* In device-mapper, we specify things in sectors, but
@@ -656,21 +655,21 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
} else if (!strcasecmp(key, "min_recovery_rate")) {
- rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
+ rs->ctr_flags |= CTR_FLAG_MIN_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "min_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_min = (int)value;
} else if (!strcasecmp(key, "max_recovery_rate")) {
- rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
+ rs->ctr_flags |= CTR_FLAG_MAX_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "max_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_max = (int)value;
} else if (!strcasecmp(key, "region_size")) {
- rs->print_flags |= DMPF_REGION_SIZE;
+ rs->ctr_flags |= CTR_FLAG_REGION_SIZE;
region_size = value;
} else if (!strcasecmp(key, "raid10_copies") &&
(rs->raid_type->level == 10)) {
@@ -678,7 +677,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "Bad value for 'raid10_copies'";
return -EINVAL;
}
- rs->print_flags |= DMPF_RAID10_COPIES;
+ rs->ctr_flags |= CTR_FLAG_RAID10_COPIES;
raid10_copies = value;
} else {
DMERR("Unable to parse RAID parameter: %s", key);
@@ -720,7 +719,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->md.layout = raid10_format_to_md_layout(raid10_format,
raid10_copies);
rs->md.new_layout = rs->md.layout;
- } else if ((rs->raid_type->level > 1) &&
+ } else if ((!rs->raid_type->level || rs->raid_type->level > 1) &&
sector_div(sectors_per_dev,
(rs->md.raid_disks - rs->raid_type->parity_devs))) {
rs->ti->error = "Target length not divisible by number of data devices";
@@ -947,7 +946,7 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
return -EINVAL;
}
- if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
+ if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)))
mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
/*
@@ -1026,8 +1025,9 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
return 0;
}
-static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
+static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
+ struct mddev *mddev = &rs->md;
struct dm_raid_superblock *sb = page_address(rdev->sb_page);
/*
@@ -1037,8 +1037,10 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
if (!mddev->events && super_init_validation(mddev, rdev))
return -EINVAL;
- mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
- rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
+ /* Enable bitmap creation for RAID levels != 0 */
+ mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
+ rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
+
if (!test_bit(FirstUse, &rdev->flags)) {
rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
if (rdev->recovery_offset != MaxSector)
@@ -1073,7 +1075,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev) {
/*
- * Skipping super_load due to DMPF_SYNC will cause
+ * Skipping super_load due to CTR_FLAG_SYNC will cause
* the array to undergo initialization again as
* though it were new. This is the intended effect
* of the "sync" directive.
@@ -1082,7 +1084,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
* that the "sync" directive is disallowed during the
* reshape.
*/
- if (rs->print_flags & DMPF_SYNC)
+ rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode));
+
+ if (rs->ctr_flags & CTR_FLAG_SYNC)
continue;
if (!rdev->meta_bdev)
@@ -1140,11 +1144,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
* validation for the remaining devices.
*/
ti->error = "Unable to assemble array: Invalid superblocks";
- if (super_validate(mddev, freshest))
+ if (super_validate(rs, freshest))
return -EINVAL;
rdev_for_each(rdev, mddev)
- if ((rdev != freshest) && super_validate(mddev, rdev))
+ if ((rdev != freshest) && super_validate(rs, rdev))
return -EINVAL;
return 0;
@@ -1243,7 +1247,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
- (num_raid_devs >= INT_MAX)) {
+ (num_raid_devs > MAX_RAID_DEVICES)) {
ti->error = "Cannot understand number of raid devices";
return -EINVAL;
}
@@ -1282,10 +1286,11 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
*/
configure_discard_support(ti, rs);
- mutex_lock(&rs->md.reconfig_mutex);
+ /* Must be held while running the array */
+ mddev_lock_nointr(&rs->md);
ret = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
- mutex_unlock(&rs->md.reconfig_mutex);
+ mddev_unlock(&rs->md);
if (ret) {
ti->error = "Fail to run raid array";
@@ -1368,34 +1373,40 @@ static void raid_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_INFO:
DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
- if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
- sync = rs->md.curr_resync_completed;
- else
- sync = rs->md.recovery_cp;
-
- if (sync >= rs->md.resync_max_sectors) {
- /*
- * Sync complete.
- */
+ if (rs->raid_type->level) {
+ if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
+ sync = rs->md.curr_resync_completed;
+ else
+ sync = rs->md.recovery_cp;
+
+ if (sync >= rs->md.resync_max_sectors) {
+ /*
+ * Sync complete.
+ */
+ array_in_sync = 1;
+ sync = rs->md.resync_max_sectors;
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
+ /*
+ * If "check" or "repair" is occurring, the array has
+ * undergone an initial sync and the health characters
+ * should not be 'a' anymore.
+ */
+ array_in_sync = 1;
+ } else {
+ /*
+ * The array may be doing an initial sync, or it may
+ * be rebuilding individual components. If all the
+ * devices are In_sync, then it is the array that is
+ * being initialized.
+ */
+ for (i = 0; i < rs->md.raid_disks; i++)
+ if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+ array_in_sync = 1;
+ }
+ } else {
+ /* RAID0 */
array_in_sync = 1;
sync = rs->md.resync_max_sectors;
- } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
- /*
- * If "check" or "repair" is occurring, the array has
- * undergone and initial sync and the health characters
- * should not be 'a' anymore.
- */
- array_in_sync = 1;
- } else {
- /*
- * The array may be doing an initial sync, or it may
- * be rebuilding individual components. If all the
- * devices are In_sync, then it is the array that is
- * being initialized.
- */
- for (i = 0; i < rs->md.raid_disks; i++)
- if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
- array_in_sync = 1;
}
/*
@@ -1446,7 +1457,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
/* The string you would use to construct this array */
for (i = 0; i < rs->md.raid_disks; i++) {
- if ((rs->print_flags & DMPF_REBUILD) &&
+ if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
rs->dev[i].data_dev &&
!test_bit(In_sync, &rs->dev[i].rdev.flags))
raid_param_cnt += 2; /* for rebuilds */
@@ -1455,33 +1466,33 @@ static void raid_status(struct dm_target *ti, status_type_t type,
raid_param_cnt += 2;
}
- raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
- if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
+ raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
+ if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
raid_param_cnt--;
DMEMIT("%s %u %u", rs->raid_type->name,
raid_param_cnt, rs->md.chunk_sectors);
- if ((rs->print_flags & DMPF_SYNC) &&
+ if ((rs->ctr_flags & CTR_FLAG_SYNC) &&
(rs->md.recovery_cp == MaxSector))
DMEMIT(" sync");
- if (rs->print_flags & DMPF_NOSYNC)
+ if (rs->ctr_flags & CTR_FLAG_NOSYNC)
DMEMIT(" nosync");
for (i = 0; i < rs->md.raid_disks; i++)
- if ((rs->print_flags & DMPF_REBUILD) &&
+ if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
rs->dev[i].data_dev &&
!test_bit(In_sync, &rs->dev[i].rdev.flags))
DMEMIT(" rebuild %u", i);
- if (rs->print_flags & DMPF_DAEMON_SLEEP)
+ if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP)
DMEMIT(" daemon_sleep %lu",
rs->md.bitmap_info.daemon_sleep);
- if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
+ if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE)
DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
- if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
+ if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE)
DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
for (i = 0; i < rs->md.raid_disks; i++)
@@ -1489,11 +1500,11 @@ static void raid_status(struct dm_target *ti, status_type_t type,
test_bit(WriteMostly, &rs->dev[i].rdev.flags))
DMEMIT(" write_mostly %u", i);
- if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
+ if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND)
DMEMIT(" max_write_behind %lu",
rs->md.bitmap_info.max_write_behind);
- if (rs->print_flags & DMPF_STRIPE_CACHE) {
+ if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) {
struct r5conf *conf = rs->md.private;
/* convert from kiB to sectors */
@@ -1501,15 +1512,15 @@ static void raid_status(struct dm_target *ti, status_type_t type,
conf ? conf->max_nr_stripes * 2 : 0);
}
- if (rs->print_flags & DMPF_REGION_SIZE)
+ if (rs->ctr_flags & CTR_FLAG_REGION_SIZE)
DMEMIT(" region_size %lu",
rs->md.bitmap_info.chunksize >> 9);
- if (rs->print_flags & DMPF_RAID10_COPIES)
+ if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES)
DMEMIT(" raid10_copies %u",
raid10_md_layout_to_copies(rs->md.layout));
- if (rs->print_flags & DMPF_RAID10_FORMAT)
+ if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT)
DMEMIT(" raid10_format %s",
raid10_md_layout_to_format(rs->md.layout));
@@ -1684,26 +1695,30 @@ static void raid_resume(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- set_bit(MD_CHANGE_DEVS, &rs->md.flags);
- if (!rs->bitmap_loaded) {
- bitmap_load(&rs->md);
- rs->bitmap_loaded = 1;
- } else {
- /*
- * A secondary resume while the device is active.
- * Take this opportunity to check whether any failed
- * devices are reachable again.
- */
- attempt_restore_of_faulty_devices(rs);
+ if (rs->raid_type->level) {
+ set_bit(MD_CHANGE_DEVS, &rs->md.flags);
+
+ if (!rs->bitmap_loaded) {
+ bitmap_load(&rs->md);
+ rs->bitmap_loaded = 1;
+ } else {
+ /*
+ * A secondary resume while the device is active.
+ * Take this opportunity to check whether any failed
+ * devices are reachable again.
+ */
+ attempt_restore_of_faulty_devices(rs);
+ }
+
+ clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
}
- clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
mddev_resume(&rs->md);
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
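
With print_flags renamed to ctr_flags, the STATUSTYPE_TABLE path still derives the emitted argument count straight from the bitmask: every set flag except CTR_FLAG_REBUILD contributes a key/value pair (hence hweight32(...) * 2), and sync/nosync are corrected down by one because they are bare keywords. A small user-space sketch of that counting, reusing flag values from the diff and a portable popcount:

#include <stdio.h>
#include <stdint.h>

#define CTR_FLAG_SYNC              0x1
#define CTR_FLAG_NOSYNC            0x2
#define CTR_FLAG_REBUILD           0x4
#define CTR_FLAG_DAEMON_SLEEP      0x8
#define CTR_FLAG_MIN_RECOVERY_RATE 0x10

/* Portable stand-in for the kernel's hweight32(). */
static unsigned hweight32_sketch(uint32_t w)
{
    unsigned count = 0;

    while (w) {
        w &= w - 1;   /* clear the lowest set bit */
        count++;
    }
    return count;
}

int main(void)
{
    uint32_t ctr_flags = CTR_FLAG_NOSYNC | CTR_FLAG_DAEMON_SLEEP |
                         CTR_FLAG_MIN_RECOVERY_RATE;
    unsigned raid_param_cnt = 0;   /* counting only the optional parameters here */

    /* Each flag except rebuild adds "<key> <value>" = two words... */
    raid_param_cnt += hweight32_sketch(ctr_flags & ~CTR_FLAG_REBUILD) * 2;

    /* ...but sync/nosync are bare keywords, so drop the phantom value. */
    if (ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
        raid_param_cnt--;

    printf("raid_param_cnt = %u\n", raid_param_cnt);   /* 3 * 2 - 1 = 5 */
    return 0;
}
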
diff --git a/kernel/drivers/md/dm-raid1.c b/kernel/drivers/md/dm-raid1.c
index 089d62751..f2a363a89 100644
--- a/kernel/drivers/md/dm-raid1.c
+++ b/kernel/drivers/md/dm-raid1.c
@@ -23,8 +23,10 @@
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
-#define DM_RAID1_HANDLE_ERRORS 0x01
+#define DM_RAID1_HANDLE_ERRORS 0x01
+#define DM_RAID1_KEEP_LOG 0x02
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
+#define keep_log(p) ((p)->features & DM_RAID1_KEEP_LOG)
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
@@ -229,7 +231,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
if (m != get_default_mirror(ms))
goto out;
- if (!ms->in_sync) {
+ if (!ms->in_sync && !keep_log(ms)) {
/*
* Better to issue requests to same failing device
* than to risk returning corrupt data.
@@ -370,6 +372,17 @@ static int recover(struct mirror_set *ms, struct dm_region *reg)
return r;
}
+static void reset_ms_flags(struct mirror_set *ms)
+{
+ unsigned int m;
+
+ ms->leg_failure = 0;
+ for (m = 0; m < ms->nr_mirrors; m++) {
+ atomic_set(&(ms->mirror[m].error_count), 0);
+ ms->mirror[m].error_type = 0;
+ }
+}
+
static void do_recovery(struct mirror_set *ms)
{
struct dm_region *reg;
@@ -398,6 +411,7 @@ static void do_recovery(struct mirror_set *ms)
/* the sync is complete */
dm_table_event(ms->ti->table);
ms->in_sync = 1;
+ reset_ms_flags(ms);
}
}
@@ -476,9 +490,11 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
* If device is suspended, complete the bio.
*/
if (dm_noflush_suspending(ms->ti))
- bio_endio(bio, DM_ENDIO_REQUEUE);
+ bio->bi_error = DM_ENDIO_REQUEUE;
else
- bio_endio(bio, -EIO);
+ bio->bi_error = -EIO;
+
+ bio_endio(bio);
return;
}
@@ -501,7 +517,7 @@ static void read_callback(unsigned long error, void *context)
bio_set_m(bio, NULL);
if (likely(!error)) {
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
}
@@ -517,7 +533,7 @@ static void read_callback(unsigned long error, void *context)
DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
m->dev->name);
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
}
/* Asynchronous read. */
@@ -566,7 +582,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
if (likely(m))
read_async_bio(m, bio);
else
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
}
}
@@ -584,7 +600,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
static void write_callback(unsigned long error, void *context)
{
- unsigned i, ret = 0;
+ unsigned i;
struct bio *bio = (struct bio *) context;
struct mirror_set *ms;
int should_wake = 0;
@@ -600,7 +616,7 @@ static void write_callback(unsigned long error, void *context)
* regions with the same code.
*/
if (likely(!error)) {
- bio_endio(bio, ret);
+ bio_endio(bio);
return;
}
@@ -609,7 +625,8 @@ static void write_callback(unsigned long error, void *context)
* degrade the array.
*/
if (bio->bi_rw & REQ_DISCARD) {
- bio_endio(bio, -EOPNOTSUPP);
+ bio->bi_error = -EOPNOTSUPP;
+ bio_endio(bio);
return;
}
@@ -759,7 +776,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
dm_rh_delay(ms->rh, bio);
while ((bio = bio_list_pop(&nosync))) {
- if (unlikely(ms->leg_failure) && errors_handled(ms)) {
+ if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
spin_lock_irq(&ms->lock);
bio_list_add(&ms->failures, bio);
spin_unlock_irq(&ms->lock);
@@ -803,18 +820,23 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
/*
* If all the legs are dead, fail the I/O.
- * If we have been told to handle errors, hold the bio
- * and wait for userspace to deal with the problem.
+ * If the device has failed and keep_log is enabled,
+ * fail the I/O.
+ *
+ * If we have been told to handle errors, and keep_log
+ * isn't enabled, hold the bio and wait for userspace to
+ * deal with the problem.
+ *
* Otherwise pretend that the I/O succeeded. (This would
* be wrong if the failed leg returned after reboot and
* got replicated back to the good legs.)
*/
- if (!get_valid_mirror(ms))
- bio_endio(bio, -EIO);
- else if (errors_handled(ms))
+ if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
+ bio_io_error(bio);
+ else if (errors_handled(ms) && !keep_log(ms))
hold_bio(ms, bio);
else
- bio_endio(bio, 0);
+ bio_endio(bio);
}
}
@@ -923,16 +945,18 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
{
unsigned long long offset;
char dummy;
+ int ret;
if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
ti->error = "Invalid offset";
return -EINVAL;
}
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
- &ms->mirror[mirror].dev)) {
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+ &ms->mirror[mirror].dev);
+ if (ret) {
ti->error = "Device lookup failure";
- return -ENXIO;
+ return ret;
}
ms->mirror[mirror].ms = ms;
@@ -987,6 +1011,7 @@ static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
unsigned num_features;
struct dm_target *ti = ms->ti;
char dummy;
+ int i;
*args_used = 0;
@@ -1007,15 +1032,25 @@ static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
return -EINVAL;
}
- if (!strcmp("handle_errors", argv[0]))
- ms->features |= DM_RAID1_HANDLE_ERRORS;
- else {
- ti->error = "Unrecognised feature requested";
+ for (i = 0; i < num_features; i++) {
+ if (!strcmp("handle_errors", argv[0]))
+ ms->features |= DM_RAID1_HANDLE_ERRORS;
+ else if (!strcmp("keep_log", argv[0]))
+ ms->features |= DM_RAID1_KEEP_LOG;
+ else {
+ ti->error = "Unrecognised feature requested";
+ return -EINVAL;
+ }
+
+ argc--;
+ argv++;
+ (*args_used)++;
+ }
+ if (!errors_handled(ms) && keep_log(ms)) {
+ ti->error = "keep_log feature requires the handle_errors feature";
return -EINVAL;
}
- (*args_used)++;
-
return 0;
}
@@ -1029,7 +1064,7 @@ static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
* log_type is "core" or "disk"
* #log_params is between 1 and 3
*
- * If present, features must be "handle_errors".
+ * If present, supported features are "handle_errors" and "keep_log".
*/
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
@@ -1254,8 +1289,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
dm_bio_restore(bd, bio);
bio_record->details.bi_bdev = NULL;
- atomic_inc(&bio->bi_remaining);
-
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
}
@@ -1365,6 +1398,7 @@ static void mirror_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
unsigned int m, sz = 0;
+ int num_feature_args = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
char buffer[ms->nr_mirrors + 1];
@@ -1394,8 +1428,17 @@ static void mirror_status(struct dm_target *ti, status_type_t type,
DMEMIT(" %s %llu", ms->mirror[m].dev->name,
(unsigned long long)ms->mirror[m].offset);
- if (ms->features & DM_RAID1_HANDLE_ERRORS)
- DMEMIT(" 1 handle_errors");
+ num_feature_args += !!errors_handled(ms);
+ num_feature_args += !!keep_log(ms);
+ if (num_feature_args) {
+ DMEMIT(" %d", num_feature_args);
+ if (errors_handled(ms))
+ DMEMIT(" handle_errors");
+ if (keep_log(ms))
+ DMEMIT(" keep_log");
+ }
+
+ break;
}
}
@@ -1415,7 +1458,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 13, 2},
+ .version = {1, 14, 0},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
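
parse_features now loops over the declared feature count, accepts both handle_errors and keep_log, and enforces that keep_log only makes sense together with handle_errors. A user-space sketch of the same parsing and dependency check, with invented flag storage rather than the mirror target's fields:

#include <stdio.h>
#include <string.h>

#define HANDLE_ERRORS 0x01
#define KEEP_LOG      0x02

/* User-space sketch: consume "num_features feature..." one token per
 * declared feature, then check the dependency between them. */
static int parse_features(int argc, char **argv, unsigned *features)
{
    int num_features, i;

    if (argc < 1 || sscanf(argv[0], "%d", &num_features) != 1)
        return -1;
    if (num_features > argc - 1)
        return -1;                    /* fewer tokens than promised */

    argv++;
    for (i = 0; i < num_features; i++, argv++) {
        if (!strcmp(argv[0], "handle_errors"))
            *features |= HANDLE_ERRORS;
        else if (!strcmp(argv[0], "keep_log"))
            *features |= KEEP_LOG;
        else
            return -1;                /* unrecognised feature */
    }

    if ((*features & KEEP_LOG) && !(*features & HANDLE_ERRORS))
        return -1;                    /* keep_log needs handle_errors */
    return 0;
}

int main(void)
{
    char *good[] = { "2", "handle_errors", "keep_log" };
    char *bad[]  = { "1", "keep_log" };
    unsigned f1 = 0, f2 = 0;

    printf("good: %d (features 0x%x)\n", parse_features(3, good, &f1), f1);
    printf("bad:  %d (features 0x%x)\n", parse_features(2, bad, &f2), f2);
    return 0;
}
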
diff --git a/kernel/drivers/md/dm-region-hash.c b/kernel/drivers/md/dm-region-hash.c
index b929fd5f4..74cb7b991 100644
--- a/kernel/drivers/md/dm-region-hash.c
+++ b/kernel/drivers/md/dm-region-hash.c
@@ -193,7 +193,7 @@ struct dm_region_hash *dm_region_hash_create(
rh->max_recovery = max_recovery;
rh->log = log;
rh->region_size = region_size;
- rh->region_shift = ffs(region_size) - 1;
+ rh->region_shift = __ffs(region_size);
rwlock_init(&rh->hash_lock);
rh->mask = nr_buckets - 1;
rh->nr_buckets = nr_buckets;
@@ -249,9 +249,7 @@ void dm_region_hash_destroy(struct dm_region_hash *rh)
if (rh->log)
dm_dirty_log_destroy(rh->log);
- if (rh->region_pool)
- mempool_destroy(rh->region_pool);
-
+ mempool_destroy(rh->region_pool);
vfree(rh->buckets);
kfree(rh);
}
diff --git a/kernel/drivers/md/dm-snap-persistent.c b/kernel/drivers/md/dm-snap-persistent.c
index 808b8419b..4d3909393 100644
--- a/kernel/drivers/md/dm-snap-persistent.c
+++ b/kernel/drivers/md/dm-snap-persistent.c
@@ -7,6 +7,7 @@
#include "dm-exception-store.h"
+#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
@@ -321,7 +322,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
bdev) >> 9);
ps->store->chunk_mask = ps->store->chunk_size - 1;
- ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
+ ps->store->chunk_shift = __ffs(ps->store->chunk_size);
chunk_size_supplied = 0;
}
@@ -533,7 +534,7 @@ static int read_exceptions(struct pstore *ps,
chunk = area_location(ps, ps->current_area);
area = dm_bufio_read(client, chunk, &bp);
- if (unlikely(IS_ERR(area))) {
+ if (IS_ERR(area)) {
r = PTR_ERR(area);
goto ret_destroy_bufio;
}
@@ -694,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
}
static void persistent_commit_exception(struct dm_exception_store *store,
- struct dm_exception *e,
+ struct dm_exception *e, int valid,
void (*callback) (void *, int success),
void *callback_context)
{
@@ -703,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
struct core_exception ce;
struct commit_callback *cb;
+ if (!valid)
+ ps->valid = 0;
+
ce.old_chunk = e->old_chunk;
ce.new_chunk = e->new_chunk;
write_exception(ps, ps->current_committed++, &ce);
@@ -843,10 +847,10 @@ static void persistent_drop_snapshot(struct dm_exception_store *store)
DMWARN("write header failed");
}
-static int persistent_ctr(struct dm_exception_store *store,
- unsigned argc, char **argv)
+static int persistent_ctr(struct dm_exception_store *store, char *options)
{
struct pstore *ps;
+ int r;
/* allocate the pstore */
ps = kzalloc(sizeof(*ps), GFP_KERNEL);
@@ -868,14 +872,32 @@ static int persistent_ctr(struct dm_exception_store *store,
ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
if (!ps->metadata_wq) {
- kfree(ps);
DMERR("couldn't start header metadata update thread");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_workqueue;
+ }
+
+ if (options) {
+ char overflow = toupper(options[0]);
+ if (overflow == 'O')
+ store->userspace_supports_overflow = true;
+ else {
+ DMERR("Unsupported persistent store option: %s", options);
+ r = -EINVAL;
+ goto err_options;
+ }
}
store->context = ps;
return 0;
+
+err_options:
+ destroy_workqueue(ps->metadata_wq);
+err_workqueue:
+ kfree(ps);
+
+ return r;
}
static unsigned persistent_status(struct dm_exception_store *store,
@@ -888,7 +910,8 @@ static unsigned persistent_status(struct dm_exception_store *store,
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
- DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
+ DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
+ (unsigned long long)store->chunk_size);
}
return sz;
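
persistent_ctr now has two failure points, so it switches to the usual reverse-order cleanup labels: a bad option tears down the workqueue that was already created, and both paths free the pstore last. A user-space sketch of that unwind shape, using stand-in resources instead of the kernel objects:

#include <stdio.h>
#include <errno.h>
#include <ctype.h>

struct toy_store { int ps_allocated; int wq_started; int overflow; };

/* Mirrors the shape of persistent_ctr(): acquire resources in order,
 * release them in reverse order via labels when something fails. */
static int toy_ctr(struct toy_store *s, const char *options, int fail_wq)
{
    int r;

    s->ps_allocated = 1;                  /* kzalloc(ps) */

    if (fail_wq) {                        /* alloc_workqueue() failed */
        r = -ENOMEM;
        goto err_workqueue;
    }
    s->wq_started = 1;

    if (options) {
        if (toupper((unsigned char)options[0]) == 'O') {
            s->overflow = 1;              /* userspace_supports_overflow */
        } else {
            r = -EINVAL;
            goto err_options;
        }
    }
    return 0;

err_options:
    s->wq_started = 0;                    /* destroy_workqueue() */
err_workqueue:
    s->ps_allocated = 0;                  /* kfree(ps) */
    return r;
}

int main(void)
{
    struct toy_store a = {0}, b = {0}, c = {0};

    printf("option O:   %d (overflow=%d)\n", toy_ctr(&a, "O", 0), a.overflow);
    printf("bad option: %d\n", toy_ctr(&b, "X", 0));
    printf("wq failure: %d\n", toy_ctr(&c, NULL, 1));
    return 0;
}
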
diff --git a/kernel/drivers/md/dm-snap-transient.c b/kernel/drivers/md/dm-snap-transient.c
index 1ce9a2586..4d50a12cf 100644
--- a/kernel/drivers/md/dm-snap-transient.c
+++ b/kernel/drivers/md/dm-snap-transient.c
@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
}
static void transient_commit_exception(struct dm_exception_store *store,
- struct dm_exception *e,
+ struct dm_exception *e, int valid,
void (*callback) (void *, int success),
void *callback_context)
{
/* Just succeed */
- callback(callback_context, 1);
+ callback(callback_context, valid);
}
static void transient_usage(struct dm_exception_store *store,
@@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store,
*metadata_sectors = 0;
}
-static int transient_ctr(struct dm_exception_store *store,
- unsigned argc, char **argv)
+static int transient_ctr(struct dm_exception_store *store, char *options)
{
struct transient_c *tc;
diff --git a/kernel/drivers/md/dm-snap.c b/kernel/drivers/md/dm-snap.c
index f83a0f3fc..61f184ad0 100644
--- a/kernel/drivers/md/dm-snap.c
+++ b/kernel/drivers/md/dm-snap.c
@@ -63,6 +63,13 @@ struct dm_snapshot {
*/
int valid;
+ /*
+ * The snapshot overflowed because of a write to the snapshot device.
+ * We don't have to invalidate the snapshot in this case, but we need
+ * to prevent further writes.
+ */
+ int snapshot_overflowed;
+
/* Origin writes don't trigger exceptions until this is set */
int active;
@@ -1091,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s)
}
/*
- * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
+ * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
*/
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
@@ -1152,6 +1159,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->ti = ti;
s->valid = 1;
+ s->snapshot_overflowed = 0;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
s->exception_start_sequence = 0;
@@ -1294,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
u.store_swap = snap_dest->store;
snap_dest->store = snap_src->store;
+ snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
snap_src->store = u.store_swap;
snap_dest->store->snap = snap_dest;
@@ -1301,6 +1310,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
snap_dest->valid = snap_src->valid;
+ snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
/*
* Set source invalid to ensure it receives no further I/O.
@@ -1428,8 +1438,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
dm_table_event(s->ti->table);
}
-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
+static void pending_complete(void *context, int success)
{
+ struct dm_snap_pending_exception *pe = context;
struct dm_exception *e;
struct dm_snapshot *s = pe->snap;
struct bio *origin_bios = NULL;
@@ -1478,7 +1489,6 @@ out:
if (full_bio) {
full_bio->bi_end_io = pe->full_bio_end_io;
full_bio->bi_private = pe->full_bio_private;
- atomic_inc(&full_bio->bi_remaining);
}
increment_pending_exceptions_done_count();
@@ -1491,7 +1501,7 @@ out:
error_bios(snapshot_bios);
} else {
if (full_bio)
- bio_endio(full_bio, 0);
+ bio_endio(full_bio);
flush_bios(snapshot_bios);
}
@@ -1500,24 +1510,13 @@ out:
free_pending_exception(pe);
}
-static void commit_callback(void *context, int success)
-{
- struct dm_snap_pending_exception *pe = context;
-
- pending_complete(pe, success);
-}
-
static void complete_exception(struct dm_snap_pending_exception *pe)
{
struct dm_snapshot *s = pe->snap;
- if (unlikely(pe->copy_error))
- pending_complete(pe, 0);
-
- else
- /* Update the metadata if we are persistent */
- s->store->type->commit_exception(s->store, &pe->e,
- commit_callback, pe);
+ /* Update the metadata if we are persistent */
+ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
+ pending_complete, pe);
}
/*
@@ -1581,11 +1580,11 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
-static void full_bio_end_io(struct bio *bio, int error)
+static void full_bio_end_io(struct bio *bio)
{
void *callback_data = bio->bi_private;
- dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
+ dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
}
static void start_full_bio(struct dm_snap_pending_exception *pe,
@@ -1692,7 +1691,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
* to copy an exception */
down_write(&s->lock);
- if (!s->valid) {
+ if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
r = -EIO;
goto out_unlock;
}
@@ -1716,7 +1715,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = alloc_pending_exception(s);
down_write(&s->lock);
- if (!s->valid) {
+ if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe);
r = -EIO;
goto out_unlock;
@@ -1731,7 +1730,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = __find_pending_exception(s, pe, chunk);
if (!pe) {
- __invalidate_snapshot(s, -ENOMEM);
+ if (s->store->userspace_supports_overflow) {
+ s->snapshot_overflowed = 1;
+ DMERR("Snapshot overflowed: Unable to allocate exception.");
+ } else
+ __invalidate_snapshot(s, -ENOMEM);
r = -EIO;
goto out_unlock;
}
@@ -1991,6 +1994,8 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Invalid");
else if (snap->merge_failed)
DMEMIT("Merge failed");
+ else if (snap->snapshot_overflowed)
+ DMEMIT("Overflow");
else {
if (snap->store->type->usage) {
sector_t total_sectors, sectors_allocated,
@@ -2331,20 +2336,6 @@ static void origin_status(struct dm_target *ti, status_type_t type,
}
}
-static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct dm_origin *o = ti->private;
- struct request_queue *q = bdev_get_queue(o->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = o->dev->bdev;
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int origin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -2363,13 +2354,12 @@ static struct target_type origin_target = {
.resume = origin_resume,
.postsuspend = origin_postsuspend,
.status = origin_status,
- .merge = origin_merge,
.iterate_devices = origin_iterate_devices,
};
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 13, 0},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2383,7 +2373,7 @@ static struct target_type snapshot_target = {
static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name,
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
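
The new snapshot_overflowed state gives writes a softer failure mode: when a pending exception cannot be allocated and userspace has advertised overflow support ("PO" in the table line), the snapshot is marked overflowed rather than invalidated, further writes to it get -EIO, the data already copied stays readable, and status reports "Overflow". A compact user-space sketch of the two decisions involved, with invented state fields:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

/* Invented mini-model of the snapshot state consulted in snapshot_map(). */
struct toy_snap {
    bool valid;
    bool overflowed;
    bool userspace_supports_overflow;
};

/* What happens when allocating a new exception fails. */
static void exception_alloc_failed(struct toy_snap *s)
{
    if (s->userspace_supports_overflow)
        s->overflowed = true;   /* keep existing data readable */
    else
        s->valid = false;       /* whole snapshot is invalidated */
}

/* Whether a new write to the snapshot device may proceed. */
static int snapshot_write_allowed(const struct toy_snap *s)
{
    if (!s->valid || s->overflowed)
        return -EIO;
    return 0;
}

int main(void)
{
    struct toy_snap with_po = { true, false, true };
    struct toy_snap without = { true, false, false };

    exception_alloc_failed(&with_po);
    exception_alloc_failed(&without);

    printf("PO store: valid=%d overflowed=%d write=%d\n",
           with_po.valid, with_po.overflowed, snapshot_write_allowed(&with_po));
    printf("plain P:  valid=%d overflowed=%d write=%d\n",
           without.valid, without.overflowed, snapshot_write_allowed(&without));
    return 0;
}
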
diff --git a/kernel/drivers/md/dm-stats.c b/kernel/drivers/md/dm-stats.c
index 419bdd4fc..8289804cc 100644
--- a/kernel/drivers/md/dm-stats.c
+++ b/kernel/drivers/md/dm-stats.c
@@ -29,30 +29,37 @@ struct dm_stat_percpu {
unsigned long long io_ticks[2];
unsigned long long io_ticks_total;
unsigned long long time_in_queue;
+ unsigned long long *histogram;
};
struct dm_stat_shared {
atomic_t in_flight[2];
- unsigned long stamp;
+ unsigned long long stamp;
struct dm_stat_percpu tmp;
};
struct dm_stat {
struct list_head list_entry;
int id;
+ unsigned stat_flags;
size_t n_entries;
sector_t start;
sector_t end;
sector_t step;
+ unsigned n_histogram_entries;
+ unsigned long long *histogram_boundaries;
const char *program_id;
const char *aux_data;
struct rcu_head rcu_head;
size_t shared_alloc_size;
size_t percpu_alloc_size;
+ size_t histogram_alloc_size;
struct dm_stat_percpu *stat_percpu[NR_CPUS];
struct dm_stat_shared stat_shared[0];
};
+#define STAT_PRECISE_TIMESTAMPS 1
+
struct dm_stats_last_position {
sector_t last_sector;
unsigned last_rw;
@@ -160,10 +167,7 @@ static void dm_kvfree(void *ptr, size_t alloc_size)
free_shared_memory(alloc_size);
- if (is_vmalloc_addr(ptr))
- vfree(ptr);
- else
- kfree(ptr);
+ kvfree(ptr);
}
static void dm_stat_free(struct rcu_head *head)
@@ -173,8 +177,11 @@ static void dm_stat_free(struct rcu_head *head)
kfree(s->program_id);
kfree(s->aux_data);
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
+ }
+ dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
dm_kvfree(s, s->shared_alloc_size);
}
@@ -227,7 +234,10 @@ void dm_stats_cleanup(struct dm_stats *stats)
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
- sector_t step, const char *program_id, const char *aux_data,
+ sector_t step, unsigned stat_flags,
+ unsigned n_histogram_entries,
+ unsigned long long *histogram_boundaries,
+ const char *program_id, const char *aux_data,
void (*suspend_callback)(struct mapped_device *),
void (*resume_callback)(struct mapped_device *),
struct mapped_device *md)
@@ -238,6 +248,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
size_t ni;
size_t shared_alloc_size;
size_t percpu_alloc_size;
+ size_t histogram_alloc_size;
struct dm_stat_percpu *p;
int cpu;
int ret_id;
@@ -261,19 +272,34 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
return -EOVERFLOW;
- if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
+ histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
+ if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
+ return -EOVERFLOW;
+
+ if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
+ num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
return -ENOMEM;
s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
if (!s)
return -ENOMEM;
+ s->stat_flags = stat_flags;
s->n_entries = n_entries;
s->start = start;
s->end = end;
s->step = step;
s->shared_alloc_size = shared_alloc_size;
s->percpu_alloc_size = percpu_alloc_size;
+ s->histogram_alloc_size = histogram_alloc_size;
+
+ s->n_histogram_entries = n_histogram_entries;
+ s->histogram_boundaries = kmemdup(histogram_boundaries,
+ s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
+ if (!s->histogram_boundaries) {
+ r = -ENOMEM;
+ goto out;
+ }
s->program_id = kstrdup(program_id, GFP_KERNEL);
if (!s->program_id) {
@@ -291,6 +317,19 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
}
+ if (s->n_histogram_entries) {
+ unsigned long long *hi;
+ hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
+ if (!hi) {
+ r = -ENOMEM;
+ goto out;
+ }
+ for (ni = 0; ni < n_entries; ni++) {
+ s->stat_shared[ni].tmp.histogram = hi;
+ hi += s->n_histogram_entries + 1;
+ }
+ }
+
for_each_possible_cpu(cpu) {
p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
if (!p) {
@@ -298,6 +337,18 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
goto out;
}
s->stat_percpu[cpu] = p;
+ if (s->n_histogram_entries) {
+ unsigned long long *hi;
+ hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
+ if (!hi) {
+ r = -ENOMEM;
+ goto out;
+ }
+ for (ni = 0; ni < n_entries; ni++) {
+ p[ni].histogram = hi;
+ hi += s->n_histogram_entries + 1;
+ }
+ }
}
/*
@@ -375,9 +426,11 @@ static int dm_stats_delete(struct dm_stats *stats, int id)
* vfree can't be called from RCU callback
*/
for_each_possible_cpu(cpu)
- if (is_vmalloc_addr(s->stat_percpu))
+ if (is_vmalloc_addr(s->stat_percpu) ||
+ is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
goto do_sync_free;
- if (is_vmalloc_addr(s)) {
+ if (is_vmalloc_addr(s) ||
+ is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
synchronize_rcu_expedited();
dm_stat_free(&s->rcu_head);
@@ -404,12 +457,24 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
list_for_each_entry(s, &stats->list, list_entry) {
if (!program || !strcmp(program, s->program_id)) {
len = s->end - s->start;
- DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
+ DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
(unsigned long long)s->start,
(unsigned long long)len,
(unsigned long long)s->step,
s->program_id,
s->aux_data);
+ if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
+ DMEMIT(" precise_timestamps");
+ if (s->n_histogram_entries) {
+ unsigned i;
+ DMEMIT(" histogram:");
+ for (i = 0; i < s->n_histogram_entries; i++) {
+ if (i)
+ DMEMIT(",");
+ DMEMIT("%llu", s->histogram_boundaries[i]);
+ }
+ }
+ DMEMIT("\n");
}
}
mutex_unlock(&stats->mutex);
@@ -417,18 +482,24 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
return 1;
}
-static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
+static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
+ struct dm_stat_percpu *p)
{
/*
* This is racy, but so is part_round_stats_single.
*/
- unsigned long now = jiffies;
- unsigned in_flight_read;
- unsigned in_flight_write;
- unsigned long difference = now - shared->stamp;
+ unsigned long long now, difference;
+ unsigned in_flight_read, in_flight_write;
+ if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
+ now = jiffies;
+ else
+ now = ktime_to_ns(ktime_get());
+
+ difference = now - shared->stamp;
if (!difference)
return;
+
in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
if (in_flight_read)
@@ -443,8 +514,9 @@ static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *
}
static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
- unsigned long bi_rw, sector_t len, bool merged,
- bool end, unsigned long duration)
+ unsigned long bi_rw, sector_t len,
+ struct dm_stats_aux *stats_aux, bool end,
+ unsigned long duration_jiffies)
{
unsigned long idx = bi_rw & REQ_WRITE;
struct dm_stat_shared *shared = &s->stat_shared[entry];
@@ -474,15 +546,35 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
p = &s->stat_percpu[smp_processor_id()][entry];
if (!end) {
- dm_stat_round(shared, p);
+ dm_stat_round(s, shared, p);
atomic_inc(&shared->in_flight[idx]);
} else {
- dm_stat_round(shared, p);
+ unsigned long long duration;
+ dm_stat_round(s, shared, p);
atomic_dec(&shared->in_flight[idx]);
p->sectors[idx] += len;
p->ios[idx] += 1;
- p->merges[idx] += merged;
- p->ticks[idx] += duration;
+ p->merges[idx] += stats_aux->merged;
+ if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
+ p->ticks[idx] += duration_jiffies;
+ duration = jiffies_to_msecs(duration_jiffies);
+ } else {
+ p->ticks[idx] += stats_aux->duration_ns;
+ duration = stats_aux->duration_ns;
+ }
+ if (s->n_histogram_entries) {
+ unsigned lo = 0, hi = s->n_histogram_entries + 1;
+ while (lo + 1 < hi) {
+ unsigned mid = (lo + hi) / 2;
+ if (s->histogram_boundaries[mid - 1] > duration) {
+ hi = mid;
+ } else {
+ lo = mid;
+ }
+
+ }
+ p->histogram[lo]++;
+ }
}
#if BITS_PER_LONG == 32
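
The binary search above selects one of n_histogram_entries + 1 buckets: bucket 0 collects durations below the first boundary, and the last bucket collects everything at or above the final boundary. A standalone sketch of the same selection (histogram_bucket() is a hypothetical user-space equivalent with identical arithmetic):

	static unsigned histogram_bucket(const unsigned long long *boundaries,
					 unsigned n_histogram_entries,
					 unsigned long long duration)
	{
		unsigned lo = 0, hi = n_histogram_entries + 1;

		while (lo + 1 < hi) {
			unsigned mid = (lo + hi) / 2;

			if (boundaries[mid - 1] > duration)
				hi = mid;	/* duration is below this boundary */
			else
				lo = mid;	/* duration is at or above it */
		}
		return lo;			/* index of the bucket to increment */
	}
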
@@ -494,7 +586,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
sector_t bi_sector, sector_t end_sector,
- bool end, unsigned long duration,
+ bool end, unsigned long duration_jiffies,
struct dm_stats_aux *stats_aux)
{
sector_t rel_sector, offset, todo, fragment_len;
@@ -523,7 +615,7 @@ static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
if (fragment_len > s->step - offset)
fragment_len = s->step - offset;
dm_stat_for_entry(s, entry, bi_rw, fragment_len,
- stats_aux->merged, end, duration);
+ stats_aux, end, duration_jiffies);
todo -= fragment_len;
entry++;
offset = 0;
@@ -532,11 +624,13 @@ static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
sector_t bi_sector, unsigned bi_sectors, bool end,
- unsigned long duration, struct dm_stats_aux *stats_aux)
+ unsigned long duration_jiffies,
+ struct dm_stats_aux *stats_aux)
{
struct dm_stat *s;
sector_t end_sector;
struct dm_stats_last_position *last;
+ bool got_precise_time;
if (unlikely(!bi_sectors))
return;
@@ -560,8 +654,17 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
rcu_read_lock();
- list_for_each_entry_rcu(s, &stats->list, list_entry)
- __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);
+ got_precise_time = false;
+ list_for_each_entry_rcu(s, &stats->list, list_entry) {
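+		/* duration_ns carries the start timestamp on submission and the elapsed nanoseconds on completion */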
+ if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
+ if (!end)
+ stats_aux->duration_ns = ktime_to_ns(ktime_get());
+ else
+ stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
+ got_precise_time = true;
+ }
+ __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
+ }
rcu_read_unlock();
}
@@ -574,10 +677,25 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
local_irq_disable();
p = &s->stat_percpu[smp_processor_id()][x];
- dm_stat_round(shared, p);
+ dm_stat_round(s, shared, p);
local_irq_enable();
- memset(&shared->tmp, 0, sizeof(shared->tmp));
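+	/* clear the counters field by field: tmp.histogram points at a preallocated slice and must not be zeroed here */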
+ shared->tmp.sectors[READ] = 0;
+ shared->tmp.sectors[WRITE] = 0;
+ shared->tmp.ios[READ] = 0;
+ shared->tmp.ios[WRITE] = 0;
+ shared->tmp.merges[READ] = 0;
+ shared->tmp.merges[WRITE] = 0;
+ shared->tmp.ticks[READ] = 0;
+ shared->tmp.ticks[WRITE] = 0;
+ shared->tmp.io_ticks[READ] = 0;
+ shared->tmp.io_ticks[WRITE] = 0;
+ shared->tmp.io_ticks_total = 0;
+ shared->tmp.time_in_queue = 0;
+
+ if (s->n_histogram_entries)
+ memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));
+
for_each_possible_cpu(cpu) {
p = &s->stat_percpu[cpu][x];
shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
@@ -592,6 +710,11 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ if (s->n_histogram_entries) {
+ unsigned i;
+ for (i = 0; i < s->n_histogram_entries + 1; i++)
+ shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+ }
}
}
@@ -621,6 +744,15 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
p->io_ticks_total -= shared->tmp.io_ticks_total;
p->time_in_queue -= shared->tmp.time_in_queue;
local_irq_enable();
+ if (s->n_histogram_entries) {
+ unsigned i;
+ for (i = 0; i < s->n_histogram_entries + 1; i++) {
+ local_irq_disable();
+ p = &s->stat_percpu[smp_processor_id()][x];
+ p->histogram[i] -= shared->tmp.histogram[i];
+ local_irq_enable();
+ }
+ }
}
}
@@ -646,11 +778,15 @@ static int dm_stats_clear(struct dm_stats *stats, int id)
/*
* This is like jiffies_to_msec, but works for 64-bit values.
*/
-static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
+static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
- unsigned long long result = 0;
+ unsigned long long result;
unsigned mult;
+ if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
+ return j;
+
+ result = 0;
if (j)
result = jiffies_to_msecs(j & 0x3fffff);
if (j >= 1 << 22) {
@@ -706,22 +842,29 @@ static int dm_stats_print(struct dm_stats *stats, int id,
__dm_stat_init_temporary_percpu_totals(shared, s, x);
- DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
+ DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
(unsigned long long)start,
(unsigned long long)step,
shared->tmp.ios[READ],
shared->tmp.merges[READ],
shared->tmp.sectors[READ],
- dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
+ dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
shared->tmp.ios[WRITE],
shared->tmp.merges[WRITE],
shared->tmp.sectors[WRITE],
- dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
+ dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
dm_stat_in_flight(shared),
- dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
- dm_jiffies_to_msec64(shared->tmp.time_in_queue),
- dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
- dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));
+ dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
+ dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
+ dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
+ dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
+ if (s->n_histogram_entries) {
+ unsigned i;
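+			/* n boundaries define n + 1 buckets; the last bucket counts I/O at or above the final boundary */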
+ for (i = 0; i < s->n_histogram_entries + 1; i++) {
+ DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
+ }
+ }
+ DMEMIT("\n");
if (unlikely(sz + 1 >= maxlen))
goto buffer_overflow;
@@ -763,38 +906,89 @@ static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data
return 0;
}
+static int parse_histogram(const char *h, unsigned *n_histogram_entries,
+ unsigned long long **histogram_boundaries)
+{
+ const char *q;
+ unsigned n;
+ unsigned long long last;
+
+ *n_histogram_entries = 1;
+ for (q = h; *q; q++)
+ if (*q == ',')
+ (*n_histogram_entries)++;
+
+ *histogram_boundaries = kmalloc(*n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
+ if (!*histogram_boundaries)
+ return -ENOMEM;
+
+ n = 0;
+ last = 0;
+ while (1) {
+ unsigned long long hi;
+ int s;
+ char ch;
+ s = sscanf(h, "%llu%c", &hi, &ch);
+ if (!s || (s == 2 && ch != ','))
+ return -EINVAL;
+ if (hi <= last)
+ return -EINVAL;
+ last = hi;
+ (*histogram_boundaries)[n] = hi;
+ if (s == 1)
+ return 0;
+ h = strchr(h, ',') + 1;
+ n++;
+ }
+}
+
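
As a rough illustration of the syntax parse_histogram() accepts (strictly increasing, comma-separated boundaries), here is a simplified user-space sketch; the token "histogram:10,100,1000" is only an example value and the error handling is condensed:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		const char *arg = "histogram:10,100,1000";	/* example token */
		const char *h = arg + strlen("histogram:");
		unsigned long long last = 0, v;
		char *end;

		while (*h) {
			v = strtoull(h, &end, 10);
			if (end == h || v <= last || (*end && *end != ','))
				return 1;	/* the kernel would return -EINVAL */
			printf("boundary %llu\n", v);
			last = v;
			h = *end ? end + 1 : end;
		}
		return 0;
	}
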
static int message_stats_create(struct mapped_device *md,
unsigned argc, char **argv,
char *result, unsigned maxlen)
{
+ int r;
int id;
char dummy;
unsigned long long start, end, len, step;
unsigned divisor;
const char *program_id, *aux_data;
+ unsigned stat_flags = 0;
+
+ unsigned n_histogram_entries = 0;
+ unsigned long long *histogram_boundaries = NULL;
+
+ struct dm_arg_set as, as_backup;
+ const char *a;
+ unsigned feature_args;
/*
* Input format:
- * <range> <step> [<program_id> [<aux_data>]]
+ * <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
*/
- if (argc < 3 || argc > 5)
- return -EINVAL;
+ if (argc < 3)
+ goto ret_einval;
+
+ as.argc = argc;
+ as.argv = argv;
+ dm_consume_args(&as, 1);
- if (!strcmp(argv[1], "-")) {
+ a = dm_shift_arg(&as);
+ if (!strcmp(a, "-")) {
start = 0;
len = dm_get_size(md);
if (!len)
len = 1;
- } else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
+ } else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
start != (sector_t)start || len != (sector_t)len)
- return -EINVAL;
+ goto ret_einval;
end = start + len;
if (start >= end)
- return -EINVAL;
+ goto ret_einval;
- if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
+ a = dm_shift_arg(&as);
+ if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
if (!divisor)
return -EINVAL;
step = end - start;
@@ -802,18 +996,44 @@ static int message_stats_create(struct mapped_device *md,
step++;
if (!step)
step = 1;
- } else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
+ } else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
step != (sector_t)step || !step)
- return -EINVAL;
+ goto ret_einval;
+
+ as_backup = as;
+ a = dm_shift_arg(&as);
+ if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
+ while (feature_args--) {
+ a = dm_shift_arg(&as);
+ if (!a)
+ goto ret_einval;
+ if (!strcasecmp(a, "precise_timestamps"))
+ stat_flags |= STAT_PRECISE_TIMESTAMPS;
+ else if (!strncasecmp(a, "histogram:", 10)) {
+ if (n_histogram_entries)
+ goto ret_einval;
+ if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
+ goto ret;
+ } else
+ goto ret_einval;
+ }
+ } else {
+ as = as_backup;
+ }
program_id = "-";
aux_data = "-";
- if (argc > 3)
- program_id = argv[3];
+ a = dm_shift_arg(&as);
+ if (a)
+ program_id = a;
- if (argc > 4)
- aux_data = argv[4];
+ a = dm_shift_arg(&as);
+ if (a)
+ aux_data = a;
+
+ if (as.argc)
+ goto ret_einval;
/*
* If a buffer overflow happens after we created the region,
@@ -822,17 +1042,29 @@ static int message_stats_create(struct mapped_device *md,
* leaked). So we must detect buffer overflow in advance.
*/
snprintf(result, maxlen, "%d", INT_MAX);
- if (dm_message_test_buffer_overflow(result, maxlen))
- return 1;
+ if (dm_message_test_buffer_overflow(result, maxlen)) {
+ r = 1;
+ goto ret;
+ }
- id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
+ id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
+ n_histogram_entries, histogram_boundaries, program_id, aux_data,
dm_internal_suspend_fast, dm_internal_resume_fast, md);
- if (id < 0)
- return id;
+ if (id < 0) {
+ r = id;
+ goto ret;
+ }
snprintf(result, maxlen, "%d", id);
- return 1;
+ r = 1;
+ goto ret;
+
+ret_einval:
+ r = -EINVAL;
+ret:
+ kfree(histogram_boundaries);
+ return r;
}
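
Putting the new grammar together, a hedged user-space sketch of how an argument vector with optional feature arguments is consumed; the argv values are hypothetical and the numeric check is simplified relative to the kernel's sscanf(a, "%u%c", ...) test:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical argument vector, already split by the message parser */
		const char *argv[] = { "0+1024", "/100", "2",
				       "precise_timestamps", "histogram:1000,5000",
				       "myprog", "myaux" };
		int argc = 7, i = 2, nfeatures = 0;

		if (sscanf(argv[i], "%d", &nfeatures) == 1 && nfeatures <= argc - i - 1)
			for (i++; nfeatures-- > 0; i++)
				printf("feature: %s\n", argv[i]);

		printf("program_id: %s\n", i < argc ? argv[i] : "-");
		printf("aux_data: %s\n", i + 1 < argc ? argv[i + 1] : "-");
		return 0;
	}
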
static int message_stats_delete(struct mapped_device *md,
@@ -935,11 +1167,6 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
{
int r;
- if (dm_request_based(md)) {
- DMWARN("Statistics are only supported for bio-based devices");
- return -EOPNOTSUPP;
- }
-
/* All messages here must start with '@' */
if (!strcasecmp(argv[0], "@stats_create"))
r = message_stats_create(md, argc, argv, result, maxlen);
diff --git a/kernel/drivers/md/dm-stats.h b/kernel/drivers/md/dm-stats.h
index e7c4984bf..f1c0956e3 100644
--- a/kernel/drivers/md/dm-stats.h
+++ b/kernel/drivers/md/dm-stats.h
@@ -18,6 +18,7 @@ struct dm_stats {
struct dm_stats_aux {
bool merged;
+ unsigned long long duration_ns;
};
void dm_stats_init(struct dm_stats *st);
@@ -30,7 +31,8 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
sector_t bi_sector, unsigned bi_sectors, bool end,
- unsigned long duration, struct dm_stats_aux *aux);
+ unsigned long duration_jiffies,
+ struct dm_stats_aux *aux);
static inline bool dm_stats_used(struct dm_stats *st)
{
diff --git a/kernel/drivers/md/dm-stripe.c b/kernel/drivers/md/dm-stripe.c
index f8b37d4c0..797ddb900 100644
--- a/kernel/drivers/md/dm-stripe.c
+++ b/kernel/drivers/md/dm-stripe.c
@@ -75,13 +75,15 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
{
unsigned long long start;
char dummy;
+ int ret;
if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
return -EINVAL;
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
- &sc->stripe[stripe].dev))
- return -ENXIO;
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+ &sc->stripe[stripe].dev);
+ if (ret)
+ return ret;
sc->stripe[stripe].physical_start = start;
@@ -273,7 +275,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
return DM_MAPIO_REMAPPED;
} else {
/* The range doesn't map to the target stripe */
- bio_endio(bio, 0);
+ bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
}
@@ -412,26 +414,6 @@ static void stripe_io_hints(struct dm_target *ti,
blk_limits_io_opt(limits, chunk_size * sc->stripes);
}
-static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct stripe_c *sc = ti->private;
- sector_t bvm_sector = bvm->bi_sector;
- uint32_t stripe;
- struct request_queue *q;
-
- stripe_map_sector(sc, bvm_sector, &stripe, &bvm_sector);
-
- q = bdev_get_queue(sc->stripe[stripe].dev->bdev);
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = sc->stripe[stripe].dev->bdev;
- bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector;
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static struct target_type stripe_target = {
.name = "striped",
.version = {1, 5, 1},
@@ -443,7 +425,6 @@ static struct target_type stripe_target = {
.status = stripe_status,
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
- .merge = stripe_merge,
};
int __init dm_stripe_init(void)
@@ -451,10 +432,8 @@ int __init dm_stripe_init(void)
int r;
r = dm_register_target(&stripe_target);
- if (r < 0) {
+ if (r < 0)
DMWARN("target registration failed");
- return r;
- }
return r;
}
diff --git a/kernel/drivers/md/dm-switch.c b/kernel/drivers/md/dm-switch.c
index 50fca469c..871c18fe0 100644
--- a/kernel/drivers/md/dm-switch.c
+++ b/kernel/drivers/md/dm-switch.c
@@ -99,11 +99,11 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
if (sector_div(nr_regions, sctx->region_size))
nr_regions++;
- sctx->nr_regions = nr_regions;
- if (sctx->nr_regions != nr_regions || sctx->nr_regions >= ULONG_MAX) {
+ if (nr_regions >= ULONG_MAX) {
ti->error = "Region table too large";
return -EINVAL;
}
+ sctx->nr_regions = nr_regions;
nr_slots = nr_regions;
if (sector_div(nr_slots, sctx->region_entries_per_slot))
@@ -511,27 +511,24 @@ static void switch_status(struct dm_target *ti, status_type_t type,
*
* Passthrough all ioctls to the path for sector 0
*/
-static int switch_ioctl(struct dm_target *ti, unsigned cmd,
- unsigned long arg)
+static int switch_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct switch_ctx *sctx = ti->private;
- struct block_device *bdev;
- fmode_t mode;
unsigned path_nr;
- int r = 0;
path_nr = switch_get_path_nr(sctx, 0);
- bdev = sctx->path_list[path_nr].dmdev->bdev;
- mode = sctx->path_list[path_nr].dmdev->mode;
+ *bdev = sctx->path_list[path_nr].dmdev->bdev;
+ *mode = sctx->path_list[path_nr].dmdev->mode;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (ti->len + sctx->path_list[path_nr].start != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ if (ti->len + sctx->path_list[path_nr].start !=
+ i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ return 1;
+ return 0;
}
static int switch_iterate_devices(struct dm_target *ti,
@@ -560,7 +557,7 @@ static struct target_type switch_target = {
.map = switch_map,
.message = switch_message,
.status = switch_status,
- .ioctl = switch_ioctl,
+ .prepare_ioctl = switch_prepare_ioctl,
.iterate_devices = switch_iterate_devices,
};
diff --git a/kernel/drivers/md/dm-table.c b/kernel/drivers/md/dm-table.c
index 16ba55ad7..061152a43 100644
--- a/kernel/drivers/md/dm-table.c
+++ b/kernel/drivers/md/dm-table.c
@@ -440,14 +440,6 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
q->limits.alignment_offset,
(unsigned long long) start << SECTOR_SHIFT);
- /*
- * Check if merge fn is supported.
- * If not we'll force DM to use PAGE_SIZE or
- * smaller I/O, just to be safe.
- */
- if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
- blk_limits_max_hw_sectors(limits,
- (unsigned int) (PAGE_SIZE >> 9));
return 0;
}
@@ -1022,15 +1014,16 @@ static int dm_table_build_index(struct dm_table *t)
return r;
}
+static bool integrity_profile_exists(struct gendisk *disk)
+{
+ return !!blk_get_integrity(disk);
+}
+
/*
* Get a disk whose integrity profile reflects the table's profile.
- * If %match_all is true, all devices' profiles must match.
- * If %match_all is false, all devices must at least have an
- * allocated integrity profile; but uninitialized is ok.
* Returns NULL if integrity support was inconsistent or unavailable.
*/
-static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
- bool match_all)
+static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
{
struct list_head *devices = dm_table_get_devices(t);
struct dm_dev_internal *dd = NULL;
@@ -1038,10 +1031,8 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
list_for_each_entry(dd, devices, list) {
template_disk = dd->dm_dev->bdev->bd_disk;
- if (!blk_get_integrity(template_disk))
+ if (!integrity_profile_exists(template_disk))
goto no_integrity;
- if (!match_all && !blk_integrity_is_initialized(template_disk))
- continue; /* skip uninitialized profiles */
else if (prev_disk &&
blk_integrity_compare(prev_disk, template_disk) < 0)
goto no_integrity;
@@ -1060,34 +1051,40 @@ no_integrity:
}
/*
- * Register the mapped device for blk_integrity support if
- * the underlying devices have an integrity profile. But all devices
- * may not have matching profiles (checking all devices isn't reliable
+ * Register the mapped device for blk_integrity support if the
+ * underlying devices have an integrity profile. But all devices may
+ * not have matching profiles (checking all devices isn't reliable
* during table load because this table may use other DM device(s) which
- * must be resumed before they will have an initialized integity profile).
- * Stacked DM devices force a 2 stage integrity profile validation:
- * 1 - during load, validate all initialized integrity profiles match
- * 2 - during resume, validate all integrity profiles match
+ * must be resumed before they will have an initialized integrity
+ * profile). Consequently, stacked DM devices force a 2 stage integrity
+ * profile validation: First pass during table load, final pass during
+ * resume.
*/
-static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+static int dm_table_register_integrity(struct dm_table *t)
{
+ struct mapped_device *md = t->md;
struct gendisk *template_disk = NULL;
- template_disk = dm_table_get_integrity_disk(t, false);
+ template_disk = dm_table_get_integrity_disk(t);
if (!template_disk)
return 0;
- if (!blk_integrity_is_initialized(dm_disk(md))) {
+ if (!integrity_profile_exists(dm_disk(md))) {
t->integrity_supported = 1;
- return blk_integrity_register(dm_disk(md), NULL);
+ /*
+ * Register integrity profile during table load; we can do
+ * this because the final profile must match during resume.
+ */
+ blk_integrity_register(dm_disk(md),
+ blk_get_integrity(template_disk));
+ return 0;
}
/*
- * If DM device already has an initalized integrity
+ * If DM device already has an initialized integrity
* profile the new profile should not conflict.
*/
- if (blk_integrity_is_initialized(template_disk) &&
- blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+ if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
DMWARN("%s: conflict with existing integrity profile: "
"%s profile mismatch",
dm_device_name(t->md),
@@ -1095,7 +1092,7 @@ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device
return 1;
}
- /* Preserve existing initialized integrity profile */
+ /* Preserve existing integrity profile */
t->integrity_supported = 1;
return 0;
}
@@ -1120,7 +1117,7 @@ int dm_table_complete(struct dm_table *t)
return r;
}
- r = dm_table_prealloc_integrity(t, t->md);
+ r = dm_table_register_integrity(t);
if (r) {
DMERR("could not register integrity profile.");
return r;
@@ -1286,29 +1283,30 @@ combine_limits:
}
/*
- * Set the integrity profile for this device if all devices used have
- * matching profiles. We're quite deep in the resume path but still
- * don't know if all devices (particularly DM devices this device
- * may be stacked on) have matching profiles. Even if the profiles
- * don't match we have no way to fail (to resume) at this point.
+ * Verify that all devices have an integrity profile that matches the
+ * DM device's registered integrity profile. If the profiles don't
+ * match then unregister the DM device's integrity profile.
*/
-static void dm_table_set_integrity(struct dm_table *t)
+static void dm_table_verify_integrity(struct dm_table *t)
{
struct gendisk *template_disk = NULL;
- if (!blk_get_integrity(dm_disk(t->md)))
- return;
+ if (t->integrity_supported) {
+ /*
+ * Verify that the original integrity profile
+ * matches all the devices in this table.
+ */
+ template_disk = dm_table_get_integrity_disk(t);
+ if (template_disk &&
+ blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
+ return;
+ }
- template_disk = dm_table_get_integrity_disk(t, true);
- if (template_disk)
- blk_integrity_register(dm_disk(t->md),
- blk_get_integrity(template_disk));
- else if (blk_integrity_is_initialized(dm_disk(t->md)))
- DMWARN("%s: device no longer has a valid integrity profile",
- dm_device_name(t->md));
- else
+ if (integrity_profile_exists(dm_disk(t->md))) {
DMWARN("%s: unable to establish an integrity profile",
dm_device_name(t->md));
+ blk_integrity_unregister(dm_disk(t->md));
+ }
}
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1388,14 +1386,6 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}
-static int queue_supports_sg_gaps(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags);
-}
-
static bool dm_table_all_devices_attribute(struct dm_table *t,
iterate_devices_callout_fn func)
{
@@ -1516,12 +1506,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
- if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps))
- queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q);
- else
- queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
-
- dm_table_set_integrity(t);
+ dm_table_verify_integrity(t);
/*
* Determine whether or not this queue's I/O timings contribute
diff --git a/kernel/drivers/md/dm-thin-metadata.c b/kernel/drivers/md/dm-thin-metadata.c
index cde1d6749..c219a053c 100644
--- a/kernel/drivers/md/dm-thin-metadata.c
+++ b/kernel/drivers/md/dm-thin-metadata.c
@@ -184,7 +184,6 @@ struct dm_pool_metadata {
uint64_t trans_id;
unsigned long flags;
sector_t data_block_size;
- bool read_only:1;
/*
* Set if a transaction has to be aborted but the attempt to roll back
@@ -397,7 +396,9 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
}
}
- return dm_bm_unlock(b);
+ dm_bm_unlock(b);
+
+ return 0;
}
static void __setup_btree_details(struct dm_pool_metadata *pmd)
@@ -651,7 +652,9 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
}
__setup_btree_details(pmd);
- return dm_bm_unlock(sblock);
+ dm_bm_unlock(sblock);
+
+ return 0;
bad_cleanup_data_sm:
dm_sm_destroy(pmd->data_sm);
@@ -836,7 +839,6 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
init_rwsem(&pmd->root_lock);
pmd->time = 0;
INIT_LIST_HEAD(&pmd->thin_devices);
- pmd->read_only = false;
pmd->fail_io = false;
pmd->bdev = bdev;
pmd->data_block_size = data_block_size;
@@ -880,7 +882,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
return -EBUSY;
}
- if (!pmd->read_only && !pmd->fail_io) {
+ if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
r = __commit_transaction(pmd);
if (r < 0)
DMWARN("%s: __commit_transaction() failed, error = %d",
@@ -1205,6 +1207,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
dm_block_t held_root;
/*
+ * We commit to ensure the btree roots which we increment in a
+ * moment are up to date.
+ */
+ __commit_transaction(pmd);
+
+ /*
* Copy the superblock.
*/
dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
@@ -1299,7 +1307,9 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
dm_sm_dec_block(pmd->metadata_sm, held_root);
- return dm_tm_unlock(pmd->tm, copy);
+ dm_tm_unlock(pmd->tm, copy);
+
+ return 0;
}
int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
@@ -1329,7 +1339,9 @@ static int __get_metadata_snap(struct dm_pool_metadata *pmd,
disk_super = dm_block_data(sblock);
*result = le64_to_cpu(disk_super->held_root);
- return dm_bm_unlock(sblock);
+ dm_bm_unlock(sblock);
+
+ return 0;
}
int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
@@ -1392,10 +1404,11 @@ int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
dm_block_t keys[2] = { td->id, block };
struct dm_btree_info *info;
- if (pmd->fail_io)
- return -EINVAL;
-
down_read(&pmd->root_lock);
+ if (pmd->fail_io) {
+ up_read(&pmd->root_lock);
+ return -EINVAL;
+ }
if (can_issue_io) {
info = &pmd->info;
@@ -1419,6 +1432,63 @@ int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
return r;
}
+/* FIXME: write a more efficient one in btree */
+int dm_thin_find_mapped_range(struct dm_thin_device *td,
+ dm_block_t begin, dm_block_t end,
+ dm_block_t *thin_begin, dm_block_t *thin_end,
+ dm_block_t *pool_begin, bool *maybe_shared)
+{
+ int r;
+ dm_block_t pool_end;
+ struct dm_thin_lookup_result lookup;
+
+ if (end < begin)
+ return -ENODATA;
+
+ /*
+ * Find first mapped block.
+ */
+ while (begin < end) {
+ r = dm_thin_find_block(td, begin, true, &lookup);
+ if (r) {
+ if (r != -ENODATA)
+ return r;
+ } else
+ break;
+
+ begin++;
+ }
+
+ if (begin == end)
+ return -ENODATA;
+
+ *thin_begin = begin;
+ *pool_begin = lookup.block;
+ *maybe_shared = lookup.shared;
+
+ begin++;
+ pool_end = *pool_begin + 1;
+ while (begin != end) {
+ r = dm_thin_find_block(td, begin, true, &lookup);
+ if (r) {
+ if (r == -ENODATA)
+ break;
+ else
+ return r;
+ }
+
+ if ((lookup.block != pool_end) ||
+ (lookup.shared != *maybe_shared))
+ break;
+
+ pool_end++;
+ begin++;
+ }
+
+ *thin_end = begin;
+ return 0;
+}
+
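
For orientation, a sketch of how callers are expected to iterate this helper, walking every mapped run in [begin, end); this mirrors the pattern break_up_discard_bio() uses later in this patch. walk_mapped_ranges() is illustrative only, with error handling trimmed:

	static void walk_mapped_ranges(struct dm_thin_device *td,
				       dm_block_t begin, dm_block_t end)
	{
		dm_block_t thin_begin, thin_end, pool_begin;
		bool maybe_shared;

		while (begin < end) {
			if (dm_thin_find_mapped_range(td, begin, end,
						      &thin_begin, &thin_end,
						      &pool_begin, &maybe_shared))
				break;	/* typically -ENODATA: nothing mapped past begin */
			/* [thin_begin, thin_end) maps to pool blocks starting at pool_begin */
			begin = thin_end;
		}
	}
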
static int __insert(struct dm_thin_device *td, dm_block_t block,
dm_block_t data_block)
{
@@ -1471,6 +1541,65 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
return 0;
}
+static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
+{
+ int r;
+ unsigned count, total_count = 0;
+ struct dm_pool_metadata *pmd = td->pmd;
+ dm_block_t keys[1] = { td->id };
+ __le64 value;
+ dm_block_t mapping_root;
+
+ /*
+ * Find the mapping tree
+ */
+ r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
+ if (r)
+ return r;
+
+ /*
+ * Remove from the mapping tree, taking care to inc the
+ * ref count so it doesn't get deleted.
+ */
+ mapping_root = le64_to_cpu(value);
+ dm_tm_inc(pmd->tm, mapping_root);
+ r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
+ if (r)
+ return r;
+
+ /*
+ * dm_btree_remove_leaves() stops at the first unmapped entry, so we have to
+ * loop round finding mapped ranges.
+ */
+ while (begin < end) {
+ r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
+ if (r == -ENODATA)
+ break;
+
+ if (r)
+ return r;
+
+ if (begin >= end)
+ break;
+
+ r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
+ if (r)
+ return r;
+
+ total_count += count;
+ }
+
+ td->mapped_blocks -= total_count;
+ td->changed = 1;
+
+ /*
+ * Reinsert the mapping tree.
+ */
+ value = cpu_to_le64(mapping_root);
+ __dm_bless_for_disk(&value);
+ return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
+}
+
int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
int r = -EINVAL;
@@ -1483,6 +1612,19 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
return r;
}
+int dm_thin_remove_range(struct dm_thin_device *td,
+ dm_block_t begin, dm_block_t end)
+{
+ int r = -EINVAL;
+
+ down_write(&td->pmd->root_lock);
+ if (!td->pmd->fail_io)
+ r = __remove_range(td, begin, end);
+ up_write(&td->pmd->root_lock);
+
+ return r;
+}
+
int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
int r;
@@ -1739,7 +1881,6 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
down_write(&pmd->root_lock);
- pmd->read_only = true;
dm_bm_set_read_only(pmd->bm);
up_write(&pmd->root_lock);
}
@@ -1747,7 +1888,6 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
down_write(&pmd->root_lock);
- pmd->read_only = false;
dm_bm_set_read_write(pmd->bm);
up_write(&pmd->root_lock);
}
diff --git a/kernel/drivers/md/dm-thin-metadata.h b/kernel/drivers/md/dm-thin-metadata.h
index fac01a96d..a938babe4 100644
--- a/kernel/drivers/md/dm-thin-metadata.h
+++ b/kernel/drivers/md/dm-thin-metadata.h
@@ -147,6 +147,15 @@ int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
int can_issue_io, struct dm_thin_lookup_result *result);
/*
+ * Retrieve the next run of contiguously mapped blocks. Useful for working
+ * out where to break up IO. Returns 0 on success, < 0 on error.
+ */
+int dm_thin_find_mapped_range(struct dm_thin_device *td,
+ dm_block_t begin, dm_block_t end,
+ dm_block_t *thin_begin, dm_block_t *thin_end,
+ dm_block_t *pool_begin, bool *maybe_shared);
+
+/*
* Obtain an unused block.
*/
int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result);
@@ -158,6 +167,8 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
dm_block_t data_block);
int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
+int dm_thin_remove_range(struct dm_thin_device *td,
+ dm_block_t begin, dm_block_t end);
/*
* Queries.
diff --git a/kernel/drivers/md/dm-thin.c b/kernel/drivers/md/dm-thin.c
index e22e6c892..a1cc797fe 100644
--- a/kernel/drivers/md/dm-thin.c
+++ b/kernel/drivers/md/dm-thin.c
@@ -112,22 +112,30 @@ DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
/*
* Key building.
*/
-static void build_data_key(struct dm_thin_device *td,
- dm_block_t b, struct dm_cell_key *key)
+enum lock_space {
+ VIRTUAL,
+ PHYSICAL
+};
+
+static void build_key(struct dm_thin_device *td, enum lock_space ls,
+ dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{
- key->virtual = 0;
+ key->virtual = (ls == VIRTUAL);
key->dev = dm_thin_dev_id(td);
key->block_begin = b;
- key->block_end = b + 1ULL;
+ key->block_end = e;
+}
+
+static void build_data_key(struct dm_thin_device *td, dm_block_t b,
+ struct dm_cell_key *key)
+{
+ build_key(td, PHYSICAL, b, b + 1llu, key);
}
static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
struct dm_cell_key *key)
{
- key->virtual = 1;
- key->dev = dm_thin_dev_id(td);
- key->block_begin = b;
- key->block_end = b + 1ULL;
+ build_key(td, VIRTUAL, b, b + 1llu, key);
}
/*----------------------------------------------------------------*/
@@ -313,6 +321,80 @@ struct thin_c {
/*----------------------------------------------------------------*/
+/**
+ * __blkdev_issue_discard_async - queue a discard with async completion
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ * @parent_bio: parent discard bio that all sub discards get chained to
+ *
+ * Description:
+ * Asynchronously issue a discard request for the sectors in question.
+ */
+static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
+ struct bio *parent_bio)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ int type = REQ_WRITE | REQ_DISCARD;
+ struct bio *bio;
+
+ if (!q || !nr_sects)
+ return -ENXIO;
+
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
+ if (flags & BLKDEV_DISCARD_SECURE) {
+ if (!blk_queue_secdiscard(q))
+ return -EOPNOTSUPP;
+ type |= REQ_SECURE;
+ }
+
+ /*
+ * Required bio_put occurs in bio_endio thanks to bio_chain below
+ */
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ bio_chain(bio, parent_bio);
+
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_size = nr_sects << 9;
+
+ submit_bio(type, bio);
+
+ return 0;
+}
+
+static bool block_size_is_power_of_two(struct pool *pool)
+{
+ return pool->sectors_per_block_shift >= 0;
+}
+
+static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
+{
+ return block_size_is_power_of_two(pool) ?
+ (b << pool->sectors_per_block_shift) :
+ (b * pool->sectors_per_block);
+}
+
+static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
+ struct bio *parent_bio)
+{
+ sector_t s = block_to_sectors(tc->pool, data_b);
+ sector_t len = block_to_sectors(tc->pool, data_e - data_b);
+
+ return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
+ GFP_NOWAIT, 0, parent_bio);
+}
+
+/*----------------------------------------------------------------*/
+
/*
* wake_worker() is used when new work is queued and when pool_resume is
* ready to continue deferred IO processing.
@@ -462,6 +544,7 @@ struct dm_thin_endio_hook {
struct dm_deferred_entry *all_io_entry;
struct dm_thin_new_mapping *overwrite_mapping;
struct rb_node rb_node;
+ struct dm_bio_prison_cell *cell;
};
static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
@@ -474,8 +557,10 @@ static void error_bio_list(struct bio_list *bios, int error)
{
struct bio *bio;
- while ((bio = bio_list_pop(bios)))
- bio_endio(bio, error);
+ while ((bio = bio_list_pop(bios))) {
+ bio->bi_error = error;
+ bio_endio(bio);
+ }
}
static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
@@ -525,16 +610,21 @@ static void requeue_io(struct thin_c *tc)
requeue_deferred_cells(tc);
}
-static void error_retry_list(struct pool *pool)
+static void error_retry_list_with_code(struct pool *pool, int error)
{
struct thin_c *tc;
rcu_read_lock();
list_for_each_entry_rcu(tc, &pool->active_thins, list)
- error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
+ error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
rcu_read_unlock();
}
+static void error_retry_list(struct pool *pool)
+{
+ return error_retry_list_with_code(pool, -EIO);
+}
+
/*
* This section of code contains the logic for processing a thin device's IO.
* Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -542,11 +632,6 @@ static void error_retry_list(struct pool *pool)
* target.
*/
-static bool block_size_is_power_of_two(struct pool *pool)
-{
- return pool->sectors_per_block_shift >= 0;
-}
-
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
@@ -560,6 +645,34 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
return block_nr;
}
+/*
+ * Returns the _complete_ blocks that this bio covers.
+ */
+static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
+ dm_block_t *begin, dm_block_t *end)
+{
+ struct pool *pool = tc->pool;
+ sector_t b = bio->bi_iter.bi_sector;
+ sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
+
+ b += pool->sectors_per_block - 1ull; /* so we round up */
+
+ if (block_size_is_power_of_two(pool)) {
+ b >>= pool->sectors_per_block_shift;
+ e >>= pool->sectors_per_block_shift;
+ } else {
+ (void) sector_div(b, pool->sectors_per_block);
+ (void) sector_div(e, pool->sectors_per_block);
+ }
+
+ if (e < b)
+ /* Can happen if the bio is within a single block. */
+ e = b;
+
+ *begin = b;
+ *end = e;
+}
+
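
A worked example of the rounding above, with hypothetical numbers: at 128 sectors per block, a bio covering sectors [100, 700) yields begin = 1 and end = 5, so only blocks 1 to 4 count as fully covered; a bio that fits inside one block collapses to begin == end. A standalone sketch for the power-of-two case (complete_block_range() is a hypothetical helper):

	static void complete_block_range(unsigned long long bi_sector,
					 unsigned long long nr_sectors,
					 unsigned shift,
					 unsigned long long *begin,
					 unsigned long long *end)
	{
		unsigned long long b = bi_sector;
		unsigned long long e = bi_sector + nr_sectors;

		b += (1ull << shift) - 1;	/* round the start up to a block boundary */
		b >>= shift;
		e >>= shift;			/* round the end down */

		if (e < b)			/* bio sits inside a single block */
			e = b;

		*begin = b;
		*end = e;
	}
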
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
struct pool *pool = tc->pool;
@@ -648,7 +761,7 @@ struct dm_thin_new_mapping {
struct list_head list;
bool pass_discard:1;
- bool definitely_not_shared:1;
+ bool maybe_shared:1;
/*
* Track quiescing, copying and zeroing preparation actions. When this
@@ -659,9 +772,9 @@ struct dm_thin_new_mapping {
int err;
struct thin_c *tc;
- dm_block_t virt_block;
+ dm_block_t virt_begin, virt_end;
dm_block_t data_block;
- struct dm_bio_prison_cell *cell, *cell2;
+ struct dm_bio_prison_cell *cell;
/*
* If the bio covers the whole area of a block then we can avoid
@@ -701,12 +814,14 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
complete_mapping_preparation(m);
}
-static void overwrite_endio(struct bio *bio, int err)
+static void overwrite_endio(struct bio *bio)
{
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct dm_thin_new_mapping *m = h->overwrite_mapping;
- m->err = err;
+ bio->bi_end_io = m->saved_bi_end_io;
+
+ m->err = bio->bi_error;
complete_mapping_preparation(m);
}
@@ -794,10 +909,6 @@ static void inc_remap_and_issue_cell(struct thin_c *tc,
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
- if (m->bio) {
- m->bio->bi_end_io = m->saved_bi_end_io;
- atomic_inc(&m->bio->bi_remaining);
- }
cell_error(m->tc->pool, m->cell);
list_del(&m->list);
mempool_free(m, m->tc->pool->mapping_pool);
@@ -807,15 +918,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
struct pool *pool = tc->pool;
- struct bio *bio;
+ struct bio *bio = m->bio;
int r;
- bio = m->bio;
- if (bio) {
- bio->bi_end_io = m->saved_bi_end_io;
- atomic_inc(&bio->bi_remaining);
- }
-
if (m->err) {
cell_error(pool, m->cell);
goto out;
@@ -826,7 +931,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
* Any I/O for this block arriving after this point will get
* remapped to it directly.
*/
- r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
+ r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
if (r) {
metadata_operation_failed(pool, "dm_thin_insert_block", r);
cell_error(pool, m->cell);
@@ -841,7 +946,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
if (bio) {
inc_remap_and_issue_cell(tc, m->cell, m->data_block);
- bio_endio(bio, 0);
+ bio_endio(bio);
} else {
inc_all_io_entry(tc->pool, m->cell->holder);
remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -853,50 +958,113 @@ out:
mempool_free(m, pool->mapping_pool);
}
-static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
+/*----------------------------------------------------------------*/
+
+static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
+ if (m->cell)
+ cell_defer_no_holder(tc, m->cell);
+ mempool_free(m, tc->pool->mapping_pool);
+}
+static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
+{
bio_io_error(m->bio);
+ free_discard_mapping(m);
+}
+
+static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
+{
+ bio_endio(m->bio);
+ free_discard_mapping(m);
+}
+
+static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
+{
+ int r;
+ struct thin_c *tc = m->tc;
+
+ r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
+ if (r) {
+ metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
+ bio_io_error(m->bio);
+ } else
+ bio_endio(m->bio);
+
cell_defer_no_holder(tc, m->cell);
- cell_defer_no_holder(tc, m->cell2);
mempool_free(m, tc->pool->mapping_pool);
}
-static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
+static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
{
+ /*
+ * We've already unmapped this range of blocks, but before we
+ * passdown we have to check that these blocks are now unused.
+ */
+ int r;
+ bool used = true;
struct thin_c *tc = m->tc;
+ struct pool *pool = tc->pool;
+ dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
- inc_all_io_entry(tc->pool, m->bio);
- cell_defer_no_holder(tc, m->cell);
- cell_defer_no_holder(tc, m->cell2);
+ while (b != end) {
+ /* find start of unmapped run */
+ for (; b < end; b++) {
+ r = dm_pool_block_is_used(pool->pmd, b, &used);
+ if (r)
+ return r;
- if (m->pass_discard)
- if (m->definitely_not_shared)
- remap_and_issue(tc, m->bio, m->data_block);
- else {
- bool used = false;
- if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
- bio_endio(m->bio, 0);
- else
- remap_and_issue(tc, m->bio, m->data_block);
+ if (!used)
+ break;
}
- else
- bio_endio(m->bio, 0);
- mempool_free(m, tc->pool->mapping_pool);
+ if (b == end)
+ break;
+
+ /* find end of run */
+ for (e = b + 1; e != end; e++) {
+ r = dm_pool_block_is_used(pool->pmd, e, &used);
+ if (r)
+ return r;
+
+ if (used)
+ break;
+ }
+
+ r = issue_discard(tc, b, e, m->bio);
+ if (r)
+ return r;
+
+ b = e;
+ }
+
+ return 0;
}
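
The check above only passes discards down for maximal runs of data blocks that are no longer referenced. A compact user-space sketch of that run-finding loop; used[] and the discard() callback are hypothetical stand-ins for dm_pool_block_is_used() and issue_discard():

	#include <stdbool.h>

	static void discard_unused_runs(const bool *used,
					unsigned long long b, unsigned long long end,
					void (*discard)(unsigned long long begin,
							unsigned long long end))
	{
		unsigned long long e;

		while (b != end) {
			while (b < end && used[b])	/* skip blocks still referenced */
				b++;
			if (b == end)
				break;
			for (e = b + 1; e != end && !used[e]; e++)
				;			/* grow the unused run */
			discard(b, e);			/* pass the whole run down at once */
			b = e;
		}
	}
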
-static void process_prepared_discard(struct dm_thin_new_mapping *m)
+static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
int r;
struct thin_c *tc = m->tc;
+ struct pool *pool = tc->pool;
- r = dm_thin_remove_block(tc->td, m->virt_block);
+ r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
if (r)
- DMERR_LIMIT("dm_thin_remove_block() failed");
+ metadata_operation_failed(pool, "dm_thin_remove_range", r);
+
+ else if (m->maybe_shared)
+ r = passdown_double_checking_shared_status(m);
+ else
+ r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);
- process_prepared_discard_passdown(m);
+ /*
+ * Even if r is set, there could be sub discards in flight that we
+ * need to wait for.
+ */
+ m->bio->bi_error = r;
+ bio_endio(m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ mempool_free(m, pool->mapping_pool);
}
static void process_prepared(struct pool *pool, struct list_head *head,
@@ -980,7 +1148,7 @@ static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
}
static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
- dm_block_t data_block,
+ dm_block_t data_begin,
struct dm_thin_new_mapping *m)
{
struct pool *pool = tc->pool;
@@ -990,7 +1158,7 @@ static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
inc_all_io_entry(pool, bio);
- remap_and_issue(tc, bio, data_block);
+ remap_and_issue(tc, bio, data_begin);
}
/*
@@ -1007,7 +1175,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
struct dm_thin_new_mapping *m = get_next_mapping(pool);
m->tc = tc;
- m->virt_block = virt_block;
+ m->virt_begin = virt_block;
+ m->virt_end = virt_block + 1u;
m->data_block = data_dest;
m->cell = cell;
@@ -1086,7 +1255,8 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
m->tc = tc;
- m->virt_block = virt_block;
+ m->virt_begin = virt_block;
+ m->virt_end = virt_block + 1u;
m->data_block = data_block;
m->cell = cell;
@@ -1095,16 +1265,14 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
* zeroing pre-existing data, we can issue the bio immediately.
* Otherwise we use kcopyd to zero the data first.
*/
- if (!pool->pf.zero_new_blocks)
+ if (pool->pf.zero_new_blocks) {
+ if (io_overwrites_block(pool, bio))
+ remap_and_issue_overwrite(tc, bio, data_block, m);
+ else
+ ll_zero(tc, m, data_block * pool->sectors_per_block,
+ (data_block + 1) * pool->sectors_per_block);
+ } else
process_prepared_mapping(m);
-
- else if (io_overwrites_block(pool, bio))
- remap_and_issue_overwrite(tc, bio, data_block, m);
-
- else
- ll_zero(tc, m,
- data_block * pool->sectors_per_block,
- (data_block + 1) * pool->sectors_per_block);
}
static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
@@ -1270,9 +1438,10 @@ static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
int error = should_error_unserviceable_bio(pool);
- if (error)
- bio_endio(bio, error);
- else
+ if (error) {
+ bio->bi_error = error;
+ bio_endio(bio);
+ } else
retry_on_resume(bio);
}
@@ -1295,99 +1464,148 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
retry_on_resume(bio);
}
-static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+static void process_discard_cell_no_passdown(struct thin_c *tc,
+ struct dm_bio_prison_cell *virt_cell)
{
- int r;
- struct bio *bio = cell->holder;
struct pool *pool = tc->pool;
- struct dm_bio_prison_cell *cell2;
- struct dm_cell_key key2;
- dm_block_t block = get_bio_block(tc, bio);
- struct dm_thin_lookup_result lookup_result;
- struct dm_thin_new_mapping *m;
+ struct dm_thin_new_mapping *m = get_next_mapping(pool);
- if (tc->requeue_mode) {
- cell_requeue(pool, cell);
- return;
- }
+ /*
+ * We don't need to lock the data blocks, since there's no
+ * passdown. We only lock data blocks for allocation and breaking sharing.
+ */
+ m->tc = tc;
+ m->virt_begin = virt_cell->key.block_begin;
+ m->virt_end = virt_cell->key.block_end;
+ m->cell = virt_cell;
+ m->bio = virt_cell->holder;
- r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
- switch (r) {
- case 0:
- /*
- * Check nobody is fiddling with this pool block. This can
- * happen if someone's in the process of breaking sharing
- * on this block.
- */
- build_data_key(tc->td, lookup_result.block, &key2);
- if (bio_detain(tc->pool, &key2, bio, &cell2)) {
- cell_defer_no_holder(tc, cell);
- break;
- }
+ if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
+ pool->process_prepared_discard(m);
+}
- if (io_overlaps_block(pool, bio)) {
- /*
- * IO may still be going to the destination block. We must
- * quiesce before we can do the removal.
- */
- m = get_next_mapping(pool);
- m->tc = tc;
- m->pass_discard = pool->pf.discard_passdown;
- m->definitely_not_shared = !lookup_result.shared;
- m->virt_block = block;
- m->data_block = lookup_result.block;
- m->cell = cell;
- m->cell2 = cell2;
- m->bio = bio;
-
- if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
- pool->process_prepared_discard(m);
+/*
+ * __bio_inc_remaining() is used to defer parent bios's end_io until
+ * we _know_ all chained sub range discard bios have completed.
+ */
+static inline void __bio_inc_remaining(struct bio *bio)
+{
+ bio->bi_flags |= (1 << BIO_CHAIN);
+ smp_mb__before_atomic();
+ atomic_inc(&bio->__bi_remaining);
+}
- } else {
- inc_all_io_entry(pool, bio);
- cell_defer_no_holder(tc, cell);
- cell_defer_no_holder(tc, cell2);
+static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
+ struct bio *bio)
+{
+ struct pool *pool = tc->pool;
+
+ int r;
+ bool maybe_shared;
+ struct dm_cell_key data_key;
+ struct dm_bio_prison_cell *data_cell;
+ struct dm_thin_new_mapping *m;
+ dm_block_t virt_begin, virt_end, data_begin;
+
+ while (begin != end) {
+ r = ensure_next_mapping(pool);
+ if (r)
+ /* we did our best */
+ return;
+ r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
+ &data_begin, &maybe_shared);
+ if (r)
/*
- * The DM core makes sure that the discard doesn't span
- * a block boundary. So we submit the discard of a
- * partial block appropriately.
+ * Silently fail, letting any mappings we've
+ * created complete.
*/
- if ((!lookup_result.shared) && pool->pf.discard_passdown)
- remap_and_issue(tc, bio, lookup_result.block);
- else
- bio_endio(bio, 0);
+ break;
+
+ build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
+ if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
+ /* contention, we'll give up with this range */
+ begin = virt_end;
+ continue;
}
- break;
- case -ENODATA:
/*
- * It isn't provisioned, just forget it.
+ * IO may still be going to the destination block. We must
+ * quiesce before we can do the removal.
*/
- cell_defer_no_holder(tc, cell);
- bio_endio(bio, 0);
- break;
+ m = get_next_mapping(pool);
+ m->tc = tc;
+ m->maybe_shared = maybe_shared;
+ m->virt_begin = virt_begin;
+ m->virt_end = virt_end;
+ m->data_block = data_begin;
+ m->cell = data_cell;
+ m->bio = bio;
- default:
- DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
- __func__, r);
- cell_defer_no_holder(tc, cell);
- bio_io_error(bio);
- break;
+ /*
+ * The parent bio must not complete before sub discard bios are
+ * chained to it (see __blkdev_issue_discard_async's bio_chain)!
+ *
+ * This per-mapping bi_remaining increment is paired with
+ * the implicit decrement that occurs via bio_endio() in
+ * process_prepared_discard_{passdown,no_passdown}.
+ */
+ __bio_inc_remaining(bio);
+ if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
+ pool->process_prepared_discard(m);
+
+ begin = virt_end;
}
}
+static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
+{
+ struct bio *bio = virt_cell->holder;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+
+ /*
+ * The virt_cell will only get freed once the origin bio completes.
+ * This means it will remain locked while all the individual
+ * passdown bios are in flight.
+ */
+ h->cell = virt_cell;
+ break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
+
+ /*
+ * We complete the bio now, knowing that the bi_remaining field
+ * will prevent completion until the sub range discards have
+ * completed.
+ */
+ bio_endio(bio);
+}
+
static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{
- struct dm_bio_prison_cell *cell;
- struct dm_cell_key key;
- dm_block_t block = get_bio_block(tc, bio);
+ dm_block_t begin, end;
+ struct dm_cell_key virt_key;
+ struct dm_bio_prison_cell *virt_cell;
- build_virtual_key(tc->td, block, &key);
- if (bio_detain(tc->pool, &key, bio, &cell))
+ get_bio_block_range(tc, bio, &begin, &end);
+ if (begin == end) {
+ /*
+ * The discard covers less than a block.
+ */
+ bio_endio(bio);
+ return;
+ }
+
+ build_key(tc->td, VIRTUAL, begin, end, &virt_key);
+ if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
+ /*
+ * Potential starvation issue: We're relying on the
+ * fs/application being well behaved, and not trying to
+ * send IO to a region at the same time as discarding it.
+ * If they do this persistently then it's possible this
+ * cell will never be granted.
+ */
return;
- process_discard_cell(tc, cell);
+ tc->pool->process_discard_cell(tc, virt_cell);
}
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
@@ -1517,7 +1735,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
if (bio_data_dir(bio) == READ) {
zero_fill_bio(bio);
cell_defer_no_holder(tc, cell);
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
}
@@ -1582,7 +1800,7 @@ static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
} else {
zero_fill_bio(bio);
- bio_endio(bio, 0);
+ bio_endio(bio);
}
} else
provision_block(tc, bio, block, cell);
@@ -1653,7 +1871,7 @@ static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
}
zero_fill_bio(bio);
- bio_endio(bio, 0);
+ bio_endio(bio);
break;
default:
@@ -1678,7 +1896,7 @@ static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell
static void process_bio_success(struct thin_c *tc, struct bio *bio)
{
- bio_endio(bio, 0);
+ bio_endio(bio);
}
static void process_bio_fail(struct thin_c *tc, struct bio *bio)
@@ -2014,18 +2232,23 @@ static void do_waker(struct work_struct *ws)
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
+static void notify_of_pool_mode_change_to_oods(struct pool *pool);
+
/*
* We're holding onto IO to allow userland time to react. After the
* timeout either the pool will have been resized (and thus back in
- * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
*/
static void do_no_space_timeout(struct work_struct *ws)
{
struct pool *pool = container_of(to_delayed_work(ws), struct pool,
no_space_timeout);
- if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
- set_pool_mode(pool, PM_READ_ONLY);
+ if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
+ pool->pf.error_if_no_space = true;
+ notify_of_pool_mode_change_to_oods(pool);
+ error_retry_list_with_code(pool, -ENOSPC);
+ }
}
/*----------------------------------------------------------------*/
@@ -2103,6 +2326,32 @@ static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
dm_device_name(pool->pool_md), new_mode);
}
+static void notify_of_pool_mode_change_to_oods(struct pool *pool)
+{
+ if (!pool->pf.error_if_no_space)
+ notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
+ else
+ notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
+}
+
+static bool passdown_enabled(struct pool_c *pt)
+{
+ return pt->adjusted_pf.discard_passdown;
+}
+
+static void set_discard_callbacks(struct pool *pool)
+{
+ struct pool_c *pt = pool->ti->private;
+
+ if (passdown_enabled(pt)) {
+ pool->process_discard_cell = process_discard_cell_passdown;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
+ } else {
+ pool->process_discard_cell = process_discard_cell_no_passdown;
+ pool->process_prepared_discard = process_prepared_discard_no_passdown;
+ }
+}
+
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
struct pool_c *pt = pool->ti->private;
@@ -2154,7 +2403,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_cell = process_cell_read_only;
pool->process_discard_cell = process_cell_success;
pool->process_prepared_mapping = process_prepared_mapping_fail;
- pool->process_prepared_discard = process_prepared_discard_passdown;
+ pool->process_prepared_discard = process_prepared_discard_success;
error_retry_list(pool);
break;
@@ -2169,13 +2418,12 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
* frequently seeing this mode.
*/
if (old_mode != new_mode)
- notify_of_pool_mode_change(pool, "out-of-data-space");
+ notify_of_pool_mode_change_to_oods(pool);
pool->process_bio = process_bio_read_only;
pool->process_discard = process_discard_bio;
pool->process_cell = process_cell_read_only;
- pool->process_discard_cell = process_discard_cell;
pool->process_prepared_mapping = process_prepared_mapping;
- pool->process_prepared_discard = process_prepared_discard;
+ set_discard_callbacks(pool);
if (!pool->pf.error_if_no_space && no_space_timeout)
queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@@ -2184,13 +2432,13 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
case PM_WRITE:
if (old_mode != new_mode)
notify_of_pool_mode_change(pool, "write");
+ pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard_bio;
pool->process_cell = process_cell;
- pool->process_discard_cell = process_discard_cell;
pool->process_prepared_mapping = process_prepared_mapping;
- pool->process_prepared_discard = process_prepared_discard;
+ set_discard_callbacks(pool);
break;
}
@@ -2279,6 +2527,7 @@ static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
h->shared_read_entry = NULL;
h->all_io_entry = NULL;
h->overwrite_mapping = NULL;
+ h->cell = NULL;
}
/*
@@ -2297,7 +2546,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
thin_hook_bio(tc, bio);
if (tc->requeue_mode) {
- bio_endio(bio, DM_ENDIO_REQUEUE);
+ bio->bi_error = DM_ENDIO_REQUEUE;
+ bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
@@ -2426,7 +2676,6 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
struct pool *pool = pt->pool;
struct block_device *data_bdev = pt->data_dev->bdev;
struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
- sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
const char *reason = NULL;
char buf[BDEVNAME_SIZE];
@@ -2439,12 +2688,6 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
else if (data_limits->max_discard_sectors < pool->sectors_per_block)
reason = "max discard sectors smaller than a block";
- else if (data_limits->discard_granularity > block_size)
- reason = "discard granularity larger than a block";
-
- else if (!is_factor(block_size, data_limits->discard_granularity))
- reason = "discard granularity not a factor of block size";
-
if (reason) {
DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
pt->adjusted_pf.discard_passdown = false;
@@ -2959,7 +3202,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
metadata_low_callback,
pool);
if (r)
- goto out_free_pt;
+ goto out_flags_changed;
pt->callbacks.congested_fn = pool_is_congested;
dm_table_add_target_callbacks(ti->table, &pt->callbacks);
@@ -3210,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- cancel_delayed_work(&pool->waker);
- cancel_delayed_work(&pool->no_space_timeout);
+ cancel_delayed_work_sync(&pool->waker);
+ cancel_delayed_work_sync(&pool->no_space_timeout);
flush_workqueue(pool->wq);
(void) commit(pool);
}
@@ -3389,7 +3632,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
if (get_pool_mode(pool) >= PM_READ_ONLY) {
DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
dm_device_name(pool->pool_md));
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (!strcasecmp(argv[0], "create_thin"))
@@ -3447,6 +3690,7 @@ static void emit_flags(struct pool_features *pf, char *result,
* Status line is:
* <transaction id> <used metadata sectors>/<total metadata sectors>
* <used data sectors>/<total data sectors> <held metadata root>
+ * <pool mode> <discard config> <no space config> <needs_check>
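+ *
+ * e.g. "0 882/4161600 838/1048576 - rw discard_passdown queue_if_no_space -"
+ * (illustrative values only; the trailing "-" becomes "needs_check" when
+ * dm_pool_metadata_needs_check() reports true)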
*/
static void pool_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -3548,6 +3792,11 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("queue_if_no_space ");
+ if (dm_pool_metadata_needs_check(pool->pmd))
+ DMEMIT("needs_check ");
+ else
+ DMEMIT("- ");
+
break;
case STATUSTYPE_TABLE:
@@ -3573,38 +3822,6 @@ static int pool_iterate_devices(struct dm_target *ti,
return fn(ti, pt->data_dev, 0, ti->len, data);
}
-static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct pool_c *pt = ti->private;
- struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = pt->data_dev->bdev;
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
-static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
-{
- struct pool *pool = pt->pool;
- struct queue_limits *data_limits;
-
- limits->max_discard_sectors = pool->sectors_per_block;
-
- /*
- * discard_granularity is just a hint, and not enforced.
- */
- if (pt->adjusted_pf.discard_passdown) {
- data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
- limits->discard_granularity = max(data_limits->discard_granularity,
- pool->sectors_per_block << SECTOR_SHIFT);
- } else
- limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-}
-
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct pool_c *pt = ti->private;
@@ -3659,14 +3876,17 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
disable_passdown_if_not_supported(pt);
- set_discard_limits(pt, limits);
+ /*
+ * The pool uses the same discard limits as the underlying data
+ * device. DM core has already set this up.
+ */
}
static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 14, 0},
+ .version = {1, 16, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -3678,7 +3898,6 @@ static struct target_type pool_target = {
.resume = pool_resume,
.message = pool_message,
.status = pool_status,
- .merge = pool_merge,
.iterate_devices = pool_iterate_devices,
.io_hints = pool_io_hints,
};
@@ -3825,8 +4044,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (tc->pool->pf.discard_enabled) {
ti->discards_supported = true;
ti->num_discard_bios = 1;
- /* Discard bios must be split on a block boundary */
- ti->split_discard_bios = true;
+ ti->split_discard_bios = false;
}
mutex_unlock(&dm_thin_pool_table.mutex);
@@ -3913,6 +4131,9 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
}
}
+ if (h->cell)
+ cell_defer_no_holder(h->tc, h->cell);
+
return 0;
}
@@ -4003,21 +4224,6 @@ err:
DMEMIT("Error");
}
-static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct thin_c *tc = ti->private;
- struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = tc->pool_dev->bdev;
- bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int thin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -4040,9 +4246,21 @@ static int thin_iterate_devices(struct dm_target *ti,
return 0;
}
+static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct thin_c *tc = ti->private;
+ struct pool *pool = tc->pool;
+
+ if (!pool->pf.discard_enabled)
+ return;
+
+ limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
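+	/* 2048 * 1024 * 16 = 33554432 sectors, i.e. 16 GiB with 512-byte sectors */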
+ limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
+}
+
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 14, 0},
+ .version = {1, 16, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
@@ -4052,8 +4270,8 @@ static struct target_type thin_target = {
.presuspend = thin_presuspend,
.postsuspend = thin_postsuspend,
.status = thin_status,
- .merge = thin_merge,
.iterate_devices = thin_iterate_devices,
+ .io_hints = thin_io_hints,
};
/*----------------------------------------------------------------*/
diff --git a/kernel/drivers/md/dm-verity.c b/kernel/drivers/md/dm-verity.c
index 66616db33..ccf41886e 100644
--- a/kernel/drivers/md/dm-verity.c
+++ b/kernel/drivers/md/dm-verity.c
@@ -26,8 +26,6 @@
#define DM_VERITY_ENV_LENGTH 42
#define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
-#define DM_VERITY_IO_VEC_INLINE 16
-#define DM_VERITY_MEMPOOL_SIZE 4
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
#define DM_VERITY_MAX_LEVELS 63
@@ -76,8 +74,6 @@ struct dm_verity {
enum verity_mode mode; /* mode for handling verification errors */
unsigned corrupted_errs;/* Number of errors for corrupted blocks */
- mempool_t *vec_mempool; /* mempool of bio vector */
-
struct workqueue_struct *verify_wq;
/* starting blocks for each tree level. 0 is the lowest level. */
@@ -271,7 +267,7 @@ static int verity_verify_level(struct dm_verity_io *io, sector_t block,
verity_hash_at_level(v, block, level, &hash_block, &offset);
data = dm_bufio_read(v->bufio, hash_block, &buf);
- if (unlikely(IS_ERR(data)))
+ if (IS_ERR(data))
return PTR_ERR(data);
aux = dm_bufio_get_aux_data(buf);
@@ -458,8 +454,9 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
+ bio->bi_error = error;
- bio_endio_nodec(bio, error);
+ bio_endio(bio);
}
static void verity_work(struct work_struct *w)
@@ -469,12 +466,12 @@ static void verity_work(struct work_struct *w)
verity_finish_io(io, verity_verify_io(io));
}
-static void verity_end_io(struct bio *bio, int error)
+static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
- if (error) {
- verity_finish_io(io, error);
+ if (bio->bi_error) {
+ verity_finish_io(io, bio->bi_error);
return;
}
@@ -634,33 +631,17 @@ static void verity_status(struct dm_target *ti, status_type_t type,
}
}
-static int verity_ioctl(struct dm_target *ti, unsigned cmd,
- unsigned long arg)
+static int verity_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
{
struct dm_verity *v = ti->private;
- int r = 0;
+
+ *bdev = v->data_dev->bdev;
if (v->data_start ||
ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
-
- return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode,
- cmd, arg);
-}
-
-static int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct dm_verity *v = ti->private;
- struct request_queue *q = bdev_get_queue(v->data_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = v->data_dev->bdev;
- bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+ return 1;
+ return 0;
}
static int verity_iterate_devices(struct dm_target *ti,
@@ -691,9 +672,6 @@ static void verity_dtr(struct dm_target *ti)
if (v->verify_wq)
destroy_workqueue(v->verify_wq);
- if (v->vec_mempool)
- mempool_destroy(v->vec_mempool);
-
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
@@ -962,14 +940,6 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
- v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
- BIO_MAX_PAGES * sizeof(struct bio_vec));
- if (!v->vec_mempool) {
- ti->error = "Cannot allocate vector mempool";
- r = -ENOMEM;
- goto bad;
- }
-
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
if (!v->verify_wq) {
@@ -994,8 +964,7 @@ static struct target_type verity_target = {
.dtr = verity_dtr,
.map = verity_map,
.status = verity_status,
- .ioctl = verity_ioctl,
- .merge = verity_merge,
+ .prepare_ioctl = verity_prepare_ioctl,
.iterate_devices = verity_iterate_devices,
.io_hints = verity_io_hints,
};
diff --git a/kernel/drivers/md/dm-zero.c b/kernel/drivers/md/dm-zero.c
index b9a64bbce..766bc9300 100644
--- a/kernel/drivers/md/dm-zero.c
+++ b/kernel/drivers/md/dm-zero.c
@@ -47,7 +47,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
break;
}
- bio_endio(bio, 0);
+ bio_endio(bio);
/* accepted bio, don't make new request */
return DM_MAPIO_SUBMITTED;
diff --git a/kernel/drivers/md/dm.c b/kernel/drivers/md/dm.c
index 98347e2cf..4745d2f13 100644
--- a/kernel/drivers/md/dm.c
+++ b/kernel/drivers/md/dm.c
@@ -24,6 +24,7 @@
#include <linux/ktime.h>
#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
+#include <linux/pr.h>
#include <trace/events/block.h>
@@ -86,6 +87,9 @@ struct dm_rq_target_io {
struct kthread_work work;
int error;
union map_info info;
+ struct dm_stats_aux stats_aux;
+ unsigned long duration_jiffies;
+ unsigned n_sectors;
};
/*
@@ -121,9 +125,8 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
-#define DMF_MERGE_IS_OPTIONAL 6
-#define DMF_DEFERRED_REMOVE 7
-#define DMF_SUSPENDED_INTERNALLY 8
+#define DMF_DEFERRED_REMOVE 6
+#define DMF_SUSPENDED_INTERNALLY 7
/*
* A dummy definition to make RCU happy.
@@ -553,18 +556,16 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
-static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+static int dm_get_live_table_for_ioctl(struct mapped_device *md,
+ struct dm_target **tgt, struct block_device **bdev,
+ fmode_t *mode, int *srcu_idx)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
- int srcu_idx;
struct dm_table *map;
- struct dm_target *tgt;
- int r = -ENOTTY;
+ int r;
retry:
- map = dm_get_live_table(md, &srcu_idx);
-
+ r = -ENOTTY;
+ map = dm_get_live_table(md, srcu_idx);
if (!map || !dm_table_get_size(map))
goto out;
@@ -572,8 +573,9 @@ retry:
if (dm_table_get_num_targets(map) != 1)
goto out;
- tgt = dm_table_get_target(map, 0);
- if (!tgt->type->ioctl)
+ *tgt = dm_table_get_target(map, 0);
+
+ if (!(*tgt)->type->prepare_ioctl)
goto out;
if (dm_suspended_md(md)) {
@@ -581,16 +583,47 @@ retry:
goto out;
}
- r = tgt->type->ioctl(tgt, cmd, arg);
+ r = (*tgt)->type->prepare_ioctl(*tgt, bdev, mode);
+ if (r < 0)
+ goto out;
-out:
- dm_put_live_table(md, srcu_idx);
+ return r;
- if (r == -ENOTCONN) {
+out:
+ dm_put_live_table(md, *srcu_idx);
+ if (r == -ENOTCONN && !fatal_signal_pending(current)) {
msleep(10);
goto retry;
}
+ return r;
+}
+
+static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ struct dm_target *tgt;
+ struct block_device *tgt_bdev = NULL;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ /*
+ * Target determined this ioctl is being issued against
+			 * a logical partition of the parent bdev, so extra
+ * validation is needed.
+ */
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+ if (r)
+ goto out;
+ }
+
+ r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg);
+out:
+ dm_put_live_table(md, srcu_idx);
return r;
}
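For orientation only, the prepare-then-forward split used above can be modelled in plain user-space C. Every name in this sketch (fake_target, fake_prepare_ioctl, fake_blk_ioctl) is invented for illustration and is not a kernel or device-mapper API; the point is simply that the target only resolves which device the command should go to, and the caller forwards it.

    #include <stdio.h>

    struct fake_bdev {
    	const char *name;
    };

    struct fake_target {
    	struct fake_bdev *data_dev;
    	/* return 0 to forward as-is, >0 if extra validation is required */
    	int (*prepare_ioctl)(struct fake_target *t, struct fake_bdev **bdev);
    };

    static int fake_prepare_ioctl(struct fake_target *t, struct fake_bdev **bdev)
    {
    	*bdev = t->data_dev;	/* only resolve the device; issue nothing here */
    	return 0;
    }

    static int fake_blk_ioctl(struct fake_target *t, unsigned int cmd)
    {
    	struct fake_bdev *bdev;
    	int r = t->prepare_ioctl(t, &bdev);

    	if (r < 0)
    		return r;
    	if (r > 0)
    		printf("cmd %u needs extra validation before forwarding\n", cmd);
    	printf("forwarding cmd %u to %s\n", cmd, bdev->name);
    	return 0;
    }

    int main(void)
    {
    	struct fake_bdev data = { "data-device" };
    	struct fake_target tgt = { &data, fake_prepare_ioctl };

    	return fake_blk_ioctl(&tgt, 42);
    }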
@@ -941,7 +974,8 @@ static void dec_pending(struct dm_io *io, int error)
} else {
/* done with normal IO or empty flush */
trace_block_bio_complete(md->queue, bio, io_error);
- bio_endio(bio, io_error);
+ bio->bi_error = io_error;
+ bio_endio(bio);
}
}
}
@@ -954,17 +988,15 @@ static void disable_write_same(struct mapped_device *md)
limits->max_write_same_sectors = 0;
}
-static void clone_endio(struct bio *bio, int error)
+static void clone_endio(struct bio *bio)
{
+ int error = bio->bi_error;
int r = error;
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (!bio_flagged(bio, BIO_UPTODATE) && !error)
- error = -EIO;
-
if (endio) {
r = endio(tio->ti, bio, error);
if (r < 0 || r == DM_ENDIO_REQUEUE)
@@ -993,13 +1025,14 @@ static void clone_endio(struct bio *bio, int error)
/*
* Partial completion handling for request-based dm
*/
-static void end_clone_bio(struct bio *clone, int error)
+static void end_clone_bio(struct bio *clone)
{
struct dm_rq_clone_bio_info *info =
container_of(clone, struct dm_rq_clone_bio_info, clone);
struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig;
unsigned int nr_bytes = info->orig->bi_iter.bi_size;
+ int error = clone->bi_error;
bio_put(clone);
@@ -1046,6 +1079,17 @@ static struct dm_rq_target_io *tio_from_request(struct request *rq)
return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}
+static void rq_end_stats(struct mapped_device *md, struct request *orig)
+{
+ if (unlikely(dm_stats_used(&md->stats))) {
+ struct dm_rq_target_io *tio = tio_from_request(orig);
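+		/*
+		 * duration_jiffies held the start timestamp recorded by
+		 * dm_start_request(); turn it into the elapsed duration.
+		 */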
+ tio->duration_jiffies = jiffies - tio->duration_jiffies;
+ dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
+ tio->n_sectors, true, tio->duration_jiffies,
+ &tio->stats_aux);
+ }
+}
+
/*
* Don't touch any member of the md after calling this function because
* the md may be freed in dm_put() at the end of this function.
@@ -1127,6 +1171,7 @@ static void dm_end_request(struct request *clone, int error)
}
free_rq_clone(clone);
+ rq_end_stats(md, rq);
if (!rq->q->mq_ops)
blk_end_request_all(rq, error);
else
@@ -1146,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
if (clone)
free_rq_clone(clone);
+ else if (!tio->md->queue->mq_ops)
+ free_rq_tio(tio);
}
/*
@@ -1162,13 +1209,14 @@ static void old_requeue_request(struct request *rq)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void dm_requeue_unmapped_original_request(struct mapped_device *md,
- struct request *rq)
+static void dm_requeue_original_request(struct mapped_device *md,
+ struct request *rq)
{
int rw = rq_data_dir(rq);
dm_unprep_request(rq);
+ rq_end_stats(md, rq);
if (!rq->q->mq_ops)
old_requeue_request(rq);
else {
@@ -1179,13 +1227,6 @@ static void dm_requeue_unmapped_original_request(struct mapped_device *md,
rq_completed(md, rw, false);
}
-static void dm_requeue_unmapped_request(struct request *clone)
-{
- struct dm_rq_target_io *tio = clone->end_io_data;
-
- dm_requeue_unmapped_original_request(tio->md, tio->orig);
-}
-
static void old_stop_queue(struct request_queue *q)
{
unsigned long flags;
@@ -1249,7 +1290,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
return;
else if (r == DM_ENDIO_REQUEUE)
/* The target wants to requeue the I/O */
- dm_requeue_unmapped_request(clone);
+ dm_requeue_original_request(tio->md, tio->orig);
else {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -1267,6 +1308,7 @@ static void dm_softirq_done(struct request *rq)
int rw;
if (!clone) {
+ rq_end_stats(tio->md, rq);
rw = rq_data_dir(rq);
if (!rq->q->mq_ops) {
blk_end_request_all(rq, tio->error);
@@ -1456,7 +1498,7 @@ static void __map_bio(struct dm_target_io *tio)
md = tio->io->md;
dec_pending(tio->io, r);
free_tio(md, tio);
- } else if (r) {
+ } else if (r != DM_MAPIO_SUBMITTED) {
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
@@ -1712,65 +1754,11 @@ static void __split_and_process_bio(struct mapped_device *md,
* CRUD END
*---------------------------------------------------------------*/
-static int dm_merge_bvec(struct request_queue *q,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_live_table_fast(md);
- struct dm_target *ti;
- sector_t max_sectors;
- int max_size = 0;
-
- if (unlikely(!map))
- goto out;
-
- ti = dm_table_find_target(map, bvm->bi_sector);
- if (!dm_target_is_valid(ti))
- goto out;
-
- /*
- * Find maximum amount of I/O that won't need splitting
- */
- max_sectors = min(max_io_len(bvm->bi_sector, ti),
- (sector_t) BIO_MAX_SECTORS);
- max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
- if (max_size < 0)
- max_size = 0;
-
- /*
- * merge_bvec_fn() returns number of bytes
- * it can accept at this offset
- * max is precomputed maximal io size
- */
- if (max_size && ti->type->merge)
- max_size = ti->type->merge(ti, bvm, biovec, max_size);
- /*
- * If the target doesn't support merge method and some of the devices
- * provided their merge_bvec method (we know this by looking at
- * queue_max_hw_sectors), then we can't allow bios with multiple vector
- * entries. So always set max_size to 0, and the code below allows
- * just one page.
- */
- else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
- max_size = 0;
-
-out:
- dm_put_live_table_fast(md);
- /*
- * Always allow an entire first page
- */
- if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
- max_size = biovec->bv_len;
-
- return max_size;
-}
-
/*
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static void dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
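+	/*
+	 * Bio-based DM has no blk polling cookie to return, so both exit
+	 * paths below hand back BLK_QC_T_NONE.
+	 */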
@@ -1789,12 +1777,12 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)
queue_io(md, bio);
else
bio_io_error(bio);
- return;
+ return BLK_QC_T_NONE;
}
__split_and_process_bio(md, map, bio);
dm_put_live_table(md, srcu_idx);
- return;
+ return BLK_QC_T_NONE;
}
int dm_request_based(struct mapped_device *md)
@@ -1987,7 +1975,7 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
- dm_requeue_unmapped_request(clone);
+ dm_requeue_original_request(md, tio->orig);
break;
default:
if (r > 0) {
@@ -2010,7 +1998,7 @@ static void map_tio_request(struct kthread_work *work)
struct mapped_device *md = tio->md;
if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
- dm_requeue_unmapped_original_request(md, rq);
+ dm_requeue_original_request(md, rq);
}
static void dm_start_request(struct mapped_device *md, struct request *orig)
@@ -2027,6 +2015,14 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
md->last_rq_start_time = ktime_get();
}
+ if (unlikely(dm_stats_used(&md->stats))) {
+ struct dm_rq_target_io *tio = tio_from_request(orig);
+ tio->duration_jiffies = jiffies;
+ tio->n_sectors = blk_rq_sectors(orig);
+ dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
+ tio->n_sectors, false, 0, &tio->stats_aux);
+ }
+
/*
* Hold the md reference here for the in-flight I/O.
* We can't rely on the reference count by device opener,
@@ -2157,7 +2153,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
* the query about congestion status of request_queue
*/
if (dm_request_based(md))
- r = md->queue->backing_dev_info.state &
+ r = md->queue->backing_dev_info.wb.state &
bdi_bits;
else
r = dm_table_any_congested(map, bdi_bits);
@@ -2233,6 +2229,13 @@ static void dm_init_md_queue(struct mapped_device *md)
* This queue is new, so no concurrency on the queue_flags.
*/
queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+ /*
+ * Initialize data that will only be used by a non-blk-mq DM queue
+ * - must do so here (in alloc_dev callchain) before queue is used
+ */
+ md->queue->queuedata = md;
+ md->queue->backing_dev_info.congested_data = md;
}
static void dm_init_old_md_queue(struct mapped_device *md)
@@ -2243,13 +2246,40 @@ static void dm_init_old_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
- md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
- md->queue->backing_dev_info.congested_data = md;
-
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
+static void cleanup_mapped_device(struct mapped_device *md)
+{
+ if (md->wq)
+ destroy_workqueue(md->wq);
+ if (md->kworker_task)
+ kthread_stop(md->kworker_task);
+ mempool_destroy(md->io_pool);
+ mempool_destroy(md->rq_pool);
+ if (md->bs)
+ bioset_free(md->bs);
+
+ cleanup_srcu_struct(&md->io_barrier);
+
+ if (md->disk) {
+ spin_lock(&_minor_lock);
+ md->disk->private_data = NULL;
+ spin_unlock(&_minor_lock);
+ del_gendisk(md->disk);
+ put_disk(md->disk);
+ }
+
+ if (md->queue)
+ blk_cleanup_queue(md->queue);
+
+ if (md->bdev) {
+ bdput(md->bdev);
+ md->bdev = NULL;
+ }
+}
+
/*
* Allocate and initialise a blank device with a given minor.
*/
@@ -2295,13 +2325,13 @@ static struct mapped_device *alloc_dev(int minor)
md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
- goto bad_queue;
+ goto bad;
dm_init_md_queue(md);
md->disk = alloc_disk(1);
if (!md->disk)
- goto bad_disk;
+ goto bad;
atomic_set(&md->pending[0], 0);
atomic_set(&md->pending[1], 0);
@@ -2322,11 +2352,11 @@ static struct mapped_device *alloc_dev(int minor)
md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
if (!md->wq)
- goto bad_thread;
+ goto bad;
md->bdev = bdget_disk(md->disk, 0);
if (!md->bdev)
- goto bad_bdev;
+ goto bad;
bio_init(&md->flush_bio);
md->flush_bio.bi_bdev = md->bdev;
@@ -2343,15 +2373,8 @@ static struct mapped_device *alloc_dev(int minor)
return md;
-bad_bdev:
- destroy_workqueue(md->wq);
-bad_thread:
- del_gendisk(md->disk);
- put_disk(md->disk);
-bad_disk:
- blk_cleanup_queue(md->queue);
-bad_queue:
- cleanup_srcu_struct(&md->io_barrier);
+bad:
+ cleanup_mapped_device(md);
bad_io_barrier:
free_minor(minor);
bad_minor:
@@ -2368,32 +2391,13 @@ static void free_dev(struct mapped_device *md)
int minor = MINOR(disk_devt(md->disk));
unlock_fs(md);
- destroy_workqueue(md->wq);
- if (md->kworker_task)
- kthread_stop(md->kworker_task);
- if (md->io_pool)
- mempool_destroy(md->io_pool);
- if (md->rq_pool)
- mempool_destroy(md->rq_pool);
- if (md->bs)
- bioset_free(md->bs);
+ cleanup_mapped_device(md);
+ if (md->use_blk_mq)
+ blk_mq_free_tag_set(&md->tag_set);
- cleanup_srcu_struct(&md->io_barrier);
free_table_devices(&md->table_devices);
dm_stats_cleanup(&md->stats);
-
- spin_lock(&_minor_lock);
- md->disk->private_data = NULL;
- spin_unlock(&_minor_lock);
- if (blk_get_integrity(md->disk))
- blk_integrity_unregister(md->disk);
- del_gendisk(md->disk);
- put_disk(md->disk);
- blk_cleanup_queue(md->queue);
- if (md->use_blk_mq)
- blk_mq_free_tag_set(&md->tag_set);
- bdput(md->bdev);
free_minor(minor);
module_put(THIS_MODULE);
@@ -2470,59 +2474,6 @@ static void __set_size(struct mapped_device *md, sector_t size)
}
/*
- * Return 1 if the queue has a compulsory merge_bvec_fn function.
- *
- * If this function returns 0, then the device is either a non-dm
- * device without a merge_bvec_fn, or it is a dm device that is
- * able to split any bios it receives that are too big.
- */
-int dm_queue_merge_is_compulsory(struct request_queue *q)
-{
- struct mapped_device *dev_md;
-
- if (!q->merge_bvec_fn)
- return 0;
-
- if (q->make_request_fn == dm_make_request) {
- dev_md = q->queuedata;
- if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
- return 0;
- }
-
- return 1;
-}
-
-static int dm_device_merge_is_compulsory(struct dm_target *ti,
- struct dm_dev *dev, sector_t start,
- sector_t len, void *data)
-{
- struct block_device *bdev = dev->bdev;
- struct request_queue *q = bdev_get_queue(bdev);
-
- return dm_queue_merge_is_compulsory(q);
-}
-
-/*
- * Return 1 if it is acceptable to ignore merge_bvec_fn based
- * on the properties of the underlying devices.
- */
-static int dm_table_merge_is_optional(struct dm_table *table)
-{
- unsigned i = 0;
- struct dm_target *ti;
-
- while (i < dm_table_get_num_targets(table)) {
- ti = dm_table_get_target(table, i++);
-
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
- return 0;
- }
-
- return 1;
-}
-
-/*
* Returns old map, which caller must destroy.
*/
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -2531,7 +2482,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct dm_table *old_map;
struct request_queue *q = md->queue;
sector_t size;
- int merge_is_optional;
size = dm_table_get_size(t);
@@ -2557,17 +2507,11 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
__bind_mempools(md, t);
- merge_is_optional = dm_table_merge_is_optional(t);
-
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
rcu_assign_pointer(md->map, t);
md->immutable_target_type = dm_table_get_immutable_target_type(t);
dm_table_set_restrictions(t, q, limits);
- if (merge_is_optional)
- set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
- else
- clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
if (old_map)
dm_sync_table(md);
@@ -2754,6 +2698,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
/* Direct call is fine since .queue_rq allows allocations */
if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
/* Undo dm_start_request() before requeuing */
+ rq_end_stats(md, rq);
rq_completed(md, rq_data_dir(rq), false);
return BLK_MQ_RQ_QUEUE_BUSY;
}
@@ -2847,7 +2792,12 @@ int dm_setup_md_queue(struct mapped_device *md)
case DM_TYPE_BIO_BASED:
dm_init_old_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
- blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+ /*
+ * DM handles splitting bios as needed. Free the bio_split bioset
+ * since it won't be used (saves 1 process per bio-based DM device).
+ */
+ bioset_free(md->queue->bio_split);
+ md->queue->bio_split = NULL;
break;
}
@@ -2925,8 +2875,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
might_sleep();
- map = dm_get_live_table(md, &srcu_idx);
-
spin_lock(&_minor_lock);
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
@@ -2940,14 +2888,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
* do not race with internal suspend.
*/
mutex_lock(&md->suspend_lock);
+ map = dm_get_live_table(md, &srcu_idx);
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
}
- mutex_unlock(&md->suspend_lock);
-
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
dm_put_live_table(md, srcu_idx);
+ mutex_unlock(&md->suspend_lock);
/*
* Rare, but there may be I/O requests still going to complete,
@@ -3596,11 +3544,8 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (!pools)
return;
- if (pools->io_pool)
- mempool_destroy(pools->io_pool);
-
- if (pools->rq_pool)
- mempool_destroy(pools->rq_pool);
+ mempool_destroy(pools->io_pool);
+ mempool_destroy(pools->rq_pool);
if (pools->bs)
bioset_free(pools->bs);
@@ -3608,11 +3553,133 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
kfree(pools);
}
+static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+ u32 flags)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_register)
+ r = ops->pr_register(bdev, old_key, new_key, flags);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
+ u32 flags)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_reserve)
+ r = ops->pr_reserve(bdev, key, type, flags);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_release)
+ r = ops->pr_release(bdev, key, type);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
+ enum pr_type type, bool abort)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_preempt)
+ r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static int dm_pr_clear(struct block_device *bdev, u64 key)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ const struct pr_ops *ops;
+ struct dm_target *tgt;
+ fmode_t mode;
+ int srcu_idx, r;
+
+ r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ if (r < 0)
+ return r;
+
+ ops = bdev->bd_disk->fops->pr_ops;
+ if (ops && ops->pr_clear)
+ r = ops->pr_clear(bdev, key);
+ else
+ r = -EOPNOTSUPP;
+
+ dm_put_live_table(md, srcu_idx);
+ return r;
+}
+
+static const struct pr_ops dm_pr_ops = {
+ .pr_register = dm_pr_register,
+ .pr_reserve = dm_pr_reserve,
+ .pr_release = dm_pr_release,
+ .pr_preempt = dm_pr_preempt,
+ .pr_clear = dm_pr_clear,
+};
+
static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
+ .pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
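As a usage sketch only: with dm_pr_ops wired into dm_blk_dops, a persistent reservation can be driven from user space through the generic block-layer PR ioctls in <linux/pr.h> (added in the same kernel release). The device node and key below are placeholders and error handling is minimal.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/pr.h>

    int main(void)
    {
    	/* placeholder device node and key values */
    	struct pr_registration reg = { .old_key = 0, .new_key = 0x1234, .flags = 0 };
    	int fd = open("/dev/dm-0", O_RDWR);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	/* ends up in dm_pr_register() above, which forwards to the table's single target */
    	if (ioctl(fd, IOC_PR_REGISTER, &reg) < 0)
    		perror("IOC_PR_REGISTER");
    	return 0;
    }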
diff --git a/kernel/drivers/md/dm.h b/kernel/drivers/md/dm.h
index 6123c2bf9..7edcf97df 100644
--- a/kernel/drivers/md/dm.h
+++ b/kernel/drivers/md/dm.h
@@ -14,6 +14,7 @@
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/blkdev.h>
+#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>
@@ -77,8 +78,6 @@ bool dm_table_mq_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
-int dm_queue_merge_is_compulsory(struct request_queue *q);
-
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
diff --git a/kernel/drivers/md/faulty.c b/kernel/drivers/md/faulty.c
index 1277eb26b..4a8e15058 100644
--- a/kernel/drivers/md/faulty.c
+++ b/kernel/drivers/md/faulty.c
@@ -70,7 +70,7 @@
#include <linux/seq_file.h>
-static void faulty_fail(struct bio *bio, int error)
+static void faulty_fail(struct bio *bio)
{
struct bio *b = bio->bi_private;
@@ -181,7 +181,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
/* special case - don't decrement, don't generic_make_request,
* just fail immediately
*/
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
return;
}
diff --git a/kernel/drivers/md/linear.c b/kernel/drivers/md/linear.c
index fa7d577f3..b7fe7e9fc 100644
--- a/kernel/drivers/md/linear.c
+++ b/kernel/drivers/md/linear.c
@@ -52,48 +52,6 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
return conf->disks + lo;
}
-/**
- * linear_mergeable_bvec -- tell bio layer if two requests can be merged
- * @q: request queue
- * @bvm: properties of new bio
- * @biovec: the request that could be merged to it.
- *
- * Return amount of bytes we can take at this offset
- */
-static int linear_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct dev_info *dev0;
- unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- int maxbytes = biovec->bv_len;
- struct request_queue *subq;
-
- dev0 = which_dev(mddev, sector);
- maxsectors = dev0->end_sector - sector;
- subq = bdev_get_queue(dev0->rdev->bdev);
- if (subq->merge_bvec_fn) {
- bvm->bi_bdev = dev0->rdev->bdev;
- bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors;
- maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
- biovec));
- }
-
- if (maxsectors < bio_sectors)
- maxsectors = 0;
- else
- maxsectors -= bio_sectors;
-
- if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0)
- return maxbytes;
-
- if (maxsectors > (maxbytes >> 9))
- return maxbytes;
- else
- return maxsectors << 9;
-}
-
static int linear_congested(struct mddev *mddev, int bits)
{
struct linear_conf *conf;
@@ -297,7 +255,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely((split->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
- bio_endio(split, 0);
+ bio_endio(split);
} else
generic_make_request(split);
} while (split != bio);
@@ -338,7 +296,6 @@ static struct md_personality linear_personality =
.size = linear_size,
.quiesce = linear_quiesce,
.congested = linear_congested,
- .mergeable_bvec = linear_mergeable_bvec,
};
static int __init linear_init (void)
diff --git a/kernel/drivers/md/md-cluster.c b/kernel/drivers/md/md-cluster.c
index fcfc4b9b2..d6a1126d8 100644
--- a/kernel/drivers/md/md-cluster.c
+++ b/kernel/drivers/md/md-cluster.c
@@ -28,6 +28,7 @@ struct dlm_lock_resource {
struct completion completion; /* completion for synchronized locking */
void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
struct mddev *mddev; /* pointing back to mddev. */
+ int mode;
};
struct suspend_info {
@@ -44,6 +45,8 @@ struct resync_info {
/* md_cluster_info flags */
#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
+#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
+#define MD_CLUSTER_BEGIN_JOIN_CLUSTER 3
struct md_cluster_info {
@@ -51,9 +54,8 @@ struct md_cluster_info {
dlm_lockspace_t *lockspace;
int slot_number;
struct completion completion;
- struct dlm_lock_resource *sb_lock;
- struct mutex sb_mutex;
struct dlm_lock_resource *bitmap_lockres;
+ struct dlm_lock_resource *resync_lockres;
struct list_head suspend_list;
spinlock_t suspend_lock;
struct md_thread *recovery_thread;
@@ -74,23 +76,24 @@ enum msg_type {
NEWDISK,
REMOVE,
RE_ADD,
+ BITMAP_NEEDS_SYNC,
};
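+/*
+ * cluster_msg is exchanged between nodes through the message lock's LVB
+ * (see __sendmsg() and process_recvd_msg()), so its fields use fixed
+ * little-endian types converted with the cpu_to_le/le_to_cpu helpers.
+ */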
struct cluster_msg {
- int type;
- int slot;
+ __le32 type;
+ __le32 slot;
/* TODO: Unionize this for smaller footprint */
- sector_t low;
- sector_t high;
+ __le64 low;
+ __le64 high;
char uuid[16];
- int raid_slot;
+ __le32 raid_slot;
};
static void sync_ast(void *arg)
{
struct dlm_lock_resource *res;
- res = (struct dlm_lock_resource *) arg;
+ res = arg;
complete(&res->completion);
}
@@ -98,13 +101,14 @@ static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
{
int ret = 0;
- init_completion(&res->completion);
ret = dlm_lock(res->ls, mode, &res->lksb,
res->flags, res->name, strlen(res->name),
0, sync_ast, res, res->bast);
if (ret)
return ret;
wait_for_completion(&res->completion);
+ if (res->lksb.sb_status == 0)
+ res->mode = mode;
return res->lksb.sb_status;
}
@@ -123,8 +127,10 @@ static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
if (!res)
return NULL;
+ init_completion(&res->completion);
res->ls = cinfo->lockspace;
res->mddev = mddev;
+ res->mode = DLM_LOCK_IV;
namelen = strlen(name);
res->name = kzalloc(namelen + 1, GFP_KERNEL);
if (!res->name) {
@@ -164,11 +170,24 @@ out_err:
static void lockres_free(struct dlm_lock_resource *res)
{
+ int ret;
+
if (!res)
return;
- init_completion(&res->completion);
- dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
+ /* cancel a lock request or a conversion request that is blocked */
+ res->flags |= DLM_LKF_CANCEL;
+retry:
+ ret = dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
+ if (unlikely(ret != 0)) {
+ pr_info("%s: failed to unlock %s return %d\n", __func__, res->name, ret);
+
+		/* if a lock conversion is cancelled, the lock is put back
+		 * on the grant queue, so we need to ensure it is unlocked */
+ if (ret == -DLM_ECANCEL)
+ goto retry;
+ }
+ res->flags &= ~DLM_LKF_CANCEL;
wait_for_completion(&res->completion);
kfree(res->name);
@@ -176,20 +195,8 @@ static void lockres_free(struct dlm_lock_resource *res)
kfree(res);
}
-static char *pretty_uuid(char *dest, char *src)
-{
- int i, len = 0;
-
- for (i = 0; i < 16; i++) {
- if (i == 4 || i == 6 || i == 8 || i == 10)
- len += sprintf(dest + len, "-");
- len += sprintf(dest + len, "%02x", (__u8)src[i]);
- }
- return dest;
-}
-
-static void add_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres,
- sector_t lo, sector_t hi)
+static void add_resync_info(struct dlm_lock_resource *lockres,
+ sector_t lo, sector_t hi)
{
struct resync_info *ri;
@@ -207,7 +214,7 @@ static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_loc
dlm_lock_sync(lockres, DLM_LOCK_CR);
memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
hi = le64_to_cpu(ri.hi);
- if (ri.hi > 0) {
+ if (hi > 0) {
s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
if (!s)
goto out;
@@ -275,18 +282,16 @@ clear_bit:
static void recover_prep(void *arg)
{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
-static void recover_slot(void *arg, struct dlm_slot *slot)
+static void __recover_slot(struct mddev *mddev, int slot)
{
- struct mddev *mddev = arg;
struct md_cluster_info *cinfo = mddev->cluster_info;
- pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
- mddev->bitmap_info.cluster_name,
- slot->nodeid, slot->slot,
- cinfo->slot_number);
- set_bit(slot->slot - 1, &cinfo->recovery_map);
+ set_bit(slot, &cinfo->recovery_map);
if (!cinfo->recovery_thread) {
cinfo->recovery_thread = md_register_thread(recover_bitmaps,
mddev, "recover");
@@ -298,6 +303,20 @@ static void recover_slot(void *arg, struct dlm_slot *slot)
md_wakeup_thread(cinfo->recovery_thread);
}
+static void recover_slot(void *arg, struct dlm_slot *slot)
+{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
+ mddev->bitmap_info.cluster_name,
+ slot->nodeid, slot->slot,
+	/* deduct one, since dlm slot numbers start at one while
+	 * cluster-md slot numbers begin at zero */
+ * cluster-md begins with 0 */
+ __recover_slot(mddev, slot->slot - 1);
+}
+
static void recover_done(void *arg, struct dlm_slot *slots,
int num_slots, int our_slot,
uint32_t generation)
@@ -306,9 +325,17 @@ static void recover_done(void *arg, struct dlm_slot *slots,
struct md_cluster_info *cinfo = mddev->cluster_info;
cinfo->slot_number = our_slot;
- complete(&cinfo->completion);
+	/* the completion only needs to be completed when a node joins the
+	 * cluster; it doesn't need to run during another node's failure */
+ if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) {
+ complete(&cinfo->completion);
+ clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
+ }
+ clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
+/* these ops are called when a node joins the cluster, and they perform
+ * lock recovery if a node failure occurs */
static const struct dlm_lockspace_ops md_ls_ops = {
.recover_prep = recover_prep,
.recover_slot = recover_slot,
@@ -322,7 +349,7 @@ static const struct dlm_lockspace_ops md_ls_ops = {
*/
static void ack_bast(void *arg, int mode)
{
- struct dlm_lock_resource *res = (struct dlm_lock_resource *)arg;
+ struct dlm_lock_resource *res = arg;
struct md_cluster_info *cinfo = res->mddev->cluster_info;
if (mode == DLM_LOCK_EX)
@@ -335,29 +362,32 @@ static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
if (slot == s->slot) {
- pr_info("%s:%d Deleting suspend_info: %d\n",
- __func__, __LINE__, slot);
list_del(&s->list);
kfree(s);
break;
}
}
-static void remove_suspend_info(struct md_cluster_info *cinfo, int slot)
+static void remove_suspend_info(struct mddev *mddev, int slot)
{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
spin_lock_irq(&cinfo->suspend_lock);
__remove_suspend_info(cinfo, slot);
spin_unlock_irq(&cinfo->suspend_lock);
+ mddev->pers->quiesce(mddev, 2);
}
-static void process_suspend_info(struct md_cluster_info *cinfo,
+static void process_suspend_info(struct mddev *mddev,
int slot, sector_t lo, sector_t hi)
{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
struct suspend_info *s;
if (!hi) {
- remove_suspend_info(cinfo, slot);
+ remove_suspend_info(mddev, slot);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
return;
}
s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
@@ -366,11 +396,14 @@ static void process_suspend_info(struct md_cluster_info *cinfo,
s->slot = slot;
s->lo = lo;
s->hi = hi;
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
spin_lock_irq(&cinfo->suspend_lock);
/* Remove existing entry (if exists) before adding */
__remove_suspend_info(cinfo, slot);
list_add(&s->list, &cinfo->suspend_list);
spin_unlock_irq(&cinfo->suspend_lock);
+ mddev->pers->quiesce(mddev, 2);
}
static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
@@ -383,8 +416,8 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
int len;
len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
- pretty_uuid(disk_uuid + len, cmsg->uuid);
- snprintf(raid_slot, 16, "RAID_DISK=%d", cmsg->raid_slot);
+ sprintf(disk_uuid + len, "%pU", cmsg->uuid);
+ snprintf(raid_slot, 16, "RAID_DISK=%d", le32_to_cpu(cmsg->raid_slot));
pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
init_completion(&cinfo->newdisk_completion);
set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
@@ -398,60 +431,60 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
-
- md_reload_sb(mddev);
+ md_reload_sb(mddev, le32_to_cpu(msg->raid_slot));
dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
}
static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
{
- struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+ struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
+ le32_to_cpu(msg->raid_slot));
if (rdev)
md_kick_rdev_from_array(rdev);
else
- pr_warn("%s: %d Could not find disk(%d) to REMOVE\n", __func__, __LINE__, msg->raid_slot);
+ pr_warn("%s: %d Could not find disk(%d) to REMOVE\n",
+ __func__, __LINE__, le32_to_cpu(msg->raid_slot));
}
static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
{
- struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+ struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
+ le32_to_cpu(msg->raid_slot));
if (rdev && test_bit(Faulty, &rdev->flags))
clear_bit(Faulty, &rdev->flags);
else
- pr_warn("%s: %d Could not find disk(%d) which is faulty", __func__, __LINE__, msg->raid_slot);
+ pr_warn("%s: %d Could not find disk(%d) which is faulty",
+ __func__, __LINE__, le32_to_cpu(msg->raid_slot));
}
static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
{
- switch (msg->type) {
+ if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
+		"node %d received its own msg\n", le32_to_cpu(msg->slot)))
+ return;
+ switch (le32_to_cpu(msg->type)) {
case METADATA_UPDATED:
- pr_info("%s: %d Received message: METADATA_UPDATE from %d\n",
- __func__, __LINE__, msg->slot);
process_metadata_update(mddev, msg);
break;
case RESYNCING:
- pr_info("%s: %d Received message: RESYNCING from %d\n",
- __func__, __LINE__, msg->slot);
- process_suspend_info(mddev->cluster_info, msg->slot,
- msg->low, msg->high);
+ process_suspend_info(mddev, le32_to_cpu(msg->slot),
+ le64_to_cpu(msg->low),
+ le64_to_cpu(msg->high));
break;
case NEWDISK:
- pr_info("%s: %d Received message: NEWDISK from %d\n",
- __func__, __LINE__, msg->slot);
process_add_new_disk(mddev, msg);
break;
case REMOVE:
- pr_info("%s: %d Received REMOVE from %d\n",
- __func__, __LINE__, msg->slot);
process_remove_disk(mddev, msg);
break;
case RE_ADD:
- pr_info("%s: %d Received RE_ADD from %d\n",
- __func__, __LINE__, msg->slot);
process_readd_disk(mddev, msg);
break;
+ case BITMAP_NEEDS_SYNC:
+ __recover_slot(mddev, le32_to_cpu(msg->slot));
+ break;
default:
pr_warn("%s:%d Received unknown message from %d\n",
__func__, __LINE__, msg->slot);
@@ -467,6 +500,7 @@ static void recv_daemon(struct md_thread *thread)
struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
struct cluster_msg msg;
+ int ret;
/*get CR on Message*/
if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
@@ -479,23 +513,37 @@ static void recv_daemon(struct md_thread *thread)
process_recvd_msg(thread->mddev, &msg);
/*release CR on ack_lockres*/
- dlm_unlock_sync(ack_lockres);
- /*up-convert to EX on message_lockres*/
- dlm_lock_sync(message_lockres, DLM_LOCK_EX);
+ ret = dlm_unlock_sync(ack_lockres);
+ if (unlikely(ret != 0))
+ pr_info("unlock ack failed return %d\n", ret);
+ /*up-convert to PR on message_lockres*/
+ ret = dlm_lock_sync(message_lockres, DLM_LOCK_PR);
+ if (unlikely(ret != 0))
+ pr_info("lock PR on msg failed return %d\n", ret);
/*get CR on ack_lockres again*/
- dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
+ ret = dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
+ if (unlikely(ret != 0))
+ pr_info("lock CR on ack failed return %d\n", ret);
/*release CR on message_lockres*/
- dlm_unlock_sync(message_lockres);
+ ret = dlm_unlock_sync(message_lockres);
+ if (unlikely(ret != 0))
+ pr_info("unlock msg failed return %d\n", ret);
}
/* lock_comm()
* Takes the lock on the TOKEN lock resource so no other
* node can communicate while the operation is underway.
+ * If called again while the TOKEN lock is already held in EX mode,
+ * return success. However, care must be taken that unlock_comm()
+ * is called only once.
*/
static int lock_comm(struct md_cluster_info *cinfo)
{
int error;
+ if (cinfo->token_lockres->mode == DLM_LOCK_EX)
+ return 0;
+
error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
if (error)
pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
@@ -505,6 +553,7 @@ static int lock_comm(struct md_cluster_info *cinfo)
static void unlock_comm(struct md_cluster_info *cinfo)
{
+ WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX);
dlm_unlock_sync(cinfo->token_lockres);
}
@@ -514,7 +563,7 @@ static void unlock_comm(struct md_cluster_info *cinfo)
* The function:
* 1. Grabs the message lockresource in EX mode
* 2. Copies the message to the message LVB
- * 3. Downconverts message lockresource to CR
+ * 3. Downconverts message lockresource to CW
* 4. Upconverts ack lock resource from CR to EX. This forces the BAST on other nodes
* and the other nodes read the message. The thread will wait here until all other
* nodes have released ack lock resource.
@@ -535,12 +584,12 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
sizeof(struct cluster_msg));
- /*down-convert EX to CR on Message*/
- error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CR);
+ /*down-convert EX to CW on Message*/
+ error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW);
if (error) {
- pr_err("md-cluster: failed to convert EX to CR on MESSAGE(%d)\n",
+ pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n",
error);
- goto failed_message;
+ goto failed_ack;
}
/*up-convert CR to EX on Ack*/
@@ -560,7 +609,13 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
}
failed_ack:
- dlm_unlock_sync(cinfo->message_lockres);
+ error = dlm_unlock_sync(cinfo->message_lockres);
+ if (unlikely(error != 0)) {
+		pr_err("md-cluster: failed to convert to NL on MESSAGE(%d)\n",
+			error);
+		/* retry in case the message cannot be released for some reason */
+ goto failed_ack;
+ }
failed_message:
return error;
}
@@ -582,6 +637,7 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
struct dlm_lock_resource *bm_lockres;
struct suspend_info *s;
char str[64];
+ sector_t lo, hi;
for (i = 0; i < total_slots; i++) {
@@ -612,9 +668,24 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
lockres_free(bm_lockres);
continue;
}
- if (ret)
+ if (ret) {
+ lockres_free(bm_lockres);
goto out;
- /* TODO: Read the disk bitmap sb and check if it needs recovery */
+ }
+
+ /* Read the disk bitmap sb and check if it needs recovery */
+ ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
+ if (ret) {
+ pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
+ lockres_free(bm_lockres);
+ continue;
+ }
+ if ((hi > 0) && (lo < mddev->recovery_cp)) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ mddev->recovery_cp = lo;
+ md_check_recovery(mddev);
+ }
+
dlm_unlock_sync(bm_lockres);
lockres_free(bm_lockres);
}
@@ -628,20 +699,19 @@ static int join(struct mddev *mddev, int nodes)
int ret, ops_rv;
char str[64];
- if (!try_module_get(THIS_MODULE))
- return -ENOENT;
-
cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
+ INIT_LIST_HEAD(&cinfo->suspend_list);
+ spin_lock_init(&cinfo->suspend_lock);
init_completion(&cinfo->completion);
+ set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
- mutex_init(&cinfo->sb_mutex);
mddev->cluster_info = cinfo;
memset(str, 0, 64);
- pretty_uuid(str, mddev->uuid);
+ sprintf(str, "%pU", mddev->uuid);
ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
DLM_LSFL_FS, LVB_SIZE,
&md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
@@ -654,12 +724,6 @@ static int join(struct mddev *mddev, int nodes)
ret = -ERANGE;
goto err;
}
- cinfo->sb_lock = lockres_init(mddev, "cmd-super",
- NULL, 0);
- if (!cinfo->sb_lock) {
- ret = -ENOMEM;
- goto err;
- }
/* Initiate the communication resources */
ret = -ENOMEM;
cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
@@ -700,8 +764,9 @@ static int join(struct mddev *mddev, int nodes)
goto err;
}
- INIT_LIST_HEAD(&cinfo->suspend_list);
- spin_lock_init(&cinfo->suspend_lock);
+ cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
+ if (!cinfo->resync_lockres)
+ goto err;
ret = gather_all_resync_info(mddev, nodes);
if (ret)
@@ -713,29 +778,47 @@ err:
lockres_free(cinfo->token_lockres);
lockres_free(cinfo->ack_lockres);
lockres_free(cinfo->no_new_dev_lockres);
+ lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
- lockres_free(cinfo->sb_lock);
if (cinfo->lockspace)
dlm_release_lockspace(cinfo->lockspace, 2);
mddev->cluster_info = NULL;
kfree(cinfo);
- module_put(THIS_MODULE);
return ret;
}
+static void resync_bitmap(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg = {0};
+ int err;
+
+ cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC);
+ err = sendmsg(cinfo, &cmsg);
+ if (err)
+ pr_err("%s:%d: failed to send BITMAP_NEEDS_SYNC message (%d)\n",
+ __func__, __LINE__, err);
+}
+
static int leave(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
if (!cinfo)
return 0;
+
+	/* a BITMAP_NEEDS_SYNC message should be sent when a node leaves
+	 * the cluster with a dirty bitmap; it can only be delivered while
+	 * the dlm connection is still available */
+ if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector)
+ resync_bitmap(mddev);
+
md_unregister_thread(&cinfo->recovery_thread);
md_unregister_thread(&cinfo->recv_thread);
lockres_free(cinfo->message_lockres);
lockres_free(cinfo->token_lockres);
lockres_free(cinfo->ack_lockres);
lockres_free(cinfo->no_new_dev_lockres);
- lockres_free(cinfo->sb_lock);
lockres_free(cinfo->bitmap_lockres);
dlm_release_lockspace(cinfo->lockspace, 2);
return 0;
@@ -752,15 +835,6 @@ static int slot_number(struct mddev *mddev)
return cinfo->slot_number - 1;
}
-static void resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
-{
- struct md_cluster_info *cinfo = mddev->cluster_info;
-
- add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi);
- /* Re-acquire the lock to refresh LVB */
- dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
-}
-
static int metadata_update_start(struct mddev *mddev)
{
return lock_comm(mddev->cluster_info);
@@ -770,58 +844,75 @@ static int metadata_update_finish(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
struct cluster_msg cmsg;
- int ret;
+ struct md_rdev *rdev;
+ int ret = 0;
+ int raid_slot = -1;
memset(&cmsg, 0, sizeof(cmsg));
cmsg.type = cpu_to_le32(METADATA_UPDATED);
- ret = __sendmsg(cinfo, &cmsg);
+ /* Pick up a good active device number to send.
+ */
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
+ raid_slot = rdev->desc_nr;
+ break;
+ }
+ if (raid_slot >= 0) {
+ cmsg.raid_slot = cpu_to_le32(raid_slot);
+ ret = __sendmsg(cinfo, &cmsg);
+ } else
+ pr_warn("md-cluster: No good device id found to send\n");
unlock_comm(cinfo);
return ret;
}
-static int metadata_update_cancel(struct mddev *mddev)
+static void metadata_update_cancel(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+ unlock_comm(cinfo);
+}
- return dlm_unlock_sync(cinfo->token_lockres);
+static int resync_start(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ cinfo->resync_lockres->flags |= DLM_LKF_NOQUEUE;
+ return dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX);
}
-static int resync_send(struct mddev *mddev, enum msg_type type,
- sector_t lo, sector_t hi)
+static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
- struct cluster_msg cmsg;
- int slot = cinfo->slot_number - 1;
+ struct cluster_msg cmsg = {0};
- pr_info("%s:%d lo: %llu hi: %llu\n", __func__, __LINE__,
- (unsigned long long)lo,
- (unsigned long long)hi);
- resync_info_update(mddev, lo, hi);
- cmsg.type = cpu_to_le32(type);
- cmsg.slot = cpu_to_le32(slot);
+ add_resync_info(cinfo->bitmap_lockres, lo, hi);
+ /* Re-acquire the lock to refresh LVB */
+ dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
+ cmsg.type = cpu_to_le32(RESYNCING);
cmsg.low = cpu_to_le64(lo);
cmsg.high = cpu_to_le64(hi);
- return sendmsg(cinfo, &cmsg);
-}
-static int resync_start(struct mddev *mddev, sector_t lo, sector_t hi)
-{
- pr_info("%s:%d\n", __func__, __LINE__);
- return resync_send(mddev, RESYNCING, lo, hi);
+ return sendmsg(cinfo, &cmsg);
}
-static void resync_finish(struct mddev *mddev)
+static int resync_finish(struct mddev *mddev)
{
- pr_info("%s:%d\n", __func__, __LINE__);
- resync_send(mddev, RESYNCING, 0, 0);
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ cinfo->resync_lockres->flags &= ~DLM_LKF_NOQUEUE;
+ dlm_unlock_sync(cinfo->resync_lockres);
+ return resync_info_update(mddev, 0, 0);
}
-static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+static int area_resyncing(struct mddev *mddev, int direction,
+ sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
int ret = 0;
struct suspend_info *s;
+ if ((direction == READ) &&
+ test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
+ return 1;
+
spin_lock_irq(&cinfo->suspend_lock);
if (list_empty(&cinfo->suspend_list))
goto out;
@@ -835,7 +926,11 @@ out:
return ret;
}
-static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev)
+/* add_new_disk() - initiates a disk add
+ * However, if this fails before writing md_update_sb(),
+ * add_new_disk_cancel() must be called to release the token lock
+ */
+static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
struct cluster_msg cmsg;
@@ -846,7 +941,7 @@ static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev)
memset(&cmsg, 0, sizeof(cmsg));
cmsg.type = cpu_to_le32(NEWDISK);
memcpy(cmsg.uuid, uuid, 16);
- cmsg.raid_slot = rdev->desc_nr;
+ cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
lock_comm(cinfo);
ret = __sendmsg(cinfo, &cmsg);
if (ret)
@@ -857,22 +952,17 @@ static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev)
/* Some node does not "see" the device */
if (ret == -EAGAIN)
ret = -ENOENT;
+ if (ret)
+ unlock_comm(cinfo);
else
dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
return ret;
}
-static int add_new_disk_finish(struct mddev *mddev)
+static void add_new_disk_cancel(struct mddev *mddev)
{
- struct cluster_msg cmsg;
struct md_cluster_info *cinfo = mddev->cluster_info;
- int ret;
- /* Write sb and inform others */
- md_update_sb(mddev, 1);
- cmsg.type = METADATA_UPDATED;
- ret = __sendmsg(cinfo, &cmsg);
unlock_comm(cinfo);
- return ret;
}
static int new_disk_ack(struct mddev *mddev, bool ack)
@@ -892,10 +982,10 @@ static int new_disk_ack(struct mddev *mddev, bool ack)
static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
- struct cluster_msg cmsg;
+ struct cluster_msg cmsg = {0};
struct md_cluster_info *cinfo = mddev->cluster_info;
- cmsg.type = REMOVE;
- cmsg.raid_slot = rdev->desc_nr;
+ cmsg.type = cpu_to_le32(REMOVE);
+ cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
return __sendmsg(cinfo, &cmsg);
}
@@ -903,12 +993,12 @@ static int gather_bitmaps(struct md_rdev *rdev)
{
int sn, err;
sector_t lo, hi;
- struct cluster_msg cmsg;
+ struct cluster_msg cmsg = {0};
struct mddev *mddev = rdev->mddev;
struct md_cluster_info *cinfo = mddev->cluster_info;
- cmsg.type = RE_ADD;
- cmsg.raid_slot = rdev->desc_nr;
+ cmsg.type = cpu_to_le32(RE_ADD);
+ cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
err = sendmsg(cinfo, &cmsg);
if (err)
goto out;
@@ -932,15 +1022,15 @@ static struct md_cluster_operations cluster_ops = {
.join = join,
.leave = leave,
.slot_number = slot_number,
- .resync_info_update = resync_info_update,
.resync_start = resync_start,
.resync_finish = resync_finish,
+ .resync_info_update = resync_info_update,
.metadata_update_start = metadata_update_start,
.metadata_update_finish = metadata_update_finish,
.metadata_update_cancel = metadata_update_cancel,
.area_resyncing = area_resyncing,
- .add_new_disk_start = add_new_disk_start,
- .add_new_disk_finish = add_new_disk_finish,
+ .add_new_disk = add_new_disk,
+ .add_new_disk_cancel = add_new_disk_cancel,
.new_disk_ack = new_disk_ack,
.remove_disk = remove_disk,
.gather_bitmaps = gather_bitmaps,
@@ -961,5 +1051,6 @@ static void cluster_exit(void)
module_init(cluster_init);
module_exit(cluster_exit);
+MODULE_AUTHOR("SUSE");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Clustering support for MD");
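
A recurring pattern in the md-cluster.c hunks above is zero-initializing struct cluster_msg with "= {0}" and converting every on-wire field with cpu_to_le32()/cpu_to_le64() before it is sent. The short user-space sketch below mirrors that idiom; demo_msg, DEMO_RESYNCING and the htole32()/htole64() stand-ins for the kernel helpers are illustrative assumptions, not definitions taken from the driver.

#include <endian.h>     /* htole32/htole64/le32toh: user-space stand-ins
                         * for the kernel's cpu_to_le32()/cpu_to_le64() */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical on-wire message, loosely modelled on struct cluster_msg. */
struct demo_msg {
	uint32_t type;
	uint32_t raid_slot;
	uint64_t low;
	uint64_t high;
};

enum { DEMO_RESYNCING = 3 };    /* arbitrary example message type */

/* Zero the whole message first (the "= {0}" idiom above), then store
 * every multi-byte field in little-endian order before it is sent. */
static void fill_resync_msg(struct demo_msg *m, int slot,
			    uint64_t lo, uint64_t hi)
{
	memset(m, 0, sizeof(*m));
	m->type = htole32(DEMO_RESYNCING);
	m->raid_slot = htole32((uint32_t)slot);
	m->low = htole64(lo);
	m->high = htole64(hi);
}

int main(void)
{
	struct demo_msg m;

	fill_resync_msg(&m, 2, 0, 1024);
	printf("type=%" PRIu32 " slot=%" PRIu32 " high=%" PRIu64 "\n",
	       le32toh(m.type), le32toh(m.raid_slot), le64toh(m.high));
	return 0;
}

Building messages this way keeps the wire format identical regardless of host byte order, which is why the hunks above wrap type and raid_slot in cpu_to_le32() where they were previously stored raw.
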
diff --git a/kernel/drivers/md/md-cluster.h b/kernel/drivers/md/md-cluster.h
index 6817ee00e..e75ea2613 100644
--- a/kernel/drivers/md/md-cluster.h
+++ b/kernel/drivers/md/md-cluster.h
@@ -12,15 +12,15 @@ struct md_cluster_operations {
int (*join)(struct mddev *mddev, int nodes);
int (*leave)(struct mddev *mddev);
int (*slot_number)(struct mddev *mddev);
- void (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
- int (*resync_start)(struct mddev *mddev, sector_t lo, sector_t hi);
- void (*resync_finish)(struct mddev *mddev);
+ int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
int (*metadata_update_start)(struct mddev *mddev);
int (*metadata_update_finish)(struct mddev *mddev);
- int (*metadata_update_cancel)(struct mddev *mddev);
- int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
- int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
- int (*add_new_disk_finish)(struct mddev *mddev);
+ void (*metadata_update_cancel)(struct mddev *mddev);
+ int (*resync_start)(struct mddev *mddev);
+ int (*resync_finish)(struct mddev *mddev);
+ int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
+ int (*add_new_disk)(struct mddev *mddev, struct md_rdev *rdev);
+ void (*add_new_disk_cancel)(struct mddev *mddev);
int (*new_disk_ack)(struct mddev *mddev, bool ack);
int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
int (*gather_bitmaps)(struct md_rdev *rdev);
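
The reworked md_cluster_operations above splits resync handling into resync_start(), resync_info_update() and resync_finish(), with the signatures shown. The toy user-space sketch below only illustrates the intended calling order (take the resync lock, publish progress windows, then release the lock); the stand-in sector_t, struct mddev and stub callbacks are assumptions for the sketch, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;      /* stand-in for the kernel's sector_t */
struct mddev { const char *name; };

/* Mirrors only the three resync hooks from md_cluster_operations above;
 * everything else in the real struct is omitted for brevity. */
struct demo_cluster_ops {
	int (*resync_start)(struct mddev *mddev);
	int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
	int (*resync_finish)(struct mddev *mddev);
};

static int stub_start(struct mddev *m)
{ printf("%s: resync lock taken\n", m->name); return 0; }

static int stub_update(struct mddev *m, sector_t lo, sector_t hi)
{ printf("%s: resyncing %llu-%llu\n", m->name,
	 (unsigned long long)lo, (unsigned long long)hi); return 0; }

static int stub_finish(struct mddev *m)
{ printf("%s: resync lock dropped\n", m->name); return 0; }

static const struct demo_cluster_ops ops = {
	.resync_start = stub_start,
	.resync_info_update = stub_update,
	.resync_finish = stub_finish,
};

/* Illustrative driver: start, report a couple of progress windows, then
 * finish (in md the finish path also sends a final lo=hi=0 update). */
static int demo_resync(struct mddev *mddev, sector_t max_sectors)
{
	int err = ops.resync_start(mddev);
	if (err)
		return err;             /* e.g. lock busy, resync deferred */
	for (sector_t j = 0; j < max_sectors; j += max_sectors / 2)
		ops.resync_info_update(mddev, j, max_sectors);
	return ops.resync_finish(mddev);
}

int main(void)
{
	struct mddev md = { .name = "md0" };
	return demo_resync(&md, 2048);
}
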
diff --git a/kernel/drivers/md/md.c b/kernel/drivers/md/md.c
index e8c44fcb1..b1e1f6b95 100644
--- a/kernel/drivers/md/md.c
+++ b/kernel/drivers/md/md.c
@@ -250,21 +250,25 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
-static void md_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
unsigned int sectors;
int cpu;
+ blk_queue_split(q, &bio, q->bio_split);
+
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
bio_io_error(bio);
- return;
+ return BLK_QC_T_NONE;
}
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
- bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
- return;
+ if (bio_sectors(bio) != 0)
+ bio->bi_error = -EROFS;
+ bio_endio(bio);
+ return BLK_QC_T_NONE;
}
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
@@ -298,6 +302,8 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
+
+ return BLK_QC_T_NONE;
}
/* mddev_suspend makes sure no new requests are submitted
@@ -308,8 +314,8 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
*/
void mddev_suspend(struct mddev *mddev)
{
- BUG_ON(mddev->suspended);
- mddev->suspended = 1;
+ if (mddev->suspended++)
+ return;
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
@@ -320,7 +326,8 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
- mddev->suspended = 0;
+ if (--mddev->suspended)
+ return;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
@@ -350,34 +357,11 @@ static int md_congested(void *data, int bits)
return mddev_congested(mddev, bits);
}
-static int md_mergeable_bvec(struct request_queue *q,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct mddev *mddev = q->queuedata;
- int ret;
- rcu_read_lock();
- if (mddev->suspended) {
- /* Must always allow one vec */
- if (bvm->bi_size == 0)
- ret = biovec->bv_len;
- else
- ret = 0;
- } else {
- struct md_personality *pers = mddev->pers;
- if (pers && pers->mergeable_bvec)
- ret = pers->mergeable_bvec(mddev, bvm, biovec);
- else
- ret = biovec->bv_len;
- }
- rcu_read_unlock();
- return ret;
-}
/*
* Generic flush handling for md
*/
-static void md_end_flush(struct bio *bio, int err)
+static void md_end_flush(struct bio *bio)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
@@ -433,7 +417,7 @@ static void md_submit_flush_data(struct work_struct *ws)
if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
- bio_endio(bio, 0);
+ bio_endio(bio);
else {
bio->bi_rw &= ~REQ_FLUSH;
mddev->pers->make_request(mddev, bio);
@@ -502,6 +486,8 @@ static void mddev_put(struct mddev *mddev)
bioset_free(bs);
}
+static void md_safemode_timeout(unsigned long data);
+
void mddev_init(struct mddev *mddev)
{
mutex_init(&mddev->open_mutex);
@@ -509,7 +495,8 @@ void mddev_init(struct mddev *mddev)
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
- init_timer(&mddev->safemode_timer);
+ setup_timer(&mddev->safemode_timer, md_safemode_timeout,
+ (unsigned long) mddev);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
@@ -728,15 +715,13 @@ void md_rdev_clear(struct md_rdev *rdev)
}
EXPORT_SYMBOL_GPL(md_rdev_clear);
-static void super_written(struct bio *bio, int error)
+static void super_written(struct bio *bio)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
- if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
- printk("md: super_written gets error=%d, uptodate=%d\n",
- error, test_bit(BIO_UPTODATE, &bio->bi_flags));
- WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
+ if (bio->bi_error) {
+ printk("md: super_written gets error=%d\n", bio->bi_error);
md_error(mddev, rdev);
}
@@ -791,7 +776,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
bio_add_page(bio, page, size, 0);
submit_bio_wait(rw, bio);
- ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ ret = !bio->bi_error;
bio_put(bio);
return ret;
}
@@ -1626,7 +1611,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
++ev1;
if (rdev->desc_nr >= 0 &&
rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
- le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
+ (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
@@ -1646,16 +1632,29 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
int role;
if (rdev->desc_nr < 0 ||
rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
- role = 0xffff;
+ role = MD_DISK_ROLE_SPARE;
rdev->desc_nr = -1;
} else
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
switch(role) {
- case 0xffff: /* spare */
+ case MD_DISK_ROLE_SPARE: /* spare */
break;
- case 0xfffe: /* faulty */
+ case MD_DISK_ROLE_FAULTY: /* faulty */
set_bit(Faulty, &rdev->flags);
break;
+ case MD_DISK_ROLE_JOURNAL: /* journal device */
+ if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
+ /* journal device without journal feature */
+ printk(KERN_WARNING
+ "md: journal device provided without journal feature, ignoring the device\n");
+ return -EINVAL;
+ }
+ set_bit(Journal, &rdev->flags);
+ rdev->journal_tail = le64_to_cpu(sb->journal_tail);
+ if (mddev->recovery_cp == MaxSector)
+ set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ rdev->raid_disk = 0;
+ break;
default:
rdev->saved_raid_disk = role;
if ((le32_to_cpu(sb->feature_map) &
@@ -1673,6 +1672,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
set_bit(WriteMostly, &rdev->flags);
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
set_bit(Replacement, &rdev->flags);
+ if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
+ set_bit(MD_HAS_JOURNAL, &mddev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
@@ -1697,6 +1698,8 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
+ else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
+ sb->resync_offset = cpu_to_le64(MaxSector);
else
sb->resync_offset = cpu_to_le64(0);
@@ -1720,7 +1723,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
- if (rdev->raid_disk >= 0 &&
+ if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags)) {
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
@@ -1730,6 +1733,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
}
+ /* Note: recovery_offset and journal_tail share space */
+ if (test_bit(Journal, &rdev->flags))
+ sb->journal_tail = cpu_to_le64(rdev->journal_tail);
if (test_bit(Replacement, &rdev->flags))
sb->feature_map |=
cpu_to_le32(MD_FEATURE_REPLACEMENT);
@@ -1753,6 +1759,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
}
}
+ if (mddev_is_clustered(mddev))
+ sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
+
if (rdev->badblocks.count == 0)
/* Nothing to do for bad blocks*/ ;
else if (sb->bblog_offset == 0)
@@ -1803,18 +1812,23 @@ retry:
max_dev = le32_to_cpu(sb->max_dev);
for (i=0; i<max_dev;i++)
- sb->dev_roles[i] = cpu_to_le16(0xfffe);
+ sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
+
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
+ sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
rdev_for_each(rdev2, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
- sb->dev_roles[i] = cpu_to_le16(0xfffe);
+ sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
+ else if (test_bit(Journal, &rdev2->flags))
+ sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
- sb->dev_roles[i] = cpu_to_le16(0xffff);
+ sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
}
sb->sb_csum = calc_sb_1_csum(sb);
@@ -1930,13 +1944,23 @@ static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
struct md_rdev *rdev, *rdev2;
rcu_read_lock();
- rdev_for_each_rcu(rdev, mddev1)
- rdev_for_each_rcu(rdev2, mddev2)
+ rdev_for_each_rcu(rdev, mddev1) {
+ if (test_bit(Faulty, &rdev->flags) ||
+ test_bit(Journal, &rdev->flags) ||
+ rdev->raid_disk == -1)
+ continue;
+ rdev_for_each_rcu(rdev2, mddev2) {
+ if (test_bit(Faulty, &rdev2->flags) ||
+ test_bit(Journal, &rdev2->flags) ||
+ rdev2->raid_disk == -1)
+ continue;
if (rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
rcu_read_unlock();
return 1;
}
+ }
+ }
rcu_read_unlock();
return 0;
}
@@ -1980,12 +2004,9 @@ int md_integrity_register(struct mddev *mddev)
* All component devices are integrity capable and have matching
* profiles, register the common profile for the md device.
*/
- if (blk_integrity_register(mddev->gendisk,
- bdev_get_integrity(reference->bdev)) != 0) {
- printk(KERN_ERR "md: failed to register integrity for %s\n",
- mdname(mddev));
- return -EINVAL;
- }
+ blk_integrity_register(mddev->gendisk,
+ bdev_get_integrity(reference->bdev));
+
printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
printk(KERN_ERR "md: failed to create integrity pool for %s\n",
@@ -1996,27 +2017,32 @@ int md_integrity_register(struct mddev *mddev)
}
EXPORT_SYMBOL(md_integrity_register);
-/* Disable data integrity if non-capable/non-matching disk is being added */
-void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
+/*
+ * Attempt to add an rdev, but only if it is consistent with the current
+ * integrity profile
+ */
+int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_rdev;
struct blk_integrity *bi_mddev;
+ char name[BDEVNAME_SIZE];
if (!mddev->gendisk)
- return;
+ return 0;
bi_rdev = bdev_get_integrity(rdev->bdev);
bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
- return;
- if (rdev->raid_disk < 0) /* skip spares */
- return;
- if (bi_rdev && blk_integrity_compare(mddev->gendisk,
- rdev->bdev->bd_disk) >= 0)
- return;
- printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
- blk_integrity_unregister(mddev->gendisk);
+ return 0;
+
+ if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
+ printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
+ mdname(mddev), bdevname(rdev->bdev, name));
+ return -ENXIO;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(md_integrity_add_rdev);
@@ -2024,7 +2050,6 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
char b[BDEVNAME_SIZE];
struct kobject *ko;
- char *s;
int err;
/* prevent duplicates */
@@ -2070,8 +2095,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
return -EBUSY;
}
bdevname(rdev->bdev,b);
- while ( (s=strchr(b, '/')) != NULL)
- *s = '!';
+ strreplace(b, '/', '!');
rdev->mddev = mddev;
printk(KERN_INFO "md: bind<%s>\n", b);
@@ -2216,23 +2240,77 @@ static void sync_sbs(struct mddev *mddev, int nospares)
}
}
+static bool does_sb_need_changing(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ struct mdp_superblock_1 *sb;
+ int role;
+
+ /* Find a good rdev */
+ rdev_for_each(rdev, mddev)
+ if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
+ break;
+
+ /* No good device found. */
+ if (!rdev)
+ return false;
+
+ sb = page_address(rdev->sb_page);
+	/* Check if a device has become faulty or a spare has become active */
+ rdev_for_each(rdev, mddev) {
+ role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
+ /* Device activated? */
+ if (role == 0xffff && rdev->raid_disk >=0 &&
+ !test_bit(Faulty, &rdev->flags))
+ return true;
+ /* Device turned faulty? */
+ if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
+ return true;
+ }
+
+ /* Check if any mddev parameters have changed */
+ if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
+ (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
+ (mddev->layout != le64_to_cpu(sb->layout)) ||
+ (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
+ (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
+ return true;
+
+ return false;
+}
+
void md_update_sb(struct mddev *mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
int nospares = 0;
int any_badblocks_changed = 0;
+ int ret = -1;
if (mddev->ro) {
if (force_change)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
return;
}
+
+ if (mddev_is_clustered(mddev)) {
+ if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ force_change = 1;
+ ret = md_cluster_ops->metadata_update_start(mddev);
+		/* Has someone else updated the sb */
+ if (!does_sb_need_changing(mddev)) {
+ if (ret == 0)
+ md_cluster_ops->metadata_update_cancel(mddev);
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ return;
+ }
+ }
repeat:
/* First make sure individual recovery_offsets are correct */
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
mddev->curr_resync_completed > rdev->recovery_offset)
rdev->recovery_offset = mddev->curr_resync_completed;
@@ -2376,6 +2454,9 @@ repeat:
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
+
+ if (mddev_is_clustered(mddev) && ret == 0)
+ md_cluster_ops->metadata_update_finish(mddev);
}
EXPORT_SYMBOL(md_update_sb);
@@ -2451,6 +2532,10 @@ state_show(struct md_rdev *rdev, char *page)
len += sprintf(page+len, "%sin_sync",sep);
sep = ",";
}
+ if (test_bit(Journal, &flags)) {
+ len += sprintf(page+len, "%sjournal",sep);
+ sep = ",";
+ }
if (test_bit(WriteMostly, &flags)) {
len += sprintf(page+len, "%swrite_mostly",sep);
sep = ",";
@@ -2462,6 +2547,7 @@ state_show(struct md_rdev *rdev, char *page)
sep = ",";
}
if (!test_bit(Faulty, &flags) &&
+ !test_bit(Journal, &flags) &&
!test_bit(In_sync, &flags)) {
len += sprintf(page+len, "%sspare", sep);
sep = ",";
@@ -2510,17 +2596,16 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = -EBUSY;
else {
struct mddev *mddev = rdev->mddev;
- if (mddev_is_clustered(mddev))
- md_cluster_ops->remove_disk(mddev, rdev);
- md_kick_rdev_from_array(rdev);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_start(mddev);
- if (mddev->pers)
- md_update_sb(mddev, 1);
- md_new_event(mddev);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_finish(mddev);
err = 0;
+ if (mddev_is_clustered(mddev))
+ err = md_cluster_ops->remove_disk(mddev, rdev);
+
+ if (err == 0) {
+ md_kick_rdev_from_array(rdev);
+ if (mddev->pers)
+ md_update_sb(mddev, 1);
+ md_new_event(mddev);
+ }
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
@@ -2549,7 +2634,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
- } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
+ } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags)) {
if (rdev->mddev->pers == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->saved_raid_disk = rdev->raid_disk;
@@ -2568,6 +2654,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* check if recovery is needed.
*/
if (rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
!test_bit(Replacement, &rdev->flags))
set_bit(WantReplacement, &rdev->flags);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
@@ -2630,13 +2717,14 @@ errors_show(struct md_rdev *rdev, char *page)
static ssize_t
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
{
- char *e;
- unsigned long n = simple_strtoul(buf, &e, 10);
- if (*buf && (*e == 0 || *e == '\n')) {
- atomic_set(&rdev->corrected_errors, n);
- return len;
- }
- return -EINVAL;
+ unsigned int n;
+ int rv;
+
+ rv = kstrtouint(buf, 10, &n);
+ if (rv < 0)
+ return rv;
+ atomic_set(&rdev->corrected_errors, n);
+ return len;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
@@ -2644,7 +2732,9 @@ __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(struct md_rdev *rdev, char *page)
{
- if (rdev->raid_disk < 0)
+ if (test_bit(Journal, &rdev->flags))
+ return sprintf(page, "journal\n");
+ else if (rdev->raid_disk < 0)
return sprintf(page, "none\n");
else
return sprintf(page, "%d\n", rdev->raid_disk);
@@ -2653,13 +2743,18 @@ slot_show(struct md_rdev *rdev, char *page)
static ssize_t
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
{
- char *e;
+ int slot;
int err;
- int slot = simple_strtoul(buf, &e, 10);
+
+ if (test_bit(Journal, &rdev->flags))
+ return -EBUSY;
if (strncmp(buf, "none", 4)==0)
slot = -1;
- else if (e==buf || (*e && *e!= '\n'))
- return -EINVAL;
+ else {
+ err = kstrtouint(buf, 10, (unsigned int *)&slot);
+ if (err < 0)
+ return err;
+ }
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
* updating the 'rd%d' link, and communicating
@@ -2683,6 +2778,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
+ int err;
if (rdev->raid_disk != -1)
return -EBUSY;
@@ -2857,6 +2953,8 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
sector_t oldsectors = rdev->sectors;
sector_t sectors;
+ if (test_bit(Journal, &rdev->flags))
+ return -EBUSY;
if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (rdev->data_offset != rdev->new_data_offset)
@@ -3214,20 +3312,14 @@ static void analyze_sbs(struct mddev *mddev)
md_kick_rdev_from_array(rdev);
continue;
}
- /* No device should have a Candidate flag
- * when reading devices
- */
- if (test_bit(Candidate, &rdev->flags)) {
- pr_info("md: kicking Cluster Candidate %s from array!\n",
- bdevname(rdev->bdev, b));
- md_kick_rdev_from_array(rdev);
- }
}
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
set_bit(In_sync, &rdev->flags);
- } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
+ } else if (rdev->raid_disk >=
+ (mddev->raid_disks - min(0, mddev->delta_disks)) &&
+ !test_bit(Journal, &rdev->flags)) {
rdev->raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
}
@@ -3274,8 +3366,6 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
return 0;
}
-static void md_safemode_timeout(unsigned long data);
-
static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
@@ -3287,6 +3377,11 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
unsigned long msec;
+ if (mddev_is_clustered(mddev)) {
+ pr_info("md: Safemode is disabled for clustered mode\n");
+ return -EINVAL;
+ }
+
if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
return -EINVAL;
if (msec == 0)
@@ -3544,12 +3639,12 @@ layout_show(struct mddev *mddev, char *page)
static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
- char *e;
- unsigned long n = simple_strtoul(buf, &e, 10);
+ unsigned int n;
int err;
- if (!*buf || (*e && *e != '\n'))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &n);
+ if (err < 0)
+ return err;
err = mddev_lock(mddev);
if (err)
return err;
@@ -3593,12 +3688,12 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks);
static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
- char *e;
+ unsigned int n;
int err;
- unsigned long n = simple_strtoul(buf, &e, 10);
- if (!*buf || (*e && *e != '\n'))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &n);
+ if (err < 0)
+ return err;
err = mddev_lock(mddev);
if (err)
@@ -3645,12 +3740,12 @@ chunk_size_show(struct mddev *mddev, char *page)
static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
+ unsigned long n;
int err;
- char *e;
- unsigned long n = simple_strtoul(buf, &e, 10);
- if (!*buf || (*e && *e != '\n'))
- return -EINVAL;
+ err = kstrtoul(buf, 10, &n);
+ if (err < 0)
+ return err;
err = mddev_lock(mddev);
if (err)
@@ -3688,19 +3783,24 @@ resync_start_show(struct mddev *mddev, char *page)
static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
+ unsigned long long n;
int err;
- char *e;
- unsigned long long n = simple_strtoull(buf, &e, 10);
+
+ if (cmd_match(buf, "none"))
+ n = MaxSector;
+ else {
+ err = kstrtoull(buf, 10, &n);
+ if (err < 0)
+ return err;
+ if (n != (sector_t)n)
+ return -EINVAL;
+ }
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
err = -EBUSY;
- else if (cmd_match(buf, "none"))
- n = MaxSector;
- else if (!*buf || (*e && *e != '\n'))
- err = -EINVAL;
if (!err) {
mddev->recovery_cp = n;
@@ -3882,7 +3982,9 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
break;
case clean:
if (mddev->pers) {
- restart_array(mddev);
+ err = restart_array(mddev);
+ if (err)
+ break;
spin_lock(&mddev->lock);
if (atomic_read(&mddev->writes_pending) == 0) {
if (mddev->in_sync == 0) {
@@ -3900,7 +4002,9 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
break;
case active:
if (mddev->pers) {
- restart_array(mddev);
+ err = restart_array(mddev);
+ if (err)
+ break;
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
@@ -3936,14 +4040,14 @@ max_corrected_read_errors_show(struct mddev *mddev, char *page) {
static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
- char *e;
- unsigned long n = simple_strtoul(buf, &e, 10);
+ unsigned int n;
+ int rv;
- if (*buf && (*e == 0 || *e == '\n')) {
- atomic_set(&mddev->max_corr_read_errors, n);
- return len;
- }
- return -EINVAL;
+ rv = kstrtouint(buf, 10, &n);
+ if (rv < 0)
+ return rv;
+ atomic_set(&mddev->max_corr_read_errors, n);
+ return len;
}
static struct md_sysfs_entry max_corr_read_errors =
@@ -4079,12 +4183,8 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
if (mddev->pers) {
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_start(mddev);
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_finish(mddev);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
@@ -4203,6 +4303,8 @@ action_show(struct mddev *mddev, char *page)
type = "repair";
} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
type = "recover";
+ else if (mddev->reshape_position != MaxSector)
+ type = "reshape";
}
return sprintf(page, "%s\n", type);
}
@@ -4228,8 +4330,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
}
mddev_unlock(mddev);
}
- } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
else if (cmd_match(page, "resync"))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4242,8 +4343,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
return -EINVAL;
err = mddev_lock(mddev);
if (!err) {
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- err = mddev->pers->start_reshape(mddev);
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ err = -EBUSY;
+ else {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ err = mddev->pers->start_reshape(mddev);
+ }
mddev_unlock(mddev);
}
if (err)
@@ -4302,15 +4407,18 @@ sync_min_show(struct mddev *mddev, char *page)
static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
- int min;
- char *e;
+ unsigned int min;
+ int rv;
+
if (strncmp(buf, "system", 6)==0) {
- mddev->sync_speed_min = 0;
- return len;
+ min = 0;
+ } else {
+ rv = kstrtouint(buf, 10, &min);
+ if (rv < 0)
+ return rv;
+ if (min == 0)
+ return -EINVAL;
}
- min = simple_strtoul(buf, &e, 10);
- if (buf == e || (*e && *e != '\n') || min <= 0)
- return -EINVAL;
mddev->sync_speed_min = min;
return len;
}
@@ -4328,15 +4436,18 @@ sync_max_show(struct mddev *mddev, char *page)
static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
- int max;
- char *e;
+ unsigned int max;
+ int rv;
+
if (strncmp(buf, "system", 6)==0) {
- mddev->sync_speed_max = 0;
- return len;
+ max = 0;
+ } else {
+ rv = kstrtouint(buf, 10, &max);
+ if (rv < 0)
+ return rv;
+ if (max == 0)
+ return -EINVAL;
}
- max = simple_strtoul(buf, &e, 10);
- if (buf == e || (*e && *e != '\n') || max <= 0)
- return -EINVAL;
mddev->sync_speed_max = max;
return len;
}
@@ -4519,12 +4630,13 @@ suspend_lo_show(struct mddev *mddev, char *page)
static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
- char *e;
- unsigned long long new = simple_strtoull(buf, &e, 10);
- unsigned long long old;
+ unsigned long long old, new;
int err;
- if (buf == e || (*e && *e != '\n'))
+ err = kstrtoull(buf, 10, &new);
+ if (err < 0)
+ return err;
+ if (new != (sector_t)new)
return -EINVAL;
err = mddev_lock(mddev);
@@ -4561,12 +4673,13 @@ suspend_hi_show(struct mddev *mddev, char *page)
static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
- char *e;
- unsigned long long new = simple_strtoull(buf, &e, 10);
- unsigned long long old;
+ unsigned long long old, new;
int err;
- if (buf == e || (*e && *e != '\n'))
+ err = kstrtoull(buf, 10, &new);
+ if (err < 0)
+ return err;
+ if (new != (sector_t)new)
return -EINVAL;
err = mddev_lock(mddev);
@@ -4608,11 +4721,13 @@ static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
struct md_rdev *rdev;
- char *e;
+ unsigned long long new;
int err;
- unsigned long long new = simple_strtoull(buf, &e, 10);
- if (buf == e || (*e && *e != '\n'))
+ err = kstrtoull(buf, 10, &new);
+ if (err < 0)
+ return err;
+ if (new != (sector_t)new)
return -EINVAL;
err = mddev_lock(mddev);
if (err)
@@ -5169,7 +5284,6 @@ int md_run(struct mddev *mddev)
if (mddev->queue) {
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = md_congested;
- blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -5185,9 +5299,10 @@ int md_run(struct mddev *mddev)
atomic_set(&mddev->max_corr_read_errors,
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
- mddev->safemode_timer.function = md_safemode_timeout;
- mddev->safemode_timer.data = (unsigned long) mddev;
- mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
+ if (mddev_is_clustered(mddev))
+ mddev->safemode_delay = 0;
+ else
+ mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
smp_wmb();
spin_lock(&mddev->lock);
@@ -5199,6 +5314,11 @@ int md_run(struct mddev *mddev)
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
+ if (mddev->degraded && !mddev->ro)
+ /* This ensures that recovering status is reported immediately
+ * via sysfs - until a lack of spares is confirmed.
+ */
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (mddev->flags & MD_UPDATE_SB_FLAGS)
@@ -5225,6 +5345,9 @@ static int do_md_run(struct mddev *mddev)
goto out;
}
+ if (mddev_is_clustered(mddev))
+ md_allow_write(mddev);
+
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
@@ -5247,6 +5370,25 @@ static int restart_array(struct mddev *mddev)
return -EINVAL;
if (!mddev->ro)
return -EBUSY;
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+ struct md_rdev *rdev;
+ bool has_journal = false;
+
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
+ if (test_bit(Journal, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags)) {
+ has_journal = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ /* Don't restart rw with journal missing/faulty */
+ if (!has_journal)
+ return -EINVAL;
+ }
+
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
@@ -5298,7 +5440,6 @@ static void md_clean(struct mddev *mddev)
mddev->degraded = 0;
mddev->safemode = 0;
mddev->private = NULL;
- mddev->merge_check_needed = 0;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 0;
mddev->bitmap_info.default_space = 0;
@@ -5309,8 +5450,6 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_start(mddev);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
@@ -5324,13 +5463,13 @@ static void __md_stop_writes(struct mddev *mddev)
md_super_wait(mddev);
if (mddev->ro == 0 &&
- (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
+ ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
+ (mddev->flags & MD_UPDATE_SB_FLAGS))) {
/* mark array as shutdown cleanly */
- mddev->in_sync = 1;
+ if (!mddev_is_clustered(mddev))
+ mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_finish(mddev);
}
void md_stop_writes(struct mddev *mddev)
@@ -5409,9 +5548,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
+ if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ return -EBUSY;
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
@@ -5497,7 +5640,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
@@ -5539,7 +5681,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
- blk_integrity_unregister(disk);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
@@ -5748,16 +5889,16 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
err = 0;
spin_lock(&mddev->lock);
- /* bitmap disabled, zero the first byte and copy out */
- if (!mddev->bitmap_info.file)
- file->pathname[0] = '\0';
- else if ((ptr = d_path(&mddev->bitmap_info.file->f_path,
- file->pathname, sizeof(file->pathname))),
- IS_ERR(ptr))
- err = PTR_ERR(ptr);
- else
- memmove(file->pathname, ptr,
- sizeof(file->pathname)-(ptr-file->pathname));
+ /* bitmap enabled */
+ if (mddev->bitmap_info.file) {
+ ptr = file_path(mddev->bitmap_info.file, file->pathname,
+ sizeof(file->pathname));
+ if (IS_ERR(ptr))
+ err = PTR_ERR(ptr);
+ else
+ memmove(file->pathname, ptr,
+ sizeof(file->pathname)-(ptr-file->pathname));
+ }
spin_unlock(&mddev->lock);
if (err == 0 &&
@@ -5789,6 +5930,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
info.state |= (1<<MD_DISK_ACTIVE);
info.state |= (1<<MD_DISK_SYNC);
}
+ if (test_bit(Journal, &rdev->flags))
+ info.state |= (1<<MD_DISK_JOURNAL);
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
} else {
@@ -5903,23 +6046,18 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
else
clear_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_JOURNAL))
+ set_bit(Journal, &rdev->flags);
/*
* check whether the device shows up in other nodes
*/
if (mddev_is_clustered(mddev)) {
- if (info->state & (1 << MD_DISK_CANDIDATE)) {
- /* Through --cluster-confirm */
+ if (info->state & (1 << MD_DISK_CANDIDATE))
set_bit(Candidate, &rdev->flags);
- err = md_cluster_ops->new_disk_ack(mddev, true);
- if (err) {
- export_rdev(rdev);
- return err;
- }
- } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
+ else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
/* --add initiated by this node */
- err = md_cluster_ops->add_new_disk_start(mddev, rdev);
+ err = md_cluster_ops->add_new_disk(mddev, rdev);
if (err) {
- md_cluster_ops->add_new_disk_finish(mddev);
export_rdev(rdev);
return err;
}
@@ -5928,13 +6066,23 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
+
if (err)
export_rdev(rdev);
- else
+
+ if (mddev_is_clustered(mddev)) {
+ if (info->state & (1 << MD_DISK_CANDIDATE))
+ md_cluster_ops->new_disk_ack(mddev, (err == 0));
+ else {
+ if (err)
+ md_cluster_ops->add_new_disk_cancel(mddev);
+ else
+ err = add_bound_rdev(rdev);
+ }
+
+ } else if (!err)
err = add_bound_rdev(rdev);
- if (mddev_is_clustered(mddev) &&
- (info->state & (1 << MD_DISK_CLUSTER_ADD)))
- md_cluster_ops->add_new_disk_finish(mddev);
+
return err;
}
@@ -5990,13 +6138,17 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
+ int ret = -1;
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENXIO;
if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_start(mddev);
+ ret = md_cluster_ops->metadata_update_start(mddev);
+
+ if (rdev->raid_disk < 0)
+ goto kick_rdev;
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(mddev, rdev);
@@ -6004,20 +6156,19 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
if (rdev->raid_disk >= 0)
goto busy;
- if (mddev_is_clustered(mddev))
+kick_rdev:
+ if (mddev_is_clustered(mddev) && ret == 0)
md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev);
md_update_sb(mddev, 1);
md_new_event(mddev);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_finish(mddev);
-
return 0;
busy:
- if (mddev_is_clustered(mddev))
+ if (mddev_is_clustered(mddev) && ret == 0)
md_cluster_ops->metadata_update_cancel(mddev);
+
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
@@ -6068,14 +6219,12 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
goto abort_export;
}
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_start(mddev);
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
- goto abort_clustered;
+ goto abort_export;
/*
* The rest should better be atomic, we can have disk failures
@@ -6085,9 +6234,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->raid_disk = -1;
md_update_sb(mddev, 1);
-
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_finish(mddev);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
@@ -6097,9 +6243,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
md_new_event(mddev);
return 0;
-abort_clustered:
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_cancel(mddev);
abort_export:
export_rdev(rdev);
return err;
@@ -6417,8 +6560,6 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
return rv;
}
}
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_start(mddev);
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
@@ -6476,12 +6617,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
}
}
md_update_sb(mddev, 1);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_finish(mddev);
return rv;
err:
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_cancel(mddev);
return rv;
}
@@ -7076,7 +7213,7 @@ static void status_unused(struct seq_file *seq)
seq_printf(seq, "\n");
}
-static void status_resync(struct seq_file *seq, struct mddev *mddev)
+static int status_resync(struct seq_file *seq, struct mddev *mddev)
{
sector_t max_sectors, resync, res;
unsigned long dt, db;
@@ -7084,18 +7221,32 @@ static void status_resync(struct seq_file *seq, struct mddev *mddev)
int scale;
unsigned int per_milli;
- if (mddev->curr_resync <= 3)
- resync = 0;
- else
- resync = mddev->curr_resync
- - atomic_read(&mddev->recovery_active);
-
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
+ resync = mddev->curr_resync;
+ if (resync <= 3) {
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+ /* Still cleaning up */
+ resync = max_sectors;
+ } else
+ resync -= atomic_read(&mddev->recovery_active);
+
+ if (resync == 0) {
+ if (mddev->recovery_cp < MaxSector) {
+ seq_printf(seq, "\tresync=PENDING");
+ return 1;
+ }
+ return 0;
+ }
+ if (resync < 3) {
+ seq_printf(seq, "\tresync=DELAYED");
+ return 1;
+ }
+
WARN_ON(max_sectors == 0);
/* Pick 'scale' such that (resync>>scale)*1000 will fit
* in a sector_t, and (max_sectors>>scale) will fit in a
@@ -7160,6 +7311,7 @@ static void status_resync(struct seq_file *seq, struct mddev *mddev)
((unsigned long)rt % 60)/6);
seq_printf(seq, " speed=%ldK/sec", db/2/dt);
+ return 1;
}
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
@@ -7267,6 +7419,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
bdevname(rdev->bdev,b), rdev->desc_nr);
if (test_bit(WriteMostly, &rdev->flags))
seq_printf(seq, "(W)");
+ if (test_bit(Journal, &rdev->flags))
+ seq_printf(seq, "(J)");
if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
@@ -7305,13 +7459,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
- if (mddev->curr_resync > 2) {
- status_resync(seq, mddev);
+ if (status_resync(seq, mddev))
seq_printf(seq, "\n ");
- } else if (mddev->curr_resync >= 1)
- seq_printf(seq, "\tresync=DELAYED\n ");
- else if (mddev->recovery_cp < MaxSector)
- seq_printf(seq, "\tresync=PENDING\n ");
}
} else
seq_printf(seq, "\n ");
@@ -7394,15 +7543,19 @@ int unregister_md_personality(struct md_personality *p)
}
EXPORT_SYMBOL(unregister_md_personality);
-int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module)
+int register_md_cluster_operations(struct md_cluster_operations *ops,
+ struct module *module)
{
- if (md_cluster_ops != NULL)
- return -EALREADY;
+ int ret = 0;
spin_lock(&pers_lock);
- md_cluster_ops = ops;
- md_cluster_mod = module;
+ if (md_cluster_ops != NULL)
+ ret = -EALREADY;
+ else {
+ md_cluster_ops = ops;
+ md_cluster_mod = module;
+ }
spin_unlock(&pers_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(register_md_cluster_operations);
@@ -7422,7 +7575,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
err = request_module("md-cluster");
if (err) {
pr_err("md-cluster module not found.\n");
- return err;
+ return -ENOENT;
}
spin_lock(&pers_lock);
@@ -7580,11 +7733,7 @@ int md_allow_write(struct mddev *mddev)
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock(&mddev->lock);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_finish(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock(&mddev->lock);
@@ -7616,6 +7765,7 @@ void md_do_sync(struct md_thread *thread)
struct md_rdev *rdev;
char *desc, *action = NULL;
struct blk_plug plug;
+ bool cluster_resync_finished = false;
/* just incase thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7725,6 +7875,7 @@ void md_do_sync(struct md_thread *thread)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < j)
@@ -7785,9 +7936,6 @@ void md_do_sync(struct md_thread *thread)
md_new_event(mddev);
update_time = jiffies;
- if (mddev_is_clustered(mddev))
- md_cluster_ops->resync_start(mddev, j, max_sectors);
-
blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
@@ -7800,7 +7948,8 @@ void md_do_sync(struct md_thread *thread)
> (max_sectors >> 4)) ||
time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
(j - mddev->curr_resync_completed)*2
- >= mddev->resync_max - mddev->curr_resync_completed
+ >= mddev->resync_max - mddev->curr_resync_completed ||
+ mddev->curr_resync_completed > mddev->resync_max
)) {
/* time to update curr_resync_completed */
wait_event(mddev->recovery_wait,
@@ -7845,10 +7994,11 @@ void md_do_sync(struct md_thread *thread)
break;
j += sectors;
+ if (j > max_sectors)
+ /* when skipping, extra large numbers can be returned. */
+ j = max_sectors;
if (j > 2)
mddev->curr_resync = j;
- if (mddev_is_clustered(mddev))
- md_cluster_ops->resync_info_update(mddev, j, max_sectors);
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
/* this is the earliest that rebuild will be
@@ -7913,11 +8063,18 @@ void md_do_sync(struct md_thread *thread)
blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
- /* tell personality that we are finished */
- mddev->pers->sync_request(mddev, max_sectors, &skipped);
-
- if (mddev_is_clustered(mddev))
+ if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ mddev->curr_resync > 2) {
+ mddev->curr_resync_completed = mddev->curr_resync;
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ }
+ /* tell personality and other nodes that we are finished */
+ if (mddev_is_clustered(mddev)) {
md_cluster_ops->resync_finish(mddev);
+ cluster_resync_finished = true;
+ }
+ mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
@@ -7944,6 +8101,7 @@ void md_do_sync(struct md_thread *thread)
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
@@ -7954,6 +8112,11 @@ void md_do_sync(struct md_thread *thread)
skip:
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ if (mddev_is_clustered(mddev) &&
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ !cluster_resync_finished)
+ md_cluster_ops->resync_finish(mddev);
+
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
@@ -7962,11 +8125,11 @@ void md_do_sync(struct md_thread *thread)
mddev->resync_max = MaxSector;
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
+ set_bit(MD_RECOVERY_DONE, &mddev->recovery);
mddev->curr_resync = 0;
spin_unlock(&mddev->lock);
wake_up(&resync_wait);
- set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
}
@@ -7984,7 +8147,8 @@ static int remove_and_add_spares(struct mddev *mddev,
rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
(test_bit(Faulty, &rdev->flags) ||
- ! test_bit(In_sync, &rdev->flags)) &&
+ (!test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Journal, &rdev->flags))) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
@@ -7996,25 +8160,31 @@ static int remove_and_add_spares(struct mddev *mddev,
if (removed && mddev->kobj.sd)
sysfs_notify(&mddev->kobj, NULL, "degraded");
- if (this)
+ if (this && removed)
goto no_add;
rdev_for_each(rdev, mddev) {
+ if (this && this != rdev)
+ continue;
+ if (test_bit(Candidate, &rdev->flags))
+ continue;
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Journal, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
spares++;
if (rdev->raid_disk >= 0)
continue;
if (test_bit(Faulty, &rdev->flags))
continue;
+ if (test_bit(Journal, &rdev->flags))
+ continue;
if (mddev->ro &&
! (rdev->saved_raid_disk >= 0 &&
!test_bit(Bitmap_sync, &rdev->flags)))
continue;
- if (rdev->saved_raid_disk < 0)
- rdev->recovery_offset = 0;
+ rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
@@ -8033,14 +8203,25 @@ no_add:
static void md_start_sync(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
+ int ret = 0;
+
+ if (mddev_is_clustered(mddev)) {
+ ret = md_cluster_ops->resync_start(mddev);
+ if (ret) {
+ mddev->sync_thread = NULL;
+ goto out;
+ }
+ }
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
"resync");
+out:
if (!mddev->sync_thread) {
- printk(KERN_ERR "%s: could not start resync"
- " thread...\n",
- mdname(mddev));
+ if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
+ printk(KERN_ERR "%s: could not start resync"
+ " thread...\n",
+ mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8113,6 +8294,15 @@ void md_check_recovery(struct mddev *mddev)
int spares = 0;
if (mddev->ro) {
+ struct md_rdev *rdev;
+ if (!mddev->external && mddev->in_sync)
+ /* 'Blocked' flag not needed as failed devices
+ * will be recorded if array switched to read/write.
+ * Leaving it set will prevent the device
+ * from being removed.
+ */
+ rdev_for_each(rdev, mddev)
+ clear_bit(Blocked, &rdev->flags);
/* On a read-only array we can:
* - remove failed devices
* - add already-in_sync devices if the array itself
@@ -8126,7 +8316,9 @@ void md_check_recovery(struct mddev *mddev)
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
+ clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
goto unlock;
}
@@ -8148,13 +8340,8 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags & MD_UPDATE_SB_FLAGS) {
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_start(mddev);
+ if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_finish(mddev);
- }
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
@@ -8252,8 +8439,6 @@ void md_reap_sync_thread(struct mddev *mddev)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_start(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev);
@@ -8266,8 +8451,6 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -8572,6 +8755,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
/* Make sure they get written out promptly */
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
return rv;
@@ -8889,25 +9073,128 @@ err_wq:
return ret;
}
-void md_reload_sb(struct mddev *mddev)
+static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
- struct md_rdev *rdev, *tmp;
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+ struct md_rdev *rdev2;
+ int role, ret;
+ char b[BDEVNAME_SIZE];
- rdev_for_each_safe(rdev, tmp, mddev) {
- rdev->sb_loaded = 0;
- ClearPageUptodate(rdev->sb_page);
+ /* Check for change of roles in the active devices */
+ rdev_for_each(rdev2, mddev) {
+ if (test_bit(Faulty, &rdev2->flags))
+ continue;
+
+ /* Check if the roles changed */
+ role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
+
+ if (test_bit(Candidate, &rdev2->flags)) {
+ if (role == 0xfffe) {
+ pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
+ md_kick_rdev_from_array(rdev2);
+ continue;
+ }
+ else
+ clear_bit(Candidate, &rdev2->flags);
+ }
+
+ if (role != rdev2->raid_disk) {
+ /* got activated */
+ if (rdev2->raid_disk == -1 && role != 0xffff) {
+ rdev2->saved_raid_disk = role;
+ ret = remove_and_add_spares(mddev, rdev2);
+ pr_info("Activated spare: %s\n",
+ bdevname(rdev2->bdev,b));
+ continue;
+ }
+ /* device faulty
+ * We just want to do the minimum to mark the disk
+ * as faulty. The recovery is performed by the
+ * one who initiated the error.
+ */
+ if ((role == 0xfffe) || (role == 0xfffd)) {
+ md_error(mddev, rdev2);
+ clear_bit(Blocked, &rdev2->flags);
+ }
+ }
}
- mddev->raid_disks = 0;
- analyze_sbs(mddev);
- rdev_for_each_safe(rdev, tmp, mddev) {
- struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
- /* since we don't write to faulty devices, we figure out if the
- * disk is faulty by comparing events
- */
- if (mddev->events > sb->events)
- set_bit(Faulty, &rdev->flags);
+
+ if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
+ update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+
+ /* Finally set the event to be up to date */
+ mddev->events = le64_to_cpu(sb->events);
+}
+
+static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
+{
+ int err;
+ struct page *swapout = rdev->sb_page;
+ struct mdp_superblock_1 *sb;
+
+	/* Stash the rdev's current sb page in the 'swapout' temporary so
+	 * it can be restored if reloading the superblock fails below.
+	 */
+ rdev->sb_page = NULL;
+ alloc_disk_sb(rdev);
+ ClearPageUptodate(rdev->sb_page);
+ rdev->sb_loaded = 0;
+ err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version);
+
+ if (err < 0) {
+ pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
+ __func__, __LINE__, rdev->desc_nr, err);
+ put_page(rdev->sb_page);
+ rdev->sb_page = swapout;
+ rdev->sb_loaded = 1;
+ return err;
}
+ sb = page_address(rdev->sb_page);
+	/* Pick up the recovery offset when the superblock advertises
+	 * MD_FEATURE_RECOVERY_OFFSET.
+	 */
+
+ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
+ rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
+
+ /* The other node finished recovery, call spare_active to set
+ * device In_sync and mddev->degraded
+ */
+ if (rdev->recovery_offset == MaxSector &&
+ !test_bit(In_sync, &rdev->flags) &&
+ mddev->pers->spare_active(mddev))
+ sysfs_notify(&mddev->kobj, NULL, "degraded");
+
+ put_page(swapout);
+ return 0;
+}
+
+void md_reload_sb(struct mddev *mddev, int nr)
+{
+ struct md_rdev *rdev;
+ int err;
+
+ /* Find the rdev */
+ rdev_for_each_rcu(rdev, mddev) {
+ if (rdev->desc_nr == nr)
+ break;
+ }
+
+ if (!rdev || rdev->desc_nr != nr) {
+ pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
+ return;
+ }
+
+ err = read_rdev(mddev, rdev);
+ if (err < 0)
+ return;
+
+ check_sb_changes(mddev, rdev);
+
+ /* Read all rdev's to update recovery_offset */
+ rdev_for_each_rcu(rdev, mddev)
+ read_rdev(mddev, rdev);
}
EXPORT_SYMBOL(md_reload_sb);
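
The reworked md_reload_sb() path above re-reads a single rdev's superblock and, on failure, puts the old page back. A minimal userspace sketch of that stash-and-restore pattern follows; struct sb_holder, load_super() and reload_sb() are illustrative stand-ins, not kernel symbols.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct sb_holder {
    	char *sb_page;		/* models rdev->sb_page */
    	int sb_loaded;		/* models rdev->sb_loaded */
    };

    static int load_super(char *page)
    {
    	(void)page;
    	return -5;		/* pretend the on-disk read failed */
    }

    static int reload_sb(struct sb_holder *h)
    {
    	char *swapout = h->sb_page;	/* stash the current page */
    	int err;

    	h->sb_page = calloc(1, 4096);	/* models alloc_disk_sb() */
    	h->sb_loaded = 0;

    	err = load_super(h->sb_page);
    	if (err < 0) {
    		/* restore the previous, known-good superblock */
    		free(h->sb_page);
    		h->sb_page = swapout;
    		h->sb_loaded = 1;
    		return err;
    	}

    	free(swapout);			/* models put_page(swapout) */
    	h->sb_loaded = 1;
    	return 0;
    }

    int main(void)
    {
    	struct sb_holder h = { .sb_page = strdup("old sb"), .sb_loaded = 1 };

    	printf("reload_sb() = %d, sb_page = \"%s\"\n", reload_sb(&h), h.sb_page);
    	free(h.sb_page);
    	return 0;
    }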
@@ -9020,13 +9307,7 @@ static int get_ro(char *buffer, struct kernel_param *kp)
}
static int set_ro(const char *val, struct kernel_param *kp)
{
- char *e;
- int num = simple_strtoul(val, &e, 10);
- if (*val && (*e == '\0' || *e == '\n')) {
- start_readonly = num;
- return 0;
- }
- return -EINVAL;
+ return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
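
set_ro() now delegates parsing to kstrtouint(), which accepts the input only if the whole string is a decimal number, with an optional trailing newline tolerated. A rough userspace approximation of that behaviour, with parse_uint() as an assumed helper name:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    static int parse_uint(const char *s, unsigned int *out)
    {
    	char *end;
    	unsigned long v;

    	errno = 0;
    	v = strtoul(s, &end, 10);
    	if (errno || end == s)
    		return -EINVAL;
    	if (*end == '\n')		/* a single trailing newline is fine */
    		end++;
    	if (*end != '\0')		/* anything else rejects the string */
    		return -EINVAL;
    	*out = (unsigned int)v;
    	return 0;
    }

    int main(void)
    {
    	unsigned int v = 0;

    	printf("%d\n", parse_uint("1\n", &v));	/* 0: accepted, v == 1 */
    	printf("%d\n", parse_uint("1x", &v));	/* -EINVAL: rejected */
    	return 0;
    }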
diff --git a/kernel/drivers/md/md.h b/kernel/drivers/md/md.h
index 4046a6c6f..dfa57b415 100644
--- a/kernel/drivers/md/md.h
+++ b/kernel/drivers/md/md.h
@@ -16,6 +16,7 @@
#define _MD_MD_H
#include <linux/blkdev.h>
+#include <linux/backing-dev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
@@ -86,10 +87,16 @@ struct md_rdev {
* array and could again if we did a partial
* resync from the bitmap
*/
- sector_t recovery_offset;/* If this device has been partially
+ union {
+ sector_t recovery_offset;/* If this device has been partially
* recovered, this is where we were
* up to.
*/
+ sector_t journal_tail; /* If this device is a journal device,
+ * this is the journal tail (journal
+ * recovery start point)
+ */
+ };
atomic_t nr_pending; /* number of pending requests.
* only maintained for arrays that
@@ -133,10 +140,6 @@ enum flag_bits {
Bitmap_sync, /* ..actually, not quite In_sync. Need a
* bitmap-based recovery to get fully in sync
*/
- Unmerged, /* device is being added to array and should
- * be considerred for bvec_merge_fn but not
- * yet for actual IO
- */
WriteMostly, /* Avoid reading if at all possible */
AutoDetected, /* added by auto-detect */
Blocked, /* An error occurred but has not yet
@@ -175,6 +178,11 @@ enum flag_bits {
* This device is seen locally but not
* by the whole cluster
*/
+ Journal, /* This device is used as journal for
+ * raid-5/6.
+ * Usually, this device should be faster
+ * than other devices in the array
+ */
};
#define BB_LEN_MASK (0x00000000000001FFULL)
@@ -224,6 +232,8 @@ struct mddev {
#define MD_STILL_CLOSED 4 /* If set, then array has not been opened since
* md_ioctl checked on it.
*/
+#define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */
+#define MD_HAS_JOURNAL 6 /* The raid array has journal feature set */
int suspended;
atomic_t active_io;
@@ -373,10 +383,6 @@ struct mddev {
int degraded; /* whether md should consider
* adding a spare
*/
- int merge_check_needed; /* at least one
- * member device
- * has a
- * merge_bvec_fn */
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
@@ -531,10 +537,6 @@ struct md_personality
/* congested implements bdi.congested_fn().
* Will not be called while array is 'suspended' */
int (*congested)(struct mddev *mddev, int bits);
- /* mergeable_bvec is use to implement ->merge_bvec_fn */
- int (*mergeable_bvec)(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec);
};
struct md_sysfs_entry {
@@ -564,7 +566,9 @@ static inline char * mdname (struct mddev * mddev)
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
+ if (!test_bit(Replacement, &rdev->flags) &&
+ !test_bit(Journal, &rdev->flags) &&
+ mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
} else
@@ -574,7 +578,9 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
+ if (!test_bit(Replacement, &rdev->flags) &&
+ !test_bit(Journal, &rdev->flags) &&
+ mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
}
@@ -651,7 +657,7 @@ extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
-extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
+extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
@@ -669,7 +675,7 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
-extern void md_reload_sb(struct mddev *mddev);
+extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
diff --git a/kernel/drivers/md/multipath.c b/kernel/drivers/md/multipath.c
index ac3ede2bd..0a72ab6e6 100644
--- a/kernel/drivers/md/multipath.c
+++ b/kernel/drivers/md/multipath.c
@@ -77,18 +77,18 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
struct bio *bio = mp_bh->master_bio;
struct mpconf *conf = mp_bh->mddev->private;
- bio_endio(bio, err);
+ bio->bi_error = err;
+ bio_endio(bio);
mempool_free(mp_bh, conf->pool);
}
-static void multipath_end_request(struct bio *bio, int error)
+static void multipath_end_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct multipath_bh *mp_bh = bio->bi_private;
struct mpconf *conf = mp_bh->mddev->private;
struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
- if (uptodate)
+ if (!bio->bi_error)
multipath_end_bh_io(mp_bh, 0);
else if (!(bio->bi_rw & REQ_RAHEAD)) {
/*
@@ -101,7 +101,7 @@ static void multipath_end_request(struct bio *bio, int error)
(unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
- multipath_end_bh_io(mp_bh, error);
+ multipath_end_bh_io(mp_bh, bio->bi_error);
rdev_dec_pending(rdev, conf->mddev);
}
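
Throughout the rest of this diff the old bio_endio(bio, error) calls become "store the error in bio->bi_error, then call bio_endio(bio)", and BIO_UPTODATE tests become !bio->bi_error checks, matching the newer block-layer completion interface. A toy userspace model of that convention (struct bio_model and the function names are assumptions, not kernel types):

    #include <stdio.h>

    struct bio_model {
    	int bi_error;				/* models bio->bi_error */
    	void (*bi_end_io)(struct bio_model *);
    };

    static void end_io(struct bio_model *bio)
    {
    	if (!bio->bi_error)		/* replaces test_bit(BIO_UPTODATE, ...) */
    		printf("I/O completed ok\n");
    	else
    		printf("I/O failed: %d\n", bio->bi_error);
    }

    static void bio_endio_model(struct bio_model *bio)
    {
    	bio->bi_end_io(bio);		/* completion no longer takes an error */
    }

    int main(void)
    {
    	struct bio_model ok  = { .bi_error = 0,  .bi_end_io = end_io };
    	struct bio_model bad = { .bi_error = -5, .bi_end_io = end_io };

    	bio_endio_model(&ok);		/* old style: bio_endio(bio, 0) */
    	bio_endio_model(&bad);		/* old style: bio_endio(bio, -EIO) */
    	return 0;
    }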
@@ -123,7 +123,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
mempool_free(mp_bh, conf->pool);
return;
}
@@ -257,18 +257,9 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_segments to one, lying
- * within a single page.
- * (Note: it is very unlikely that a device with
- * merge_bvec_fn will be involved in multipath.)
- */
- if (q->merge_bvec_fn) {
- blk_queue_max_segments(mddev->queue, 1);
- blk_queue_segment_boundary(mddev->queue,
- PAGE_CACHE_SIZE - 1);
- }
-
+ err = md_integrity_add_rdev(rdev, mddev);
+ if (err)
+ break;
spin_lock_irq(&conf->device_lock);
mddev->degraded--;
rdev->raid_disk = path;
@@ -276,7 +267,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
spin_unlock_irq(&conf->device_lock);
rcu_assign_pointer(p->rdev, rdev);
err = 0;
- md_integrity_add_rdev(rdev, mddev);
break;
}
@@ -432,15 +422,6 @@ static int multipath_run (struct mddev *mddev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, not that we ever expect a device with
- * a merge_bvec_fn to be involved in multipath */
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
- blk_queue_max_segments(mddev->queue, 1);
- blk_queue_segment_boundary(mddev->queue,
- PAGE_CACHE_SIZE - 1);
- }
-
if (!test_bit(Faulty, &rdev->flags))
working_disks++;
}
@@ -491,8 +472,7 @@ static int multipath_run (struct mddev *mddev)
return 0;
out_free_conf:
- if (conf->pool)
- mempool_destroy(conf->pool);
+ mempool_destroy(conf->pool);
kfree(conf->multipaths);
kfree(conf);
mddev->private = NULL;
diff --git a/kernel/drivers/md/persistent-data/dm-array.c b/kernel/drivers/md/persistent-data/dm-array.c
index e64b61ad0..431a03067 100644
--- a/kernel/drivers/md/persistent-data/dm-array.c
+++ b/kernel/drivers/md/persistent-data/dm-array.c
@@ -233,9 +233,9 @@ static int get_ablock(struct dm_array_info *info, dm_block_t b,
/*
* Unlocks an array block.
*/
-static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
{
- return dm_tm_unlock(info->btree_info.tm, block);
+ dm_tm_unlock(info->btree_info.tm, block);
}
/*----------------------------------------------------------------*/
diff --git a/kernel/drivers/md/persistent-data/dm-block-manager.c b/kernel/drivers/md/persistent-data/dm-block-manager.c
index 087411c95..f2393ba83 100644
--- a/kernel/drivers/md/persistent-data/dm-block-manager.c
+++ b/kernel/drivers/md/persistent-data/dm-block-manager.c
@@ -454,7 +454,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
int r;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -490,7 +490,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
return -EPERM;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -523,7 +523,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm,
int r;
p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
if (unlikely(!p))
return -EWOULDBLOCK;
@@ -559,7 +559,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
return -EPERM;
p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
memset(p, 0, dm_bm_block_size(bm));
@@ -578,7 +578,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
-int dm_bm_unlock(struct dm_block *b)
+void dm_bm_unlock(struct dm_block *b)
{
struct buffer_aux *aux;
aux = dm_bufio_get_aux_data(to_buffer(b));
@@ -590,8 +590,6 @@ int dm_bm_unlock(struct dm_block *b)
bl_up_read(&aux->lock);
dm_bufio_release(to_buffer(b));
-
- return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);
@@ -609,6 +607,12 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
dm_bufio_prefetch(bm->bufio, b, 1);
}
+bool dm_bm_is_read_only(struct dm_block_manager *bm)
+{
+ return bm->read_only;
+}
+EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
+
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
bm->read_only = true;
diff --git a/kernel/drivers/md/persistent-data/dm-block-manager.h b/kernel/drivers/md/persistent-data/dm-block-manager.h
index 1b95dfc17..3627d1b76 100644
--- a/kernel/drivers/md/persistent-data/dm-block-manager.h
+++ b/kernel/drivers/md/persistent-data/dm-block-manager.h
@@ -94,7 +94,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
struct dm_block_validator *v,
struct dm_block **result);
-int dm_bm_unlock(struct dm_block *b);
+void dm_bm_unlock(struct dm_block *b);
/*
* It's a common idiom to have a superblock that should be committed last.
@@ -123,6 +123,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
* Additionally you should not use dm_bm_unlock_move, however no error will
* be returned if you do.
*/
+bool dm_bm_is_read_only(struct dm_block_manager *bm);
void dm_bm_set_read_only(struct dm_block_manager *bm);
void dm_bm_set_read_write(struct dm_block_manager *bm);
diff --git a/kernel/drivers/md/persistent-data/dm-btree-internal.h b/kernel/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d5c..a240990a7 100644
--- a/kernel/drivers/md/persistent-data/dm-btree-internal.h
+++ b/kernel/drivers/md/persistent-data/dm-btree-internal.h
@@ -52,7 +52,7 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt);
int new_block(struct dm_btree_info *info, struct dm_block **result);
-int unlock_block(struct dm_btree_info *info, struct dm_block *b);
+void unlock_block(struct dm_btree_info *info, struct dm_block *b);
/*
* Spines keep track of the rolling locks. There are 2 variants, read-only
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
extern struct dm_block_validator btree_node_validator;
+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
#endif /* DM_BTREE_INTERNAL_H */
diff --git a/kernel/drivers/md/persistent-data/dm-btree-remove.c b/kernel/drivers/md/persistent-data/dm-btree-remove.c
index a03178e91..21ea537bd 100644
--- a/kernel/drivers/md/persistent-data/dm-btree-remove.c
+++ b/kernel/drivers/md/persistent-data/dm-btree-remove.c
@@ -165,9 +165,9 @@ static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt
return 0;
}
-static int exit_child(struct dm_btree_info *info, struct child *c)
+static void exit_child(struct dm_btree_info *info, struct child *c)
{
- return dm_tm_unlock(info->tm, c->block);
+ dm_tm_unlock(info->tm, c->block);
}
static void shift(struct btree_node *left, struct btree_node *right, int count)
@@ -249,13 +249,10 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
__rebalance2(info, parent, &left, &right);
- r = exit_child(info, &left);
- if (r) {
- exit_child(info, &right);
- return r;
- }
+ exit_child(info, &left);
+ exit_child(info, &right);
- return exit_child(info, &right);
+ return 0;
}
/*
@@ -301,11 +298,16 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
{
int s;
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
- unsigned target = (nr_left + nr_center + nr_right) / 3;
- BUG_ON(target > max_entries);
+ unsigned total = nr_left + nr_center + nr_right;
+ unsigned target_right = total / 3;
+ unsigned remainder = (target_right * 3) != total;
+ unsigned target_left = target_right + remainder;
+
+ BUG_ON(target_left > max_entries);
+ BUG_ON(target_right > max_entries);
if (nr_left < nr_right) {
- s = nr_left - target;
+ s = nr_left - target_left;
if (s < 0 && nr_center < -s) {
/* not enough in central node */
@@ -316,10 +318,10 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
} else
shift(left, center, s);
- shift(center, right, target - nr_right);
+ shift(center, right, target_right - nr_right);
} else {
- s = target - nr_right;
+ s = target_right - nr_right;
if (s > 0 && nr_center < s) {
/* not enough in central node */
shift(center, right, nr_center);
@@ -329,7 +331,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
} else
shift(center, right, s);
- shift(left, center, nr_left - target);
+ shift(left, center, nr_left - target_left);
}
*key_ptr(parent, c->index) = center->keys[0];
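
The reworked redistribute3() no longer drives all three nodes toward a single total/3 target: the right-hand target stays at total/3 and any remainder is folded into the left-hand target. The standalone snippet below only shows what the new arithmetic computes for a couple of totals.

    #include <stdio.h>

    static void targets(unsigned nr_left, unsigned nr_center, unsigned nr_right)
    {
    	unsigned total = nr_left + nr_center + nr_right;
    	unsigned target_right = total / 3;
    	unsigned remainder = (target_right * 3) != total;
    	unsigned target_left = target_right + remainder;

    	printf("total=%u -> left target=%u, right target=%u, center keeps %u\n",
    	       total, target_left, target_right,
    	       total - target_left - target_right);
    }

    int main(void)
    {
    	targets(10, 0, 0);	/* total 10: left 4, right 3, center 3 */
    	targets(9, 9, 9);	/* total 27: an even 9/9/9 split */
    	return 0;
    }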
@@ -389,49 +391,18 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
__rebalance3(info, parent, &left, &center, &right);
- r = exit_child(info, &left);
- if (r) {
- exit_child(info, &center);
- exit_child(info, &right);
- return r;
- }
-
- r = exit_child(info, &center);
- if (r) {
- exit_child(info, &right);
- return r;
- }
-
- r = exit_child(info, &right);
- if (r)
- return r;
+ exit_child(info, &left);
+ exit_child(info, &center);
+ exit_child(info, &right);
return 0;
}
-static int get_nr_entries(struct dm_transaction_manager *tm,
- dm_block_t b, uint32_t *result)
-{
- int r;
- struct dm_block *block;
- struct btree_node *n;
-
- r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
- if (r)
- return r;
-
- n = dm_block_data(block);
- *result = le32_to_cpu(n->header.nr_entries);
-
- return dm_tm_unlock(tm, block);
-}
-
static int rebalance_children(struct shadow_spine *s,
struct dm_btree_info *info,
struct dm_btree_value_type *vt, uint64_t key)
{
int i, r, has_left_sibling, has_right_sibling;
- uint32_t child_entries;
struct btree_node *n;
n = dm_block_data(shadow_current(s));
@@ -446,9 +417,7 @@ static int rebalance_children(struct shadow_spine *s,
memcpy(n, dm_block_data(child),
dm_bm_block_size(dm_tm_get_bm(info->tm)));
- r = dm_tm_unlock(info->tm, child);
- if (r)
- return r;
+ dm_tm_unlock(info->tm, child);
dm_tm_dec(info->tm, dm_block_location(child));
return 0;
@@ -458,10 +427,6 @@ static int rebalance_children(struct shadow_spine *s,
if (i < 0)
return -ENODATA;
- r = get_nr_entries(info->tm, value64(n, i), &child_entries);
- if (r)
- return r;
-
has_left_sibling = i > 0;
has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
@@ -544,14 +509,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}
-static struct dm_btree_value_type le64_type = {
- .context = NULL,
- .size = sizeof(__le64),
- .inc = NULL,
- .dec = NULL,
- .equal = NULL
-};
-
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
@@ -559,12 +516,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
r = remove_raw(&spine, info,
(level == last_level ?
- &info->value_type : &le64_type),
+ &info->value_type : &le64_vt),
root, keys[level], (unsigned *)&index);
if (r < 0)
break;
@@ -590,3 +549,133 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
return r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove);
+
+/*----------------------------------------------------------------*/
+
+static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
+ struct dm_btree_value_type *vt, dm_block_t root,
+ uint64_t key, int *index)
+{
+ int i = *index, r;
+ struct btree_node *n;
+
+ for (;;) {
+ r = shadow_step(s, root, vt);
+ if (r < 0)
+ break;
+
+ /*
+ * We have to patch up the parent node, ugly, but I don't
+ * see a way to do this automatically as part of the spine
+ * op.
+ */
+ if (shadow_has_parent(s)) {
+ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+ memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
+ &location, sizeof(__le64));
+ }
+
+ n = dm_block_data(shadow_current(s));
+
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
+ *index = lower_bound(n, key);
+ return 0;
+ }
+
+ r = rebalance_children(s, info, vt, key);
+ if (r)
+ break;
+
+ n = dm_block_data(shadow_current(s));
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
+ *index = lower_bound(n, key);
+ return 0;
+ }
+
+ i = lower_bound(n, key);
+
+ /*
+ * We know the key is present, or else
+ * rebalance_children would have returned
+ * -ENODATA
+ */
+ root = value64(n, i);
+ }
+
+ return r;
+}
+
+static int remove_one(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, uint64_t end_key,
+ dm_block_t *new_root, unsigned *nr_removed)
+{
+ unsigned level, last_level = info->levels - 1;
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+ struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
+ uint64_t k;
+
+ init_le64_type(info->tm, &le64_vt);
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < last_level; level++) {
+ r = remove_raw(&spine, info, &le64_vt,
+ root, keys[level], (unsigned *) &index);
+ if (r < 0)
+ goto out;
+
+ n = dm_block_data(shadow_current(&spine));
+ root = value64(n, index);
+ }
+
+ r = remove_nearest(&spine, info, &info->value_type,
+ root, keys[last_level], &index);
+ if (r < 0)
+ goto out;
+
+ n = dm_block_data(shadow_current(&spine));
+
+ if (index < 0)
+ index = 0;
+
+ if (index >= le32_to_cpu(n->header.nr_entries)) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ k = le64_to_cpu(n->keys[index]);
+ if (k >= keys[last_level] && k < end_key) {
+ if (info->value_type.dec)
+ info->value_type.dec(info->value_type.context,
+ value_ptr(n, index));
+
+ delete_at(n, index);
+ keys[last_level] = k + 1ull;
+
+ } else
+ r = -ENODATA;
+
+out:
+ *new_root = shadow_root(&spine);
+ exit_shadow_spine(&spine);
+
+ return r;
+}
+
+int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *first_key, uint64_t end_key,
+ dm_block_t *new_root, unsigned *nr_removed)
+{
+ int r;
+
+ *nr_removed = 0;
+ do {
+ r = remove_one(info, root, first_key, end_key, &root, nr_removed);
+ if (!r)
+ (*nr_removed)++;
+ } while (!r);
+
+ *new_root = root;
+ return r == -ENODATA ? 0 : r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_remove_leaves);
diff --git a/kernel/drivers/md/persistent-data/dm-btree-spine.c b/kernel/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13ec7..b27b8091a 100644
--- a/kernel/drivers/md/persistent-data/dm-btree-spine.c
+++ b/kernel/drivers/md/persistent-data/dm-btree-spine.c
@@ -117,9 +117,9 @@ int new_block(struct dm_btree_info *info, struct dm_block **result)
return dm_tm_new_block(info->tm, &btree_node_validator, result);
}
-int unlock_block(struct dm_btree_info *info, struct dm_block *b)
+void unlock_block(struct dm_btree_info *info, struct dm_block *b)
{
- return dm_tm_unlock(info->tm, b);
+ dm_tm_unlock(info->tm, b);
}
/*----------------------------------------------------------------*/
@@ -137,9 +137,7 @@ int exit_ro_spine(struct ro_spine *s)
int r = 0, i;
for (i = 0; i < s->count; i++) {
- int r2 = unlock_block(s->info, s->nodes[i]);
- if (r2 < 0)
- r = r2;
+ unlock_block(s->info, s->nodes[i]);
}
return r;
@@ -150,9 +148,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
int r;
if (s->count == 2) {
- r = unlock_block(s->info, s->nodes[0]);
- if (r < 0)
- return r;
+ unlock_block(s->info, s->nodes[0]);
s->nodes[0] = s->nodes[1];
s->count--;
}
@@ -194,9 +190,7 @@ int exit_shadow_spine(struct shadow_spine *s)
int r = 0, i;
for (i = 0; i < s->count; i++) {
- int r2 = unlock_block(s->info, s->nodes[i]);
- if (r2 < 0)
- r = r2;
+ unlock_block(s->info, s->nodes[i]);
}
return r;
@@ -208,9 +202,7 @@ int shadow_step(struct shadow_spine *s, dm_block_t b,
int r;
if (s->count == 2) {
- r = unlock_block(s->info, s->nodes[0]);
- if (r < 0)
- return r;
+ unlock_block(s->info, s->nodes[0]);
s->nodes[0] = s->nodes[1];
s->count--;
}
@@ -249,3 +241,40 @@ int shadow_root(struct shadow_spine *s)
{
return s->root;
}
+
+static void le64_inc(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+ __le64 v1_le, v2_le;
+
+ memcpy(&v1_le, value1_le, sizeof(v1_le));
+ memcpy(&v2_le, value2_le, sizeof(v2_le));
+ return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ vt->context = tm;
+ vt->size = sizeof(__le64);
+ vt->inc = le64_inc;
+ vt->dec = le64_dec;
+ vt->equal = le64_equal;
+}
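
init_le64_type() gives the internal (upper-level) btree nodes a value type whose inc/dec hooks forward to the transaction manager, so operating on an upper level adjusts the reference counts of the child blocks it points at rather than leaving them untouched. A userspace model of the callback shape; struct tm_model stands in for dm_transaction_manager, and the model skips the le64_to_cpu byte swap on the assumption of a little-endian host.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct tm_model {			/* stands in for dm_transaction_manager */
    	int refcount[16];
    };

    static void le64_inc(void *context, const void *value_le)
    {
    	struct tm_model *tm = context;
    	uint64_t v;

    	memcpy(&v, value_le, sizeof(v));	/* the stored copy may be unaligned */
    	tm->refcount[v]++;			/* models dm_tm_inc() */
    }

    static void le64_dec(void *context, const void *value_le)
    {
    	struct tm_model *tm = context;
    	uint64_t v;

    	memcpy(&v, value_le, sizeof(v));
    	tm->refcount[v]--;			/* models dm_tm_dec() */
    }

    static int le64_equal(void *context, const void *a_le, const void *b_le)
    {
    	uint64_t a, b;

    	(void)context;
    	memcpy(&a, a_le, sizeof(a));
    	memcpy(&b, b_le, sizeof(b));
    	return a == b;
    }

    int main(void)
    {
    	struct tm_model tm = { { 0 } };
    	uint64_t block = 7;	/* "stored" value; little-endian host assumed */

    	le64_inc(&tm, &block);
    	le64_inc(&tm, &block);
    	le64_dec(&tm, &block);
    	printf("block 7 refcount: %d, equal: %d\n",
    	       tm.refcount[7], le64_equal(&tm, &block, &block));
    	return 0;
    }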
diff --git a/kernel/drivers/md/persistent-data/dm-btree.c b/kernel/drivers/md/persistent-data/dm-btree.c
index fdd3793e2..b1ced58eb 100644
--- a/kernel/drivers/md/persistent-data/dm-btree.c
+++ b/kernel/drivers/md/persistent-data/dm-btree.c
@@ -63,6 +63,11 @@ int lower_bound(struct btree_node *n, uint64_t key)
return bsearch(n, key, 0);
}
+static int upper_bound(struct btree_node *n, uint64_t key)
+{
+ return bsearch(n, key, 1);
+}
+
void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt)
{
@@ -141,7 +146,9 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
n->header.value_size = cpu_to_le32(info->value_type.size);
*root = dm_block_location(b);
- return unlock_block(info, b);
+ unlock_block(info, b);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dm_btree_empty);
@@ -250,6 +257,16 @@ static void pop_frame(struct del_stack *s)
dm_tm_unlock(s->tm, f->b);
}
+static void unlock_all_frames(struct del_stack *s)
+{
+ struct frame *f;
+
+ while (unprocessed_frames(s)) {
+ f = s->spine + s->top--;
+ dm_tm_unlock(s->tm, f->b);
+ }
+}
+
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
int r;
@@ -306,9 +323,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
pop_frame(s);
}
}
-
out:
+ if (r) {
+ /* cleanup all frames of del_stack */
+ unlock_all_frames(s);
+ }
kfree(s);
+
return r;
}
EXPORT_SYMBOL_GPL(dm_btree_del);
@@ -390,6 +411,82 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
}
EXPORT_SYMBOL_GPL(dm_btree_lookup);
+static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
+ uint64_t key, uint64_t *rkey, void *value_le)
+{
+ int r, i;
+ uint32_t flags, nr_entries;
+ struct dm_block *node;
+ struct btree_node *n;
+
+ r = bn_read_lock(info, root, &node);
+ if (r)
+ return r;
+
+ n = dm_block_data(node);
+ flags = le32_to_cpu(n->header.flags);
+ nr_entries = le32_to_cpu(n->header.nr_entries);
+
+ if (flags & INTERNAL_NODE) {
+ i = lower_bound(n, key);
+ if (i < 0 || i >= nr_entries) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
+ if (r == -ENODATA && i < (nr_entries - 1)) {
+ i++;
+ r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
+ }
+
+ } else {
+ i = upper_bound(n, key);
+ if (i < 0 || i >= nr_entries) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ *rkey = le64_to_cpu(n->keys[i]);
+ memcpy(value_le, value_ptr(n, i), info->value_type.size);
+ }
+out:
+ dm_tm_unlock(info->tm, node);
+ return r;
+}
+
+int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, uint64_t *rkey, void *value_le)
+{
+ unsigned level;
+ int r = -ENODATA;
+ __le64 internal_value_le;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels - 1u; level++) {
+ r = btree_lookup_raw(&spine, root, keys[level],
+ lower_bound, rkey,
+ &internal_value_le, sizeof(uint64_t));
+ if (r)
+ goto out;
+
+ if (*rkey != keys[level]) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ root = le64_to_cpu(internal_value_le);
+ }
+
+ r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
+out:
+ exit_ro_spine(&spine);
+ return r;
+}
+
+EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
+
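
dm_btree_lookup_next() leans on two flavours of the node binary search: lower_bound() (last key <= the wanted key, used while descending internal nodes) and upper_bound() (first key >= the wanted key, used in the leaf so sparse regions are skipped). A self-contained sketch of that search over a plain sorted array, with bsearch_keys() as an assumed name:

    #include <stdio.h>
    #include <stdint.h>

    static int bsearch_keys(const uint64_t *keys, int n, uint64_t key, int want_hi)
    {
    	int lo = -1, hi = n;

    	while (hi - lo > 1) {
    		int mid = lo + (hi - lo) / 2;

    		if (keys[mid] == key)
    			return mid;
    		if (keys[mid] < key)
    			lo = mid;
    		else
    			hi = mid;
    	}
    	return want_hi ? hi : lo;	/* may be -1 or n when out of range */
    }

    int main(void)
    {
    	const uint64_t keys[] = { 10, 20, 30, 40 };

    	/* first key >= 25 is 30 (index 2); last key <= 25 is 20 (index 1) */
    	printf("upper_bound(25) = %d\n", bsearch_keys(keys, 4, 25, 1));
    	printf("lower_bound(25) = %d\n", bsearch_keys(keys, 4, 25, 0));
    	return 0;
    }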
/*
* Splits a node by creating a sibling node and shifting half the nodes
* contents across. Assumes there is a parent node, and it has room for
@@ -420,8 +517,8 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup);
*
* Where A* is a shadow of A.
*/
-static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
- unsigned parent_index, uint64_t key)
+static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
+ uint64_t key)
{
int r;
size_t size;
@@ -471,8 +568,10 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
r = insert_at(sizeof(__le64), pn, parent_index + 1,
le64_to_cpu(rn->keys[0]), &location);
- if (r)
+ if (r) {
+ unlock_block(s->info, right);
return r;
+ }
if (key < le64_to_cpu(rn->keys[0])) {
unlock_block(s->info, right);
@@ -523,7 +622,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
r = new_block(s->info, &right);
if (r < 0) {
- /* FIXME: put left */
+ unlock_block(s->info, left);
return r;
}
@@ -625,7 +724,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
if (top)
r = btree_split_beneath(s, key);
else
- r = btree_split_sibling(s, root, i, key);
+ r = btree_split_sibling(s, i, key);
if (r < 0)
return r;
@@ -667,12 +766,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
struct btree_node *n;
struct dm_btree_value_type le64_type;
- le64_type.context = NULL;
- le64_type.size = sizeof(__le64);
- le64_type.inc = NULL;
- le64_type.dec = NULL;
- le64_type.equal = NULL;
-
+ init_le64_type(info->tm, &le64_type);
init_shadow_spine(&spine, info);
for (level = 0; level < (info->levels - 1); level++) {
diff --git a/kernel/drivers/md/persistent-data/dm-btree.h b/kernel/drivers/md/persistent-data/dm-btree.h
index dacfc3418..c74301fa5 100644
--- a/kernel/drivers/md/persistent-data/dm-btree.h
+++ b/kernel/drivers/md/persistent-data/dm-btree.h
@@ -110,6 +110,13 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value_le);
/*
+ * Tries to find the first key where the bottom level key is >= the one
+ * given. Useful for skipping empty sections of the btree.
+ */
+int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, uint64_t *rkey, void *value_le);
+
+/*
* Insertion (or overwrite an existing value). O(ln(n))
*/
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
@@ -135,6 +142,16 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root);
/*
+ * Removes a _contiguous_ run of values starting from 'keys' and not
+ * reaching keys2 (where keys2 is keys with the final key replaced with
+ * 'end_key'). 'end_key' is the one-past-the-end value. 'keys' may be
+ * altered.
+ */
+int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, uint64_t end_key,
+ dm_block_t *new_root, unsigned *nr_removed);
+
+/*
* Returns < 0 on failure. Otherwise the number of key entries that have
* been filled out. Remember trees can have zero entries, and as such have
* no lowest key.
diff --git a/kernel/drivers/md/persistent-data/dm-space-map-common.c b/kernel/drivers/md/persistent-data/dm-space-map-common.c
index aacbe70c2..306d2e450 100644
--- a/kernel/drivers/md/persistent-data/dm-space-map-common.c
+++ b/kernel/drivers/md/persistent-data/dm-space-map-common.c
@@ -259,9 +259,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
idx.blocknr = cpu_to_le64(dm_block_location(b));
- r = dm_tm_unlock(ll->tm, b);
- if (r < 0)
- return r;
+ dm_tm_unlock(ll->tm, b);
idx.nr_free = cpu_to_le32(ll->entries_per_block);
idx.none_free_before = 0;
@@ -293,7 +291,9 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);
- return dm_tm_unlock(ll->tm, blk);
+ dm_tm_unlock(ll->tm, blk);
+
+ return 0;
}
static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
@@ -373,9 +373,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
return r;
}
- r = dm_tm_unlock(ll->tm, blk);
- if (r < 0)
- return r;
+ dm_tm_unlock(ll->tm, blk);
*result = i * ll->entries_per_block + (dm_block_t) position;
return 0;
@@ -429,9 +427,7 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
if (ref_count <= 2) {
sm_set_bitmap(bm_le, bit, ref_count);
- r = dm_tm_unlock(ll->tm, nb);
- if (r < 0)
- return r;
+ dm_tm_unlock(ll->tm, nb);
if (old > 2) {
r = dm_btree_remove(&ll->ref_count_info,
@@ -445,9 +441,7 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
__le32 le_rc = cpu_to_le32(ref_count);
sm_set_bitmap(bm_le, bit, 3);
- r = dm_tm_unlock(ll->tm, nb);
- if (r < 0)
- return r;
+ dm_tm_unlock(ll->tm, nb);
__dm_bless_for_disk(&le_rc);
r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
@@ -556,7 +550,9 @@ static int metadata_ll_init_index(struct ll_disk *ll)
memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
ll->bitmap_root = dm_block_location(b);
- return dm_tm_unlock(ll->tm, b);
+ dm_tm_unlock(ll->tm, b);
+
+ return 0;
}
static int metadata_ll_open(struct ll_disk *ll)
@@ -570,7 +566,9 @@ static int metadata_ll_open(struct ll_disk *ll)
return r;
memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
- return dm_tm_unlock(ll->tm, block);
+ dm_tm_unlock(ll->tm, block);
+
+ return 0;
}
static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
@@ -590,7 +588,9 @@ static int metadata_ll_commit(struct ll_disk *ll)
memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
ll->bitmap_root = dm_block_location(b);
- return dm_tm_unlock(ll->tm, b);
+ dm_tm_unlock(ll->tm, b);
+
+ return 0;
}
int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
diff --git a/kernel/drivers/md/persistent-data/dm-space-map-metadata.c b/kernel/drivers/md/persistent-data/dm-space-map-metadata.c
index 53091295f..7e4400559 100644
--- a/kernel/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/kernel/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb,
return 0;
}
-static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
{
struct block_op *bop;
@@ -147,6 +147,14 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
result->type = bop->type;
result->block = bop->block;
+ return 0;
+}
+
+static int brb_pop(struct bop_ring_buffer *brb)
+{
+ if (brb_empty(brb))
+ return -ENODATA;
+
brb->begin = brb_next(brb, brb->begin);
return 0;
@@ -211,7 +219,7 @@ static int apply_bops(struct sm_metadata *smm)
while (!brb_empty(&smm->uncommitted)) {
struct block_op bop;
- r = brb_pop(&smm->uncommitted, &bop);
+ r = brb_peek(&smm->uncommitted, &bop);
if (r) {
DMERR("bug in bop ring buffer");
break;
@@ -220,6 +228,8 @@ static int apply_bops(struct sm_metadata *smm)
r = commit_bop(smm, &bop);
if (r)
break;
+
+ brb_pop(&smm->uncommitted);
}
return r;
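
apply_bops() now peeks at the head of the uncommitted ring buffer, commits it, and only then pops it, so a failed commit_bop() leaves the operation queued instead of discarding it. A compact userspace model of that ordering (ring/peek/pop/commit names are illustrative):

    #include <stdio.h>

    #define RING 8

    struct ring {
    	int begin, end;
    	int ops[RING];
    };

    static int ring_empty(struct ring *r)	{ return r->begin == r->end; }
    static int ring_next(int i)		{ return (i + 1) % RING; }

    static void ring_push(struct ring *r, int op)
    {
    	r->ops[r->end] = op;
    	r->end = ring_next(r->end);
    }

    static int ring_peek(struct ring *r, int *op)
    {
    	if (ring_empty(r))
    		return -1;
    	*op = r->ops[r->begin];		/* look, but leave it queued */
    	return 0;
    }

    static void ring_pop(struct ring *r)	{ r->begin = ring_next(r->begin); }

    static int commit_op(int op)		{ return op == 3 ? -1 : 0; } /* op 3 fails */

    int main(void)
    {
    	struct ring r = { 0, 0, { 0 } };
    	int op, rc = 0;

    	for (op = 1; op <= 4; op++)
    		ring_push(&r, op);

    	while (!ring_empty(&r)) {
    		ring_peek(&r, &op);
    		rc = commit_op(op);
    		if (rc)
    			break;		/* the failed op stays queued */
    		ring_pop(&r);		/* only discard after a successful commit */
    	}
    	printf("stopped with rc=%d, %d op(s) still pending\n",
    	       rc, (r.end - r.begin + RING) % RING);
    	return 0;
    }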
@@ -683,7 +693,6 @@ static struct dm_space_map bootstrap_ops = {
static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
int r, i;
- enum allocation_event ev;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
dm_block_t old_len = smm->ll.nr_blocks;
@@ -705,11 +714,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
* allocate any new blocks.
*/
do {
- for (i = old_len; !r && i < smm->begin; i++) {
- r = sm_ll_inc(&smm->ll, i, &ev);
- if (r)
- goto out;
- }
+ for (i = old_len; !r && i < smm->begin; i++)
+ r = add_bop(smm, BOP_INC, i);
+
+ if (r)
+ goto out;
+
old_len = smm->begin;
r = apply_bops(smm);
@@ -754,7 +764,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
{
int r;
dm_block_t i;
- enum allocation_event ev;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
smm->begin = superblock + 1;
@@ -782,7 +791,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
* allocated blocks that they were built from.
*/
for (i = superblock; !r && i < smm->begin; i++)
- r = sm_ll_inc(&smm->ll, i, &ev);
+ r = add_bop(smm, BOP_INC, i);
if (r)
return r;
diff --git a/kernel/drivers/md/persistent-data/dm-transaction-manager.c b/kernel/drivers/md/persistent-data/dm-transaction-manager.c
index 9cb797d80..abe2c5dd0 100644
--- a/kernel/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/kernel/drivers/md/persistent-data/dm-transaction-manager.c
@@ -342,9 +342,9 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
}
EXPORT_SYMBOL_GPL(dm_tm_read_lock);
-int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
+void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
- return dm_bm_unlock(b);
+ dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);
diff --git a/kernel/drivers/md/persistent-data/dm-transaction-manager.h b/kernel/drivers/md/persistent-data/dm-transaction-manager.h
index 2e0d4d66f..f3a18be68 100644
--- a/kernel/drivers/md/persistent-data/dm-transaction-manager.h
+++ b/kernel/drivers/md/persistent-data/dm-transaction-manager.h
@@ -94,7 +94,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
struct dm_block_validator *v,
struct dm_block **result);
-int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);
+void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);
/*
* Functions for altering the reference count of a block directly.
diff --git a/kernel/drivers/md/raid0.c b/kernel/drivers/md/raid0.c
index efb654eb5..f8e5db0cb 100644
--- a/kernel/drivers/md/raid0.c
+++ b/kernel/drivers/md/raid0.c
@@ -83,7 +83,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
char b[BDEVNAME_SIZE];
char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- bool discard_supported = false;
+ unsigned short blksize = 512;
if (!conf)
return -ENOMEM;
@@ -98,6 +98,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
sector_div(sectors, mddev->chunk_sectors);
rdev1->sectors = sectors * mddev->chunk_sectors;
+ blksize = max(blksize, queue_logical_block_size(
+ rdev1->bdev->bd_disk->queue));
+
rdev_for_each(rdev2, mddev) {
pr_debug("md/raid0:%s: comparing %s(%llu)"
" with %s(%llu)\n",
@@ -134,6 +137,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);
+ /*
+ * now since we have the hard sector sizes, we can make sure
+ * chunk size is a multiple of that sector size
+ */
+ if ((mddev->chunk_sectors << 9) % blksize) {
+ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+ mdname(mddev),
+ mddev->chunk_sectors << 9, blksize);
+ err = -EINVAL;
+ goto abort;
+ }
+
err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
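
The chunk-size sanity check moves up to this point, where the largest logical block size seen across the member devices ('blksize' above) is already known, and the error message now reports both numbers. The check itself is a plain divisibility test, illustrated standalone below.

    #include <stdio.h>

    static int chunk_ok(unsigned chunk_sectors, unsigned blksize)
    {
    	/* chunk size in bytes must be a multiple of the logical block size */
    	return ((chunk_sectors << 9) % blksize) == 0;
    }

    int main(void)
    {
    	/* 512KiB chunks on 512B- and 4KiB-sector members: both fine */
    	printf("%d %d\n", chunk_ok(1024, 512), chunk_ok(1024, 4096));
    	/* a 3-sector (1536B) chunk on a 4KiB-sector member: rejected */
    	printf("%d\n", chunk_ok(3, 4096));
    	return 0;
    }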
@@ -188,19 +203,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
dev[j] = rdev1;
- if (mddev->queue)
- disk_stack_limits(mddev->gendisk, rdev1->bdev,
- rdev1->data_offset << 9);
-
- if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
- conf->has_merge_bvec = 1;
-
if (!smallest || (rdev1->sectors < smallest->sectors))
smallest = rdev1;
cnt++;
-
- if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
- discard_supported = true;
}
if (cnt != mddev->raid_disks) {
printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
@@ -261,28 +266,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
(unsigned long long)smallest->sectors);
}
- /*
- * now since we have the hard sector sizes, we can make sure
- * chunk size is a multiple of that sector size
- */
- if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
- printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
- mdname(mddev),
- mddev->chunk_sectors << 9);
- goto abort;
- }
-
- if (mddev->queue) {
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- blk_queue_io_opt(mddev->queue,
- (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
- if (!discard_supported)
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
- }
-
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -351,58 +334,6 @@ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
+ sector_div(sector, zone->nb_dev)];
}
-/**
- * raid0_mergeable_bvec -- tell bio layer if two requests can be merged
- * @mddev: the md device
- * @bvm: properties of new bio
- * @biovec: the request that could be merged to it.
- *
- * Return amount of bytes we can accept at this offset
- */
-static int raid0_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct r0conf *conf = mddev->private;
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- sector_t sector_offset = sector;
- int max;
- unsigned int chunk_sectors = mddev->chunk_sectors;
- unsigned int bio_sectors = bvm->bi_size >> 9;
- struct strip_zone *zone;
- struct md_rdev *rdev;
- struct request_queue *subq;
-
- if (is_power_of_2(chunk_sectors))
- max = (chunk_sectors - ((sector & (chunk_sectors-1))
- + bio_sectors)) << 9;
- else
- max = (chunk_sectors - (sector_div(sector, chunk_sectors)
- + bio_sectors)) << 9;
- if (max < 0)
- max = 0; /* bio_add cannot handle a negative return */
- if (max <= biovec->bv_len && bio_sectors == 0)
- return biovec->bv_len;
- if (max < biovec->bv_len)
- /* too small already, no need to check further */
- return max;
- if (!conf->has_merge_bvec)
- return max;
-
- /* May need to check subordinate device */
- sector = sector_offset;
- zone = find_zone(mddev->private, &sector_offset);
- rdev = map_sector(mddev, zone, sector, &sector_offset);
- subq = bdev_get_queue(rdev->bdev);
- if (subq->merge_bvec_fn) {
- bvm->bi_bdev = rdev->bdev;
- bvm->bi_sector = sector_offset + zone->dev_start +
- rdev->data_offset;
- return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
- } else
- return max;
-}
-
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
sector_t array_sectors = 0;
@@ -433,12 +364,6 @@ static int raid0_run(struct mddev *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
- if (mddev->queue) {
- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
- }
-
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
ret = create_strip_zones(mddev, &conf);
@@ -447,6 +372,29 @@ static int raid0_run(struct mddev *mddev)
mddev->private = conf;
}
conf = mddev->private;
+ if (mddev->queue) {
+ struct md_rdev *rdev;
+ bool discard_supported = false;
+
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+ rdev_for_each(rdev, mddev) {
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+ discard_supported = true;
+ }
+ if (!discard_supported)
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ }
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
@@ -543,7 +491,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely((split->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
- bio_endio(split, 0);
+ bio_endio(split);
} else
generic_make_request(split);
} while (split != bio);
@@ -727,7 +675,6 @@ static struct md_personality raid0_personality=
.takeover = raid0_takeover,
.quiesce = raid0_quiesce,
.congested = raid0_congested,
- .mergeable_bvec = raid0_mergeable_bvec,
};
static int __init raid0_init (void)
diff --git a/kernel/drivers/md/raid0.h b/kernel/drivers/md/raid0.h
index 05539d9c9..7127a623f 100644
--- a/kernel/drivers/md/raid0.h
+++ b/kernel/drivers/md/raid0.h
@@ -12,8 +12,6 @@ struct r0conf {
struct md_rdev **devlist; /* lists of rdevs, pointed to
* by strip_zone->dev */
int nr_strip_zones;
- int has_merge_bvec; /* at least one member has
- * a merge_bvec_fn */
};
#endif
diff --git a/kernel/drivers/md/raid1.c b/kernel/drivers/md/raid1.c
index 5ce3cd5c4..c4b913409 100644
--- a/kernel/drivers/md/raid1.c
+++ b/kernel/drivers/md/raid1.c
@@ -90,6 +90,8 @@ static void r1bio_pool_free(void *r1_bio, void *data)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
+#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
@@ -255,9 +257,10 @@ static void call_bio_endio(struct r1bio *r1_bio)
done = 1;
if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
+
if (done) {
- bio_endio(bio, 0);
+ bio_endio(bio);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
@@ -312,9 +315,9 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
return mirror;
}
-static void raid1_end_read_request(struct bio *bio, int error)
+static void raid1_end_read_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ int uptodate = !bio->bi_error;
struct r1bio *r1_bio = bio->bi_private;
int mirror;
struct r1conf *conf = r1_bio->mddev->private;
@@ -397,9 +400,8 @@ static void r1_bio_write_done(struct r1bio *r1_bio)
}
}
-static void raid1_end_write_request(struct bio *bio, int error)
+static void raid1_end_write_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r1bio *r1_bio = bio->bi_private;
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
struct r1conf *conf = r1_bio->mddev->private;
@@ -410,7 +412,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
/*
* 'one mirror IO has finished' event handler:
*/
- if (!uptodate) {
+ if (bio->bi_error) {
set_bit(WriteErrorSeen,
&conf->mirrors[mirror].rdev->flags);
if (!test_and_set_bit(WantReplacement,
@@ -541,7 +543,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if ((conf->mddev->recovery_cp < this_sector + sectors) ||
(mddev_is_clustered(conf->mddev) &&
- md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+ md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
this_sector + sectors)))
choose_first = 1;
else
@@ -557,7 +559,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (r1_bio->bios[disk] == IO_BLOCKED
|| rdev == NULL
- || test_bit(Unmerged, &rdev->flags)
|| test_bit(Faulty, &rdev->flags))
continue;
if (!test_bit(In_sync, &rdev->flags) &&
@@ -708,44 +709,12 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
return best_disk;
}
-static int raid1_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct r1conf *conf = mddev->private;
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- int max = biovec->bv_len;
-
- if (mddev->merge_check_needed) {
- int disk;
- rcu_read_lock();
- for (disk = 0; disk < conf->raid_disks * 2; disk++) {
- struct md_rdev *rdev = rcu_dereference(
- conf->mirrors[disk].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q =
- bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn) {
- bvm->bi_sector = sector +
- rdev->data_offset;
- bvm->bi_bdev = rdev->bdev;
- max = min(max, q->merge_bvec_fn(
- q, bvm, biovec));
- }
- }
- }
- rcu_read_unlock();
- }
- return max;
-
-}
-
static int raid1_congested(struct mddev *mddev, int bits)
{
struct r1conf *conf = mddev->private;
int i, ret = 0;
- if ((bits & (1 << BDI_async_congested)) &&
+ if ((bits & (1 << WB_async_congested)) &&
conf->pending_count >= max_queued_requests)
return 1;
@@ -760,7 +729,7 @@ static int raid1_congested(struct mddev *mddev, int bits)
/* Note the '|| 1' - when read_balance prefers
* non-congested targets, it can be removed
*/
- if ((bits & (1<<BDI_async_congested)) || 1)
+ if ((bits & (1 << WB_async_congested)) || 1)
ret |= bdi_congested(&q->backing_dev_info, bits);
else
ret &= bdi_congested(&q->backing_dev_info, bits);
@@ -793,7 +762,7 @@ static void flush_pending_writes(struct r1conf *conf)
if (unlikely((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
- bio_endio(bio, 0);
+ bio_endio(bio);
else
generic_make_request(bio);
bio = next;
@@ -914,8 +883,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
}
if (bio && bio_data_dir(bio) == WRITE) {
- if (bio->bi_iter.bi_sector >=
- conf->mddev->curr_resync_completed) {
+ if (bio->bi_iter.bi_sector >= conf->next_resync) {
if (conf->start_next_window == MaxSector)
conf->start_next_window =
conf->next_resync +
@@ -1068,7 +1036,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (unlikely((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
- bio_endio(bio, 0);
+ bio_endio(bio);
else
generic_make_request(bio);
bio = next;
@@ -1111,7 +1079,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
((bio_end_sector(bio) > mddev->suspend_lo &&
bio->bi_iter.bi_sector < mddev->suspend_hi) ||
(mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
+ md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1124,7 +1093,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
if (bio_end_sector(bio) <= mddev->suspend_lo ||
bio->bi_iter.bi_sector >= mddev->suspend_hi ||
(mddev_is_clustered(mddev) &&
- !md_cluster_ops->area_resyncing(mddev,
+ !md_cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
schedule();
@@ -1157,7 +1126,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
* non-zero, then it is the number of not-completed requests.
*/
bio->bi_phys_segments = 0;
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+ bio_clear_flag(bio, BIO_SEG_VALID);
if (rw == READ) {
/*
@@ -1268,8 +1237,7 @@ read_again:
break;
}
r1_bio->bios[i] = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags)
- || test_bit(Unmerged, &rdev->flags)) {
+ if (!rdev || test_bit(Faulty, &rdev->flags)) {
if (i < conf->raid_disks)
set_bit(R1BIO_Degraded, &r1_bio->state);
continue;
@@ -1507,6 +1475,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
printk(KERN_ALERT
"md/raid1:%s: Disk failure on %s, disabling device.\n"
"md/raid1:%s: Operation continuing on %d devices.\n",
@@ -1548,7 +1517,7 @@ static void close_sync(struct r1conf *conf)
conf->r1buf_pool = NULL;
spin_lock_irq(&conf->resync_lock);
- conf->next_resync = 0;
+ conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
conf->start_next_window = MaxSector;
conf->current_window_requests +=
conf->next_window_requests;
@@ -1616,18 +1585,24 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
struct raid1_info *p;
int first = 0;
int last = conf->raid_disks - 1;
- struct request_queue *q = bdev_get_queue(rdev->bdev);
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
+ if (md_integrity_add_rdev(rdev, mddev))
+ return -ENXIO;
+
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- if (q->merge_bvec_fn) {
- set_bit(Unmerged, &rdev->flags);
- mddev->merge_check_needed = 1;
- }
+ /*
+ * find the disk ... but prefer rdev->saved_raid_disk
+ * if possible.
+ */
+ if (rdev->saved_raid_disk >= 0 &&
+ rdev->saved_raid_disk >= first &&
+ conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
+ first = last = rdev->saved_raid_disk;
for (mirror = first; mirror <= last; mirror++) {
p = conf->mirrors+mirror;
@@ -1660,20 +1635,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
}
- if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
- /* Some requests might not have seen this new
- * merge_bvec_fn. We must wait for them to complete
- * before merging the device fully.
- * First we make sure any code which has tested
- * our function has submitted the request, then
- * we wait for all outstanding requests to complete.
- */
- synchronize_sched();
- freeze_array(conf, 0);
- unfreeze_array(conf);
- clear_bit(Unmerged, &rdev->flags);
- }
- md_integrity_add_rdev(rdev, mddev);
if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
@@ -1736,7 +1697,7 @@ abort:
return err;
}
-static void end_sync_read(struct bio *bio, int error)
+static void end_sync_read(struct bio *bio)
{
struct r1bio *r1_bio = bio->bi_private;
@@ -1747,16 +1708,16 @@ static void end_sync_read(struct bio *bio, int error)
* or re-read if the read failed.
* We don't do much here, just schedule handling by raid1d
*/
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (!bio->bi_error)
set_bit(R1BIO_Uptodate, &r1_bio->state);
if (atomic_dec_and_test(&r1_bio->remaining))
reschedule_retry(r1_bio);
}
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_write(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ int uptodate = !bio->bi_error;
struct r1bio *r1_bio = bio->bi_private;
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
@@ -1943,7 +1904,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
idx ++;
}
set_bit(R1BIO_Uptodate, &r1_bio->state);
- set_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = 0;
return 1;
}
@@ -1967,15 +1928,14 @@ static void process_checks(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int size;
- int uptodate;
+ int error;
struct bio *b = r1_bio->bios[i];
if (b->bi_end_io != end_sync_read)
continue;
- /* fixup the bio for reuse, but preserve BIO_UPTODATE */
- uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
+ /* fixup the bio for reuse, but preserve errno */
+ error = b->bi_error;
bio_reset(b);
- if (!uptodate)
- clear_bit(BIO_UPTODATE, &b->bi_flags);
+ b->bi_error = error;
b->bi_vcnt = vcnt;
b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
@@ -1998,7 +1958,7 @@ static void process_checks(struct r1bio *r1_bio)
}
for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
- test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
+ !r1_bio->bios[primary]->bi_error) {
r1_bio->bios[primary]->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
break;
@@ -2008,14 +1968,14 @@ static void process_checks(struct r1bio *r1_bio)
int j;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
- int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
+ int error = sbio->bi_error;
if (sbio->bi_end_io != end_sync_read)
continue;
- /* Now we can 'fixup' the BIO_UPTODATE flag */
- set_bit(BIO_UPTODATE, &sbio->bi_flags);
+ /* Now we can 'fixup' the error value */
+ sbio->bi_error = 0;
- if (uptodate) {
+ if (!error) {
for (j = vcnt; j-- ; ) {
struct page *p, *s;
p = pbio->bi_io_vec[j].bv_page;
@@ -2030,7 +1990,7 @@ static void process_checks(struct r1bio *r1_bio)
if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
- && uptodate)) {
+ && !error)) {
/* No need to write to this device. */
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2248,7 +2208,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) == 0)
+ if (submit_bio_wait(WRITE, wbio) < 0)
/* failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
@@ -2271,11 +2231,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
struct bio *bio = r1_bio->bios[m];
if (bio->bi_end_io == NULL)
continue;
- if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ if (!bio->bi_error &&
test_bit(R1BIO_MadeGood, &r1_bio->state)) {
rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
}
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ if (bio->bi_error &&
test_bit(R1BIO_WriteError, &r1_bio->state)) {
if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
md_error(conf->mddev, rdev);
@@ -2288,6 +2248,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
int m;
+ bool fail = false;
for (m = 0; m < conf->raid_disks * 2 ; m++)
if (r1_bio->bios[m] == IO_MADE_GOOD) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
@@ -2300,6 +2261,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
* narrow down and record precise write
* errors.
*/
+ fail = true;
if (!narrow_write_error(r1_bio, m)) {
md_error(conf->mddev,
conf->mirrors[m].rdev);
@@ -2309,9 +2271,16 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
rdev_dec_pending(conf->mirrors[m].rdev,
conf->mddev);
}
- if (test_bit(R1BIO_WriteError, &r1_bio->state))
- close_write(r1_bio);
- raid_end_bio_io(r1_bio);
+ if (fail) {
+ spin_lock_irq(&conf->device_lock);
+ list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(conf->mddev->thread);
+ } else {
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
+ raid_end_bio_io(r1_bio);
+ }
}
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
@@ -2417,6 +2386,27 @@ static void raid1d(struct md_thread *thread)
md_check_recovery(mddev);
+ if (!list_empty_careful(&conf->bio_end_io_list) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ LIST_HEAD(tmp);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ list_add(&tmp, &conf->bio_end_io_list);
+ list_del_init(&conf->bio_end_io_list);
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ while (!list_empty(&tmp)) {
+ r1_bio = list_first_entry(&tmp, struct r1bio,
+ retry_list);
+ list_del(&r1_bio->retry_list);
+ if (mddev->degraded)
+ set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
+ raid_end_bio_io(r1_bio);
+ }
+ }
+
blk_start_plug(&plug);
for (;;) {
@@ -2516,6 +2506,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
bitmap_close_sync(mddev->bitmap);
close_sync(conf);
+
+ if (mddev_is_clustered(mddev)) {
+ conf->cluster_sync_low = 0;
+ conf->cluster_sync_high = 0;
+ }
return 0;
}
@@ -2536,7 +2531,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
return sync_blocks;
}
- bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+ /* we are incrementing sector_nr below. To be safe, we check against
+ * sector_nr + two times RESYNC_SECTORS
+ */
+
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+ mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
raise_barrier(conf, sector_nr);
@@ -2714,7 +2714,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
/* remove last page from this bio */
bio->bi_vcnt--;
bio->bi_iter.bi_size -= len;
- __clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+ bio_clear_flag(bio, BIO_SEG_VALID);
}
goto bio_full;
}
@@ -2727,6 +2727,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
bio_full:
r1_bio->sectors = nr_sectors;
+ if (mddev_is_clustered(mddev) &&
+ conf->cluster_sync_high < sector_nr + nr_sectors) {
+ conf->cluster_sync_low = mddev->curr_resync_completed;
+ conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
+ /* Send resync message */
+ md_cluster_ops->resync_info_update(mddev,
+ conf->cluster_sync_low,
+ conf->cluster_sync_high);
+ }
+
/* For a user-requested sync, we read all readable devices and do a
* compare
*/
@@ -2809,8 +2819,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
goto abort;
disk->rdev = rdev;
q = bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn)
- mddev->merge_check_needed = 1;
disk->head_position = 0;
disk->seq_start = MaxSector;
@@ -2818,6 +2826,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev;
INIT_LIST_HEAD(&conf->retry_list);
+ INIT_LIST_HEAD(&conf->bio_end_io_list);
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
@@ -2871,8 +2880,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
abort:
if (conf) {
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
@@ -2974,8 +2982,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
{
struct r1conf *conf = priv;
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
@@ -3044,9 +3051,11 @@ static int raid1_reshape(struct mddev *mddev)
return -EINVAL;
}
- err = md_allow_write(mddev);
- if (err)
- return err;
+ if (!mddev_is_clustered(mddev)) {
+ err = md_allow_write(mddev);
+ if (err)
+ return err;
+ }
raid_disks = mddev->raid_disks + mddev->delta_disks;
@@ -3112,6 +3121,7 @@ static int raid1_reshape(struct mddev *mddev)
unfreeze_array(conf);
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -3175,7 +3185,6 @@ static struct md_personality raid1_personality =
.quiesce = raid1_quiesce,
.takeover = raid1_takeover,
.congested = raid1_congested,
- .mergeable_bvec = raid1_mergeable_bvec,
};
static int __init raid_init(void)
diff --git a/kernel/drivers/md/raid1.h b/kernel/drivers/md/raid1.h
index 14ebb288c..61c39b390 100644
--- a/kernel/drivers/md/raid1.h
+++ b/kernel/drivers/md/raid1.h
@@ -61,6 +61,11 @@ struct r1conf {
* block, or anything else.
*/
struct list_head retry_list;
+ /* A separate list of r1bio which just need raid_end_bio_io called.
+ * This mustn't happen for writes which had any errors if the superblock
+ * needs to be written.
+ */
+ struct list_head bio_end_io_list;
/* queue pending writes to be submitted on unplug */
struct bio_list pending_bio_list;
@@ -106,6 +111,13 @@ struct r1conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+
+ /* Keep track of cluster resync window to send to other
+ * nodes.
+ */
+ sector_t cluster_sync_low;
+ sector_t cluster_sync_high;
+
};
/*
diff --git a/kernel/drivers/md/raid10.c b/kernel/drivers/md/raid10.c
index fe0122771..ce959b4ae 100644
--- a/kernel/drivers/md/raid10.c
+++ b/kernel/drivers/md/raid10.c
@@ -39,6 +39,7 @@
* far_copies (stored in second byte of layout)
* far_offset (stored in bit 16 of layout )
* use_far_sets (stored in bit 17 of layout )
+ * use_far_sets_bugfixed (stored in bit 18 of layout )
*
* The data to be stored is divided into chunks using chunksize. Each device
* is divided into far_copies sections. In each section, chunks are laid out
@@ -101,7 +102,7 @@ static int _enough(struct r10conf *conf, int previous, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
-static void end_reshape_write(struct bio *bio, int error);
+static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
@@ -307,9 +308,9 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
} else
done = 1;
if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
if (done) {
- bio_endio(bio, 0);
+ bio_endio(bio);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
@@ -358,9 +359,9 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
return r10_bio->devs[slot].devnum;
}
-static void raid10_end_read_request(struct bio *bio, int error)
+static void raid10_end_read_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ int uptodate = !bio->bi_error;
struct r10bio *r10_bio = bio->bi_private;
int slot, dev;
struct md_rdev *rdev;
@@ -438,9 +439,8 @@ static void one_write_done(struct r10bio *r10_bio)
}
}
-static void raid10_end_write_request(struct bio *bio, int error)
+static void raid10_end_write_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r10bio *r10_bio = bio->bi_private;
int dev;
int dec_rdev = 1;
@@ -460,7 +460,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (!uptodate) {
+ if (bio->bi_error) {
if (repl)
/* Never record new bad blocks to replacement,
* just fail it.
@@ -672,93 +672,6 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
return (vchunk << geo->chunk_shift) + offset;
}
-/**
- * raid10_mergeable_bvec -- tell bio layer if a two requests can be merged
- * @mddev: the md device
- * @bvm: properties of new bio
- * @biovec: the request that could be merged to it.
- *
- * Return amount of bytes we can accept at this offset
- * This requires checking for end-of-chunk if near_copies != raid_disks,
- * and for subordinate merge_bvec_fns if merge_check_needed.
- */
-static int raid10_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct r10conf *conf = mddev->private;
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- int max;
- unsigned int chunk_sectors;
- unsigned int bio_sectors = bvm->bi_size >> 9;
- struct geom *geo = &conf->geo;
-
- chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
- if (conf->reshape_progress != MaxSector &&
- ((sector >= conf->reshape_progress) !=
- conf->mddev->reshape_backwards))
- geo = &conf->prev;
-
- if (geo->near_copies < geo->raid_disks) {
- max = (chunk_sectors - ((sector & (chunk_sectors - 1))
- + bio_sectors)) << 9;
- if (max < 0)
- /* bio_add cannot handle a negative return */
- max = 0;
- if (max <= biovec->bv_len && bio_sectors == 0)
- return biovec->bv_len;
- } else
- max = biovec->bv_len;
-
- if (mddev->merge_check_needed) {
- struct {
- struct r10bio r10_bio;
- struct r10dev devs[conf->copies];
- } on_stack;
- struct r10bio *r10_bio = &on_stack.r10_bio;
- int s;
- if (conf->reshape_progress != MaxSector) {
- /* Cannot give any guidance during reshape */
- if (max <= biovec->bv_len && bio_sectors == 0)
- return biovec->bv_len;
- return 0;
- }
- r10_bio->sector = sector;
- raid10_find_phys(conf, r10_bio);
- rcu_read_lock();
- for (s = 0; s < conf->copies; s++) {
- int disk = r10_bio->devs[s].devnum;
- struct md_rdev *rdev = rcu_dereference(
- conf->mirrors[disk].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q =
- bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn) {
- bvm->bi_sector = r10_bio->devs[s].addr
- + rdev->data_offset;
- bvm->bi_bdev = rdev->bdev;
- max = min(max, q->merge_bvec_fn(
- q, bvm, biovec));
- }
- }
- rdev = rcu_dereference(conf->mirrors[disk].replacement);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q =
- bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn) {
- bvm->bi_sector = r10_bio->devs[s].addr
- + rdev->data_offset;
- bvm->bi_bdev = rdev->bdev;
- max = min(max, q->merge_bvec_fn(
- q, bvm, biovec));
- }
- }
- }
- rcu_read_unlock();
- }
- return max;
-}
-
/*
* This routine returns the disk from which the requested read should
* be done. There is a per-array 'next expected sequential IO' sector
@@ -821,12 +734,10 @@ retry:
disk = r10_bio->devs[slot].devnum;
rdev = rcu_dereference(conf->mirrors[disk].replacement);
if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
- test_bit(Unmerged, &rdev->flags) ||
r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (rdev == NULL ||
- test_bit(Faulty, &rdev->flags) ||
- test_bit(Unmerged, &rdev->flags))
+ test_bit(Faulty, &rdev->flags))
continue;
if (!test_bit(In_sync, &rdev->flags) &&
r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
@@ -914,7 +825,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
struct r10conf *conf = mddev->private;
int i, ret = 0;
- if ((bits & (1 << BDI_async_congested)) &&
+ if ((bits & (1 << WB_async_congested)) &&
conf->pending_count >= max_queued_requests)
return 1;
@@ -957,7 +868,7 @@ static void flush_pending_writes(struct r10conf *conf)
if (unlikely((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
- bio_endio(bio, 0);
+ bio_endio(bio);
else
generic_make_request(bio);
bio = next;
@@ -1133,7 +1044,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (unlikely((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
- bio_endio(bio, 0);
+ bio_endio(bio);
else
generic_make_request(bio);
bio = next;
@@ -1217,7 +1128,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
* non-zero, then it is the number of not-completed requests.
*/
bio->bi_phys_segments = 0;
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+ bio_clear_flag(bio, BIO_SEG_VALID);
if (rw == READ) {
/*
@@ -1326,11 +1237,9 @@ retry_write:
blocked_rdev = rrdev;
break;
}
- if (rdev && (test_bit(Faulty, &rdev->flags)
- || test_bit(Unmerged, &rdev->flags)))
+ if (rdev && (test_bit(Faulty, &rdev->flags)))
rdev = NULL;
- if (rrdev && (test_bit(Faulty, &rrdev->flags)
- || test_bit(Unmerged, &rrdev->flags)))
+ if (rrdev && (test_bit(Faulty, &rrdev->flags)))
rrdev = NULL;
r10_bio->devs[i].bio = NULL;
@@ -1589,6 +1498,8 @@ static void status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
else
seq_printf(seq, " %d far-copies", conf->geo.far_copies);
+ if (conf->geo.far_set_size != conf->geo.raid_disks)
+ seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
}
seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
conf->geo.raid_disks - mddev->degraded);
@@ -1681,6 +1592,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock_irqrestore(&conf->device_lock, flags);
printk(KERN_ALERT
"md/raid10:%s: Disk failure on %s, disabling device.\n"
@@ -1777,7 +1689,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int mirror;
int first = 0;
int last = conf->geo.raid_disks - 1;
- struct request_queue *q = bdev_get_queue(rdev->bdev);
if (mddev->recovery_cp < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
@@ -1787,14 +1698,12 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
return -EINVAL;
+ if (md_integrity_add_rdev(rdev, mddev))
+ return -ENXIO;
+
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- if (q->merge_bvec_fn) {
- set_bit(Unmerged, &rdev->flags);
- mddev->merge_check_needed = 1;
- }
-
if (rdev->saved_raid_disk >= first &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
mirror = rdev->saved_raid_disk;
@@ -1833,20 +1742,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
- if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
- /* Some requests might not have seen this new
- * merge_bvec_fn. We must wait for them to complete
- * before merging the device fully.
- * First we make sure any code which has tested
- * our function has submitted the request, then
- * we wait for all outstanding requests to complete.
- */
- synchronize_sched();
- freeze_array(conf, 0);
- unfreeze_array(conf);
- clear_bit(Unmerged, &rdev->flags);
- }
- md_integrity_add_rdev(rdev, mddev);
if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
@@ -1916,7 +1811,7 @@ abort:
return err;
}
-static void end_sync_read(struct bio *bio, int error)
+static void end_sync_read(struct bio *bio)
{
struct r10bio *r10_bio = bio->bi_private;
struct r10conf *conf = r10_bio->mddev->private;
@@ -1928,7 +1823,7 @@ static void end_sync_read(struct bio *bio, int error)
} else
d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (!bio->bi_error)
set_bit(R10BIO_Uptodate, &r10_bio->state);
else
/* The write handler will notice the lack of
@@ -1977,9 +1872,8 @@ static void end_sync_request(struct r10bio *r10_bio)
}
}
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_write(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r10bio *r10_bio = bio->bi_private;
struct mddev *mddev = r10_bio->mddev;
struct r10conf *conf = mddev->private;
@@ -1996,7 +1890,7 @@ static void end_sync_write(struct bio *bio, int error)
else
rdev = conf->mirrors[d].rdev;
- if (!uptodate) {
+ if (bio->bi_error) {
if (repl)
md_error(mddev, rdev);
else {
@@ -2044,7 +1938,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
/* find the first device with a block */
for (i=0; i<conf->copies; i++)
- if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
+ if (!r10_bio->devs[i].bio->bi_error)
break;
if (i == conf->copies)
@@ -2052,6 +1946,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
first = i;
fbio = r10_bio->devs[i].bio;
+ fbio->bi_iter.bi_size = r10_bio->sectors << 9;
+ fbio->bi_iter.bi_idx = 0;
vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
/* now find blocks with errors */
@@ -2064,7 +1960,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue;
if (i == first)
continue;
- if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
+ if (!r10_bio->devs[i].bio->bi_error) {
/* We know that the bi_io_vec layout is the same for
* both 'first' and 'i', so we just compare them.
* All vec entries are PAGE_SIZE;
@@ -2095,21 +1991,14 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_reset(tbio);
tbio->bi_vcnt = vcnt;
- tbio->bi_iter.bi_size = r10_bio->sectors << 9;
+ tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
-
- for (j=0; j < vcnt ; j++) {
- tbio->bi_io_vec[j].bv_offset = 0;
- tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
-
- memcpy(page_address(tbio->bi_io_vec[j].bv_page),
- page_address(fbio->bi_io_vec[j].bv_page),
- PAGE_SIZE);
- }
tbio->bi_end_io = end_sync_write;
+ bio_copy_data(tbio, fbio);
+
d = r10_bio->devs[i].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
@@ -2124,17 +2013,14 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* that are active
*/
for (i = 0; i < conf->copies; i++) {
- int j, d;
+ int d;
tbio = r10_bio->devs[i].repl_bio;
if (!tbio || !tbio->bi_end_io)
continue;
if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
&& r10_bio->devs[i].bio != fbio)
- for (j = 0; j < vcnt; j++)
- memcpy(page_address(tbio->bi_io_vec[j].bv_page),
- page_address(fbio->bi_io_vec[j].bv_page),
- PAGE_SIZE);
+ bio_copy_data(tbio, fbio);
d = r10_bio->devs[i].devnum;
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].replacement->bdev,
@@ -2404,7 +2290,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
- !test_bit(Unmerged, &rdev->flags) &&
test_bit(In_sync, &rdev->flags) &&
is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
&first_bad, &bad_sectors) == 0) {
@@ -2458,7 +2343,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
if (!rdev ||
- test_bit(Unmerged, &rdev->flags) ||
!test_bit(In_sync, &rdev->flags))
continue;
@@ -2590,7 +2474,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) == 0)
+ if (submit_bio_wait(WRITE, wbio) < 0)
/* Failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
@@ -2716,8 +2600,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev = conf->mirrors[dev].rdev;
if (r10_bio->devs[m].bio == NULL)
continue;
- if (test_bit(BIO_UPTODATE,
- &r10_bio->devs[m].bio->bi_flags)) {
+ if (!r10_bio->devs[m].bio->bi_error) {
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
@@ -2732,8 +2615,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev = conf->mirrors[dev].replacement;
if (r10_bio->devs[m].repl_bio == NULL)
continue;
- if (test_bit(BIO_UPTODATE,
- &r10_bio->devs[m].repl_bio->bi_flags)) {
+
+ if (!r10_bio->devs[m].repl_bio->bi_error) {
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
@@ -2748,6 +2631,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
}
put_buf(r10_bio);
} else {
+ bool fail = false;
for (m = 0; m < conf->copies; m++) {
int dev = r10_bio->devs[m].devnum;
struct bio *bio = r10_bio->devs[m].bio;
@@ -2758,8 +2642,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
r10_bio->devs[m].addr,
r10_bio->sectors, 0);
rdev_dec_pending(rdev, conf->mddev);
- } else if (bio != NULL &&
- !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ } else if (bio != NULL && bio->bi_error) {
+ fail = true;
if (!narrow_write_error(r10_bio, m)) {
md_error(conf->mddev, rdev);
set_bit(R10BIO_Degraded,
@@ -2777,10 +2661,17 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_dec_pending(rdev, conf->mddev);
}
}
- if (test_bit(R10BIO_WriteError,
- &r10_bio->state))
- close_write(r10_bio);
- raid_end_bio_io(r10_bio);
+ if (fail) {
+ spin_lock_irq(&conf->device_lock);
+ list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(conf->mddev->thread);
+ } else {
+ if (test_bit(R10BIO_WriteError,
+ &r10_bio->state))
+ close_write(r10_bio);
+ raid_end_bio_io(r10_bio);
+ }
}
}
@@ -2795,6 +2686,29 @@ static void raid10d(struct md_thread *thread)
md_check_recovery(mddev);
+ if (!list_empty_careful(&conf->bio_end_io_list) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ LIST_HEAD(tmp);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ list_add(&tmp, &conf->bio_end_io_list);
+ list_del_init(&conf->bio_end_io_list);
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ while (!list_empty(&tmp)) {
+ r10_bio = list_first_entry(&tmp, struct r10bio,
+ retry_list);
+ list_del(&r10_bio->retry_list);
+ if (mddev->degraded)
+ set_bit(R10BIO_Degraded, &r10_bio->state);
+
+ if (test_bit(R10BIO_WriteError,
+ &r10_bio->state))
+ close_write(r10_bio);
+ raid_end_bio_io(r10_bio);
+ }
+ }
+
blk_start_plug(&plug);
for (;;) {
@@ -3237,7 +3151,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* resync. Schedule a read for every block at this virt offset */
int count = 0;
- bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
if (!bitmap_start_sync(mddev->bitmap, sector_nr,
&sync_blocks, mddev->degraded) &&
@@ -3273,7 +3187,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[i].bio;
bio_reset(bio);
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
if (conf->mirrors[d].rdev == NULL ||
test_bit(Faulty, &conf->mirrors[d].rdev->flags))
continue;
@@ -3310,7 +3224,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* Need to set up for writing to the replacement */
bio = r10_bio->devs[i].repl_bio;
bio_reset(bio);
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
sector = r10_bio->devs[i].addr;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
@@ -3367,7 +3281,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* remove last page from this bio */
bio2->bi_vcnt--;
bio2->bi_iter.bi_size -= len;
- __clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
+ bio_clear_flag(bio2, BIO_SEG_VALID);
}
goto bio_full;
}
@@ -3387,7 +3301,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) {
md_sync_acct(bio->bi_bdev, nr_sectors);
- set_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = 0;
generic_make_request(bio);
}
}
@@ -3487,7 +3401,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
disks = mddev->raid_disks + mddev->delta_disks;
break;
}
- if (layout >> 18)
+ if (layout >> 19)
return -1;
if (chunk < (PAGE_SIZE >> 9) ||
!is_power_of_2(chunk))
@@ -3499,7 +3413,22 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
geo->near_copies = nc;
geo->far_copies = fc;
geo->far_offset = fo;
- geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
+ switch (layout >> 17) {
+ case 0: /* original layout. simple but not always optimal */
+ geo->far_set_size = disks;
+ break;
+ case 1: /* "improved" layout which was buggy. Hopefully no-one is
+ * actually using this, but leave code here just in case.*/
+ geo->far_set_size = disks/fc;
+ WARN(geo->far_set_size < fc,
+ "This RAID10 layout does not provide data safety - please backup and create new array\n");
+ break;
+ case 2: /* "improved" layout fixed to match documentation */
+ geo->far_set_size = fc * nc;
+ break;
+ default: /* Not a valid layout */
+ return -1;
+ }
geo->chunk_mask = chunk - 1;
geo->chunk_shift = ffz(~chunk);
return nc*fc;
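The three far_set_size variants above are easier to see with concrete numbers. A standalone sketch (not part of the patch; the values are purely illustrative, e.g. 6 disks with far_copies=2 and near_copies=1):

static int example_far_set_size(int variant, int disks, int fc, int nc)
{
	switch (variant) {
	case 0: return disks;		/* original layout: 6 */
	case 1: return disks / fc;	/* buggy "improved" layout: 3 */
	case 2: return fc * nc;		/* fixed "improved" layout: 2 */
	default: return -1;		/* not a valid layout */
	}
}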
@@ -3569,6 +3498,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
conf->reshape_safe = conf->reshape_progress;
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
+ INIT_LIST_HEAD(&conf->bio_end_io_list);
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
@@ -3585,8 +3515,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
mdname(mddev));
if (conf) {
- if (conf->r10bio_pool)
- mempool_destroy(conf->r10bio_pool);
+ mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf);
@@ -3653,8 +3582,6 @@ static int run(struct mddev *mddev)
disk->rdev = rdev;
}
q = bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn)
- mddev->merge_check_needed = 1;
diff = (rdev->new_data_offset - rdev->data_offset);
if (!mddev->reshape_backwards)
diff = -diff;
@@ -3783,8 +3710,7 @@ static int run(struct mddev *mddev)
out_free_conf:
md_unregister_thread(&mddev->thread);
- if (conf->r10bio_pool)
- mempool_destroy(conf->r10bio_pool);
+ mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf);
@@ -3797,8 +3723,7 @@ static void raid10_free(struct mddev *mddev, void *priv)
{
struct r10conf *conf = priv;
- if (conf->r10bio_pool)
- mempool_destroy(conf->r10bio_pool);
+ mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf->mirrors_old);
@@ -4225,7 +4150,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
* at a time, possibly less if that exceeds RESYNC_PAGES,
* or we hit a bad block or something.
* This might mean we pause for normal IO in the middle of
- * a chunk, but that is not a problem was mddev->reshape_position
+ * a chunk, but that is not a problem as mddev->reshape_position
* can record any location.
*
* If we will want to write to a location that isn't
@@ -4249,7 +4174,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
*
* In all this the minimum difference in data offsets
* (conf->offset_diff - always positive) allows a bit of slack,
- * so next can be after 'safe', but not by more than offset_disk
+ * so next can be after 'safe', but not by more than offset_diff
*
* We need to prepare all the bios here before we start any IO
* to ensure the size we choose is acceptable to all devices.
@@ -4392,7 +4317,7 @@ read_more:
read_bio->bi_end_io = end_sync_read;
read_bio->bi_rw = READ;
read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
- __set_bit(BIO_UPTODATE, &read_bio->bi_flags);
+ read_bio->bi_error = 0;
read_bio->bi_vcnt = 0;
read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
@@ -4449,7 +4374,7 @@ read_more:
/* Remove last page from this bio */
bio2->bi_vcnt--;
bio2->bi_iter.bi_size -= len;
- __clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
+ bio_clear_flag(bio2, BIO_SEG_VALID);
}
goto bio_full;
}
@@ -4614,9 +4539,8 @@ static int handle_reshape_read_error(struct mddev *mddev,
return 0;
}
-static void end_reshape_write(struct bio *bio, int error)
+static void end_reshape_write(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r10bio *r10_bio = bio->bi_private;
struct mddev *mddev = r10_bio->mddev;
struct r10conf *conf = mddev->private;
@@ -4633,7 +4557,7 @@ static void end_reshape_write(struct bio *bio, int error)
rdev = conf->mirrors[d].rdev;
}
- if (!uptodate) {
+ if (bio->bi_error) {
/* FIXME should record badblock */
md_error(mddev, rdev);
}
@@ -4710,7 +4634,6 @@ static struct md_personality raid10_personality =
.start_reshape = raid10_start_reshape,
.finish_reshape = raid10_finish_reshape,
.congested = raid10_congested,
- .mergeable_bvec = raid10_mergeable_bvec,
};
static int __init raid_init(void)
diff --git a/kernel/drivers/md/raid10.h b/kernel/drivers/md/raid10.h
index 5ee6473dd..6fc2c7575 100644
--- a/kernel/drivers/md/raid10.h
+++ b/kernel/drivers/md/raid10.h
@@ -53,6 +53,12 @@ struct r10conf {
sector_t offset_diff;
struct list_head retry_list;
+ /* A separate list of r10bio which just need raid_end_bio_io called.
+ * This mustn't happen for writes which had any errors if the superblock
+ * needs to be written.
+ */
+ struct list_head bio_end_io_list;
+
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
int pending_count;
diff --git a/kernel/drivers/md/raid5-cache.c b/kernel/drivers/md/raid5-cache.c
new file mode 100644
index 000000000..b887e04d7
--- /dev/null
+++ b/kernel/drivers/md/raid5-cache.c
@@ -0,0 +1,1191 @@
+/*
+ * Copyright (C) 2015 Shaohua Li <shli@fb.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/raid/md_p.h>
+#include <linux/crc32c.h>
+#include <linux/random.h>
+#include "md.h"
+#include "raid5.h"
+
+/*
+ * metadata/data are stored on disk in 4k units (blocks) regardless of the
+ * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
+ */
+#define BLOCK_SECTORS (8)
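One way to make the PAGE_SIZE == 4096 assumption above explicit would be a compile-time check; this is a hypothetical helper, not part of the patch, and BUILD_BUG_ON has to sit inside a function (e.g. the log init path):

static inline void r5l_check_block_size(void)	/* hypothetical */
{
	/* one log block is 8 sectors * 512 bytes = 4096 bytes = one page */
	BUILD_BUG_ON(BLOCK_SECTORS * 512 != PAGE_SIZE);
}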
+
+/*
+ * reclaim runs once reclaimable space reaches 1/4 of the disk size or 10G.
+ * This prevents recovery from having to scan a very long log.
+ */
+#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
+#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
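A rough sketch of how the two limits above could combine into the reclaim threshold (the real derivation lives in the log setup code, which is outside this hunk, so treat this as an assumption about its behaviour):

static sector_t example_reclaim_threshold(sector_t log_device_size)
{
	/* 1/4 of the log device, capped at 10G; both values are in sectors */
	sector_t by_fraction = log_device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;

	return min_t(sector_t, by_fraction, RECLAIM_MAX_FREE_SPACE);
}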
+
+struct r5l_log {
+ struct md_rdev *rdev;
+
+ u32 uuid_checksum;
+
+ sector_t device_size; /* log device size, rounded to
+ * BLOCK_SECTORS */
+ sector_t max_free_space; /* reclaim runs if free space reaches
+ * this size */
+
+ sector_t last_checkpoint; /* log tail. where recovery scan
+ * starts from */
+ u64 last_cp_seq; /* log tail sequence */
+
+ sector_t log_start; /* log head. where new data appends */
+ u64 seq; /* log head sequence */
+
+ sector_t next_checkpoint;
+ u64 next_cp_seq;
+
+ struct mutex io_mutex;
+ struct r5l_io_unit *current_io; /* current io_unit accepting new data */
+
+ spinlock_t io_list_lock;
+ struct list_head running_ios; /* io_units which are still running,
+ * and have not yet been completely
+ * written to the log */
+ struct list_head io_end_ios; /* io_units which have been completely
+ * written to the log but not yet written
+ * to the RAID */
+ struct list_head flushing_ios; /* io_units which are waiting for log
+ * cache flush */
+ struct list_head finished_ios; /* io_units which settle down in log disk */
+ struct bio flush_bio;
+
+ struct kmem_cache *io_kc;
+
+ struct md_thread *reclaim_thread;
+ unsigned long reclaim_target; /* amount of space that needs to be
+ * reclaimed. if it's 0, reclaim spaces
+ * used by io_units which are in
+ * IO_UNIT_STRIPE_END state (i.e. reclaim
+ * doesn't wait for a specific io_unit
+ * to switch to IO_UNIT_STRIPE_END
+ * state) */
+ wait_queue_head_t iounit_wait;
+
+ struct list_head no_space_stripes; /* pending stripes, log has no space */
+ spinlock_t no_space_stripes_lock;
+
+ bool need_cache_flush;
+ bool in_teardown;
+};
+
+/*
+ * an IO range starts at a meta data block and ends at the next meta data
+ * block. The io unit's meta data block tracks the data/parity that follows
+ * it. The io unit is written to the log disk with a normal write; since we
+ * always flush the log disk first and only then start moving data to the
+ * raid disks, there is no need to write the io unit with FLUSH/FUA.
+ */
+struct r5l_io_unit {
+ struct r5l_log *log;
+
+ struct page *meta_page; /* store meta block */
+ int meta_offset; /* current offset in meta_page */
+
+ struct bio *current_bio;/* current_bio accepting new data */
+
+ atomic_t pending_stripe;/* how many stripes not flushed to raid */
+ u64 seq; /* seq number of the metablock */
+ sector_t log_start; /* where the io_unit starts */
+ sector_t log_end; /* where the io_unit ends */
+ struct list_head log_sibling; /* log->running_ios */
+ struct list_head stripe_list; /* stripes added to the io_unit */
+
+ int state;
+ bool need_split_bio;
+};
+
+/* r5l_io_unit state */
+enum r5l_io_unit_state {
+ IO_UNIT_RUNNING = 0, /* accepting new IO */
+ IO_UNIT_IO_START = 1, /* io_unit bio started writing to the log,
+ * no longer accepting new bio */
+ IO_UNIT_IO_END = 2, /* io_unit bio finished writing to the log */
+ IO_UNIT_STRIPE_END = 3, /* stripe data finished writing to raid */
+};
+
+static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
+{
+ start += inc;
+ if (start >= log->device_size)
+ start = start - log->device_size;
+ return start;
+}
+
+static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
+ sector_t end)
+{
+ if (end >= start)
+ return end - start;
+ else
+ return end + log->device_size - start;
+}
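A tiny worked example of the ring arithmetic above (values are illustrative only):

/* with log->device_size == 1024 sectors:
 *   r5l_ring_add(log, 1000, 48)      -> 24  (the position wraps past the end)
 *   r5l_ring_distance(log, 1000, 24) -> 48  (the distance accounts for the wrap)
 */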
+
+static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
+{
+ sector_t used_size;
+
+ used_size = r5l_ring_distance(log, log->last_checkpoint,
+ log->log_start);
+
+ return log->device_size > used_size + size;
+}
+
+static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
+{
+ __free_page(io->meta_page);
+ kmem_cache_free(log->io_kc, io);
+}
+
+static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
+ enum r5l_io_unit_state state)
+{
+ struct r5l_io_unit *io;
+
+ while (!list_empty(from)) {
+ io = list_first_entry(from, struct r5l_io_unit, log_sibling);
+ /* don't change list order */
+ if (io->state >= state)
+ list_move_tail(&io->log_sibling, to);
+ else
+ break;
+ }
+}
+
+static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
+ enum r5l_io_unit_state state)
+{
+ if (WARN_ON(io->state >= state))
+ return;
+ io->state = state;
+}
+
+static void r5l_io_run_stripes(struct r5l_io_unit *io)
+{
+ struct stripe_head *sh, *next;
+
+ list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
+ list_del_init(&sh->log_list);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ raid5_release_stripe(sh);
+ }
+}
+
+static void r5l_log_run_stripes(struct r5l_log *log)
+{
+ struct r5l_io_unit *io, *next;
+
+ assert_spin_locked(&log->io_list_lock);
+
+ list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
+ /* don't change list order */
+ if (io->state < IO_UNIT_IO_END)
+ break;
+
+ list_move_tail(&io->log_sibling, &log->finished_ios);
+ r5l_io_run_stripes(io);
+ }
+}
+
+static void r5l_log_endio(struct bio *bio)
+{
+ struct r5l_io_unit *io = bio->bi_private;
+ struct r5l_log *log = io->log;
+ unsigned long flags;
+
+ if (bio->bi_error)
+ md_error(log->rdev->mddev, log->rdev);
+
+ bio_put(bio);
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+ if (log->need_cache_flush)
+ r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
+ IO_UNIT_IO_END);
+ else
+ r5l_log_run_stripes(log);
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+ if (log->need_cache_flush)
+ md_wakeup_thread(log->rdev->mddev->thread);
+}
+
+static void r5l_submit_current_io(struct r5l_log *log)
+{
+ struct r5l_io_unit *io = log->current_io;
+ struct r5l_meta_block *block;
+ unsigned long flags;
+ u32 crc;
+
+ if (!io)
+ return;
+
+ block = page_address(io->meta_page);
+ block->meta_size = cpu_to_le32(io->meta_offset);
+ crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
+ block->checksum = cpu_to_le32(crc);
+
+ log->current_io = NULL;
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+ submit_bio(WRITE, io->current_bio);
+}
+
+static struct bio *r5l_bio_alloc(struct r5l_log *log)
+{
+ struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
+
+ bio->bi_rw = WRITE;
+ bio->bi_bdev = log->rdev->bdev;
+ bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
+
+ return bio;
+}
+
+static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
+{
+ log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
+
+ /*
+ * If we filled up the log device, start from the beginning again,
+ * which will require a new bio.
+ *
+ * Note: for this to work properly the log size needs to be a multiple
+ * of BLOCK_SECTORS.
+ */
+ if (log->log_start == 0)
+ io->need_split_bio = true;
+
+ io->log_end = log->log_start;
+}
+
+static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
+{
+ struct r5l_io_unit *io;
+ struct r5l_meta_block *block;
+
+ /* We can't handle memory allocation failure so far */
+ io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL);
+ io->log = log;
+ INIT_LIST_HEAD(&io->log_sibling);
+ INIT_LIST_HEAD(&io->stripe_list);
+ io->state = IO_UNIT_RUNNING;
+
+ io->meta_page = alloc_page(GFP_NOIO | __GFP_NOFAIL | __GFP_ZERO);
+ block = page_address(io->meta_page);
+ block->magic = cpu_to_le32(R5LOG_MAGIC);
+ block->version = R5LOG_VERSION;
+ block->seq = cpu_to_le64(log->seq);
+ block->position = cpu_to_le64(log->log_start);
+
+ io->log_start = log->log_start;
+ io->meta_offset = sizeof(struct r5l_meta_block);
+ io->seq = log->seq++;
+
+ io->current_bio = r5l_bio_alloc(log);
+ io->current_bio->bi_end_io = r5l_log_endio;
+ io->current_bio->bi_private = io;
+ bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
+
+ r5_reserve_log_entry(log, io);
+
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&io->log_sibling, &log->running_ios);
+ spin_unlock_irq(&log->io_list_lock);
+
+ return io;
+}
+
+static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
+{
+ if (log->current_io &&
+ log->current_io->meta_offset + payload_size > PAGE_SIZE)
+ r5l_submit_current_io(log);
+
+ if (!log->current_io)
+ log->current_io = r5l_new_meta(log);
+ return 0;
+}
+
+static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
+ sector_t location,
+ u32 checksum1, u32 checksum2,
+ bool checksum2_valid)
+{
+ struct r5l_io_unit *io = log->current_io;
+ struct r5l_payload_data_parity *payload;
+
+ payload = page_address(io->meta_page) + io->meta_offset;
+ payload->header.type = cpu_to_le16(type);
+ payload->header.flags = cpu_to_le16(0);
+ payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
+ (PAGE_SHIFT - 9));
+ payload->location = cpu_to_le64(location);
+ payload->checksum[0] = cpu_to_le32(checksum1);
+ if (checksum2_valid)
+ payload->checksum[1] = cpu_to_le32(checksum2);
+
+ io->meta_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) * (1 + !!checksum2_valid);
+}
+
+static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
+{
+ struct r5l_io_unit *io = log->current_io;
+
+ if (io->need_split_bio) {
+ struct bio *prev = io->current_bio;
+
+ io->current_bio = r5l_bio_alloc(log);
+ bio_chain(io->current_bio, prev);
+
+ submit_bio(WRITE, prev);
+ }
+
+ if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
+ BUG();
+
+ r5_reserve_log_entry(log, io);
+}
+
+static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
+ int data_pages, int parity_pages)
+{
+ int i;
+ int meta_size;
+ struct r5l_io_unit *io;
+
+ meta_size =
+ ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
+ * data_pages) +
+ sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) * parity_pages;
+
+ r5l_get_meta(log, meta_size);
+ io = log->current_io;
+
+ for (i = 0; i < sh->disks; i++) {
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ continue;
+ if (i == sh->pd_idx || i == sh->qd_idx)
+ continue;
+ r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
+ raid5_compute_blocknr(sh, i, 0),
+ sh->dev[i].log_checksum, 0, false);
+ r5l_append_payload_page(log, sh->dev[i].page);
+ }
+
+ if (sh->qd_idx >= 0) {
+ r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
+ sh->sector, sh->dev[sh->pd_idx].log_checksum,
+ sh->dev[sh->qd_idx].log_checksum, true);
+ r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
+ r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
+ } else {
+ r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
+ sh->sector, sh->dev[sh->pd_idx].log_checksum,
+ 0, false);
+ r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
+ }
+
+ list_add_tail(&sh->log_list, &io->stripe_list);
+ atomic_inc(&io->pending_stripe);
+ sh->log_io = io;
+}
+
+static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+/*
+ * running in raid5d, where reclaim could wait for raid5d too (when it flushes
+ * data from log to raid disks), so we shouldn't wait for reclaim here
+ */
+int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
+{
+ int write_disks = 0;
+ int data_pages, parity_pages;
+ int meta_size;
+ int reserve;
+ int i;
+
+ if (!log)
+ return -EAGAIN;
+ /* Don't support stripe batch */
+ if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
+ test_bit(STRIPE_SYNCING, &sh->state)) {
+ /* the stripe is written to log, we start writing it to raid */
+ clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
+ return -EAGAIN;
+ }
+
+ for (i = 0; i < sh->disks; i++) {
+ void *addr;
+
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ continue;
+ write_disks++;
+ /* checksum is already calculated in last run */
+ if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
+ continue;
+ addr = kmap_atomic(sh->dev[i].page);
+ sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
+ addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ }
+ parity_pages = 1 + !!(sh->qd_idx >= 0);
+ data_pages = write_disks - parity_pages;
+
+ meta_size =
+ ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
+ * data_pages) +
+ sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) * parity_pages;
+ /* Doesn't work with very big raid array */
+ if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
+ return -EINVAL;
+
+ set_bit(STRIPE_LOG_TRAPPED, &sh->state);
+ /*
+ * The stripe must enter state machine again to finish the write, so
+ * don't delay.
+ */
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ atomic_inc(&sh->count);
+
+ mutex_lock(&log->io_mutex);
+ /* meta + data */
+ reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
+ if (r5l_has_free_space(log, reserve))
+ r5l_log_stripe(log, sh, data_pages, parity_pages);
+ else {
+ spin_lock(&log->no_space_stripes_lock);
+ list_add_tail(&sh->log_list, &log->no_space_stripes);
+ spin_unlock(&log->no_space_stripes_lock);
+
+ r5l_wake_reclaim(log, reserve);
+ }
+ mutex_unlock(&log->io_mutex);
+
+ return 0;
+}
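To make the meta_size accounting in r5l_write_stripe concrete, here is a small worked sketch with hypothetical numbers (a RAID6 stripe writing 4 data pages plus P and Q):

/* hypothetical example: data_pages = 4, parity_pages = 2 (P and Q present) */
size_t data_pages = 4, parity_pages = 2;
size_t meta_size =
	(sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) * data_pages +
	sizeof(struct r5l_payload_data_parity) +
	sizeof(__le32) * parity_pages;
/* the stripe is rejected unless
 * meta_size + sizeof(struct r5l_meta_block) <= PAGE_SIZE */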
+
+void r5l_write_stripe_run(struct r5l_log *log)
+{
+ if (!log)
+ return;
+ mutex_lock(&log->io_mutex);
+ r5l_submit_current_io(log);
+ mutex_unlock(&log->io_mutex);
+}
+
+int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
+{
+ if (!log)
+ return -ENODEV;
+ /*
+ * we flush log disk cache first, then write stripe data to raid disks.
+ * So if bio is finished, the log disk cache is flushed already. The
+ * recovery guarantees we can recover the bio from the log disk, so we
+ * don't need to flush again
+ */
+ if (bio->bi_iter.bi_size == 0) {
+ bio_endio(bio);
+ return 0;
+ }
+ bio->bi_rw &= ~REQ_FLUSH;
+ return -EAGAIN;
+}
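A hedged sketch of how a caller on the raid5 make_request path might use this helper; the real call site is not part of this hunk, so the surrounding names (bi, conf) are assumptions:

if (unlikely(bi->bi_rw & REQ_FLUSH)) {
	int ret = r5l_handle_flush_request(conf->log, bi);

	if (ret == 0)		/* empty flush was completed by the log layer */
		return;
	/* -EAGAIN or -ENODEV: fall through and handle it as a normal request */
}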
+
+/* This will run after log space is reclaimed */
+static void r5l_run_no_space_stripes(struct r5l_log *log)
+{
+ struct stripe_head *sh;
+
+ spin_lock(&log->no_space_stripes_lock);
+ while (!list_empty(&log->no_space_stripes)) {
+ sh = list_first_entry(&log->no_space_stripes,
+ struct stripe_head, log_list);
+ list_del_init(&sh->log_list);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ raid5_release_stripe(sh);
+ }
+ spin_unlock(&log->no_space_stripes_lock);
+}
+
+static sector_t r5l_reclaimable_space(struct r5l_log *log)
+{
+ return r5l_ring_distance(log, log->last_checkpoint,
+ log->next_checkpoint);
+}
+
+static bool r5l_complete_finished_ios(struct r5l_log *log)
+{
+ struct r5l_io_unit *io, *next;
+ bool found = false;
+
+ assert_spin_locked(&log->io_list_lock);
+
+ list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
+ /* don't change list order */
+ if (io->state < IO_UNIT_STRIPE_END)
+ break;
+
+ log->next_checkpoint = io->log_start;
+ log->next_cp_seq = io->seq;
+
+ list_del(&io->log_sibling);
+ r5l_free_io_unit(log, io);
+
+ found = true;
+ }
+
+ return found;
+}
+
+static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
+{
+ struct r5l_log *log = io->log;
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
+
+ if (!r5l_complete_finished_ios(log)) {
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+ return;
+ }
+
+ if (r5l_reclaimable_space(log) > log->max_free_space)
+ r5l_wake_reclaim(log, 0);
+
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+ wake_up(&log->iounit_wait);
+}
+
+void r5l_stripe_write_finished(struct stripe_head *sh)
+{
+ struct r5l_io_unit *io;
+
+ io = sh->log_io;
+ sh->log_io = NULL;
+
+ if (io && atomic_dec_and_test(&io->pending_stripe))
+ __r5l_stripe_write_finished(io);
+}
+
+static void r5l_log_flush_endio(struct bio *bio)
+{
+ struct r5l_log *log = container_of(bio, struct r5l_log,
+ flush_bio);
+ unsigned long flags;
+ struct r5l_io_unit *io;
+
+ if (bio->bi_error)
+ md_error(log->rdev->mddev, log->rdev);
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ list_for_each_entry(io, &log->flushing_ios, log_sibling)
+ r5l_io_run_stripes(io);
+ list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+}
+
+/*
+ * Start dispatching IO to the raid disks.
+ * The log consists of a sequence of io_units (meta blocks). There is one
+ * situation we want to avoid: a broken meta block in the middle of the log
+ * would prevent recovery from reaching the meta blocks at the head of the
+ * log. So if an operation requires the meta block at the head to be
+ * persistent in the log, we must make sure every meta block before it is
+ * persistent in the log too. A case is:
+ *
+ * stripe data/parity is in the log and we start writing the stripe to the
+ * raid disks. The stripe data/parity must be persistent in the log before we
+ * do the write to the raid disks.
+ *
+ * The solution is to strictly maintain the io_unit list order. In this case
+ * we only write stripes of an io_unit to the raid disks, in order, up to the
+ * first io_unit whose data/parity is not yet in the log.
+ */
+void r5l_flush_stripe_to_raid(struct r5l_log *log)
+{
+ bool do_flush;
+
+ if (!log || !log->need_cache_flush)
+ return;
+
+ spin_lock_irq(&log->io_list_lock);
+ /* flush bio is running */
+ if (!list_empty(&log->flushing_ios)) {
+ spin_unlock_irq(&log->io_list_lock);
+ return;
+ }
+ list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
+ do_flush = !list_empty(&log->flushing_ios);
+ spin_unlock_irq(&log->io_list_lock);
+
+ if (!do_flush)
+ return;
+ bio_reset(&log->flush_bio);
+ log->flush_bio.bi_bdev = log->rdev->bdev;
+ log->flush_bio.bi_end_io = r5l_log_flush_endio;
+ submit_bio(WRITE_FLUSH, &log->flush_bio);
+}
+
+static void r5l_write_super(struct r5l_log *log, sector_t cp);
+static void r5l_write_super_and_discard_space(struct r5l_log *log,
+ sector_t end)
+{
+ struct block_device *bdev = log->rdev->bdev;
+ struct mddev *mddev;
+
+ r5l_write_super(log, end);
+
+ if (!blk_queue_discard(bdev_get_queue(bdev)))
+ return;
+
+ mddev = log->rdev->mddev;
+ /*
+ * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
+ * waits for this thread to finish. This thread waits for
+ * MD_CHANGE_PENDING to clear, which is supposed to be done in
+ * md_check_recovery(). md_check_recovery() tries to get
+ * reconfig_mutex. Since r5l_quiesce already holds the mutex,
+ * md_check_recovery() fails, so MD_CHANGE_PENDING never gets cleared.
+ * The in_teardown check works around this issue.
+ */
+ if (!log->in_teardown) {
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
+ log->in_teardown);
+ /*
+ * r5l_quiesce could run after the in_teardown check and grab the
+ * mutex first. The superblock might then get updated twice.
+ */
+ if (log->in_teardown)
+ md_update_sb(mddev, 1);
+ } else {
+ WARN_ON(!mddev_is_locked(mddev));
+ md_update_sb(mddev, 1);
+ }
+
+ /* discard IO error really doesn't matter, ignore it */
+ if (log->last_checkpoint < end) {
+ blkdev_issue_discard(bdev,
+ log->last_checkpoint + log->rdev->data_offset,
+ end - log->last_checkpoint, GFP_NOIO, 0);
+ } else {
+ blkdev_issue_discard(bdev,
+ log->last_checkpoint + log->rdev->data_offset,
+ log->device_size - log->last_checkpoint,
+ GFP_NOIO, 0);
+ blkdev_issue_discard(bdev, log->rdev->data_offset, end,
+ GFP_NOIO, 0);
+ }
+}
+
+
+static void r5l_do_reclaim(struct r5l_log *log)
+{
+ sector_t reclaim_target = xchg(&log->reclaim_target, 0);
+ sector_t reclaimable;
+ sector_t next_checkpoint;
+ u64 next_cp_seq;
+
+ spin_lock_irq(&log->io_list_lock);
+ /*
+ * move the proper io_units to the reclaim list. We should not change the
+ * order: reclaimable and unreclaimable io_units can be mixed in the list,
+ * and we shouldn't reuse the space of an unreclaimable io_unit
+ */
+ while (1) {
+ reclaimable = r5l_reclaimable_space(log);
+ if (reclaimable >= reclaim_target ||
+ (list_empty(&log->running_ios) &&
+ list_empty(&log->io_end_ios) &&
+ list_empty(&log->flushing_ios) &&
+ list_empty(&log->finished_ios)))
+ break;
+
+ md_wakeup_thread(log->rdev->mddev->thread);
+ wait_event_lock_irq(log->iounit_wait,
+ r5l_reclaimable_space(log) > reclaimable,
+ log->io_list_lock);
+ }
+
+ next_checkpoint = log->next_checkpoint;
+ next_cp_seq = log->next_cp_seq;
+ spin_unlock_irq(&log->io_list_lock);
+
+ BUG_ON(reclaimable < 0);
+ if (reclaimable == 0)
+ return;
+
+ /*
+ * write_super will flush cache of each raid disk. We must write super
+ * here, because the log area might be reused soon and we don't want to
+ * confuse recovery
+ */
+ r5l_write_super_and_discard_space(log, next_checkpoint);
+
+ mutex_lock(&log->io_mutex);
+ log->last_checkpoint = next_checkpoint;
+ log->last_cp_seq = next_cp_seq;
+ mutex_unlock(&log->io_mutex);
+
+ r5l_run_no_space_stripes(log);
+}
+
+static void r5l_reclaim_thread(struct md_thread *thread)
+{
+ struct mddev *mddev = thread->mddev;
+ struct r5conf *conf = mddev->private;
+ struct r5l_log *log = conf->log;
+
+ if (!log)
+ return;
+ r5l_do_reclaim(log);
+}
+
+static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
+{
+ unsigned long target;
+ unsigned long new = (unsigned long)space; /* overflow in theory */
+
+ do {
+ target = log->reclaim_target;
+ if (new < target)
+ return;
+ } while (cmpxchg(&log->reclaim_target, target, new) != target);
+ md_wakeup_thread(log->reclaim_thread);
+}
+
+void r5l_quiesce(struct r5l_log *log, int state)
+{
+ struct mddev *mddev;
+ if (!log || state == 2)
+ return;
+ if (state == 0) {
+ log->in_teardown = 0;
+ log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
+ log->rdev->mddev, "reclaim");
+ } else if (state == 1) {
+ /*
+ * at this point all stripes are finished, so io_unit is at
+ * least in STRIPE_END state
+ */
+ log->in_teardown = 1;
+ /* make sure r5l_write_super_and_discard_space exits */
+ mddev = log->rdev->mddev;
+ wake_up(&mddev->sb_wait);
+ r5l_wake_reclaim(log, -1L);
+ md_unregister_thread(&log->reclaim_thread);
+ r5l_do_reclaim(log);
+ }
+}
+
+bool r5l_log_disk_error(struct r5conf *conf)
+{
+ /* don't allow write if journal disk is missing */
+ if (!conf->log)
+ return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+ return test_bit(Faulty, &conf->log->rdev->flags);
+}
+
+struct r5l_recovery_ctx {
+ struct page *meta_page; /* current meta */
+ sector_t meta_total_blocks; /* total size of current meta and data */
+ sector_t pos; /* recovery position */
+ u64 seq; /* recovery position seq */
+};
+
+static int r5l_read_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct page *page = ctx->meta_page;
+ struct r5l_meta_block *mb;
+ u32 crc, stored_crc;
+
+ if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
+ return -EIO;
+
+ mb = page_address(page);
+ stored_crc = le32_to_cpu(mb->checksum);
+ mb->checksum = 0;
+
+ if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
+ le64_to_cpu(mb->seq) != ctx->seq ||
+ mb->version != R5LOG_VERSION ||
+ le64_to_cpu(mb->position) != ctx->pos)
+ return -EINVAL;
+
+ crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
+ if (stored_crc != crc)
+ return -EINVAL;
+
+ if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
+ return -EINVAL;
+
+ ctx->meta_total_blocks = BLOCK_SECTORS;
+
+ return 0;
+}
+
+static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx,
+ sector_t stripe_sect,
+ int *offset, sector_t *log_offset)
+{
+ struct r5conf *conf = log->rdev->mddev->private;
+ struct stripe_head *sh;
+ struct r5l_payload_data_parity *payload;
+ int disk_index;
+
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
+ while (1) {
+ payload = page_address(ctx->meta_page) + *offset;
+
+ if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
+ raid5_compute_sector(conf,
+ le64_to_cpu(payload->location), 0,
+ &disk_index, sh);
+
+ sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
+ sh->dev[disk_index].page, READ, false);
+ sh->dev[disk_index].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
+ ctx->meta_total_blocks += BLOCK_SECTORS;
+ } else {
+ disk_index = sh->pd_idx;
+ sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
+ sh->dev[disk_index].page, READ, false);
+ sh->dev[disk_index].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
+
+ if (sh->qd_idx >= 0) {
+ disk_index = sh->qd_idx;
+ sync_page_io(log->rdev,
+ r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
+ PAGE_SIZE, sh->dev[disk_index].page,
+ READ, false);
+ sh->dev[disk_index].log_checksum =
+ le32_to_cpu(payload->checksum[1]);
+ set_bit(R5_Wantwrite,
+ &sh->dev[disk_index].flags);
+ }
+ ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
+ }
+
+ *log_offset = r5l_ring_add(log, *log_offset,
+ le32_to_cpu(payload->size));
+ *offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
+ break;
+ }
+
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+ void *addr;
+ u32 checksum;
+
+ if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
+ continue;
+ addr = kmap_atomic(sh->dev[disk_index].page);
+ checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ if (checksum != sh->dev[disk_index].log_checksum)
+ goto error;
+ }
+
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+ struct md_rdev *rdev, *rrdev;
+
+ if (!test_and_clear_bit(R5_Wantwrite,
+ &sh->dev[disk_index].flags))
+ continue;
+
+ /* in case device is broken */
+ rdev = rcu_dereference(conf->disks[disk_index].rdev);
+ if (rdev)
+ sync_page_io(rdev, stripe_sect, PAGE_SIZE,
+ sh->dev[disk_index].page, WRITE, false);
+ rrdev = rcu_dereference(conf->disks[disk_index].replacement);
+ if (rrdev)
+ sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
+ sh->dev[disk_index].page, WRITE, false);
+ }
+ raid5_release_stripe(sh);
+ return 0;
+
+error:
+ for (disk_index = 0; disk_index < sh->disks; disk_index++)
+ sh->dev[disk_index].flags = 0;
+ raid5_release_stripe(sh);
+ return -EINVAL;
+}
+
+static int r5l_recovery_flush_one_meta(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct r5conf *conf = log->rdev->mddev->private;
+ struct r5l_payload_data_parity *payload;
+ struct r5l_meta_block *mb;
+ int offset;
+ sector_t log_offset;
+ sector_t stripe_sector;
+
+ mb = page_address(ctx->meta_page);
+ offset = sizeof(struct r5l_meta_block);
+ log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+
+ while (offset < le32_to_cpu(mb->meta_size)) {
+ int dd;
+
+ payload = (void *)mb + offset;
+ stripe_sector = raid5_compute_sector(conf,
+ le64_to_cpu(payload->location), 0, &dd, NULL);
+ if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
+ &offset, &log_offset))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* copy data/parity from log to raid disks */
+static void r5l_recovery_flush_log(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ while (1) {
+ if (r5l_read_meta_block(log, ctx))
+ return;
+ if (r5l_recovery_flush_one_meta(log, ctx))
+ return;
+ ctx->seq++;
+ ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
+ }
+}
+
+static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
+ u64 seq)
+{
+ struct page *page;
+ struct r5l_meta_block *mb;
+ u32 crc;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return -ENOMEM;
+ mb = page_address(page);
+ mb->magic = cpu_to_le32(R5LOG_MAGIC);
+ mb->version = R5LOG_VERSION;
+ mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
+ mb->seq = cpu_to_le64(seq);
+ mb->position = cpu_to_le64(pos);
+ crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
+ mb->checksum = cpu_to_le32(crc);
+
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
+ __free_page(page);
+ return -EIO;
+ }
+ __free_page(page);
+ return 0;
+}
+
+static int r5l_recovery_log(struct r5l_log *log)
+{
+ struct r5l_recovery_ctx ctx;
+
+ ctx.pos = log->last_checkpoint;
+ ctx.seq = log->last_cp_seq;
+ ctx.meta_page = alloc_page(GFP_KERNEL);
+ if (!ctx.meta_page)
+ return -ENOMEM;
+
+ r5l_recovery_flush_log(log, &ctx);
+ __free_page(ctx.meta_page);
+
+ /*
+ * We did a recovery. Now ctx.pos points to an invalid meta block, which
+ * is where the new log will start. But we can't simply let the
+ * superblock keep pointing at the last valid meta block. The log might
+ * look like:
+ * | meta 1 | meta 2 | meta 3 |
+ * meta 1 is valid, meta 2 is invalid and meta 3 could still look valid.
+ * If the superblock pointed to meta 1 and we wrote a new valid meta 2n,
+ * then after another crash recovery would again start from meta 1; since
+ * meta 2n is valid now, it would also accept the stale meta 3, which is
+ * wrong.
+ * The solution is to write a new meta block at meta 2's position with
+ * seq == meta 1's seq + 10 and point the superblock at it. Recovery will
+ * then reject meta 3, because its seq doesn't match.
+ */
+ if (ctx.seq > log->last_cp_seq + 1) {
+ int ret;
+
+ ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
+ if (ret)
+ return ret;
+ log->seq = ctx.seq + 11;
+ log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
+ r5l_write_super(log, ctx.pos);
+ } else {
+ log->log_start = ctx.pos;
+ log->seq = ctx.seq;
+ }
+ return 0;
+}
+
+static void r5l_write_super(struct r5l_log *log, sector_t cp)
+{
+ struct mddev *mddev = log->rdev->mddev;
+
+ log->rdev->journal_tail = cp;
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+}
+
+static int r5l_load_log(struct r5l_log *log)
+{
+ struct md_rdev *rdev = log->rdev;
+ struct page *page;
+ struct r5l_meta_block *mb;
+ sector_t cp = log->rdev->journal_tail;
+ u32 stored_crc, expected_crc;
+ bool create_super = false;
+ int ret;
+
+ /* Make sure it's valid */
+ if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
+ cp = 0;
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
+ ret = -EIO;
+ goto ioerr;
+ }
+ mb = page_address(page);
+
+ if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
+ mb->version != R5LOG_VERSION) {
+ create_super = true;
+ goto create;
+ }
+ stored_crc = le32_to_cpu(mb->checksum);
+ mb->checksum = 0;
+ expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
+ if (stored_crc != expected_crc) {
+ create_super = true;
+ goto create;
+ }
+ if (le64_to_cpu(mb->position) != cp) {
+ create_super = true;
+ goto create;
+ }
+create:
+ if (create_super) {
+ log->last_cp_seq = prandom_u32();
+ cp = 0;
+ /*
+ * Make sure the superblock points to the correct address. The log
+ * might receive data very soon; if the superblock doesn't carry the
+ * correct log tail address, recovery can't find the log.
+ */
+ r5l_write_super(log, cp);
+ } else
+ log->last_cp_seq = le64_to_cpu(mb->seq);
+
+ log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
+ log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
+ if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
+ log->max_free_space = RECLAIM_MAX_FREE_SPACE;
+ log->last_checkpoint = cp;
+
+ __free_page(page);
+
+ return r5l_recovery_log(log);
+ioerr:
+ __free_page(page);
+ return ret;
+}
+
+int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
+{
+ struct r5l_log *log;
+
+ if (PAGE_SIZE != 4096)
+ return -EINVAL;
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+ log->rdev = rdev;
+
+ log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);
+
+ log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
+ sizeof(rdev->mddev->uuid));
+
+ mutex_init(&log->io_mutex);
+
+ spin_lock_init(&log->io_list_lock);
+ INIT_LIST_HEAD(&log->running_ios);
+ INIT_LIST_HEAD(&log->io_end_ios);
+ INIT_LIST_HEAD(&log->flushing_ios);
+ INIT_LIST_HEAD(&log->finished_ios);
+ bio_init(&log->flush_bio);
+
+ log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
+ if (!log->io_kc)
+ goto io_kc;
+
+ log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
+ log->rdev->mddev, "reclaim");
+ if (!log->reclaim_thread)
+ goto reclaim_thread;
+ init_waitqueue_head(&log->iounit_wait);
+
+ INIT_LIST_HEAD(&log->no_space_stripes);
+ spin_lock_init(&log->no_space_stripes_lock);
+
+ if (r5l_load_log(log))
+ goto error;
+
+ conf->log = log;
+ return 0;
+error:
+ md_unregister_thread(&log->reclaim_thread);
+reclaim_thread:
+ kmem_cache_destroy(log->io_kc);
+io_kc:
+ kfree(log);
+ return -EINVAL;
+}
+
+void r5l_exit_log(struct r5l_log *log)
+{
+ md_unregister_thread(&log->reclaim_thread);
+ kmem_cache_destroy(log->io_kc);
+ kfree(log);
+}
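Both r5l_read_meta_block() and r5l_load_log() above validate a meta block the same way: save the stored checksum, zero the checksum field, compute crc32c over the whole 4K page seeded with the array-UUID checksum (log->uuid_checksum), and compare. The userspace sketch below only illustrates that validate-with-field-zeroed pattern; the reduced struct layout and the bitwise crc32c() are stand-ins for the kernel definitions, not copies of them.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define META_BLOCK_SIZE 4096

/* Reduced meta-block header: only the fields this sketch needs. */
struct meta_block {
	uint32_t magic;
	uint32_t checksum;
	uint64_t seq;
	uint64_t position;
	uint8_t  rest[META_BLOCK_SIZE - 24];	/* payload descriptors + padding */
};

/*
 * Bitwise CRC32C (Castagnoli polynomial, reflected form 0x82F63B78): a slow,
 * generic stand-in for the kernel's crc32c_le(), kept only to show the call
 * pattern, not to match its exact output.
 */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u : crc >> 1;
	}
	return crc;
}

/*
 * Same shape as the check in r5l_read_meta_block(): remember the stored
 * checksum, zero the field, checksum the whole block, then compare.
 */
static int meta_block_valid(struct meta_block *mb, uint32_t uuid_seed)
{
	uint32_t stored = mb->checksum;
	uint32_t calc;

	mb->checksum = 0;
	calc = crc32c(uuid_seed, mb, sizeof(*mb));
	mb->checksum = stored;			/* restore for the caller */
	return stored == calc;
}

int main(void)
{
	static struct meta_block mb;		/* zero-initialized 4K block */
	uint32_t seed = 0x12345678;		/* stands in for log->uuid_checksum */

	mb.magic = 0xabcd1234;			/* placeholder, not R5LOG_MAGIC */
	mb.seq = 100;
	mb.position = 0;
	/* checksum field is still zero here, as when the block is written */
	mb.checksum = crc32c(seed, &mb, sizeof(mb));

	printf("fresh block valid:    %d\n", meta_block_valid(&mb, seed));
	mb.seq = 101;				/* simulate a stale/corrupt block */
	printf("tampered block valid: %d\n", meta_block_valid(&mb, seed));
	return 0;
}

In the patch itself the same seed is used on both sides: r5l_log_write_empty_meta_block() stamps the checksum with it when writing, and r5l_load_log()/r5l_read_meta_block() verify with it during load and recovery.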
diff --git a/kernel/drivers/md/raid5.c b/kernel/drivers/md/raid5.c
index a90d5876f..adf72a9b6 100644
--- a/kernel/drivers/md/raid5.c
+++ b/kernel/drivers/md/raid5.c
@@ -223,18 +223,14 @@ static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
return slot;
}
-static void return_io(struct bio *return_bi)
+static void return_io(struct bio_list *return_bi)
{
- struct bio *bi = return_bi;
- while (bi) {
-
- return_bi = bi->bi_next;
- bi->bi_next = NULL;
+ struct bio *bi;
+ while ((bi = bio_list_pop(return_bi)) != NULL) {
bi->bi_iter.bi_size = 0;
trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
bi, 0);
- bio_endio(bi, 0);
- bi = return_bi;
+ bio_endio(bi);
}
}
@@ -344,7 +340,8 @@ static void release_inactive_stripe_list(struct r5conf *conf,
int hash)
{
int size;
- bool do_wakeup = false;
+ unsigned long do_wakeup = 0;
+ int i = 0;
unsigned long flags;
if (hash == NR_STRIPE_HASH_LOCKS) {
@@ -356,7 +353,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
struct list_head *list = &temp_inactive_list[size - 1];
/*
- * We don't hold any lock here yet, get_active_stripe() might
+ * We don't hold any lock here yet, raid5_get_active_stripe() might
* remove stripes from the list
*/
if (!list_empty_careful(list)) {
@@ -365,15 +362,21 @@ static void release_inactive_stripe_list(struct r5conf *conf,
!list_empty(list))
atomic_dec(&conf->empty_inactive_list_nr);
list_splice_tail_init(list, conf->inactive_list + hash);
- do_wakeup = true;
+ do_wakeup |= 1 << hash;
spin_unlock_irqrestore(conf->hash_locks + hash, flags);
}
size--;
hash--;
}
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
+ if (do_wakeup & (1 << i))
+ wake_up(&conf->wait_for_stripe[i]);
+ }
+
if (do_wakeup) {
- wake_up(&conf->wait_for_stripe);
+ if (atomic_read(&conf->active_stripes) == 0)
+ wake_up(&conf->wait_for_quiescent);
if (conf->retry_read_aligned)
md_wakeup_thread(conf->mddev->thread);
}
@@ -410,7 +413,7 @@ static int release_stripe_list(struct r5conf *conf,
return count;
}
-static void release_stripe(struct stripe_head *sh)
+void raid5_release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
@@ -655,9 +658,9 @@ static int has_failed(struct r5conf *conf)
return 0;
}
-static struct stripe_head *
-get_active_stripe(struct r5conf *conf, sector_t sector,
- int previous, int noblock, int noquiesce)
+struct stripe_head *
+raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
+ int previous, int noblock, int noquiesce)
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(sector);
@@ -667,15 +670,15 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
spin_lock_irq(conf->hash_locks + hash);
do {
- wait_event_lock_irq(conf->wait_for_stripe,
+ wait_event_lock_irq(conf->wait_for_quiescent,
conf->quiesce == 0 || noquiesce,
*(conf->hash_locks + hash));
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
sh = get_free_stripe(conf, hash);
- if (!sh && llist_empty(&conf->released_stripes) &&
- !test_bit(R5_DID_ALLOC, &conf->cache_state))
+ if (!sh && !test_bit(R5_DID_ALLOC,
+ &conf->cache_state))
set_bit(R5_ALLOC_MORE,
&conf->cache_state);
}
@@ -684,14 +687,15 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
- wait_event_lock_irq(
- conf->wait_for_stripe,
+ wait_event_exclusive_cmd(
+ conf->wait_for_stripe[hash],
!list_empty(conf->inactive_list + hash) &&
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes * 3 / 4)
|| !test_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state)),
- *(conf->hash_locks + hash));
+ spin_unlock_irq(conf->hash_locks + hash),
+ spin_lock_irq(conf->hash_locks + hash));
clear_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
} else {
@@ -716,6 +720,9 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
}
} while (sh == NULL);
+ if (!list_empty(conf->inactive_list + hash))
+ wake_up(&conf->wait_for_stripe[hash]);
+
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -748,6 +755,10 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
/* Only freshly new full stripe normal write stripe can be added to a batch list */
static bool stripe_can_batch(struct stripe_head *sh)
{
+ struct r5conf *conf = sh->raid_conf;
+
+ if (conf->log)
+ return false;
return test_bit(STRIPE_BATCH_READY, &sh->state) &&
!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
is_full_stripe_write(sh);
@@ -851,7 +862,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
unlock_out:
unlock_two_stripes(head, sh);
out:
- release_stripe(head);
+ raid5_release_stripe(head);
}
/* Determine if 'data_offset' or 'new_data_offset' should be used
@@ -876,9 +887,9 @@ static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
}
static void
-raid5_end_read_request(struct bio *bi, int error);
+raid5_end_read_request(struct bio *bi);
static void
-raid5_end_write_request(struct bio *bi, int error);
+raid5_end_write_request(struct bio *bi);
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
@@ -888,6 +899,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
might_sleep();
+ if (r5l_write_stripe(conf->log, sh) == 0)
+ return;
for (i = disks; i--; ) {
int rw;
int replace_only = 0;
@@ -1166,7 +1179,7 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
static void ops_complete_biofill(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
- struct bio *return_bi = NULL;
+ struct bio_list return_bi = BIO_EMPTY_LIST;
int i;
pr_debug("%s: stripe %llu\n", __func__,
@@ -1190,20 +1203,18 @@ static void ops_complete_biofill(void *stripe_head_ref)
while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector);
- if (!raid5_dec_bi_active_stripes(rbi)) {
- rbi->bi_next = return_bi;
- return_bi = rbi;
- }
+ if (!raid5_dec_bi_active_stripes(rbi))
+ bio_list_add(&return_bi, rbi);
rbi = rbi2;
}
}
}
clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
- return_io(return_bi);
+ return_io(&return_bi);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
static void ops_run_biofill(struct stripe_head *sh)
@@ -1266,7 +1277,7 @@ static void ops_complete_compute(void *stripe_head_ref)
if (sh->check_state == check_state_compute_run)
sh->check_state = check_state_compute_result;
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
/* return a pointer to the address conversion region of the scribble buffer */
@@ -1692,7 +1703,7 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
}
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
static void
@@ -1850,7 +1861,7 @@ static void ops_complete_check(void *stripe_head_ref)
sh->check_state = check_state_check_result;
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
@@ -2014,7 +2025,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
/* we just created an active stripe so... */
atomic_inc(&conf->active_stripes);
- release_stripe(sh);
+ raid5_release_stripe(sh);
conf->max_nr_stripes++;
return 1;
}
@@ -2183,7 +2194,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
lock_device_hash_lock(conf, hash);
- wait_event_cmd(conf->wait_for_stripe,
+ wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
!list_empty(conf->inactive_list + hash),
unlock_device_hash_lock(conf, hash),
lock_device_hash_lock(conf, hash));
@@ -2233,7 +2244,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
if (!p)
err = -ENOMEM;
}
- release_stripe(nsh);
+ raid5_release_stripe(nsh);
}
/* critical section pass, GFP_NOIO no longer needed */
@@ -2268,17 +2279,15 @@ static void shrink_stripes(struct r5conf *conf)
drop_one_stripe(conf))
;
- if (conf->slab_cache)
- kmem_cache_destroy(conf->slab_cache);
+ kmem_cache_destroy(conf->slab_cache);
conf->slab_cache = NULL;
}
-static void raid5_end_read_request(struct bio * bi, int error)
+static void raid5_end_read_request(struct bio * bi)
{
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
- int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
char b[BDEVNAME_SIZE];
struct md_rdev *rdev = NULL;
sector_t s;
@@ -2287,9 +2296,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
if (bi == &sh->dev[i].req)
break;
- pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
+ pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
- uptodate);
+ bi->bi_error);
if (i == disks) {
BUG();
return;
@@ -2308,7 +2317,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
s = sh->sector + rdev->new_data_offset;
else
s = sh->sector + rdev->data_offset;
- if (uptodate) {
+ if (!bi->bi_error) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
/* Note that this cannot happen on a
@@ -2393,16 +2402,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
rdev_dec_pending(rdev, conf->mddev);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
-static void raid5_end_write_request(struct bio *bi, int error)
+static void raid5_end_write_request(struct bio *bi)
{
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
struct md_rdev *uninitialized_var(rdev);
- int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
sector_t first_bad;
int bad_sectors;
int replacement = 0;
@@ -2425,23 +2433,23 @@ static void raid5_end_write_request(struct bio *bi, int error)
break;
}
}
- pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
+ pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
- uptodate);
+ bi->bi_error);
if (i == disks) {
BUG();
return;
}
if (replacement) {
- if (!uptodate)
+ if (bi->bi_error)
md_error(conf->mddev, rdev);
else if (is_badblock(rdev, sh->sector,
STRIPE_SECTORS,
&first_bad, &bad_sectors))
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
- if (!uptodate) {
+ if (bi->bi_error) {
set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags);
@@ -2462,20 +2470,18 @@ static void raid5_end_write_request(struct bio *bi, int error)
}
rdev_dec_pending(rdev, conf->mddev);
- if (sh->batch_head && !uptodate && !replacement)
+ if (sh->batch_head && bi->bi_error && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
if (sh->batch_head && sh != sh->batch_head)
- release_stripe(sh->batch_head);
+ raid5_release_stripe(sh->batch_head);
}
-static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
-
static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
struct r5dev *dev = &sh->dev[i];
@@ -2491,7 +2497,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
dev->rreq.bi_private = sh;
dev->flags = 0;
- dev->sector = compute_blocknr(sh, i, previous);
+ dev->sector = raid5_compute_blocknr(sh, i, previous);
}
static void error(struct mddev *mddev, struct md_rdev *rdev)
@@ -2510,6 +2516,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
printk(KERN_ALERT
"md/raid:%s: Disk failure on %s, disabling device.\n"
"md/raid:%s: Operation continuing on %d devices.\n",
@@ -2523,9 +2530,9 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
* Input: a 'big' sector number,
* Output: index of the data and parity disk, and the sector # in them.
*/
-static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
- int previous, int *dd_idx,
- struct stripe_head *sh)
+sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
+ int previous, int *dd_idx,
+ struct stripe_head *sh)
{
sector_t stripe, stripe2;
sector_t chunk_number;
@@ -2725,7 +2732,7 @@ static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
return new_sector;
}
-static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
+sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
{
struct r5conf *conf = sh->raid_conf;
int raid_disks = sh->disks;
@@ -3062,7 +3069,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
static void
handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct stripe_head_state *s, int disks,
- struct bio **return_bi)
+ struct bio_list *return_bi)
{
int i;
BUG_ON(sh->batch_head);
@@ -3097,17 +3104,19 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
if (bi)
bitmap_end = 1;
+ r5l_stripe_write_finished(sh);
+
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+ bi->bi_error = -EIO;
if (!raid5_dec_bi_active_stripes(bi)) {
md_write_end(conf->mddev);
- bi->bi_next = *return_bi;
- *return_bi = bi;
+ bio_list_add(return_bi, bi);
}
bi = nextbi;
}
@@ -3127,11 +3136,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+ bi->bi_error = -EIO;
if (!raid5_dec_bi_active_stripes(bi)) {
md_write_end(conf->mddev);
- bi->bi_next = *return_bi;
- *return_bi = bi;
+ bio_list_add(return_bi, bi);
}
bi = bi2;
}
@@ -3140,6 +3149,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
* the data has not reached the cache yet.
*/
if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
+ s->failed > conf->max_degraded &&
(!test_bit(R5_Insync, &sh->dev[i].flags) ||
test_bit(R5_ReadError, &sh->dev[i].flags))) {
spin_lock_irq(&sh->stripe_lock);
@@ -3148,15 +3158,16 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
spin_unlock_irq(&sh->stripe_lock);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
+ if (bi)
+ s->to_read--;
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (!raid5_dec_bi_active_stripes(bi)) {
- bi->bi_next = *return_bi;
- *return_bi = bi;
- }
+
+ bi->bi_error = -EIO;
+ if (!raid5_dec_bi_active_stripes(bi))
+ bio_list_add(return_bi, bi);
bi = nextbi;
}
}
@@ -3168,6 +3179,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
*/
clear_bit(R5_LOCKED, &sh->dev[i].flags);
}
+ s->to_write = 0;
+ s->written = 0;
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3299,7 +3312,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
*/
return 0;
- for (i = 0; i < s->failed; i++) {
+ for (i = 0; i < s->failed && i < 2; i++) {
if (fdev[i]->towrite &&
!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
!test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3323,7 +3336,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
sh->sector < sh->raid_conf->mddev->recovery_cp)
/* reconstruct-write isn't being forced */
return 0;
- for (i = 0; i < s->failed; i++) {
+ for (i = 0; i < s->failed && i < 2; i++) {
if (s->failed_num[i] != sh->pd_idx &&
s->failed_num[i] != sh->qd_idx &&
!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
@@ -3435,7 +3448,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
* never LOCKED, so we don't need to test 'failed' directly.
*/
static void handle_stripe_clean_event(struct r5conf *conf,
- struct stripe_head *sh, int disks, struct bio **return_bi)
+ struct stripe_head *sh, int disks, struct bio_list *return_bi)
{
int i;
struct r5dev *dev;
@@ -3469,8 +3482,7 @@ returnbi:
wbi2 = r5_next_bio(wbi, dev->sector);
if (!raid5_dec_bi_active_stripes(wbi)) {
md_write_end(conf->mddev);
- wbi->bi_next = *return_bi;
- *return_bi = wbi;
+ bio_list_add(return_bi, wbi);
}
wbi = wbi2;
}
@@ -3494,8 +3506,12 @@ returnbi:
WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
WARN_ON(dev->page != dev->orig_page);
}
+
+ r5l_stripe_write_finished(sh);
+
if (!discard_pending &&
test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
+ int hash;
clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
if (sh->qd_idx >= 0) {
@@ -3509,16 +3525,17 @@ returnbi:
* no updated data, so remove it from hash list and the stripe
* will be reinitialized
*/
- spin_lock_irq(&conf->device_lock);
unhash:
+ hash = sh->hash_lock_index;
+ spin_lock_irq(conf->hash_locks + hash);
remove_hash(sh);
+ spin_unlock_irq(conf->hash_locks + hash);
if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list,
struct stripe_head, batch_list);
if (sh != head_sh)
goto unhash;
}
- spin_unlock_irq(&conf->device_lock);
sh = head_sh;
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
@@ -3934,10 +3951,10 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
struct stripe_head *sh2;
struct async_submit_ctl submit;
- sector_t bn = compute_blocknr(sh, i, 1);
+ sector_t bn = raid5_compute_blocknr(sh, i, 1);
sector_t s = raid5_compute_sector(conf, bn, 0,
&dd_idx, NULL);
- sh2 = get_active_stripe(conf, s, 0, 1, 1);
+ sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1);
if (sh2 == NULL)
/* so far only the early blocks of this stripe
* have been requested. When later blocks
@@ -3947,7 +3964,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
/* must have already done this block */
- release_stripe(sh2);
+ raid5_release_stripe(sh2);
continue;
}
@@ -3968,7 +3985,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
set_bit(STRIPE_EXPAND_READY, &sh2->state);
set_bit(STRIPE_HANDLE, &sh2->state);
}
- release_stripe(sh2);
+ raid5_release_stripe(sh2);
}
/* done submitting copies, wait for them to complete */
@@ -4003,6 +4020,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
s->failed_num[0] = -1;
s->failed_num[1] = -1;
+ s->log_failed = r5l_log_disk_error(conf);
/* Now to look around and see what can be done */
rcu_read_lock();
@@ -4057,8 +4075,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
&first_bad, &bad_sectors))
set_bit(R5_ReadRepl, &dev->flags);
else {
- if (rdev)
+ if (rdev && !test_bit(Faulty, &rdev->flags))
set_bit(R5_NeedReplace, &dev->flags);
+ else
+ clear_bit(R5_NeedReplace, &dev->flags);
rdev = rcu_dereference(conf->disks[i].rdev);
clear_bit(R5_ReadRepl, &dev->flags);
}
@@ -4252,7 +4272,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
if (handle_flags == 0 ||
sh->state & handle_flags)
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
spin_lock_irq(&head_sh->stripe_lock);
head_sh->batch_head = NULL;
@@ -4313,6 +4333,9 @@ static void handle_stripe(struct stripe_head *sh)
analyse_stripe(sh, &s);
+ if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
+ goto finish;
+
if (s.handle_bad_blocks) {
set_bit(STRIPE_HANDLE, &sh->state);
goto finish;
@@ -4341,7 +4364,7 @@ static void handle_stripe(struct stripe_head *sh)
/* check if the array has lost more than max_degraded devices and,
* if so, some requests might need to be failed.
*/
- if (s.failed > conf->max_degraded) {
+ if (s.failed > conf->max_degraded || s.log_failed) {
sh->check_state = 0;
sh->reconstruct_state = 0;
break_stripe_batch_list(sh, 0);
@@ -4499,7 +4522,7 @@ static void handle_stripe(struct stripe_head *sh)
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh_src
- = get_active_stripe(conf, sh->sector, 1, 1, 1);
+ = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1);
if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
/* sh cannot be written until sh_src has been read.
* so arrange for sh to be delayed a little
@@ -4509,11 +4532,11 @@ static void handle_stripe(struct stripe_head *sh)
if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
&sh_src->state))
atomic_inc(&conf->preread_active_stripes);
- release_stripe(sh_src);
+ raid5_release_stripe(sh_src);
goto finish;
}
if (sh_src)
- release_stripe(sh_src);
+ raid5_release_stripe(sh_src);
sh->reconstruct_state = reconstruct_state_idle;
clear_bit(STRIPE_EXPANDING, &sh->state);
@@ -4601,7 +4624,15 @@ finish:
md_wakeup_thread(conf->mddev->thread);
}
- return_io(s.return_bi);
+ if (!bio_list_empty(&s.return_bi)) {
+ if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+ spin_lock_irq(&conf->device_lock);
+ bio_list_merge(&conf->return_bi, &s.return_bi);
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(conf->mddev->thread);
+ } else
+ return_io(&s.return_bi);
+ }
clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
}
@@ -4658,43 +4689,14 @@ static int raid5_congested(struct mddev *mddev, int bits)
return 0;
}
-/* We want read requests to align with chunks where possible,
- * but write requests don't need to.
- */
-static int raid5_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- int max;
- unsigned int chunk_sectors = mddev->chunk_sectors;
- unsigned int bio_sectors = bvm->bi_size >> 9;
-
- /*
- * always allow writes to be mergeable, read as well if array
- * is degraded as we'll go through stripe cache anyway.
- */
- if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
- return biovec->bv_len;
-
- if (mddev->new_chunk_sectors < mddev->chunk_sectors)
- chunk_sectors = mddev->new_chunk_sectors;
- max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
- if (max < 0) max = 0;
- if (max <= biovec->bv_len && bio_sectors == 0)
- return biovec->bv_len;
- else
- return max;
-}
-
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
+ struct r5conf *conf = mddev->private;
sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
- unsigned int chunk_sectors = mddev->chunk_sectors;
+ unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
- if (mddev->new_chunk_sectors < mddev->chunk_sectors)
- chunk_sectors = mddev->new_chunk_sectors;
+ chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors);
}
@@ -4745,13 +4747,13 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
* first).
* If the read failed..
*/
-static void raid5_align_endio(struct bio *bi, int error)
+static void raid5_align_endio(struct bio *bi)
{
struct bio* raid_bi = bi->bi_private;
struct mddev *mddev;
struct r5conf *conf;
- int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
struct md_rdev *rdev;
+ int error = bi->bi_error;
bio_put(bi);
@@ -4762,12 +4764,12 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
- if (!error && uptodate) {
+ if (!error) {
trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
raid_bi, 0);
- bio_endio(raid_bi, 0);
+ bio_endio(raid_bi);
if (atomic_dec_and_test(&conf->active_aligned_reads))
- wake_up(&conf->wait_for_stripe);
+ wake_up(&conf->wait_for_quiescent);
return;
}
@@ -4776,26 +4778,7 @@ static void raid5_align_endio(struct bio *bi, int error)
add_bio_to_retry(raid_bi, conf);
}
-static int bio_fits_rdev(struct bio *bi)
-{
- struct request_queue *q = bdev_get_queue(bi->bi_bdev);
-
- if (bio_sectors(bi) > queue_max_sectors(q))
- return 0;
- blk_recount_segments(q, bi);
- if (bi->bi_phys_segments > queue_max_segments(q))
- return 0;
-
- if (q->merge_bvec_fn)
- /* it's too hard to apply the merge_bvec_fn at this stage,
- * just just give up
- */
- return 0;
-
- return 1;
-}
-
-static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
{
struct r5conf *conf = mddev->private;
int dd_idx;
@@ -4804,7 +4787,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
sector_t end_sector;
if (!in_chunk_boundary(mddev, raid_bio)) {
- pr_debug("chunk_aligned_read : non aligned\n");
+ pr_debug("%s: non aligned\n", __func__);
return 0;
}
/*
@@ -4846,13 +4829,11 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
align_bi->bi_bdev = rdev->bdev;
- __clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
+ bio_clear_flag(align_bi, BIO_SEG_VALID);
- if (!bio_fits_rdev(align_bi) ||
- is_badblock(rdev, align_bi->bi_iter.bi_sector,
+ if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
bio_sectors(align_bi),
&first_bad, &bad_sectors)) {
- /* too big in some way, or has a known bad block */
bio_put(align_bi);
rdev_dec_pending(rdev, mddev);
return 0;
@@ -4862,7 +4843,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
align_bi->bi_iter.bi_sector += rdev->data_offset;
spin_lock_irq(&conf->device_lock);
- wait_event_lock_irq(conf->wait_for_stripe,
+ wait_event_lock_irq(conf->wait_for_quiescent,
conf->quiesce == 0,
conf->device_lock);
atomic_inc(&conf->active_aligned_reads);
@@ -4881,6 +4862,31 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
}
}
+static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
+{
+ struct bio *split;
+
+ do {
+ sector_t sector = raid_bio->bi_iter.bi_sector;
+ unsigned chunk_sects = mddev->chunk_sectors;
+ unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
+
+ if (sectors < bio_sectors(raid_bio)) {
+ split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
+ bio_chain(split, raid_bio);
+ } else
+ split = raid_bio;
+
+ if (!raid5_read_one_chunk(mddev, split)) {
+ if (split != raid_bio)
+ generic_make_request(raid_bio);
+ return split;
+ }
+ } while (split != raid_bio);
+
+ return NULL;
+}
+
/* __get_priority_stripe - get the next stripe to process
*
* Full stripe writes are allowed to pass preread active stripes up until
@@ -5022,7 +5028,7 @@ static void release_stripe_plug(struct mddev *mddev,
struct raid5_plug_cb *cb;
if (!blk_cb) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
return;
}
@@ -5038,7 +5044,7 @@ static void release_stripe_plug(struct mddev *mddev,
if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
list_add_tail(&sh->lru, &cb->list);
else
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
static void make_discard_request(struct mddev *mddev, struct bio *bi)
@@ -5073,12 +5079,12 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
DEFINE_WAIT(w);
int d;
again:
- sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0);
prepare_to_wait(&conf->wait_for_overlap, &w,
TASK_UNINTERRUPTIBLE);
set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
if (test_bit(STRIPE_SYNCING, &sh->state)) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
schedule();
goto again;
}
@@ -5090,7 +5096,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
if (sh->dev[d].towrite || sh->dev[d].toread) {
set_bit(R5_Overlap, &sh->dev[d].flags);
spin_unlock_irq(&sh->stripe_lock);
- release_stripe(sh);
+ raid5_release_stripe(sh);
schedule();
goto again;
}
@@ -5129,7 +5135,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
remaining = raid5_dec_bi_active_stripes(bi);
if (remaining == 0) {
md_write_end(mddev);
- bio_endio(bi, 0);
+ bio_endio(bi);
}
}
@@ -5146,8 +5152,15 @@ static void make_request(struct mddev *mddev, struct bio * bi)
bool do_prepare;
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
- md_flush_request(mddev, bi);
- return;
+ int ret = r5l_handle_flush_request(conf->log, bi);
+
+ if (ret == 0)
+ return;
+ if (ret == -ENODEV) {
+ md_flush_request(mddev, bi);
+ return;
+ }
+ /* ret == -EAGAIN, fallback */
}
md_write_start(mddev, bi);
@@ -5158,9 +5171,11 @@ static void make_request(struct mddev *mddev, struct bio * bi)
* data on failed drives.
*/
if (rw == READ && mddev->degraded == 0 &&
- mddev->reshape_position == MaxSector &&
- chunk_aligned_read(mddev,bi))
- return;
+ mddev->reshape_position == MaxSector) {
+ bi = chunk_aligned_read(mddev, bi);
+ if (!bi)
+ return;
+ }
if (unlikely(bi->bi_rw & REQ_DISCARD)) {
make_discard_request(mddev, bi);
@@ -5218,7 +5233,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
- sh = get_active_stripe(conf, new_sector, previous,
+ sh = raid5_get_active_stripe(conf, new_sector, previous,
(bi->bi_rw&RWA_MASK), 0);
if (sh) {
if (unlikely(previous)) {
@@ -5239,7 +5254,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
must_retry = 1;
spin_unlock_irq(&conf->device_lock);
if (must_retry) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
schedule();
do_prepare = true;
goto retry;
@@ -5249,14 +5264,14 @@ static void make_request(struct mddev *mddev, struct bio * bi)
/* Might have got the wrong stripe_head
* by accident
*/
- release_stripe(sh);
+ raid5_release_stripe(sh);
goto retry;
}
if (rw == WRITE &&
logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -5279,7 +5294,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
* and wait a while
*/
md_wakeup_thread(mddev->thread);
- release_stripe(sh);
+ raid5_release_stripe(sh);
schedule();
do_prepare = true;
goto retry;
@@ -5293,7 +5308,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
release_stripe_plug(mddev, sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ bi->bi_error = -EIO;
break;
}
}
@@ -5307,7 +5322,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
bi, 0);
- bio_endio(bi, 0);
+ bio_endio(bi);
}
}
@@ -5336,6 +5351,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
sector_t stripe_addr;
int reshape_sectors;
struct list_head stripes;
+ sector_t retn;
if (sector_nr == 0) {
/* If restarting in the middle, skip the initial sectors */
@@ -5343,6 +5359,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
conf->reshape_progress < raid5_size(mddev, 0, 0)) {
sector_nr = raid5_size(mddev, 0, 0)
- conf->reshape_progress;
+ } else if (mddev->reshape_backwards &&
+ conf->reshape_progress == MaxSector) {
+ /* shouldn't happen, but just in case, finish up. */
+ sector_nr = MaxSector;
} else if (!mddev->reshape_backwards &&
conf->reshape_progress > 0)
sector_nr = conf->reshape_progress;
@@ -5351,7 +5371,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
mddev->curr_resync_completed = sector_nr;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
*skipped = 1;
- return sector_nr;
+ retn = sector_nr;
+ goto finish;
}
}
@@ -5359,10 +5380,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
* If old and new chunk sizes differ, we need to process the
* largest of these
*/
- if (mddev->new_chunk_sectors > mddev->chunk_sectors)
- reshape_sectors = mddev->new_chunk_sectors;
- else
- reshape_sectors = mddev->chunk_sectors;
+
+ reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
/* We update the metadata at least every 10 seconds, or when
* the data about to be copied would over-write the source of
@@ -5377,11 +5396,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
safepos = conf->reshape_safe;
sector_div(safepos, data_disks);
if (mddev->reshape_backwards) {
- writepos -= min_t(sector_t, reshape_sectors, writepos);
+ BUG_ON(writepos < reshape_sectors);
+ writepos -= reshape_sectors;
readpos += reshape_sectors;
safepos += reshape_sectors;
} else {
writepos += reshape_sectors;
+ /* readpos and safepos are worst-case calculations.
+ * A negative number is overly pessimistic, and causes
+ * obvious problems for unsigned storage. So clip to 0.
+ */
readpos -= min_t(sector_t, reshape_sectors, readpos);
safepos -= min_t(sector_t, reshape_sectors, safepos);
}
@@ -5457,7 +5481,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
int j;
int skipped_disk = 0;
- sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
+ sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
set_bit(STRIPE_EXPANDING, &sh->state);
atomic_inc(&conf->reshape_stripes);
/* If any of this stripe is beyond the end of the old
@@ -5470,7 +5494,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
if (conf->level == 6 &&
j == sh->qd_idx)
continue;
- s = compute_blocknr(sh, j, 0);
+ s = raid5_compute_blocknr(sh, j, 0);
if (s < raid5_size(mddev, 0, 0)) {
skipped_disk = 1;
continue;
@@ -5506,10 +5530,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
if (last_sector >= mddev->dev_sectors)
last_sector = mddev->dev_sectors - 1;
while (first_sector <= last_sector) {
- sh = get_active_stripe(conf, first_sector, 1, 0, 1);
+ sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
first_sector += STRIPE_SECTORS;
}
/* Now that the sources are clearly marked, we can release
@@ -5518,13 +5542,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
while (!list_empty(&stripes)) {
sh = list_entry(stripes.next, struct stripe_head, lru);
list_del_init(&sh->lru);
- release_stripe(sh);
+ raid5_release_stripe(sh);
}
/* If this takes us to the resync_max point where we have to pause,
* then we need to write out the superblock.
*/
sector_nr += reshape_sectors;
- if ((sector_nr - mddev->curr_resync_completed) * 2
+ retn = reshape_sectors;
+finish:
+ if (mddev->curr_resync_completed > mddev->resync_max ||
+ (sector_nr - mddev->curr_resync_completed) * 2
>= mddev->resync_max - mddev->curr_resync_completed) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
@@ -5549,7 +5576,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
ret:
- return reshape_sectors;
+ return retn;
}
static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
@@ -5611,11 +5638,11 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
}
- bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
- sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
+ sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
if (sh == NULL) {
- sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
/* make sure we don't swamp the stripe cache if someone else
* is trying to get access
*/
@@ -5639,7 +5666,7 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
- release_stripe(sh);
+ raid5_release_stripe(sh);
return STRIPE_SECTORS;
}
@@ -5678,7 +5705,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
/* already done this stripe */
continue;
- sh = get_active_stripe(conf, sector, 0, 1, 1);
+ sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);
if (!sh) {
/* failed to get a stripe - must wait */
@@ -5688,7 +5715,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
}
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
- release_stripe(sh);
+ raid5_release_stripe(sh);
raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
return handled;
@@ -5696,17 +5723,17 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
handle_stripe(sh);
- release_stripe(sh);
+ raid5_release_stripe(sh);
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
if (remaining == 0) {
trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
raid_bio, 0);
- bio_endio(raid_bio, 0);
+ bio_endio(raid_bio);
}
if (atomic_dec_and_test(&conf->active_aligned_reads))
- wake_up(&conf->wait_for_stripe);
+ wake_up(&conf->wait_for_quiescent);
return handled;
}
@@ -5726,8 +5753,12 @@ static int handle_active_stripes(struct r5conf *conf, int group,
for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
if (!list_empty(temp_inactive_list + i))
break;
- if (i == NR_STRIPE_HASH_LOCKS)
+ if (i == NR_STRIPE_HASH_LOCKS) {
+ spin_unlock_irq(&conf->device_lock);
+ r5l_flush_stripe_to_raid(conf->log);
+ spin_lock_irq(&conf->device_lock);
return batch_size;
+ }
release_inactive = true;
}
spin_unlock_irq(&conf->device_lock);
@@ -5735,6 +5766,7 @@ static int handle_active_stripes(struct r5conf *conf, int group,
release_inactive_stripe_list(conf, temp_inactive_list,
NR_STRIPE_HASH_LOCKS);
+ r5l_flush_stripe_to_raid(conf->log);
if (release_inactive) {
spin_lock_irq(&conf->device_lock);
return 0;
@@ -5742,6 +5774,7 @@ static int handle_active_stripes(struct r5conf *conf, int group,
for (i = 0; i < batch_size; i++)
handle_stripe(batch[i]);
+ r5l_write_stripe_run(conf->log);
cond_resched();
@@ -5805,6 +5838,18 @@ static void raid5d(struct md_thread *thread)
md_check_recovery(mddev);
+ if (!bio_list_empty(&conf->return_bi) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ struct bio_list tmp = BIO_EMPTY_LIST;
+ spin_lock_irq(&conf->device_lock);
+ if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ bio_list_merge(&tmp, &conf->return_bi);
+ bio_list_init(&conf->return_bi);
+ }
+ spin_unlock_irq(&conf->device_lock);
+ return_io(&tmp);
+ }
+
blk_start_plug(&plug);
handled = 0;
spin_lock_irq(&conf->device_lock);
@@ -5863,6 +5908,8 @@ static void raid5d(struct md_thread *thread)
mutex_unlock(&conf->cache_size_mutex);
}
+ r5l_flush_stripe_to_raid(conf->log);
+
async_tx_issue_pending_all();
blk_finish_plug(&plug);
@@ -6245,8 +6292,8 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
/* size is defined by the smallest of previous and new size */
raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
- sectors &= ~((sector_t)mddev->chunk_sectors - 1);
- sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
+ sectors &= ~((sector_t)conf->chunk_sectors - 1);
+ sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
return sectors * (raid_disks - conf->max_degraded);
}
@@ -6300,8 +6347,11 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ if (conf->log)
+ r5l_exit_log(conf->log);
if (conf->shrinker.seeks)
unregister_shrinker(&conf->shrinker);
+
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
@@ -6456,12 +6506,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
spin_lock_init(&conf->device_lock);
seqcount_init(&conf->gen_lock);
mutex_init(&conf->cache_size_mutex);
- init_waitqueue_head(&conf->wait_for_stripe);
+ init_waitqueue_head(&conf->wait_for_quiescent);
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
+ init_waitqueue_head(&conf->wait_for_stripe[i]);
+ }
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
INIT_LIST_HEAD(&conf->hold_list);
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
+ bio_list_init(&conf->return_bi);
init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
@@ -6511,7 +6565,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
raid_disk = rdev->raid_disk;
if (raid_disk >= max_disks
- || raid_disk < 0)
+ || raid_disk < 0 || test_bit(Journal, &rdev->flags))
continue;
disk = conf->disks + raid_disk;
@@ -6551,6 +6605,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (conf->reshape_progress != MaxSector) {
conf->prev_chunk_sectors = mddev->chunk_sectors;
conf->prev_algo = mddev->layout;
+ } else {
+ conf->prev_chunk_sectors = conf->chunk_sectors;
+ conf->prev_algo = conf->algorithm;
}
conf->min_nr_stripes = NR_STRIPES;
@@ -6628,6 +6685,7 @@ static int run(struct mddev *mddev)
int working_disks = 0;
int dirty_parity_disks = 0;
struct md_rdev *rdev;
+ struct md_rdev *journal_dev = NULL;
sector_t reshape_offset = 0;
int i;
long long min_offset_diff = 0;
@@ -6640,6 +6698,11 @@ static int run(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
long long diff;
+
+ if (test_bit(Journal, &rdev->flags)) {
+ journal_dev = rdev;
+ continue;
+ }
if (rdev->raid_disk < 0)
continue;
diff = (rdev->new_data_offset - rdev->data_offset);
@@ -6670,6 +6733,14 @@ static int run(struct mddev *mddev)
sector_t here_new, here_old;
int old_disks;
int max_degraded = (mddev->level == 6 ? 2 : 1);
+ int chunk_sectors;
+ int new_data_disks;
+
+ if (journal_dev) {
+ printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
if (mddev->new_level != mddev->level) {
printk(KERN_ERR "md/raid:%s: unsupported reshape "
@@ -6681,28 +6752,25 @@ static int run(struct mddev *mddev)
/* reshape_position must be on a new-stripe boundary, and one
* further up in new geometry must map after here in old
* geometry.
+ * If the chunk sizes are different, then as we perform reshape
+ * in units of the largest of the two, reshape_position needs to
+ * be a multiple of the largest chunk size times new data disks.
*/
here_new = mddev->reshape_position;
- if (sector_div(here_new, mddev->new_chunk_sectors *
- (mddev->raid_disks - max_degraded))) {
+ chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
+ new_data_disks = mddev->raid_disks - max_degraded;
+ if (sector_div(here_new, chunk_sectors * new_data_disks)) {
printk(KERN_ERR "md/raid:%s: reshape_position not "
"on a stripe boundary\n", mdname(mddev));
return -EINVAL;
}
- reshape_offset = here_new * mddev->new_chunk_sectors;
+ reshape_offset = here_new * chunk_sectors;
/* here_new is the stripe we will write to */
here_old = mddev->reshape_position;
- sector_div(here_old, mddev->chunk_sectors *
- (old_disks-max_degraded));
+ sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
/* here_old is the first stripe that we might need to read
* from */
if (mddev->delta_disks == 0) {
- if ((here_new * mddev->new_chunk_sectors !=
- here_old * mddev->chunk_sectors)) {
- printk(KERN_ERR "md/raid:%s: reshape position is"
- " confused - aborting\n", mdname(mddev));
- return -EINVAL;
- }
/* We cannot be sure it is safe to start an in-place
* reshape. It is only safe if user-space is monitoring
* and taking constant backups.
@@ -6721,10 +6789,10 @@ static int run(struct mddev *mddev)
return -EINVAL;
}
} else if (mddev->reshape_backwards
- ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
- here_old * mddev->chunk_sectors)
- : (here_new * mddev->new_chunk_sectors >=
- here_old * mddev->chunk_sectors + (-min_offset_diff))) {
+ ? (here_new * chunk_sectors + min_offset_diff <=
+ here_old * chunk_sectors)
+ : (here_new * chunk_sectors >=
+ here_old * chunk_sectors + (-min_offset_diff))) {
/* Reading from the same stripe as writing to - bad */
printk(KERN_ERR "md/raid:%s: reshape_position too early for "
"auto-recovery - aborting.\n",
@@ -6749,6 +6817,13 @@ static int run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
+ printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
+ mdname(mddev));
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+ }
+
conf->min_offset_diff = min_offset_diff;
mddev->thread = conf->thread;
conf->thread = NULL;
@@ -6952,6 +7027,14 @@ static int run(struct mddev *mddev)
mddev->queue);
}
+ if (journal_dev) {
+ char b[BDEVNAME_SIZE];
+
+ printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(journal_dev->bdev, b));
+ r5l_init_log(conf, journal_dev);
+ }
+
return 0;
abort:
md_unregister_thread(&mddev->thread);
@@ -6976,7 +7059,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
int i;
seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
- mddev->chunk_sectors / 2, mddev->layout);
+ conf->chunk_sectors / 2, mddev->layout);
seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
@@ -7061,6 +7144,15 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
struct disk_info *p = conf->disks + number;
print_raid5_conf(conf);
+ if (test_bit(Journal, &rdev->flags)) {
+ /*
+ * The journal disk is not removable, but we need to give the other
+ * disks a chance to update their superblocks. Otherwise the journal
+ * disk will be considered 'fresh'.
+ */
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ return -EINVAL;
+ }
if (rdev == p->rdev)
rdevp = &p->rdev;
else if (rdev == p->replacement)
@@ -7123,6 +7215,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int first = 0;
int last = conf->raid_disks - 1;
+ if (test_bit(Journal, &rdev->flags))
+ return -EINVAL;
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
@@ -7182,7 +7276,11 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
* worth it.
*/
sector_t newsize;
- sectors &= ~((sector_t)mddev->chunk_sectors - 1);
+ struct r5conf *conf = mddev->private;
+
+ if (conf->log)
+ return -EINVAL;
+ sectors &= ~((sector_t)conf->chunk_sectors - 1);
newsize = raid5_size(mddev, sectors, mddev->raid_disks);
if (mddev->external_size &&
mddev->array_sectors > newsize)
@@ -7233,6 +7331,8 @@ static int check_reshape(struct mddev *mddev)
{
struct r5conf *conf = mddev->private;
+ if (conf->log)
+ return -EINVAL;
if (mddev->delta_disks == 0 &&
mddev->new_layout == mddev->layout &&
mddev->new_chunk_sectors == mddev->chunk_sectors)
@@ -7421,6 +7521,7 @@ static void end_reshape(struct r5conf *conf)
rdev->data_offset = rdev->new_data_offset;
smp_wmb();
conf->reshape_progress = MaxSector;
+ conf->mddev->reshape_position = MaxSector;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
@@ -7489,7 +7590,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
* active stripes can drain
*/
conf->quiesce = 2;
- wait_event_cmd(conf->wait_for_stripe,
+ wait_event_cmd(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0 &&
atomic_read(&conf->active_aligned_reads) == 0,
unlock_all_device_hash_locks_irq(conf),
@@ -7503,11 +7604,12 @@ static void raid5_quiesce(struct mddev *mddev, int state)
case 0: /* re-enable writes */
lock_all_device_hash_locks_irq(conf);
conf->quiesce = 0;
- wake_up(&conf->wait_for_stripe);
+ wake_up(&conf->wait_for_quiescent);
wake_up(&conf->wait_for_overlap);
unlock_all_device_hash_locks_irq(conf);
break;
}
+ r5l_quiesce(conf->log, state);
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
@@ -7766,7 +7868,6 @@ static struct md_personality raid6_personality =
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
.congested = raid5_congested,
- .mergeable_bvec = raid5_mergeable_bvec,
};
static struct md_personality raid5_personality =
{
@@ -7790,7 +7891,6 @@ static struct md_personality raid5_personality =
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
.congested = raid5_congested,
- .mergeable_bvec = raid5_mergeable_bvec,
};
static struct md_personality raid4_personality =
@@ -7815,7 +7915,6 @@ static struct md_personality raid4_personality =
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
.congested = raid5_congested,
- .mergeable_bvec = raid5_mergeable_bvec,
};
static int __init raid5_init(void)
diff --git a/kernel/drivers/md/raid5.h b/kernel/drivers/md/raid5.h
index 584c7bd62..3f0c2e2a3 100644
--- a/kernel/drivers/md/raid5.h
+++ b/kernel/drivers/md/raid5.h
@@ -223,6 +223,9 @@ struct stripe_head {
struct stripe_head *batch_head; /* protected by stripe lock */
spinlock_t batch_lock; /* only header's lock is useful */
struct list_head batch_list; /* protected by head's batch lock*/
+
+ struct r5l_io_unit *log_io;
+ struct list_head log_list;
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -244,6 +247,7 @@ struct stripe_head {
struct bio *toread, *read, *towrite, *written;
sector_t sector; /* sector of this page */
unsigned long flags;
+ u32 log_checksum;
} dev[1]; /* allocated with extra space depending of RAID geometry */
};
@@ -265,9 +269,10 @@ struct stripe_head_state {
int dec_preread_active;
unsigned long ops_request;
- struct bio *return_bi;
+ struct bio_list return_bi;
struct md_rdev *blocked_rdev;
int handle_bad_blocks;
+ int log_failed;
};
/* Flags for struct r5dev.flags */
@@ -340,6 +345,7 @@ enum {
STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
* to batch yet.
*/
+ STRIPE_LOG_TRAPPED, /* trapped into log */
};
#define STRIPE_EXPAND_SYNC_FLAGS \
@@ -476,6 +482,9 @@ struct r5conf {
int skip_copy; /* Don't copy data from bio to stripe cache */
struct list_head *last_hold; /* detect hold_list promotions */
+ /* bios to have bi_end_io called after metadata is synced */
+ struct bio_list return_bi;
+
atomic_t reshape_stripes; /* stripes with pending writes for reshape */
/* unfortunately we need two cache names as we temporarily have
* two caches.
@@ -513,7 +522,8 @@ struct r5conf {
struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
- wait_queue_head_t wait_for_stripe;
+ wait_queue_head_t wait_for_quiescent;
+ wait_queue_head_t wait_for_stripe[NR_STRIPE_HASH_LOCKS];
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
@@ -540,6 +550,7 @@ struct r5conf {
struct r5worker_group *worker_groups;
int group_cnt;
int worker_cnt_per_group;
+ struct r5l_log *log;
};
@@ -606,4 +617,21 @@ static inline int algorithm_is_DDF(int layout)
extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
+extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
+extern void raid5_release_stripe(struct stripe_head *sh);
+extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
+ int previous, int *dd_idx,
+ struct stripe_head *sh);
+extern struct stripe_head *
+raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
+ int previous, int noblock, int noquiesce);
+extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
+extern void r5l_exit_log(struct r5l_log *log);
+extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
+extern void r5l_write_stripe_run(struct r5l_log *log);
+extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
+extern void r5l_stripe_write_finished(struct stripe_head *sh);
+extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
+extern void r5l_quiesce(struct r5l_log *log, int state);
+extern bool r5l_log_disk_error(struct r5conf *conf);
#endif
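The raid5.c and raid5.h hunks above also convert the completed-bio chains (return_bi) from hand-rolled bi_next lists into struct bio_list, which tracks head and tail so bios are appended in O(1) and drained in submission order rather than in reverse. A minimal userspace sketch of that head/tail pattern, with stripped-down stand-ins for struct bio and the list helpers (not the kernel structures):

#include <stdio.h>

/* Stand-ins for struct bio / struct bio_list: just enough to show the
 * head/tail list pattern that return_bi is switched over to. */
struct bio {
	int id;				/* placeholder payload */
	struct bio *bi_next;
};

struct bio_list {
	struct bio *head;
	struct bio *tail;
};

#define BIO_EMPTY_LIST { NULL, NULL }

/* Append at the tail: bios come back out in the order they were added,
 * unlike the old "push onto return_bi" chain, which reversed them. */
static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;
	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;
	bl->tail = bio;
}

static struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bio->bi_next;
		if (!bl->head)
			bl->tail = NULL;
		bio->bi_next = NULL;
	}
	return bio;
}

/* Shape of the new return_io(): drain the list and "complete" each bio. */
static void return_io(struct bio_list *return_bi)
{
	struct bio *bi;

	while ((bi = bio_list_pop(return_bi)) != NULL)
		printf("completing bio %d\n", bi->id);
}

int main(void)
{
	struct bio_list return_bi = BIO_EMPTY_LIST;
	struct bio bios[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

	for (int i = 0; i < 3; i++)
		bio_list_add(&return_bi, &bios[i]);
	return_io(&return_bi);		/* prints 1, 2, 3 in order */
	return 0;
}

This is also why the patch adds conf->return_bi: when MD_CHANGE_PENDING is set, handle_stripe() parks the completed bios on that per-array list, and raid5d() drains them with return_io() only once the flag has cleared.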