author		José Pekkarinen <jose.pekkarinen@nokia.com>	2015-10-19 08:35:30 +0300
committer	José Pekkarinen <jose.pekkarinen@nokia.com>	2015-10-19 08:35:30 +0300
commit		ec0a2ed6d8a5e555edef907895c041e285fdb495
tree		a4c8d982f8ac820b1b60818df22ad3ccac2036d5 /kernel/drivers/md/raid5.c
parent		342fa5dfa053559f47caad657132522496dcf1b3
These changes are a raw update to a vanilla kernel 4.1.10, with the
recently announced -rt patch, patch-4.1.10-rt10.patch. No further changes
were needed.

Change-Id: I9a0cf084498133b10771e744b6da4b29dff706ba
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/md/raid5.c')
-rw-r--r--	kernel/drivers/md/raid5.c	34
1 file changed, 27 insertions(+), 7 deletions(-)
diff --git a/kernel/drivers/md/raid5.c b/kernel/drivers/md/raid5.c
index 47413f7f0..a90d5876f 100644
--- a/kernel/drivers/md/raid5.c
+++ b/kernel/drivers/md/raid5.c
@@ -2153,6 +2153,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	if (!sc)
 		return -ENOMEM;
 
+	/* Need to ensure auto-resizing doesn't interfere */
+	mutex_lock(&conf->cache_size_mutex);
+
 	for (i = conf->max_nr_stripes; i; i--) {
 		nsh = alloc_stripe(sc, GFP_KERNEL);
 		if (!nsh)
@@ -2169,6 +2172,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 			kmem_cache_free(sc, nsh);
 		}
 		kmem_cache_destroy(sc);
+		mutex_unlock(&conf->cache_size_mutex);
 		return -ENOMEM;
 	}
 	/* Step 2 - Must use GFP_NOIO now.
@@ -2215,6 +2219,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	} else
 		err = -ENOMEM;
 
+	mutex_unlock(&conf->cache_size_mutex);
 	/* Step 4, return new stripes to service */
 	while(!list_empty(&newstripes)) {
 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2242,7 +2247,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 static int drop_one_stripe(struct r5conf *conf)
 {
 	struct stripe_head *sh;
-	int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
+	int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
 
 	spin_lock_irq(conf->hash_locks + hash);
 	sh = get_free_stripe(conf, hash);
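
Note on the hunk above: replacing the modulo with a mask is only an identity
when the divisor is a power of two; for non-negative x and power-of-two N,
(x % N) == (x & (N - 1)). In raid5.h, NR_STRIPE_HASH_LOCKS is expected to be
8 and STRIPE_HASH_LOCKS_MASK to be (NR_STRIPE_HASH_LOCKS - 1), so the two
forms agree while the & avoids a division. A minimal userspace sketch of the
equivalence (the constant definitions are assumptions mirroring raid5.h):

	#include <assert.h>

	#define NR_STRIPE_HASH_LOCKS	8	/* assumed, as in raid5.h */
	#define STRIPE_HASH_LOCKS_MASK	(NR_STRIPE_HASH_LOCKS - 1)

	int main(void)
	{
		/* For non-negative x and power-of-two N, x % N == x & (N - 1). */
		for (int x = 0; x < 4096; x++)
			assert((x % NR_STRIPE_HASH_LOCKS) ==
			       (x & STRIPE_HASH_LOCKS_MASK));
		return 0;
	}
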
@@ -5848,12 +5853,14 @@ static void raid5d(struct md_thread *thread)
 	pr_debug("%d stripes handled\n", handled);
 
 	spin_unlock_irq(&conf->device_lock);
-	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
+	    mutex_trylock(&conf->cache_size_mutex)) {
 		grow_one_stripe(conf, __GFP_NOWARN);
 		/* Set flag even if allocation failed. This helps
 		 * slow down allocation requests when mem is short
 		 */
 		set_bit(R5_DID_ALLOC, &conf->cache_state);
+		mutex_unlock(&conf->cache_size_mutex);
 	}
 
 	async_tx_issue_pending_all();
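
The daemon takes the new mutex with mutex_trylock() rather than mutex_lock():
the kernel's mutex_trylock() returns 1 if the lock was acquired and 0 if it
is contended, and it never sleeps, so raid5d simply skips this round of
stripe preallocation instead of stalling behind an ongoing resize. A small
userspace sketch of the same skip-if-busy pattern using pthreads (my_lock,
want_more and grow_cache are illustrative names; note that
pthread_mutex_trylock() uses the inverse convention, returning 0 on success):

	#include <pthread.h>

	static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

	static void grow_cache(void)
	{
		/* stand-in for grow_one_stripe() */
	}

	static void maybe_grow(int want_more)
	{
		/* Never blocks: if the lock is busy, skip the work and
		 * let a later pass retry. */
		if (want_more && pthread_mutex_trylock(&my_lock) == 0) {
			grow_cache();
			pthread_mutex_unlock(&my_lock);
		}
	}
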
@@ -5885,18 +5892,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 		return -EINVAL;
 
 	conf->min_nr_stripes = size;
+	mutex_lock(&conf->cache_size_mutex);
 	while (size < conf->max_nr_stripes &&
 	       drop_one_stripe(conf))
 		;
+	mutex_unlock(&conf->cache_size_mutex);
 
 	err = md_allow_write(mddev);
 	if (err)
 		return err;
+	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
 		if (!grow_one_stripe(conf, GFP_KERNEL))
 			break;
+	mutex_unlock(&conf->cache_size_mutex);
 
 	return 0;
 }
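
Here the mutex is dropped before md_allow_write() and retaken afterwards.
md_allow_write() can sleep waiting for a superblock update, and while
cache_size_mutex is held every mutex_trylock() in the shrinker and in raid5d
fails, so holding it across a long sleep would needlessly block the automatic
grow/shrink paths; releasing it around the blocking call keeps that window
short. (That rationale is an inference from the surrounding code, not
something the commit message states.) A compact userspace sketch of the
drop-the-lock-around-a-blocking-call shape, with all names illustrative:

	#include <pthread.h>
	#include <unistd.h>

	static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

	static void shrink_cache_to(int size) { /* fast, lock held */ }
	static void grow_cache_to(int size)   { /* fast, lock held */ }

	static int blocking_prepare(void)
	{
		sleep(1);	/* stand-in for a call that may sleep */
		return 0;
	}

	static int set_cache_size(int size)
	{
		pthread_mutex_lock(&cache_lock);
		shrink_cache_to(size);
		pthread_mutex_unlock(&cache_lock);

		if (blocking_prepare() != 0)	/* lock NOT held here */
			return -1;

		pthread_mutex_lock(&cache_lock);
		grow_cache_to(size);
		pthread_mutex_unlock(&cache_lock);
		return 0;
	}
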
@@ -6363,11 +6374,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
 				      struct shrink_control *sc)
 {
 	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
-	int ret = 0;
-	while (ret < sc->nr_to_scan) {
-		if (drop_one_stripe(conf) == 0)
-			return SHRINK_STOP;
-		ret++;
+	unsigned long ret = SHRINK_STOP;
+
+	if (mutex_trylock(&conf->cache_size_mutex)) {
+		ret = 0;
+		while (ret < sc->nr_to_scan &&
+		       conf->max_nr_stripes > conf->min_nr_stripes) {
+			if (drop_one_stripe(conf) == 0) {
+				ret = SHRINK_STOP;
+				break;
+			}
+			ret++;
+		}
+		mutex_unlock(&conf->cache_size_mutex);
 	}
 	return ret;
 }
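
Returning SHRINK_STOP (defined as ~0UL in linux/shrinker.h) tells the VM that
nothing more can be freed from this cache right now, so it stops calling
back. The rewritten loop reports it both when the mutex is contended and when
drop_one_stripe() makes no progress, and it additionally refuses to shrink
below min_nr_stripes. A userspace sketch of that scan-loop shape (SHRINK_STOP
is redefined here to mirror the kernel value; free_one_object and the
counters are illustrative stand-ins):

	#include <pthread.h>

	#define SHRINK_STOP	(~0UL)	/* assumed to mirror linux/shrinker.h */

	static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
	static int nr_objects = 128;
	static int min_objects = 32;

	/* Frees one object; returns 0 once the floor is reached. */
	static int free_one_object(void)
	{
		if (nr_objects <= min_objects)
			return 0;
		nr_objects--;
		return 1;
	}

	static unsigned long cache_scan(unsigned long nr_to_scan)
	{
		unsigned long freed = SHRINK_STOP;

		if (pthread_mutex_trylock(&cache_lock) == 0) {
			freed = 0;
			while (freed < nr_to_scan) {
				if (free_one_object() == 0) {
					freed = SHRINK_STOP;	/* no progress: back off */
					break;
				}
				freed++;
			}
			pthread_mutex_unlock(&cache_lock);
		}
		return freed;
	}
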
@@ -6436,6 +6455,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 		goto abort;
 	spin_lock_init(&conf->device_lock);
 	seqcount_init(&conf->gen_lock);
+	mutex_init(&conf->cache_size_mutex);
 	init_waitqueue_head(&conf->wait_for_stripe);
 	init_waitqueue_head(&conf->wait_for_overlap);
 	INIT_LIST_HEAD(&conf->handle_list);