| author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300 |
|---|---|---|
| committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300 |
| commit | e09b41010ba33a20a87472ee821fa407a5b8da36 (patch) | |
| tree | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/md/raid0.c | |
| parent | f93b97fd65072de626c074dbe099a1fff05ce060 (diff) | |
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.

During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The colliding hunks were dropped, since their logic was already
present in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/md/raid0.c')
-rw-r--r-- kernel/drivers/md/raid0.c | 133
1 file changed, 40 insertions(+), 93 deletions(-)
```diff
diff --git a/kernel/drivers/md/raid0.c b/kernel/drivers/md/raid0.c
index efb654eb5..f8e5db0cb 100644
--- a/kernel/drivers/md/raid0.c
+++ b/kernel/drivers/md/raid0.c
@@ -83,7 +83,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 	char b[BDEVNAME_SIZE];
 	char b2[BDEVNAME_SIZE];
 	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
-	bool discard_supported = false;
+	unsigned short blksize = 512;
 
 	if (!conf)
 		return -ENOMEM;
@@ -98,6 +98,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 		sector_div(sectors, mddev->chunk_sectors);
 		rdev1->sectors = sectors * mddev->chunk_sectors;
 
+		blksize = max(blksize, queue_logical_block_size(
+				      rdev1->bdev->bd_disk->queue));
+
 		rdev_for_each(rdev2, mddev) {
 			pr_debug("md/raid0:%s:   comparing %s(%llu)"
 				 " with %s(%llu)\n",
@@ -134,6 +137,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 	}
 	pr_debug("md/raid0:%s: FINAL %d zones\n",
 		 mdname(mddev), conf->nr_strip_zones);
+	/*
+	 * now since we have the hard sector sizes, we can make sure
+	 * chunk size is a multiple of that sector size
+	 */
+	if ((mddev->chunk_sectors << 9) % blksize) {
+		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+		       mdname(mddev),
+		       mddev->chunk_sectors << 9, blksize);
+		err = -EINVAL;
+		goto abort;
+	}
+
 	err = -ENOMEM;
 	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
 				conf->nr_strip_zones, GFP_KERNEL);
@@ -188,19 +203,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 		}
 		dev[j] = rdev1;
 
-		if (mddev->queue)
-			disk_stack_limits(mddev->gendisk, rdev1->bdev,
-					  rdev1->data_offset << 9);
-
-		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
-			conf->has_merge_bvec = 1;
-
 		if (!smallest || (rdev1->sectors < smallest->sectors))
 			smallest = rdev1;
 		cnt++;
-
-		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
-			discard_supported = true;
 	}
 	if (cnt != mddev->raid_disks) {
 		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
@@ -261,28 +266,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 			 (unsigned long long)smallest->sectors);
 	}
 
-	/*
-	 * now since we have the hard sector sizes, we can make sure
-	 * chunk size is a multiple of that sector size
-	 */
-	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
-		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
-		       mdname(mddev),
-		       mddev->chunk_sectors << 9);
-		goto abort;
-	}
-
-	if (mddev->queue) {
-		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
-		blk_queue_io_opt(mddev->queue,
-				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
-		if (!discard_supported)
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
-		else
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
-	}
-
 	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
 	*private_conf = conf;
 
@@ -351,58 +334,6 @@ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
 			     + sector_div(sector, zone->nb_dev)];
 }
 
-/**
- * raid0_mergeable_bvec -- tell bio layer if two requests can be merged
- * @mddev: the md device
- * @bvm: properties of new bio
- * @biovec: the request that could be merged to it.
- *
- * Return amount of bytes we can accept at this offset
- */
-static int raid0_mergeable_bvec(struct mddev *mddev,
-				struct bvec_merge_data *bvm,
-				struct bio_vec *biovec)
-{
-	struct r0conf *conf = mddev->private;
-	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-	sector_t sector_offset = sector;
-	int max;
-	unsigned int chunk_sectors = mddev->chunk_sectors;
-	unsigned int bio_sectors = bvm->bi_size >> 9;
-	struct strip_zone *zone;
-	struct md_rdev *rdev;
-	struct request_queue *subq;
-
-	if (is_power_of_2(chunk_sectors))
-		max = (chunk_sectors - ((sector & (chunk_sectors-1))
-						+ bio_sectors)) << 9;
-	else
-		max = (chunk_sectors - (sector_div(sector, chunk_sectors)
-						+ bio_sectors)) << 9;
-	if (max < 0)
-		max = 0; /* bio_add cannot handle a negative return */
-	if (max <= biovec->bv_len && bio_sectors == 0)
-		return biovec->bv_len;
-	if (max < biovec->bv_len)
-		/* too small already, no need to check further */
-		return max;
-	if (!conf->has_merge_bvec)
-		return max;
-
-	/* May need to check subordinate device */
-	sector = sector_offset;
-	zone = find_zone(mddev->private, &sector_offset);
-	rdev = map_sector(mddev, zone, sector, &sector_offset);
-	subq = bdev_get_queue(rdev->bdev);
-	if (subq->merge_bvec_fn) {
-		bvm->bi_bdev = rdev->bdev;
-		bvm->bi_sector = sector_offset + zone->dev_start +
-			rdev->data_offset;
-		return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
-	} else
-		return max;
-}
-
 static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 	sector_t array_sectors = 0;
@@ -433,12 +364,6 @@ static int raid0_run(struct mddev *mddev)
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 
-	if (mddev->queue) {
-		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
-		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
-	}
-
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
 		ret = create_strip_zones(mddev, &conf);
@@ -447,6 +372,29 @@ static int raid0_run(struct mddev *mddev)
 		mddev->private = conf;
 	}
 	conf = mddev->private;
+	if (mddev->queue) {
+		struct md_rdev *rdev;
+		bool discard_supported = false;
+
+		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+		blk_queue_io_opt(mddev->queue,
+				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+		rdev_for_each(rdev, mddev) {
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
+			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+				discard_supported = true;
+		}
+		if (!discard_supported)
+			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		else
+			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	}
 
 	/* calculate array device size */
 	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
@@ -543,7 +491,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 		if (unlikely((split->bi_rw & REQ_DISCARD) &&
 			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
 			/* Just ignore it */
-			bio_endio(split, 0);
+			bio_endio(split);
 		} else
 			generic_make_request(split);
 	} while (split != bio);
@@ -727,7 +675,6 @@ static struct md_personality raid0_personality=
 	.takeover	= raid0_takeover,
 	.quiesce	= raid0_quiesce,
 	.congested	= raid0_congested,
-	.mergeable_bvec	= raid0_mergeable_bvec,
 };
 
 static int __init raid0_init (void)
```
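The substance of the raid0.c change is twofold: the queue-parameter setup (hardware limits, io_min/io_opt, the discard flag, disk_stack_limits) moves out of create_strip_zones() into raid0_run(), and the chunk-size sanity check now validates against the largest logical block size found among the member devices rather than queue_logical_block_size(mddev->queue). The sketch below mirrors just that validation in user space. It is illustrative only: the member block sizes and the chunk size are made-up example values, not anything taken from the patch.

```c
/*
 * User-space sketch of the chunk-size check this patch adds to
 * create_strip_zones().  The device block sizes and chunk size
 * below are hypothetical example values.
 */
#include <stdio.h>

/* Mirror of the patch: start at 512 and take the max over all members. */
static unsigned short max_blksize(const unsigned short *devs, int n)
{
	unsigned short blksize = 512;

	for (int i = 0; i < n; i++)
		if (devs[i] > blksize)
			blksize = devs[i];
	return blksize;
}

int main(void)
{
	/* e.g. one 512-byte-sector and one 4096-byte-sector member */
	unsigned short devs[] = { 512, 4096 };
	unsigned int chunk_sectors = 128;	/* 128 * 512 = 64 KiB chunk */
	unsigned short blksize = max_blksize(devs, 2);

	/* Same test as the kernel code: chunk bytes must be a multiple. */
	if ((chunk_sectors << 9) % blksize) {
		fprintf(stderr,
			"chunk_size of %u not multiple of block size %u\n",
			chunk_sectors << 9, (unsigned)blksize);
		return 1;
	}
	printf("chunk_size %u is compatible with block size %u\n",
	       chunk_sectors << 9, (unsigned)blksize);
	return 0;
}
```

With a 4096-byte-sector member present, blksize becomes 4096, so a chunk size that is not 4 KiB-aligned is now rejected with -EINVAL when the array is assembled.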