Diffstat (limited to 'kernel/block')
 kernel/block/bio.c               | 15 +++++++++++++++
 kernel/block/blk-cgroup.c        |  4 +---
 kernel/block/blk-core.c          |  6 ++++--
 kernel/block/blk-map.c           |  3 +++
 kernel/block/blk-merge.c         | 22 ++++++++++++++++++++++
 kernel/block/blk-mq.c            | 18 +++++++++---------
 kernel/block/bsg.c               |  3 +++
 kernel/block/cfq-iosched.c       | 15 +++++++++++----
 kernel/block/genhd.c             |  3 ++-
 kernel/block/ioprio.c            |  2 ++
 kernel/block/partition-generic.c | 13 ++++++++++---
 11 files changed, 82 insertions(+), 22 deletions(-)
diff --git a/kernel/block/bio.c b/kernel/block/bio.c
index d4d144363..46e2cc1d4 100644
--- a/kernel/block/bio.c
+++ b/kernel/block/bio.c
@@ -584,6 +584,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_rw = bio_src->bi_rw;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
+
+ bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
@@ -689,6 +691,8 @@ integrity_clone:
}
}
+ bio_clone_blkcg_association(bio, bio_src);
+
return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);
@@ -2014,6 +2018,17 @@ void bio_disassociate_task(struct bio *bio)
}
}
+/**
+ * bio_clone_blkcg_association - clone blkcg association from src to dst bio
+ * @dst: destination bio
+ * @src: source bio
+ */
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
+{
+ if (src->bi_css)
+ WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+}
+
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
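
Note: both clone paths above now call the new helper, so a cloned bio inherits the source bio's blkcg association and its I/O is accounted to the right cgroup. A minimal userspace sketch of the propagate-on-clone idea; fake_bio, fake_associate, and fake_clone_association are hypothetical stand-ins, not the kernel API:

    #include <assert.h>
    #include <stdio.h>

    struct css { int id; };                   /* stand-in for a cgroup css */
    struct fake_bio { struct css *bi_css; };  /* stand-in for struct bio */

    /* Mirrors the contract of bio_associate_blkcg(): refuses to
     * overwrite an existing association, returns 0 on success. */
    static int fake_associate(struct fake_bio *bio, struct css *css)
    {
        if (bio->bi_css)
            return -1;
        bio->bi_css = css;
        return 0;
    }

    /* The pattern the patch adds: copy the association on clone. */
    static void fake_clone_association(struct fake_bio *dst, struct fake_bio *src)
    {
        if (src->bi_css)
            assert(fake_associate(dst, src->bi_css) == 0);
    }

    int main(void)
    {
        struct css c = { 42 };
        struct fake_bio src = { &c }, clone = { 0 };

        fake_clone_association(&clone, &src);
        printf("clone inherits css %d\n", clone.bi_css->id);
        return 0;
    }
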
diff --git a/kernel/block/blk-cgroup.c b/kernel/block/blk-cgroup.c
index 5a37188b5..9d359e05f 100644
--- a/kernel/block/blk-cgroup.c
+++ b/kernel/block/blk-cgroup.c
@@ -1331,10 +1331,8 @@ int blkcg_policy_register(struct blkcg_policy *pol)
struct blkcg_policy_data *cpd;
cpd = pol->cpd_alloc_fn(GFP_KERNEL);
- if (!cpd) {
- mutex_unlock(&blkcg_pol_mutex);
+ if (!cpd)
goto err_free_cpds;
- }
blkcg->cpd[pol->plid] = cpd;
cpd->blkcg = blkcg;
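
Note: the err_free_cpds path already drops blkcg_pol_mutex, so the removed inline unlock would have released the mutex a second time. A sketch of the single-unlock cleanup-label idiom in plain C with pthreads; register_policy and the label name are illustrative only:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t pol_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Every failure path jumps to one label that releases the lock
     * exactly once; no call site carries its own inline unlock. */
    static int register_policy(void)
    {
        char *cpd;

        pthread_mutex_lock(&pol_mutex);

        cpd = malloc(64);
        if (!cpd)
            goto err_unlock;

        /* ... install cpd; later fallible steps also goto err_unlock ... */
        pthread_mutex_unlock(&pol_mutex);
        return 0;

    err_unlock:
        pthread_mutex_unlock(&pol_mutex);
        return -1;
    }
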
diff --git a/kernel/block/blk-core.c b/kernel/block/blk-core.c
index 2f7afb98e..52d2fe2fe 100644
--- a/kernel/block/blk-core.c
+++ b/kernel/block/blk-core.c
@@ -518,7 +518,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
void blk_set_queue_dying(struct request_queue *q)
{
- queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
+ spin_unlock_irq(q->queue_lock);
if (q->mq_ops)
blk_mq_wake_waiters(q);
@@ -2192,7 +2194,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
if (q->mq_ops) {
if (blk_queue_io_stat(q))
blk_account_io_start(rq, true);
- blk_mq_insert_request(rq, false, true, true);
+ blk_mq_insert_request(rq, false, true, false);
return 0;
}
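
Note: queue flags are updated with non-atomic bitops that are serialized by queue_lock, so QUEUE_FLAG_DYING must be set under that lock; an unlocked update can lose a concurrent flag change. A userspace model of why the lock matters; fake_queue and the flag values are hypothetical stand-ins:

    #include <pthread.h>

    #define QUEUE_FLAG_DYING   (1u << 0)
    #define QUEUE_FLAG_STOPPED (1u << 1)

    struct fake_queue {
        unsigned int flags;     /* updated non-atomically */
        pthread_mutex_t lock;   /* stands in for queue_lock */
    };

    /* Non-atomic read-modify-write: two unlocked callers can lose
     * each other's bits, so the update must happen under the lock. */
    static void fake_queue_flag_set(struct fake_queue *q, unsigned int flag)
    {
        pthread_mutex_lock(&q->lock);
        q->flags |= flag;       /* load, OR, store */
        pthread_mutex_unlock(&q->lock);
    }
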
diff --git a/kernel/block/blk-map.c b/kernel/block/blk-map.c
index f565e11f4..69953bd97 100644
--- a/kernel/block/blk-map.c
+++ b/kernel/block/blk-map.c
@@ -90,6 +90,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
if (!iter || !iter->count)
return -EINVAL;
+ if (!iter_is_iovec(iter))
+ return -EINVAL;
+
iov_for_each(iov, i, *iter) {
unsigned long uaddr = (unsigned long) iov.iov_base;
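
Note: the new check rejects any iov_iter that is not a plain user iovec before the iov_for_each() walk, which only knows how to handle iovec entries. The guard pattern on a tagged iterator, sketched in plain C; fake_iter and the enum are illustrative:

    #include <errno.h>
    #include <stddef.h>

    enum iter_type { ITER_IOVEC, ITER_BVEC, ITER_PIPE };

    struct fake_iter {
        enum iter_type type;
        size_t count;
    };

    /* Validate the iterator's type up front; the walk below would
     * only be correct for plain user iovecs. */
    static int map_user_iov(const struct fake_iter *iter)
    {
        if (!iter || !iter->count)
            return -EINVAL;
        if (iter->type != ITER_IOVEC)
            return -EINVAL;
        /* ... safe to iterate user iovecs here ... */
        return 0;
    }
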
diff --git a/kernel/block/blk-merge.c b/kernel/block/blk-merge.c
index b966db8f3..7225511cf 100644
--- a/kernel/block/blk-merge.c
+++ b/kernel/block/blk-merge.c
@@ -92,9 +92,31 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
+ unsigned bvecs = 0;
bio_for_each_segment(bv, bio, iter) {
/*
+ * With arbitrary bio sizes, the incoming bio may be very
+ * big. We have to split the bio into small bios so that
+ * each holds at most BIO_MAX_PAGES bvecs, because
+ * bio_clone() can fail to allocate big bvecs.
+ *
+ * It would be better to apply the limit per request
+ * queue where bio_clone() is involved, instead of
+ * globally. The biggest blocker is the bio_clone()
+ * in bio bounce.
+ *
+ * If a bio is split for this reason, we should still
+ * allow the resulting bios to continue merging, but
+ * don't do that for now, to keep the change simple.
+ *
+ * TODO: deal with bio bounce's bio_clone() gracefully
+ * and convert the global limit into per-queue limit.
+ */
+ if (bvecs++ >= BIO_MAX_PAGES)
+ goto split;
+
+ /*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
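
Note: the counting logic above is easy to check in isolation. A standalone model of the split plan; plan_splits is illustrative, and the kernel code jumps to its split label where this sketch prints:

    #include <stdio.h>

    #define BIO_MAX_PAGES 256

    /* Walk nr_segs segments and report where a split would happen
     * so no piece ends up with more than BIO_MAX_PAGES segments. */
    static void plan_splits(unsigned nr_segs)
    {
        unsigned bvecs = 0, seg;

        for (seg = 0; seg < nr_segs; seg++) {
            if (bvecs++ >= BIO_MAX_PAGES) {
                printf("split before segment %u\n", seg);
                bvecs = 1;  /* current segment starts the new piece */
            }
        }
    }

    int main(void)
    {
        plan_splits(600);   /* splits before segments 256 and 512 */
        return 0;
    }
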
diff --git a/kernel/block/blk-mq.c b/kernel/block/blk-mq.c
index 7cdf19e4a..2e21e4c04 100644
--- a/kernel/block/blk-mq.c
+++ b/kernel/block/blk-mq.c
@@ -621,8 +621,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
* If a request wasn't started before the queue was
* marked dying, kill it here or it'll go unnoticed.
*/
- if (unlikely(blk_queue_dying(rq->q)))
- blk_mq_complete_request(rq, -EIO);
+ if (unlikely(blk_queue_dying(rq->q))) {
+ rq->errors = -EIO;
+ blk_mq_end_request(rq, rq->errors);
+ }
return;
}
if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -798,7 +800,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
switch (ret) {
case BLK_MQ_RQ_QUEUE_OK:
queued++;
- continue;
+ break;
case BLK_MQ_RQ_QUEUE_BUSY:
list_add(&rq->queuelist, &rq_list);
__blk_mq_requeue_request(rq);
@@ -860,7 +862,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
- int cpu = hctx->next_cpu, next_cpu;
+ int next_cpu;
next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
if (next_cpu >= nr_cpu_ids)
@@ -868,8 +870,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
-
- return cpu;
}
return hctx->next_cpu;
@@ -1331,9 +1331,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_put_ctx(data.ctx);
if (!old_rq)
goto done;
- if (!blk_mq_direct_issue_request(old_rq, &cookie))
- goto done;
- blk_mq_insert_request(old_rq, false, true, true);
+ if (test_bit(BLK_MQ_S_STOPPED, &data.hctx->state) ||
+ blk_mq_direct_issue_request(old_rq, &cookie) != 0)
+ blk_mq_insert_request(old_rq, false, true, true);
goto done;
}
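
Note: the last hunk makes direct issue fall back to the regular insert path when the hardware context is stopped or the issue attempt fails, instead of feeding requests to a stopped queue. The decision in isolation; direct_issue, insert_request, and hctx_stopped are hypothetical stand-ins:

    #include <stdbool.h>

    struct fake_rq { int id; };

    static bool hctx_stopped;

    /* Returns 0 when the driver accepted the request directly. */
    static int direct_issue(struct fake_rq *rq)
    {
        (void)rq;
        return hctx_stopped ? -1 : 0;
    }

    static void insert_request(struct fake_rq *rq)
    {
        (void)rq;   /* queue it for the normal dispatch path */
    }

    /* The fixed pattern: only direct-issue on a running queue; on a
     * stopped queue or a failed issue, fall back to the regular
     * insert path rather than losing the request. */
    static void submit(struct fake_rq *rq)
    {
        if (hctx_stopped || direct_issue(rq) != 0)
            insert_request(rq);
    }
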
diff --git a/kernel/block/bsg.c b/kernel/block/bsg.c
index d214e929c..b9a53615b 100644
--- a/kernel/block/bsg.c
+++ b/kernel/block/bsg.c
@@ -655,6 +655,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
dprintk("%s: write %Zd bytes\n", bd->name, count);
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
bsg_set_block(bd, file);
bytes_written = 0;
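
Note: bsg_write() parses a command block that embeds user pointers, so it must not be reachable with the kernel address limit (for example via splice); the new check turns that case into -EINVAL. The shape of the guard; caller_has_kernel_ds stands in for segment_eq(get_fs(), KERNEL_DS):

    #include <errno.h>
    #include <stdbool.h>
    #include <sys/types.h>

    /* Hypothetical stand-in for segment_eq(get_fs(), KERNEL_DS). */
    static bool caller_has_kernel_ds(void) { return false; }

    /* Refuse early: everything after this point may interpret
     * pointers found inside buf as user addresses. */
    static ssize_t guarded_write(const char *buf, size_t count)
    {
        (void)buf;
        if (caller_has_kernel_ds())
            return -EINVAL;
        /* ... parse commands, copy payloads from user space ... */
        return (ssize_t)count;
    }
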
diff --git a/kernel/block/cfq-iosched.c b/kernel/block/cfq-iosched.c
index 1f9093e90..e04a7b849 100644
--- a/kernel/block/cfq-iosched.c
+++ b/kernel/block/cfq-iosched.c
@@ -1572,7 +1572,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
{
struct cfq_group_data *cgd;
- cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
+ cgd = kzalloc(sizeof(*cgd), gfp);
if (!cgd)
return NULL;
return &cgd->cpd;
@@ -3003,7 +3003,6 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
if (time_before(jiffies, rq->fifo_time))
rq = NULL;
- cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
return rq;
}
@@ -3377,6 +3376,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned int max_dispatch;
+ if (cfq_cfqq_must_dispatch(cfqq))
+ return true;
+
/*
* Drain async requests before we start sync IO
*/
@@ -3468,15 +3470,20 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+ rq = cfq_check_fifo(cfqq);
+ if (rq)
+ cfq_mark_cfqq_must_dispatch(cfqq);
+
if (!cfq_may_dispatch(cfqd, cfqq))
return false;
/*
* follow expired path, else get first next available
*/
- rq = cfq_check_fifo(cfqq);
if (!rq)
rq = cfqq->next_rq;
+ else
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
/*
* insert request into driver dispatch list
@@ -3944,7 +3951,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
- if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
+ if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
return true;
if (new_cfqq->cfqg != cfqq->cfqg)
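
Note: the cfq changes mark a queue must-dispatch as soon as its FIFO holds an expired request, and both the dispatch limits and the preemption check now honor that flag, so a timed-out request can no longer be starved. The ordering in a userspace model; fake_queue, fifo_expired, and may_dispatch are illustrative:

    #include <stdbool.h>
    #include <time.h>

    struct fake_queue {
        bool must_dispatch;
        bool has_requests;
        time_t fifo_deadline;   /* oldest request's deadline */
    };

    /* True when the oldest request has waited past its deadline. */
    static bool fifo_expired(const struct fake_queue *q)
    {
        return q->has_requests && time(NULL) >= q->fifo_deadline;
    }

    /* The fixed ordering: note the expired request before applying
     * the usual limits, and let the flag override those limits. */
    static bool may_dispatch(struct fake_queue *q)
    {
        if (fifo_expired(q))
            q->must_dispatch = true;
        if (q->must_dispatch)
            return true;
        /* ... normal async/sync dispatch limits would apply here ... */
        return false;
    }
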
diff --git a/kernel/block/genhd.c b/kernel/block/genhd.c
index e5cafa515..a5bed6bc8 100644
--- a/kernel/block/genhd.c
+++ b/kernel/block/genhd.c
@@ -612,7 +612,7 @@ void add_disk(struct gendisk *disk)
/* Register BDI before referencing it from bdev */
bdi = &disk->queue->backing_dev_info;
- bdi_register_dev(bdi, disk_devt(disk));
+ bdi_register_owner(bdi, disk_to_dev(disk));
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
@@ -831,6 +831,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
if (iter) {
class_dev_iter_exit(iter);
kfree(iter);
+ seqf->private = NULL;
}
}
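
Note: disk_seqf_stop() can run again after it has freed the iterator, since seq_file may call stop more than once per session; clearing seqf->private makes the repeated call a harmless no-op instead of a double free. The idiom in isolation:

    #include <stdlib.h>

    struct seq_state { void *iter; };

    /* stop() may be called repeatedly; nulling the pointer after
     * the free turns any later call into a no-op. */
    static void seq_stop(struct seq_state *s)
    {
        if (s->iter) {
            free(s->iter);
            s->iter = NULL;
        }
    }
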
diff --git a/kernel/block/ioprio.c b/kernel/block/ioprio.c
index cc7800e9e..01b811629 100644
--- a/kernel/block/ioprio.c
+++ b/kernel/block/ioprio.c
@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
if (ret)
goto out;
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+ task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
+ task_unlock(p);
out:
return ret;
}
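
Note: without task_lock(), the task could free its io_context between the NULL check and the dereference; the exit path clears the pointer under the same lock, so holding it makes check plus read atomic with respect to teardown. A pthread model of the same check-then-use fix; fake_task and get_ioprio are illustrative:

    #include <pthread.h>

    struct io_ctx { int ioprio; };

    struct fake_task {
        pthread_mutex_t lock;       /* stands in for task_lock() */
        struct io_ctx *io_context;  /* cleared under lock on exit */
    };

    /* Check and dereference under the lock, so a concurrent exit
     * cannot free io_context between the two steps. */
    static int get_ioprio(struct fake_task *t, int dflt)
    {
        int ret = dflt;

        pthread_mutex_lock(&t->lock);
        if (t->io_context)
            ret = t->io_context->ioprio;
        pthread_mutex_unlock(&t->lock);
        return ret;
    }
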
diff --git a/kernel/block/partition-generic.c b/kernel/block/partition-generic.c
index 746935a59..a241e3900 100644
--- a/kernel/block/partition-generic.c
+++ b/kernel/block/partition-generic.c
@@ -349,15 +349,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del;
}
+ err = hd_ref_init(p);
+ if (err) {
+ if (flags & ADDPART_FLAG_WHOLEDISK)
+ goto out_remove_file;
+ goto out_del;
+ }
+
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);
/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
- if (!hd_ref_init(p))
- return p;
+ return p;
out_free_info:
free_part_info(p);
@@ -366,6 +371,8 @@ out_free_stats:
out_free:
kfree(p);
return ERR_PTR(err);
+out_remove_file:
+ device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
kobject_put(p->holder_dir);
device_del(pdev);
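
Note: hd_ref_init() is now checked before the partition is published, and the new out_remove_file label slots into the existing unwind ladder so each error path undoes exactly what was set up before the failure, in reverse order of construction. The general shape of such a ladder; the step_/undo_ names are illustrative:

    /* Hypothetical setup/teardown pairs showing the unwind order. */
    static int step_a(void) { return 0; }
    static void undo_a(void) { }
    static int step_b(void) { return 0; }
    static void undo_b(void) { }
    static int step_c(void) { return -1; }  /* fails: unwind b, then a */

    static int construct(void)
    {
        int err;

        err = step_a();
        if (err)
            goto out;
        err = step_b();
        if (err)
            goto out_undo_a;
        err = step_c();
        if (err)
            goto out_undo_b;
        return 0;   /* publish only after every fallible step */

    out_undo_b:
        undo_b();
    out_undo_a:
        undo_a();
    out:
        return err;
    }
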