author     Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 12:17:53 -0700
committer  Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 15:44:42 -0700
commit     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00
tree       1c9cafbcd35f783a87880a10f85d1a060db1a563   /kernel/include/linux/blk-mq.h
parent     98260f3884f4a202f9ca5eabed40b1354c489b29
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base.
It is taken from the linux-4.1.y-rt branch of
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git, and the base commit is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat Jul 25 12:13:34 2015 +0200
Prepare v4.1.3-rt3
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Importing it this way loses all of the git history, which is not ideal. We
should move to a dedicated OPNFV project repo in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/include/linux/blk-mq.h')
-rw-r--r--   kernel/include/linux/blk-mq.h   267
1 file changed, 267 insertions(+), 0 deletions(-)
diff --git a/kernel/include/linux/blk-mq.h b/kernel/include/linux/blk-mq.h
new file mode 100644
index 000000000..e6ff990c9
--- /dev/null
+++ b/kernel/include/linux/blk-mq.h
@@ -0,0 +1,267 @@
+#ifndef BLK_MQ_H
+#define BLK_MQ_H
+
+#include <linux/blkdev.h>
+
+struct blk_mq_tags;
+struct blk_flush_queue;
+
+struct blk_mq_cpu_notifier {
+	struct list_head list;
+	void *data;
+	int (*notify)(void *data, unsigned long action, unsigned int cpu);
+};
+
+struct blk_mq_ctxmap {
+	unsigned int size;
+	unsigned int bits_per_word;
+	struct blk_align_bitmap *map;
+};
+
+struct blk_mq_hw_ctx {
+	struct {
+		spinlock_t		lock;
+		struct list_head	dispatch;
+	} ____cacheline_aligned_in_smp;
+
+	unsigned long		state;		/* BLK_MQ_S_* flags */
+	struct delayed_work	run_work;
+	struct delayed_work	delay_work;
+	cpumask_var_t		cpumask;
+	int			next_cpu;
+	int			next_cpu_batch;
+
+	unsigned long		flags;		/* BLK_MQ_F_* flags */
+
+	struct request_queue	*queue;
+	struct blk_flush_queue	*fq;
+
+	void			*driver_data;
+
+	struct blk_mq_ctxmap	ctx_map;
+
+	unsigned int		nr_ctx;
+	struct blk_mq_ctx	**ctxs;
+
+	atomic_t		wait_index;
+
+	struct blk_mq_tags	*tags;
+
+	unsigned long		queued;
+	unsigned long		run;
+#define BLK_MQ_MAX_DISPATCH_ORDER	10
+	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+
+	unsigned int		numa_node;
+	unsigned int		queue_num;
+
+	atomic_t		nr_active;
+
+	struct blk_mq_cpu_notifier	cpu_notifier;
+	struct kobject		kobj;
+};
+
+struct blk_mq_tag_set {
+	struct blk_mq_ops	*ops;
+	unsigned int		nr_hw_queues;
+	unsigned int		queue_depth;	/* max hw supported */
+	unsigned int		reserved_tags;
+	unsigned int		cmd_size;	/* per-request extra data */
+	int			numa_node;
+	unsigned int		timeout;
+	unsigned int		flags;		/* BLK_MQ_F_* */
+	void			*driver_data;
+
+	struct blk_mq_tags	**tags;
+
+	struct mutex		tag_list_lock;
+	struct list_head	tag_list;
+};
+
+struct blk_mq_queue_data {
+	struct request *rq;
+	struct list_head *list;
+	bool last;
+};
+
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
+typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
+typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
+typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (init_request_fn)(void *, struct request *, unsigned int,
+		unsigned int, unsigned int);
+typedef void (exit_request_fn)(void *, struct request *, unsigned int,
+		unsigned int);
+
+typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+		bool);
+
+struct blk_mq_ops {
+	/*
+	 * Queue request
+	 */
+	queue_rq_fn		*queue_rq;
+
+	/*
+	 * Map to specific hardware queue
+	 */
+	map_queue_fn		*map_queue;
+
+	/*
+	 * Called on request timeout
+	 */
+	timeout_fn		*timeout;
+
+	softirq_done_fn		*complete;
+
+	/*
+	 * Called when the block layer side of a hardware queue has been
+	 * set up, allowing the driver to allocate/init matching structures.
+	 * Ditto for exit/teardown.
+	 */
+	init_hctx_fn		*init_hctx;
+	exit_hctx_fn		*exit_hctx;
+
+	/*
+	 * Called for every command allocated by the block layer to allow
+	 * the driver to set up driver specific data.
+	 *
+	 * Tag greater than or equal to queue_depth is for setting up
+	 * flush request.
+	 *
+	 * Ditto for exit/teardown.
+	 */
+	init_request_fn		*init_request;
+	exit_request_fn		*exit_request;
+};
+
+enum {
+	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
+	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
+	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */
+
+	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
+	BLK_MQ_F_TAG_SHARED	= 1 << 1,
+	BLK_MQ_F_SG_MERGE	= 1 << 2,
+	BLK_MQ_F_SYSFS_UP	= 1 << 3,
+	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
+	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
+	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
+
+	BLK_MQ_S_STOPPED	= 0,
+	BLK_MQ_S_TAG_ACTIVE	= 1,
+
+	BLK_MQ_MAX_DEPTH	= 10240,
+
+	BLK_MQ_CPU_WORK_BATCH	= 8,
+};
+#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
+	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
+		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
+#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
+	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
+		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+		struct request_queue *q);
+void blk_mq_finish_init(struct request_queue *q);
+int blk_mq_register_disk(struct gendisk *);
+void blk_mq_unregister_disk(struct gendisk *);
+
+int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
+
+void blk_mq_insert_request(struct request *, bool, bool, bool);
+void blk_mq_free_request(struct request *rq);
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
+bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+		gfp_t gfp, bool reserved);
+struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+
+enum {
+	BLK_MQ_UNIQUE_TAG_BITS = 16,
+	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
+};
+
+u32 blk_mq_unique_tag(struct request *rq);
+
+static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
+{
+	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
+}
+
+static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
+{
+	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
+}
+
+struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
+void __blk_mq_complete_request_remote_work(struct work_struct *work);
+
+int blk_mq_request_started(struct request *rq);
+void blk_mq_start_request(struct request *rq);
+void blk_mq_end_request(struct request *rq, int error);
+void __blk_mq_end_request(struct request *rq, int error);
+
+void blk_mq_requeue_request(struct request *rq);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
+void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
+void blk_mq_complete_request(struct request *rq);
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_stop_hw_queues(struct request_queue *q);
+void blk_mq_start_hw_queues(struct request_queue *q);
+void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_run_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
+void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+		void *priv);
+void blk_mq_freeze_queue(struct request_queue *q);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
+
+/*
+ * Driver command data is immediately after the request. So subtract request
+ * size to get back to the original request, add request size to get the PDU.
+ */
+static inline struct request *blk_mq_rq_from_pdu(void *pdu)
+{
+	return pdu - sizeof(struct request);
+}
+static inline void *blk_mq_rq_to_pdu(struct request *rq)
+{
+	return rq + 1;
+}
+
+#define queue_for_each_hw_ctx(q, hctx, i)				\
+	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
+	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+
+#define queue_for_each_ctx(q, ctx, i)					\
+	for ((i) = 0; (i) < (q)->nr_queues &&				\
+	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
+
+#define hctx_for_each_ctx(hctx, ctx, i)					\
+	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
+	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
+
+#define blk_ctx_sum(q, sum)						\
+({									\
+	struct blk_mq_ctx *__x;						\
+	unsigned int __ret = 0, __i;					\
+									\
+	queue_for_each_ctx((q), __x, __i)				\
+		__ret += sum;						\
+	__ret;								\
+})
+
+#endif
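
The header above only declares the blk-mq interfaces; a driver uses them by filling in a struct blk_mq_tag_set, supplying a struct blk_mq_ops with at least queue_rq and map_queue, and creating a request queue with blk_mq_init_queue(). The sketch below is a minimal, hypothetical illustration of that flow against the v4.1-era API shown in this diff; it is not part of the patch. The sketch_* names, the queue depth, and the inline completion in queue_rq are assumptions made for the example, and gendisk/add_disk handling is omitted.

/*
 * Hypothetical usage sketch -- not part of this patch.  Shows how a driver
 * might register with the blk-mq interfaces declared above (v4.1 era).
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/numa.h>

struct sketch_cmd {			/* per-request PDU, sized via cmd_size */
	int result;
};

static int sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct sketch_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* data right after the request */

	blk_mq_start_request(rq);
	cmd->result = 0;			/* pretend the I/O finished immediately */
	blk_mq_end_request(rq, cmd->result);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops sketch_mq_ops = {
	.queue_rq	= sketch_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* default ctx -> hctx mapping */
};

static struct blk_mq_tag_set sketch_tag_set;
static struct request_queue *sketch_queue;

static int __init sketch_init(void)
{
	int ret;

	sketch_tag_set.ops		= &sketch_mq_ops;
	sketch_tag_set.nr_hw_queues	= 1;
	sketch_tag_set.queue_depth	= 64;
	sketch_tag_set.numa_node	= NUMA_NO_NODE;
	sketch_tag_set.cmd_size		= sizeof(struct sketch_cmd);
	sketch_tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&sketch_tag_set);
	if (ret)
		return ret;

	sketch_queue = blk_mq_init_queue(&sketch_tag_set);
	if (IS_ERR(sketch_queue)) {
		blk_mq_free_tag_set(&sketch_tag_set);
		return PTR_ERR(sketch_queue);
	}
	return 0;
}

static void __exit sketch_exit(void)
{
	blk_cleanup_queue(sketch_queue);
	blk_mq_free_tag_set(&sketch_tag_set);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");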
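
The PDU helpers near the end of the header rely on the layout described in their comment: the driver's per-request data sits directly after struct request, so either pointer can be derived from the other. As a short hedged continuation of the sketch above (again hypothetical, reusing the illustrative sketch_cmd), a driver that completes I/O asynchronously and only holds the PDU pointer at completion time could recover the request like this:

/*
 * Hypothetical continuation of the sketch above -- not part of the patch.
 * In a real driver this would typically run from the device's completion
 * interrupt, where only the per-command PDU pointer is at hand.
 */
static void sketch_complete_cmd(struct sketch_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);	/* pdu - sizeof(struct request) */

	blk_mq_end_request(rq, cmd->result);		/* report the stored result to blk-mq */
}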