/* linux/block/blk-mq.h */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
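
/*
 * Note (informational; based on the counter updates in blk-mq.c): the
 * two-element rq_dispatched[] and rq_completed[] arrays are indexed by
 * sync vs. async (rq_is_sync()/op_is_sync()), slot 1 being sync. The
 * extra ____cacheline_aligned_in_smp on rq_completed keeps the
 * completion-side counters off the cacheline that dispatch hammers.
 */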

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
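
/*
 * Note (summary of the blk-mq.c behavior, not a contract):
 * blk_mq_get_driver_tag() assigns a driver tag from hctx->tags to @rq just
 * before it is handed to ->queue_rq(); with an I/O scheduler attached the
 * request initially carries only an internal tag from sched_tags. @wait
 * selects whether the caller may block on tag exhaustion (!wait maps to
 * BLK_MQ_REQ_NOWAIT).
 */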
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
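
/*
 * Usage sketch (illustrative only; the real call sites live in blk-mq.c):
 * map and request allocation pair up per hardware queue index, and are
 * torn down in the same order:
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags);
 *	if (!tags)
 *		return -ENOMEM;
 *	if (blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth)) {
 *		blk_mq_free_rq_map(tags);
 *		return -ENOMEM;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */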

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
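
/*
 * Locking sketch (illustrative; mirrors the callers in blk-mq.c):
 * __blk_mq_insert_request() expects the software queue lock to be held:
 *
 *	spin_lock(&ctx->lock);
 *	__blk_mq_insert_request(hctx, rq, at_head);
 *	spin_unlock(&ctx->lock);
 */
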
/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
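
/*
 * Usage sketch (illustrative): resolving the hardware queue that services
 * a given software queue's CPU:
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 */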

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

/*
 * debugfs helpers
 */
#ifdef CONFIG_DEBUG_FS
void blk_mq_debugfs_init(void);
int blk_mq_debugfs_register(struct request_queue *q, const char *name);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
#else
static inline void blk_mq_debugfs_init(void)
{
}

static inline int blk_mq_debugfs_register(struct request_queue *q,
					  const char *name)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
#endif
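
/*
 * Usage sketch (illustrative): registration and teardown pair up, and the
 * stubs above keep !CONFIG_DEBUG_FS builds working unchanged:
 *
 *	if (blk_mq_debugfs_register(q, name))
 *		pr_warn("blk-mq: debugfs registration failed\n");
 *	...
 *	blk_mq_debugfs_unregister(q);
 */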

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
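
/*
 * Usage sketch (illustrative): blk_mq_get_ctx() pins the caller to a CPU
 * via get_cpu(), so every call must be paired with blk_mq_put_ctx(), and
 * the section in between must not sleep:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... atomic work on ctx ...
 *
 *	blk_mq_put_ctx(ctx);
 */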

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}
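
/*
 * Usage sketch (illustrative; modeled on the allocation paths in blk-mq.c;
 * the flags value 0 and REQ_OP_READ are arbitrary examples):
 *
 *	struct blk_mq_alloc_data data;
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *	struct request *rq;
 *
 *	blk_mq_set_alloc_data(&data, q, 0, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&data, REQ_OP_READ);
 */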

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
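
/*
 * Note: BLK_MQ_REQ_INTERNAL marks allocations made on behalf of an I/O
 * scheduler; those draw from the scheduler-private sched_tags, while
 * driver-owned requests draw from the hardware tag space in hctx->tags.
 */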

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
					unsigned int op);

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
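
/*
 * Usage sketch (illustrative): run/dispatch paths typically bail out early
 * on stopped or unmapped hardware queues:
 *
 *	if (blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))
 *		return;
 */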

#endif