/* xref: /linux/block/blk-mq.h (revision bd166ef183c263c5ced656d49ef19c7da4adc774) */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
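
/*
 * Note (added for clarity): the ____cacheline_aligned_in_smp annotation on
 * rq_completed starts the completion-time counters on their own cacheline,
 * separate from the submission-side lock/rq_list and dispatch counters, so
 * that the submit and complete paths do not false-share a line when they
 * run on different CPUs.
 */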

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
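
/*
 * Illustrative usage (sketch, not from the original header): callers that
 * already hold a software ctx typically look up the matching hardware
 * queue as
 *
 *	hctx = blk_mq_map_queue(q, ctx->cpu);
 *
 * The mapping is just an index through the per-CPU q->mq_map[] table, so
 * it is cheap enough for hot paths.
 */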

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance; for now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctxs are persistent. This does mean
 * that we can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
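
/*
 * Illustrative sketch (assumed usage, not from the original header):
 * blk_mq_get_ctx() pins the caller to a CPU via get_cpu(), so every call
 * must be balanced with blk_mq_put_ctx() once the ctx is no longer needed:
 *
 *	struct blk_mq_ctx *ctx;
 *
 *	ctx = blk_mq_get_ctx(q);	// disables preemption
 *	spin_lock(&ctx->lock);
 *	// ... work on ctx->rq_list ...
 *	spin_unlock(&ctx->lock);
 *	blk_mq_put_ctx(ctx);		// re-enables preemption
 *
 * As the comment above notes, the ctx remains valid even if the task later
 * migrates; only the get/put pairing has to be balanced.
 */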

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}
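
/*
 * Illustrative sketch (assumed flow, not from the original header):
 * allocation paths fill a blk_mq_alloc_data on the stack and hand it to
 * __blk_mq_alloc_request(), declared further down:
 *
 *	struct blk_mq_alloc_data data;
 *	struct request *rq;
 *
 *	blk_mq_set_alloc_data(&data, q, flags, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&data, op);
 *	if (!rq)
 *		return NULL;
 *
 * ctx and hctx are marked "input & output" above because the allocator may
 * update them while picking a tag.
 */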

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
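
/*
 * Note (added for clarity): BLK_MQ_REQ_INTERNAL marks allocations made on
 * behalf of an I/O scheduler; those draw tags from the per-hctx sched_tags
 * set rather than the driver-visible tags set, which is what the test
 * above selects between.
 */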

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
					unsigned int op);

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif