/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
			     unsigned int nr_budgets);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags,
					unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
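
/*
 * Illustrative sketch (not part of this header): a caller such as
 * blk_mq_map_swqueue() resolves the hardware queue for each (type, cpu)
 * pair roughly like this; the variable names here are hypothetical:
 *
 *	unsigned int cpu;
 *	enum hctx_type type;
 *
 *	for_each_possible_cpu(cpu) {
 *		for (type = 0; type < HCTX_MAX_TYPES; type++) {
 *			struct blk_mq_hw_ctx *hctx =
 *				blk_mq_map_queue_type(q, type, cpu);
 *			// hctx now serves submissions of this type from cpu
 *		}
 *	}
 */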

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_HIPRI is set, polling is enabled.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}

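/*
 * Illustrative sketch (not part of this header): the allocation path in
 * blk-mq.c pairs a software queue with its hardware queue roughly like
 * this (simplified; names abbreviated):
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * A REQ_HIPRI bio lands on the poll hardware queue, a plain read on the
 * read queue (if the device exposes one), everything else on the default.
 */
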
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-CPU software queues; they could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}

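/*
 * Illustrative sketch (not part of this header): dispatch paths such as
 * blk_mq_do_dispatch_sched() in blk-mq-sched.c use the budget token
 * helpers above roughly as follows (simplified, error handling omitted):
 *
 *	int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *	if (budget_token < 0)
 *		break;			// driver is out of budget, back off
 *	...
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	// and if the request cannot be dispatched after all:
 *	blk_mq_put_dispatch_budget(q, budget_token);
 *
 * Drivers that don't implement ->get_budget() always "succeed" here,
 * since blk_mq_get_dispatch_budget() then returns 0.
 */
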
static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_dec(&hctx->nr_active);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the
 * target request queue is for a zoned block device and the BIO to plug is a
 * write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}

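/*
 * Illustrative sketch (not part of this header): the submission path calls
 * this helper instead of reading current->plug directly, roughly as in
 * blk_mq_submit_bio() (simplified):
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug)
 *		// plugged path: insertion deferred, requests may be reordered
 *	else
 *		// direct insertion, submission order preserved
 *
 * For a write to a zoned device, plug is NULL, so the request goes straight
 * to insertion and its zone-sequential ordering is preserved.
 */
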
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
		users = atomic_read(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
		users = atomic_read(&hctx->tags->active_queues);
	}

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}

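/*
 * Worked example (an illustration, not from the source): with a shared tag
 * depth of 256 and 8 active users, each hctx may queue up to
 * max(ceil(256 / 8), 4) = 32 requests. With 128 active users the ceiling
 * division would yield only 2, so the max(..., 4U) clamp still allows each
 * user at least 4 tags.
 */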

#endif