/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

#define BLK_MQ_CPU_WORK_BATCH	(8)

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)
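
/*
 * Illustrative usage, modelled on the internal insertion helpers in blk-mq.c
 * (exact call sites vary by kernel version):
 *
 *	blk_mq_insert_request(rq, 0);				// tail insert
 *	blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);	// head insert
 */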

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_POLLED is set.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}
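
/*
 * Resulting mapping of the function above, for example:
 *
 *	REQ_OP_READ			-> HCTX_TYPE_READ
 *	REQ_OP_WRITE			-> HCTX_TYPE_DEFAULT
 *	REQ_OP_READ | REQ_POLLED	-> HCTX_TYPE_POLL
 */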

/*
 * blk_mq_map_queue() - map (opf,ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
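
/*
 * Illustrative caller (blk_mq_get_ctx() is defined below; the local variable
 * names are for illustration only):
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 */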

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well, for
 * instance; for now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
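
/*
 * A minimal sketch of how this structure is typically populated, modelled on
 * blk_mq_alloc_request() in blk-mq.c (fields not listed start out zeroed and
 * ctx/hctx are filled in during tag allocation):
 *
 *	struct blk_mq_alloc_data data = {
 *		.q		= q,
 *		.flags		= flags,
 *		.cmd_flags	= opf,
 *		.nr_tags	= 1,
 *	};
 */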

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
		unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}
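
/*
 * Note the distinction between the two sharing modes: BLK_MQ_F_TAG_HCTX_SHARED
 * (tested above) means all hardware queues of a tag set share a single set of
 * tags, while BLK_MQ_F_TAG_QUEUE_SHARED (tested by blk_mq_tag_busy() and
 * friends) means multiple request queues share the tags of a hardware queue.
 */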

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->rq_flags & RQF_SCHED_TAGS)
		return data->hctx->sched_tags;
	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
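
/*
 * A minimal sketch of the budget pattern used by the dispatch paths (the real
 * callers in blk-mq.c and blk-mq-sched.c also handle queue runs/restarts):
 *
 *	int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *	if (budget_token < 0)
 *		return false;	// no budget available right now
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	// ... attempt to dispatch rq; if dispatch fails, release the budget:
 *	blk_mq_put_dispatch_budget(q, budget_token);
 */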

static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_add(val, &hctx->nr_active);
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_add_active_requests(hctx, 1);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_add_active_requests(hctx, val);
}

static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_inc_active_requests(hctx);
}

static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, val);
}

static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_dec_active_requests(hctx);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_dec_active_requests(hctx);
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_alloc_driver_tag(struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	return true;
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = READ_ONCE(hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
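
/*
 * Worked example for the fair-share computation above: with a bitmap depth of
 * 256 and 8 active users, each user may have up to
 * max((256 + 7) / 8, 4) = 32 requests in flight; with depth 16 and 8 users
 * the 4U floor applies, so each user still gets 4.
 */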

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {								\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
		int srcu_idx;					\
								\
		might_sleep_if(check_sleep);			\
		srcu_idx = srcu_read_lock(__tag_set->srcu);	\
		(dispatch_ops);					\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);	\
	} else {						\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	}							\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)

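/*
 * Illustrative usage, modelled on the plug-flush path in blk-mq.c (exact
 * call sites vary by kernel version):
 *
 *	blk_mq_run_dispatch_ops(q,
 *		__blk_mq_flush_plug_list(q, plug));
 */
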
#endif