/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
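
/*
 * Note that the dispatch-time counters and the completion-time counters
 * sit on separate cachelines (see the ____cacheline_aligned_in_smp on
 * rq_completed) so that submission and completion, which can run on
 * different CPUs, do not bounce the same cacheline between them.
 */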

/*
 * Bits for request->gstate.  The lower two bits carry the MQ_RQ_* state
 * value and the upper bits the generation number.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,

	MQ_RQ_STATE_BITS	= 2,
	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
	MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
};
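
/*
 * For illustration: a gstate value of 0x5 decodes to generation 1
 * (0x5 >> MQ_RQ_STATE_BITS) and state MQ_RQ_IN_FLIGHT
 * (0x5 & MQ_RQ_STATE_MASK).  Each IDLE -> IN_FLIGHT transition adds
 * MQ_RQ_GEN_INC, so a recycled request is distinguishable from an
 * earlier incarnation that carried the same tag.
 */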

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue a request directly */
blk_status_t blk_mq_request_direct_issue(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
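
/*
 * The typical pattern resolves the hardware queue for a request's
 * software context, e.g. blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) as
 * done by blk_mq_put_driver_tag() below.
 */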

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/**
 * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
 * @rq: target request.
 * @state: new state to set.
 *
 * Set @rq's state to @state.  The caller is responsible for ensuring that
 * there are no other updaters.  A request can transition into IN_FLIGHT
 * only from IDLE and doing so increments the generation number.
 */
static inline void blk_mq_rq_update_state(struct request *rq,
					  enum mq_rq_state state)
{
	u64 old_val = READ_ONCE(rq->gstate);
	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

	if (state == MQ_RQ_IN_FLIGHT) {
		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
		new_val += MQ_RQ_GEN_INC;
	}

	/* avoid exposing interim values */
	WRITE_ONCE(rq->gstate, new_val);
}
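
/*
 * Usage sketch, assuming a single updater as required above:
 *
 *	blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);	(bumps generation)
 *	...
 *	blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);	(generation unchanged)
 *	blk_mq_rq_update_state(rq, MQ_RQ_IDLE);		(ready for reuse)
 */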

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
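
/*
 * blk_mq_get_ctx() disables preemption via get_cpu(), so every call must
 * be paired with blk_mq_put_ctx() on the same path:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	...
 *	blk_mq_put_ctx(ctx);
 */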

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
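
/*
 * A sketch of typical use: callers fill in only the input fields and let
 * the allocation path set ctx/hctx, along the lines of
 *
 *	struct blk_mq_alloc_data data = { .q = q, .flags = flags };
 */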

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
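
/*
 * BLK_MQ_REQ_INTERNAL allocations come from the I/O scheduler's tag
 * space (sched_tags); all others draw from the driver tag space (tags).
 */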
1764941115bSJens Axboe 
1775d1b25c1SBart Van Assche static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
1785d1b25c1SBart Van Assche {
1795d1b25c1SBart Van Assche 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
1805d1b25c1SBart Van Assche }
1815d1b25c1SBart Van Assche 
18219c66e59SMing Lei static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
18319c66e59SMing Lei {
18419c66e59SMing Lei 	return hctx->nr_ctx && hctx->tags;
18519c66e59SMing Lei }
18619c66e59SMing Lei 
187f299b7c7SJens Axboe void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
188f299b7c7SJens Axboe 			unsigned int inflight[2]);
189f299b7c7SJens Axboe 
190de148297SMing Lei static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
191de148297SMing Lei {
192de148297SMing Lei 	struct request_queue *q = hctx->queue;
193de148297SMing Lei 
194de148297SMing Lei 	if (q->mq_ops->put_budget)
195de148297SMing Lei 		q->mq_ops->put_budget(hctx);
196de148297SMing Lei }
197de148297SMing Lei 
19888022d72SMing Lei static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
199de148297SMing Lei {
200de148297SMing Lei 	struct request_queue *q = hctx->queue;
201de148297SMing Lei 
202de148297SMing Lei 	if (q->mq_ops->get_budget)
203de148297SMing Lei 		return q->mq_ops->get_budget(hctx);
20488022d72SMing Lei 	return true;
205de148297SMing Lei }
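
/*
 * Dispatch paths bracket each request with these hooks, roughly:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *
 * followed by blk_mq_put_dispatch_budget() if the request is not handed
 * to the driver after all.  A queue without a ->get_budget() callback
 * always gets budget.
 */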

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}
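
/*
 * Both put helpers above bail out when rq->tag == -1 (no driver tag is
 * held) or rq->internal_tag == -1 (no I/O scheduler, so the driver tag
 * lives for the whole lifetime of the request and is released with it).
 */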

#endif