/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

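/*
 * Per-queue container for the per-CPU software queues; the embedded kobject
 * backs the "mq" directory in sysfs.
 */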
struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs      *ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						blk_qc_t *cookie,
						bool bypass, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags, ctx) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue (cpu ctx)
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_HIPRI is set, poll queues are enabled.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}
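
/*
 * Illustrative sketch only (not a call site in this header): a submitter
 * roughly resolves the hardware queue for a bio as
 *
 *	ctx = blk_mq_get_ctx(q);
 *	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * so that HIPRI I/O lands on the poll queues and reads can use a dedicated
 * read queue when one is configured.
 */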

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

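/* Return the software queue (blk_mq_ctx) of a specific CPU. */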
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could just as well be per-node,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctxs are persistent. This does mean
 * that we can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
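
/*
 * Minimal usage sketch (for illustration): blk_mq_get_ctx() disables
 * preemption via get_cpu(), so every call must be paired with
 * blk_mq_put_ctx() once the ctx is no longer needed:
 *
 *	ctx = blk_mq_get_ctx(q);
 *	...
 *	blk_mq_put_ctx(ctx);
 */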

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

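/*
 * Requests owned by an I/O scheduler are tracked in the hctx's sched_tags;
 * everything else comes straight from the driver tag set.
 */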
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

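/*
 * A hardware queue counts as mapped once it has software queues attached and
 * a tag set to allocate from.
 */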
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

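/*
 * Dispatch budget: drivers may implement ->get_budget()/->put_budget()
 * (e.g. SCSI uses them to enforce queue depth) to throttle how many requests
 * are handed to the device; without the hooks, budget is always granted.
 */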
static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

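/*
 * Hand the driver tag back to the tag set and, if this request was counted
 * against the shared-tag inflight limit, drop that accounting as well.
 */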
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

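/* Reset a queue map so every possible CPU points at hardware queue 0. */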
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

#endif