// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
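
/*
 * Typical usage (a sketch, not exhaustive): a dispatch path marks the hctx
 * for restart when it runs out of driver tags or budget, and a later request
 * completion/free path is then expected to re-run the queue, e.g. via the
 * blk_mq_sched_restart() helper in blk-mq-sched.h, which tests the bit and
 * calls __blk_mq_sched_restart() below.
 */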

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful()
	 * check of hctx->dispatch in blk_mq_run_hw_queue(). The pairing
	 * barrier is in blk_mq_dispatch_rq_list(). Without this barrier,
	 * the dispatch side might not see SCHED_RESTART while, at the same
	 * time, blk_mq_run_hw_queue() misses a new request that was just
	 * added to hctx->dispatch.
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}
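
/*
 * A rough sketch of the race the barrier above guards against (assuming one
 * CPU restarting the queue while another requeues a request):
 *
 *   CPU0 (restart path)                 CPU1 (dispatch path)
 *   clear SCHED_RESTART                 add request to hctx->dispatch
 *   smp_mb()                            smp_mb() (blk_mq_dispatch_rq_list())
 *   check hctx->dispatch / run queue    check SCHED_RESTART
 *
 * With both barriers in place at least one side observes the other's update,
 * so either CPU0 sees the new request or CPU1 sees that a restart is no
 * longer pending and re-runs the queue itself.
 */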

static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}
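
/*
 * Note: list_sort() only cares whether the comparator returns a value
 * greater than zero ("a sorts after b"), so comparing the mq_hctx pointers
 * like this is enough to group requests belonging to the same hardware queue
 * next to each other; the relative order of the groups themselves is not
 * meaningful.
 */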

static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}
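
/*
 * blk_mq_dispatch_hctx_list() peels off the leading run of requests that
 * share the first request's hctx and dispatches just that batch; the caller
 * (see __blk_mq_do_dispatch_sched() below) keeps calling it in a loop until
 * rq_list is empty, so a sorted list is consumed one hctx batch at a time.
 */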

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a driver tag for the request, stop
		 * dequeueing requests from the IO scheduler. We are unlikely
		 * to be able to submit them anyway and it creates a false
		 * impression for scheduling heuristics that the device can
		 * take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctxs may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list by hctx, then dispatch them
		 * in batches, one hctx at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}
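
/*
 * A quick summary of the return values above (as inferred from the code):
 * -EAGAIN means requests were found on hctx->dispatch and the caller must
 * retry, 1 means at least one request was handed to the driver, and 0 means
 * nothing was dispatched this round. blk_mq_do_dispatch_sched() below only
 * loops while it keeps getting 1.
 */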

static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}
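
/*
 * The HZ-based deadline above bounds how long a single invocation keeps
 * dispatching: after roughly one second, or whenever rescheduling is needed,
 * the loop punts the remaining work to an asynchronous queue run with zero
 * delay instead of hogging the current context.
 */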

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}
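
/*
 * For example, with nr_ctx == 4 and a starting ctx at index 2, repeated
 * calls walk indices 3, 0, 1, 2, ... - a simple wrap-around over the
 * software queues mapped to this hardware queue, used below to round-robin
 * the dispatch starting point.
 */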

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}
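
/*
 * Note how the round-robin position survives across calls: dispatch_from is
 * read once at the top and written back at the end, so the next invocation
 * resumes from the software queue after the last one that was serviced,
 * which keeps per-ctx dispatch roughly fair under a busy device.
 */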

static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	bool need_dispatch = false;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
			return 0;
		need_dispatch = true;
	} else {
		need_dispatch = hctx->dispatch_busy;
	}

	if (hctx->queue->elevator)
		return blk_mq_do_dispatch_sched(hctx);

	/* dequeue requests one by one from the sw queue if the queue is busy */
	if (need_dispatch)
		return blk_mq_do_dispatch_ctx(hctx);
	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	return 0;
}
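
/*
 * So, roughly, the order of precedence is: leftover requests on
 * hctx->dispatch first, then the elevator (if one is attached), then either
 * per-ctx round-robin dispatch (when the device looks busy) or a bulk flush
 * of all software queues. The return value follows the same convention as
 * __blk_mq_do_dispatch_sched(): -EAGAIN, 1 or 0.
 */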

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 * Retry once synchronously; if that also comes back -EAGAIN, kick an
	 * asynchronous queue run instead of looping here.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(bio->bi_opf, ctx);
	type = hctx->type;
	if (list_empty_careful(&ctx->rq_lists[type]))
		goto out_put;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
out_put:
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}
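
/*
 * In other words: with shared tags every hctx simply points at the single
 * queue-wide sched_shared_tags allocation set up in
 * blk_mq_init_sched_shared_tags() below, while without shared tags each hctx
 * gets its own map of q->nr_requests scheduler tags and requests.
 */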

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set initial depth at max so that we don't need to reallocate for
	 * updating nr_requests.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}
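
/*
 * The idea behind allocating MAX_SCHED_RQ up front (per the comment above)
 * is that a later change to nr_requests, for example through the queue's
 * sysfs attribute, only needs to adjust the effective tag depth via
 * blk_mq_tag_update_sched_shared_tags() instead of reallocating the shared
 * request map.
 */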

/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned long i;
	int ret;

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);
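
	/*
	 * For example (BLKDEV_DEFAULT_RQ is 128): a device with a hardware
	 * queue depth of 31 gets nr_requests = 2 * 31 = 62, while a deep
	 * device with queue_depth 1024 is capped at 2 * 128 = 256 scheduler
	 * requests per hardware queue.
	 */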

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_register_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_sched_hctx(q, hctx);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		mutex_unlock(&q->debugfs_mutex);

		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	q->elevator = NULL;
}