xref: /linux/block/mq-deadline.c (revision d8d2b1f81530988abe2e2bfaceec1c5d30b9a0b4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  MQ Deadline I/O scheduler - an adaptation of the legacy deadline scheduler
4  *  for the blk-mq scheduling framework
5  *
6  *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
7  */
8 #include <linux/kernel.h>
9 #include <linux/fs.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/compiler.h>
16 #include <linux/rbtree.h>
17 #include <linux/sbitmap.h>
18 
19 #include <trace/events/block.h>
20 
21 #include "elevator.h"
22 #include "blk.h"
23 #include "blk-mq.h"
24 #include "blk-mq-debugfs.h"
25 #include "blk-mq-sched.h"
26 
27 /*
28  * See Documentation/block/deadline-iosched.rst
29  */
30 static const int read_expire = HZ / 2;  /* max time before a queued read is dispatched. */
31 static const int write_expire = 5 * HZ; /* ditto for writes; these limits are SOFT! */
32 /*
33  * Time after which to dispatch lower priority requests even if higher
34  * priority requests are pending.
35  */
36 static const int prio_aging_expire = 10 * HZ;
37 static const int writes_starved = 2;    /* max times reads can starve a write */
38 static const int fifo_batch = 16;       /* # of sequential requests treated as one
39                                            by the above parameters. For throughput. */
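/*
 * Example with the defaults above: a queued read should be dispatched within
 * 500 ms (HZ / 2 jiffies) of insertion and a queued write within 5 s; while
 * writes are pending, reads may win at most two consecutive batch selections
 * before a write batch is forced; and up to 16 requests dispatched in sector
 * order count as a single batch.
 */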
40 
41 enum dd_data_dir {
42 	DD_READ		= READ,
43 	DD_WRITE	= WRITE,
44 };
45 
46 enum { DD_DIR_COUNT = 2 };
47 
48 enum dd_prio {
49 	DD_RT_PRIO	= 0,
50 	DD_BE_PRIO	= 1,
51 	DD_IDLE_PRIO	= 2,
52 	DD_PRIO_MAX	= 2,
53 };
54 
55 enum { DD_PRIO_COUNT = 3 };
56 
57 /*
58  * I/O statistics per I/O priority. It is fine if these counters overflow.
59  * What matters is that these counters are at least as wide as
60  * log2(max_outstanding_requests).
61  */
62 struct io_stats_per_prio {
63 	uint32_t inserted;
64 	uint32_t merged;
65 	uint32_t dispatched;
66 	atomic_t completed;
67 };
68 
69 /*
70  * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
71  * present on both sort_list[] and fifo_list[].
72  */
73 struct dd_per_prio {
74 	struct list_head dispatch;
75 	struct rb_root sort_list[DD_DIR_COUNT];
76 	struct list_head fifo_list[DD_DIR_COUNT];
77 	/* Position of the most recently dispatched request. */
78 	sector_t latest_pos[DD_DIR_COUNT];
79 	struct io_stats_per_prio stats;
80 };
81 
82 struct deadline_data {
83 	/*
84 	 * run time data
85 	 */
86 
87 	struct dd_per_prio per_prio[DD_PRIO_COUNT];
88 
89 	/* Data direction of latest dispatched request. */
90 	enum dd_data_dir last_dir;
91 	unsigned int batching;		/* number of sequential requests made */
92 	unsigned int starved;		/* times reads have starved writes */
93 
94 	/*
95 	 * settings that change how the i/o scheduler behaves
96 	 */
97 	int fifo_expire[DD_DIR_COUNT];
98 	int fifo_batch;
99 	int writes_starved;
100 	int front_merges;
101 	u32 async_depth;
102 	int prio_aging_expire;
103 
104 	spinlock_t lock;
105 };
106 
107 /* Maps an I/O priority class to a deadline scheduler priority. */
108 static const enum dd_prio ioprio_class_to_prio[] = {
109 	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
110 	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
111 	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
112 	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
113 };
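/*
 * For example, a request submitted with IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4)
 * is handled at DD_RT_PRIO, whereas a request that never had an I/O priority
 * assigned (IOPRIO_CLASS_NONE) is treated as best effort (DD_BE_PRIO).
 */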
114 
115 static inline struct rb_root *
116 deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
117 {
118 	return &per_prio->sort_list[rq_data_dir(rq)];
119 }
120 
121 /*
122  * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
123  * request.
124  */
125 static u8 dd_rq_ioclass(struct request *rq)
126 {
127 	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
128 }
129 
130 /*
131  * Return the first request for which blk_rq_pos() >= @pos.
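 *
 * The per-direction rbtree is keyed by blk_rq_pos(). The walk below remembers
 * the current node as a candidate and descends left whenever the node is at or
 * beyond @pos, and descends right otherwise, so the lowest-sector request at
 * or past @pos (or NULL) is returned.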
132  */
133 static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
134 				enum dd_data_dir data_dir, sector_t pos)
135 {
136 	struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
137 	struct request *rq, *res = NULL;
138 
139 	while (node) {
140 		rq = rb_entry_rq(node);
141 		if (blk_rq_pos(rq) >= pos) {
142 			res = rq;
143 			node = node->rb_left;
144 		} else {
145 			node = node->rb_right;
146 		}
147 	}
148 	return res;
149 }
150 
151 static void
152 deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
153 {
154 	struct rb_root *root = deadline_rb_root(per_prio, rq);
155 
156 	elv_rb_add(root, rq);
157 }
158 
159 static inline void
160 deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
161 {
162 	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
163 }
164 
165 /*
166  * remove rq from rbtree and fifo.
167  */
168 static void deadline_remove_request(struct request_queue *q,
169 				    struct dd_per_prio *per_prio,
170 				    struct request *rq)
171 {
172 	list_del_init(&rq->queuelist);
173 
174 	/*
175 	 * We might not be on the rbtree if we are doing an insert merge.
176 	 */
177 	if (!RB_EMPTY_NODE(&rq->rb_node))
178 		deadline_del_rq_rb(per_prio, rq);
179 
180 	elv_rqhash_del(q, rq);
181 	if (q->last_merge == rq)
182 		q->last_merge = NULL;
183 }
184 
185 static void dd_request_merged(struct request_queue *q, struct request *req,
186 			      enum elv_merge type)
187 {
188 	struct deadline_data *dd = q->elevator->elevator_data;
189 	const u8 ioprio_class = dd_rq_ioclass(req);
190 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
191 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
192 
193 	/*
194 	 * If the merge was a front merge, we need to reposition the request.
195 	 */
196 	if (type == ELEVATOR_FRONT_MERGE) {
197 		elv_rb_del(deadline_rb_root(per_prio, req), req);
198 		deadline_add_rq_rb(per_prio, req);
199 	}
200 }
201 
202 /*
203  * Callback function that is invoked after @next has been merged into @req.
204  */
205 static void dd_merged_requests(struct request_queue *q, struct request *req,
206 			       struct request *next)
207 {
208 	struct deadline_data *dd = q->elevator->elevator_data;
209 	const u8 ioprio_class = dd_rq_ioclass(next);
210 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
211 
212 	lockdep_assert_held(&dd->lock);
213 
214 	dd->per_prio[prio].stats.merged++;
215 
216 	/*
217 	 * If next expires before req, assign its expire time to req and move
218 	 * req into next's position in the FIFO (next will be deleted).
219 	 */
220 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
221 		if (time_before((unsigned long)next->fifo_time,
222 				(unsigned long)req->fifo_time)) {
223 			list_move(&req->queuelist, &next->queuelist);
224 			req->fifo_time = next->fifo_time;
225 		}
226 	}
227 
228 	/*
229 	 * kill knowledge of next, this one is a goner
230 	 */
231 	deadline_remove_request(q, &dd->per_prio[prio], next);
232 }
233 
234 /*
235  * Move an entry to the dispatch stage: take it off the sort and FIFO lists.
236  */
237 static void
238 deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
239 		      struct request *rq)
240 {
241 	/*
242 	 * take it off the sort and fifo list
243 	 */
244 	deadline_remove_request(rq->q, per_prio, rq);
245 }
246 
247 /* Number of requests queued for a given priority level. */
248 static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
249 {
250 	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
251 
252 	lockdep_assert_held(&dd->lock);
253 
254 	return stats->inserted - atomic_read(&stats->completed);
255 }
256 
257 /*
258  * deadline_check_fifo returns true if and only if there are expired requests
259  * in the FIFO list. Requires !list_empty(&per_prio->fifo_list[data_dir]).
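 * Note that rq->fifo_time holds the absolute deadline in jiffies: the
 * insertion time plus the per-direction fifo_expire (see dd_insert_request()).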
260  */
261 static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
262 				       enum dd_data_dir data_dir)
263 {
264 	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
265 
266 	return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
267 }
268 
269 /*
270  * For the specified data direction, return the next request to
271  * dispatch using arrival ordered lists.
272  */
273 static struct request *
274 deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
275 		      enum dd_data_dir data_dir)
276 {
277 	if (list_empty(&per_prio->fifo_list[data_dir]))
278 		return NULL;
279 
280 	return rq_entry_fifo(per_prio->fifo_list[data_dir].next);
281 }
282 
283 /*
284  * For the specified data direction, return the next request to
285  * dispatch using sector position sorted lists.
286  */
287 static struct request *
288 deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
289 		      enum dd_data_dir data_dir)
290 {
291 	return deadline_from_pos(per_prio, data_dir,
292 				 per_prio->latest_pos[data_dir]);
293 }
294 
295 /*
296  * Returns true if and only if @rq started after @latest_start where
297  * @latest_start is in jiffies.
298  */
299 static bool started_after(struct deadline_data *dd, struct request *rq,
300 			  unsigned long latest_start)
301 {
302 	unsigned long start_time = (unsigned long)rq->fifo_time;
303 
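	/*
	 * rq->fifo_time normally holds the insertion time plus fifo_expire,
	 * so subtracting fifo_expire yields (approximately) the time at which
	 * the request was inserted.
	 */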
304 	start_time -= dd->fifo_expire[rq_data_dir(rq)];
305 
306 	return time_after(start_time, latest_start);
307 }
308 
309 /*
310  * __dd_dispatch_request() selects the best request according to
311  * read/write expire, fifo_batch, etc., and with a start time <= @latest_start.
312  */
313 static struct request *__dd_dispatch_request(struct deadline_data *dd,
314 					     struct dd_per_prio *per_prio,
315 					     unsigned long latest_start)
316 {
317 	struct request *rq, *next_rq;
318 	enum dd_data_dir data_dir;
319 	enum dd_prio prio;
320 	u8 ioprio_class;
321 
322 	lockdep_assert_held(&dd->lock);
323 
324 	if (!list_empty(&per_prio->dispatch)) {
325 		rq = list_first_entry(&per_prio->dispatch, struct request,
326 				      queuelist);
327 		if (started_after(dd, rq, latest_start))
328 			return NULL;
329 		list_del_init(&rq->queuelist);
330 		data_dir = rq_data_dir(rq);
331 		goto done;
332 	}
333 
334 	/*
335 	 * batches are currently reads XOR writes
336 	 */
337 	rq = deadline_next_request(dd, per_prio, dd->last_dir);
338 	if (rq && dd->batching < dd->fifo_batch) {
339 		/* we have a next request and are still entitled to batch */
340 		data_dir = rq_data_dir(rq);
341 		goto dispatch_request;
342 	}
343 
344 	/*
345 	 * at this point we are not running a batch. select the appropriate
346 	 * data direction (read / write)
347 	 */
348 
349 	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
350 		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
351 
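		/*
		 * If writes are also waiting and reads have already been
		 * preferred writes_starved times in a row, give this batch
		 * to the writes.
		 */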
352 		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
353 		    (dd->starved++ >= dd->writes_starved))
354 			goto dispatch_writes;
355 
356 		data_dir = DD_READ;
357 
358 		goto dispatch_find_request;
359 	}
360 
361 	/*
362 	 * Either there are no reads, or writes have been starved for long enough.
363 	 */
364 
365 	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
366 dispatch_writes:
367 		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
368 
369 		dd->starved = 0;
370 
371 		data_dir = DD_WRITE;
372 
373 		goto dispatch_find_request;
374 	}
375 
376 	return NULL;
377 
378 dispatch_find_request:
379 	/*
380 	 * We are not running a batch; find the best request for the selected data_dir.
381 	 */
382 	next_rq = deadline_next_request(dd, per_prio, data_dir);
383 	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
384 		/*
385 		 * A deadline has expired, the last request was in the other
386 		 * direction, or we have run out of higher-sectored requests.
387 		 * Start again from the request with the earliest expiry time.
388 		 */
389 		rq = deadline_fifo_request(dd, per_prio, data_dir);
390 	} else {
391 		/*
392 		 * The last req was the same dir and we have a next request in
393 		 * sort order. No expired requests so continue on from here.
394 		 */
395 		rq = next_rq;
396 	}
397 
398 	if (!rq)
399 		return NULL;
400 
401 	dd->last_dir = data_dir;
402 	dd->batching = 0;
403 
404 dispatch_request:
405 	if (started_after(dd, rq, latest_start))
406 		return NULL;
407 
408 	/*
409 	 * rq is the selected request.
410 	 */
411 	dd->batching++;
412 	deadline_move_request(dd, per_prio, rq);
413 done:
414 	ioprio_class = dd_rq_ioclass(rq);
415 	prio = ioprio_class_to_prio[ioprio_class];
416 	dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
417 	dd->per_prio[prio].stats.dispatched++;
418 	rq->rq_flags |= RQF_STARTED;
419 	return rq;
420 }
421 
422 /*
423  * Dispatch a request, if there is one, with a priority other than DD_RT_PRIO
424  * that was inserted more than prio_aging_expire jiffies ago.
425  */
426 static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
427 						      unsigned long now)
428 {
429 	struct request *rq;
430 	enum dd_prio prio;
431 	int prio_cnt;
432 
433 	lockdep_assert_held(&dd->lock);
434 
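	/*
	 * Aging only matters when requests from more than one priority level
	 * are pending; with a single level nothing can be starved by a higher
	 * priority level.
	 */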
435 	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
436 		   !!dd_queued(dd, DD_IDLE_PRIO);
437 	if (prio_cnt < 2)
438 		return NULL;
439 
440 	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
441 		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
442 					   now - dd->prio_aging_expire);
443 		if (rq)
444 			return rq;
445 	}
446 
447 	return NULL;
448 }
449 
450 /*
451  * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
452  *
453  * One confusing aspect here is that we get called for a specific
454  * hardware queue, but we may return a request that is for a
455  * different hardware queue. This is because mq-deadline has shared
456  * state for all hardware queues, in terms of sorting, FIFOs, etc.
457  */
458 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
459 {
460 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
461 	const unsigned long now = jiffies;
462 	struct request *rq;
463 	enum dd_prio prio;
464 
465 	spin_lock(&dd->lock);
466 	rq = dd_dispatch_prio_aged_requests(dd, now);
467 	if (rq)
468 		goto unlock;
469 
470 	/*
471 	 * Next, dispatch requests in priority order. Ignore lower priority
472 	 * requests if any higher priority requests are pending.
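	 * The dd_queued() check below makes a priority level that still has
	 * requests pending block lower priority levels even if none of its
	 * requests could be dispatched right now.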
473 	 */
474 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
475 		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
476 		if (rq || dd_queued(dd, prio))
477 			break;
478 	}
479 
480 unlock:
481 	spin_unlock(&dd->lock);
482 
483 	return rq;
484 }
485 
486 /*
487  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
488  * function is used by __blk_mq_get_tag().
489  */
490 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
491 {
492 	struct deadline_data *dd = data->q->elevator->elevator_data;
493 
494 	/* Do not throttle synchronous reads. */
495 	if (op_is_sync(opf) && !op_is_write(opf))
496 		return;
497 
498 	/*
499 	 * Throttle asynchronous requests and writes such that these requests
500 	 * do not block the allocation of synchronous requests.
501 	 */
502 	data->shallow_depth = dd->async_depth;
503 }
504 
505 /* Called by blk_mq_update_nr_requests(). */
506 static void dd_depth_updated(struct request_queue *q)
507 {
508 	struct deadline_data *dd = q->elevator->elevator_data;
509 
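	/*
	 * By default async_depth equals the full queue depth, i.e. asynchronous
	 * requests are not actually throttled until the async_depth sysfs
	 * attribute is lowered below nr_requests.
	 */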
510 	dd->async_depth = q->nr_requests;
511 	blk_mq_set_min_shallow_depth(q, 1);
512 }
513 
514 static void dd_exit_sched(struct elevator_queue *e)
515 {
516 	struct deadline_data *dd = e->elevator_data;
517 	enum dd_prio prio;
518 
519 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
520 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
521 		const struct io_stats_per_prio *stats = &per_prio->stats;
522 		uint32_t queued;
523 
524 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
525 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
526 
527 		spin_lock(&dd->lock);
528 		queued = dd_queued(dd, prio);
529 		spin_unlock(&dd->lock);
530 
531 		WARN_ONCE(queued != 0,
532 			  "statistics for priority %d: i %u m %u d %u c %u\n",
533 			  prio, stats->inserted, stats->merged,
534 			  stats->dispatched, atomic_read(&stats->completed));
535 	}
536 
537 	kfree(dd);
538 }
539 
540 /*
541  * initialize elevator private data (deadline_data).
542  */
543 static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq)
544 {
545 	struct deadline_data *dd;
546 	enum dd_prio prio;
547 
548 	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
549 	if (!dd)
550 		return -ENOMEM;
551 
552 	eq->elevator_data = dd;
553 
554 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
555 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
556 
557 		INIT_LIST_HEAD(&per_prio->dispatch);
558 		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
559 		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
560 		per_prio->sort_list[DD_READ] = RB_ROOT;
561 		per_prio->sort_list[DD_WRITE] = RB_ROOT;
562 	}
563 	dd->fifo_expire[DD_READ] = read_expire;
564 	dd->fifo_expire[DD_WRITE] = write_expire;
565 	dd->writes_starved = writes_starved;
566 	dd->front_merges = 1;
567 	dd->last_dir = DD_WRITE;
568 	dd->fifo_batch = fifo_batch;
569 	dd->prio_aging_expire = prio_aging_expire;
570 	spin_lock_init(&dd->lock);
571 
572 	/* We dispatch queue-wide from the request queue instead of per hw queue. */
573 	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
574 
575 	q->elevator = eq;
576 	dd_depth_updated(q);
577 	return 0;
578 }
579 
580 /*
581  * Try to merge @bio into an existing request. If @bio has been merged into
582  * an existing request, store the pointer to that request into *@rq.
583  */
584 static int dd_request_merge(struct request_queue *q, struct request **rq,
585 			    struct bio *bio)
586 {
587 	struct deadline_data *dd = q->elevator->elevator_data;
588 	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
589 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
590 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
591 	sector_t sector = bio_end_sector(bio);
592 	struct request *__rq;
593 
594 	if (!dd->front_merges)
595 		return ELEVATOR_NO_MERGE;
596 
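	/*
	 * Look for a request that starts exactly where @bio ends; such a
	 * request is a candidate for having @bio merged at its front.
	 */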
597 	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
598 	if (__rq) {
599 		BUG_ON(sector != blk_rq_pos(__rq));
600 
601 		if (elv_bio_merge_ok(__rq, bio)) {
602 			*rq = __rq;
603 			if (blk_discard_mergable(__rq))
604 				return ELEVATOR_DISCARD_MERGE;
605 			return ELEVATOR_FRONT_MERGE;
606 		}
607 	}
608 
609 	return ELEVATOR_NO_MERGE;
610 }
611 
612 /*
613  * Attempt to merge a bio into an existing request. This function is called
614  * before @bio is associated with a request.
615  */
616 static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
617 		unsigned int nr_segs)
618 {
619 	struct deadline_data *dd = q->elevator->elevator_data;
620 	struct request *free = NULL;
621 	bool ret;
622 
623 	spin_lock(&dd->lock);
624 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
625 	spin_unlock(&dd->lock);
626 
627 	if (free)
628 		blk_mq_free_request(free);
629 
630 	return ret;
631 }
632 
633 /*
634  * add rq to rbtree and fifo
635  */
636 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
637 			      blk_insert_t flags, struct list_head *free)
638 {
639 	struct request_queue *q = hctx->queue;
640 	struct deadline_data *dd = q->elevator->elevator_data;
641 	const enum dd_data_dir data_dir = rq_data_dir(rq);
642 	u16 ioprio = req_get_ioprio(rq);
643 	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
644 	struct dd_per_prio *per_prio;
645 	enum dd_prio prio;
646 
647 	lockdep_assert_held(&dd->lock);
648 
649 	prio = ioprio_class_to_prio[ioprio_class];
650 	per_prio = &dd->per_prio[prio];
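	/*
	 * Only count a request the first time it is inserted. elv.priv[0]
	 * also records the per-priority structure the request belongs to so
	 * that dd_finish_request() can update the matching completion count.
	 */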
651 	if (!rq->elv.priv[0])
652 		per_prio->stats.inserted++;
653 	rq->elv.priv[0] = per_prio;
654 
655 	if (blk_mq_sched_try_insert_merge(q, rq, free))
656 		return;
657 
658 	trace_block_rq_insert(rq);
659 
660 	if (flags & BLK_MQ_INSERT_AT_HEAD) {
661 		list_add(&rq->queuelist, &per_prio->dispatch);
662 		rq->fifo_time = jiffies;
663 	} else {
664 		deadline_add_rq_rb(per_prio, rq);
665 
666 		if (rq_mergeable(rq)) {
667 			elv_rqhash_add(q, rq);
668 			if (!q->last_merge)
669 				q->last_merge = rq;
670 		}
671 
672 		/*
673 		 * set expire time and add to fifo list
674 		 */
675 		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
676 		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
677 	}
678 }
679 
680 /*
681  * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
682  */
683 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
684 			       struct list_head *list,
685 			       blk_insert_t flags)
686 {
687 	struct request_queue *q = hctx->queue;
688 	struct deadline_data *dd = q->elevator->elevator_data;
689 	LIST_HEAD(free);
690 
691 	spin_lock(&dd->lock);
692 	while (!list_empty(list)) {
693 		struct request *rq;
694 
695 		rq = list_first_entry(list, struct request, queuelist);
696 		list_del_init(&rq->queuelist);
697 		dd_insert_request(hctx, rq, flags, &free);
698 	}
699 	spin_unlock(&dd->lock);
700 
701 	blk_mq_free_requests(&free);
702 }
703 
704 /* Callback from inside blk_mq_rq_ctx_init(). */
705 static void dd_prepare_request(struct request *rq)
706 {
707 	rq->elv.priv[0] = NULL;
708 }
709 
710 /*
711  * Callback from inside blk_mq_free_request().
712  */
713 static void dd_finish_request(struct request *rq)
714 {
715 	struct dd_per_prio *per_prio = rq->elv.priv[0];
716 
717 	/*
718 	 * The block layer core may call dd_finish_request() without having
719 	 * called dd_insert_requests(). Skip requests that bypassed I/O
720 	 * scheduling. See also blk_mq_request_bypass_insert().
721 	 */
722 	if (per_prio)
723 		atomic_inc(&per_prio->stats.completed);
724 }
725 
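/*
 * dd_has_work() is called without dd->lock held, hence the
 * list_empty_careful() checks below.
 */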
726 static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
727 {
728 	return !list_empty_careful(&per_prio->dispatch) ||
729 		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
730 		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
731 }
732 
733 static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
734 {
735 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
736 	enum dd_prio prio;
737 
738 	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
739 		if (dd_has_work_for_prio(&dd->per_prio[prio]))
740 			return true;
741 
742 	return false;
743 }
744 
745 /*
746  * sysfs parts below
747  */
748 #define SHOW_INT(__FUNC, __VAR)						\
749 static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
750 {									\
751 	struct deadline_data *dd = e->elevator_data;			\
752 									\
753 	return sysfs_emit(page, "%d\n", __VAR);				\
754 }
755 #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
756 SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
757 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
758 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
759 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
760 SHOW_INT(deadline_front_merges_show, dd->front_merges);
761 SHOW_INT(deadline_async_depth_show, dd->async_depth);
762 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
763 #undef SHOW_INT
764 #undef SHOW_JIFFIES
765 
766 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
767 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
768 {									\
769 	struct deadline_data *dd = e->elevator_data;			\
770 	int __data, __ret;						\
771 									\
772 	__ret = kstrtoint(page, 0, &__data);				\
773 	if (__ret < 0)							\
774 		return __ret;						\
775 	if (__data < (MIN))						\
776 		__data = (MIN);						\
777 	else if (__data > (MAX))					\
778 		__data = (MAX);						\
779 	*(__PTR) = __CONV(__data);					\
780 	return count;							\
781 }
782 #define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
783 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
784 #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
785 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
786 STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
787 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
788 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
789 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
790 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
791 STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
792 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
793 #undef STORE_FUNCTION
794 #undef STORE_INT
795 #undef STORE_JIFFIES
796 
797 #define DD_ATTR(name) \
798 	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
799 
800 static const struct elv_fs_entry deadline_attrs[] = {
801 	DD_ATTR(read_expire),
802 	DD_ATTR(write_expire),
803 	DD_ATTR(writes_starved),
804 	DD_ATTR(front_merges),
805 	DD_ATTR(async_depth),
806 	DD_ATTR(fifo_batch),
807 	DD_ATTR(prio_aging_expire),
808 	__ATTR_NULL
809 };
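/*
 * These attributes appear under /sys/block/<disk>/queue/iosched/ while
 * mq-deadline is the active scheduler. The *_expire values are read and
 * written in milliseconds, e.g. (illustrative device name):
 *
 *   echo 100 > /sys/block/sda/queue/iosched/read_expire
 *
 * lowers the read deadline to 100 ms.
 */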
810 
811 #ifdef CONFIG_BLK_DEBUG_FS
812 #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
813 static void *deadline_##name##_fifo_start(struct seq_file *m,		\
814 					  loff_t *pos)			\
815 	__acquires(&dd->lock)						\
816 {									\
817 	struct request_queue *q = m->private;				\
818 	struct deadline_data *dd = q->elevator->elevator_data;		\
819 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
820 									\
821 	spin_lock(&dd->lock);						\
822 	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
823 }									\
824 									\
825 static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
826 					 loff_t *pos)			\
827 {									\
828 	struct request_queue *q = m->private;				\
829 	struct deadline_data *dd = q->elevator->elevator_data;		\
830 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
831 									\
832 	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
833 }									\
834 									\
835 static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
836 	__releases(&dd->lock)						\
837 {									\
838 	struct request_queue *q = m->private;				\
839 	struct deadline_data *dd = q->elevator->elevator_data;		\
840 									\
841 	spin_unlock(&dd->lock);						\
842 }									\
843 									\
844 static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
845 	.start	= deadline_##name##_fifo_start,				\
846 	.next	= deadline_##name##_fifo_next,				\
847 	.stop	= deadline_##name##_fifo_stop,				\
848 	.show	= blk_mq_debugfs_rq_show,				\
849 };									\
850 									\
851 static int deadline_##name##_next_rq_show(void *data,			\
852 					  struct seq_file *m)		\
853 {									\
854 	struct request_queue *q = data;					\
855 	struct deadline_data *dd = q->elevator->elevator_data;		\
856 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
857 	struct request *rq;						\
858 									\
859 	rq = deadline_from_pos(per_prio, data_dir,			\
860 			       per_prio->latest_pos[data_dir]);		\
861 	if (rq)								\
862 		__blk_mq_debugfs_rq_show(m, rq);			\
863 	return 0;							\
864 }
865 
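/* The numeric suffix encodes the priority level: 0 = RT, 1 = BE, 2 = IDLE. */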
866 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
867 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
868 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
869 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
870 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
871 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
872 #undef DEADLINE_DEBUGFS_DDIR_ATTRS
873 
874 static int deadline_batching_show(void *data, struct seq_file *m)
875 {
876 	struct request_queue *q = data;
877 	struct deadline_data *dd = q->elevator->elevator_data;
878 
879 	seq_printf(m, "%u\n", dd->batching);
880 	return 0;
881 }
882 
883 static int deadline_starved_show(void *data, struct seq_file *m)
884 {
885 	struct request_queue *q = data;
886 	struct deadline_data *dd = q->elevator->elevator_data;
887 
888 	seq_printf(m, "%u\n", dd->starved);
889 	return 0;
890 }
891 
892 static int dd_async_depth_show(void *data, struct seq_file *m)
893 {
894 	struct request_queue *q = data;
895 	struct deadline_data *dd = q->elevator->elevator_data;
896 
897 	seq_printf(m, "%u\n", dd->async_depth);
898 	return 0;
899 }
900 
901 static int dd_queued_show(void *data, struct seq_file *m)
902 {
903 	struct request_queue *q = data;
904 	struct deadline_data *dd = q->elevator->elevator_data;
905 	u32 rt, be, idle;
906 
907 	spin_lock(&dd->lock);
908 	rt = dd_queued(dd, DD_RT_PRIO);
909 	be = dd_queued(dd, DD_BE_PRIO);
910 	idle = dd_queued(dd, DD_IDLE_PRIO);
911 	spin_unlock(&dd->lock);
912 
913 	seq_printf(m, "%u %u %u\n", rt, be, idle);
914 
915 	return 0;
916 }
917 
918 /* Number of requests owned by the block driver for a given priority. */
919 static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
920 {
921 	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
922 
923 	lockdep_assert_held(&dd->lock);
924 
925 	return stats->dispatched + stats->merged -
926 		atomic_read(&stats->completed);
927 }
928 
929 static int dd_owned_by_driver_show(void *data, struct seq_file *m)
930 {
931 	struct request_queue *q = data;
932 	struct deadline_data *dd = q->elevator->elevator_data;
933 	u32 rt, be, idle;
934 
935 	spin_lock(&dd->lock);
936 	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
937 	be = dd_owned_by_driver(dd, DD_BE_PRIO);
938 	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
939 	spin_unlock(&dd->lock);
940 
941 	seq_printf(m, "%u %u %u\n", rt, be, idle);
942 
943 	return 0;
944 }
945 
946 #define DEADLINE_DISPATCH_ATTR(prio)					\
947 static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
948 					     loff_t *pos)		\
949 	__acquires(&dd->lock)						\
950 {									\
951 	struct request_queue *q = m->private;				\
952 	struct deadline_data *dd = q->elevator->elevator_data;		\
953 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
954 									\
955 	spin_lock(&dd->lock);						\
956 	return seq_list_start(&per_prio->dispatch, *pos);		\
957 }									\
958 									\
959 static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
960 					    void *v, loff_t *pos)	\
961 {									\
962 	struct request_queue *q = m->private;				\
963 	struct deadline_data *dd = q->elevator->elevator_data;		\
964 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
965 									\
966 	return seq_list_next(v, &per_prio->dispatch, pos);		\
967 }									\
968 									\
969 static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
970 	__releases(&dd->lock)						\
971 {									\
972 	struct request_queue *q = m->private;				\
973 	struct deadline_data *dd = q->elevator->elevator_data;		\
974 									\
975 	spin_unlock(&dd->lock);						\
976 }									\
977 									\
978 static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
979 	.start	= deadline_dispatch##prio##_start,			\
980 	.next	= deadline_dispatch##prio##_next,			\
981 	.stop	= deadline_dispatch##prio##_stop,			\
982 	.show	= blk_mq_debugfs_rq_show,				\
983 }
984 
985 DEADLINE_DISPATCH_ATTR(0);
986 DEADLINE_DISPATCH_ATTR(1);
987 DEADLINE_DISPATCH_ATTR(2);
988 #undef DEADLINE_DISPATCH_ATTR
989 
990 #define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
991 	{#name "_fifo_list", 0400,					\
992 			.seq_ops = &deadline_##name##_fifo_seq_ops}
993 #define DEADLINE_NEXT_RQ_ATTR(name)					\
994 	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
995 static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
996 	DEADLINE_QUEUE_DDIR_ATTRS(read0),
997 	DEADLINE_QUEUE_DDIR_ATTRS(write0),
998 	DEADLINE_QUEUE_DDIR_ATTRS(read1),
999 	DEADLINE_QUEUE_DDIR_ATTRS(write1),
1000 	DEADLINE_QUEUE_DDIR_ATTRS(read2),
1001 	DEADLINE_QUEUE_DDIR_ATTRS(write2),
1002 	DEADLINE_NEXT_RQ_ATTR(read0),
1003 	DEADLINE_NEXT_RQ_ATTR(write0),
1004 	DEADLINE_NEXT_RQ_ATTR(read1),
1005 	DEADLINE_NEXT_RQ_ATTR(write1),
1006 	DEADLINE_NEXT_RQ_ATTR(read2),
1007 	DEADLINE_NEXT_RQ_ATTR(write2),
1008 	{"batching", 0400, deadline_batching_show},
1009 	{"starved", 0400, deadline_starved_show},
1010 	{"async_depth", 0400, dd_async_depth_show},
1011 	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
1012 	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
1013 	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
1014 	{"owned_by_driver", 0400, dd_owned_by_driver_show},
1015 	{"queued", 0400, dd_queued_show},
1016 	{},
1017 };
1018 #undef DEADLINE_QUEUE_DDIR_ATTRS
1019 #endif
1020 
1021 static struct elevator_type mq_deadline = {
1022 	.ops = {
1023 		.depth_updated		= dd_depth_updated,
1024 		.limit_depth		= dd_limit_depth,
1025 		.insert_requests	= dd_insert_requests,
1026 		.dispatch_request	= dd_dispatch_request,
1027 		.prepare_request	= dd_prepare_request,
1028 		.finish_request		= dd_finish_request,
1029 		.next_request		= elv_rb_latter_request,
1030 		.former_request		= elv_rb_former_request,
1031 		.bio_merge		= dd_bio_merge,
1032 		.request_merge		= dd_request_merge,
1033 		.requests_merged	= dd_merged_requests,
1034 		.request_merged		= dd_request_merged,
1035 		.has_work		= dd_has_work,
1036 		.init_sched		= dd_init_sched,
1037 		.exit_sched		= dd_exit_sched,
1038 	},
1039 
1040 #ifdef CONFIG_BLK_DEBUG_FS
1041 	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
1042 #endif
1043 	.elevator_attrs = deadline_attrs,
1044 	.elevator_name = "mq-deadline",
1045 	.elevator_alias = "deadline",
1046 	.elevator_owner = THIS_MODULE,
1047 };
1048 MODULE_ALIAS("mq-deadline-iosched");
1049 
1050 static int __init deadline_init(void)
1051 {
1052 	return elv_register(&mq_deadline);
1053 }
1054 
1055 static void __exit deadline_exit(void)
1056 {
1057 	elv_unregister(&mq_deadline);
1058 }
1059 
1060 module_init(deadline_init);
1061 module_exit(deadline_exit);
1062 
1063 MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
1064 MODULE_LICENSE("GPL");
1065 MODULE_DESCRIPTION("MQ deadline IO scheduler");
1066