xref: /linux/block/mq-deadline.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
4  *  for the blk-mq scheduling framework
5  *
6  *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
7  */
8 #include <linux/kernel.h>
9 #include <linux/fs.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/compiler.h>
16 #include <linux/rbtree.h>
17 #include <linux/sbitmap.h>
18 
19 #include <trace/events/block.h>
20 
21 #include "elevator.h"
22 #include "blk.h"
23 #include "blk-mq.h"
24 #include "blk-mq-debugfs.h"
25 #include "blk-mq-sched.h"
26 
27 /*
28  * See Documentation/block/deadline-iosched.rst
29  */
30 static const int read_expire = HZ / 2;  /* max time before a read is dispatched. */
31 static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
32 /*
33  * Time after which to dispatch lower priority requests even if higher
34  * priority requests are pending.
35  */
36 static const int prio_aging_expire = 10 * HZ;
37 static const int writes_starved = 2;    /* max times reads can starve a write */
38 static const int fifo_batch = 16;       /* # of sequential requests treated as one
39 				     by the above parameters. For throughput. */
40 
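/*
 * All of the defaults above can be changed at run time through the sysfs
 * attributes defined at the bottom of this file. A hedged example, assuming
 * a disk named "sda" (device name and values are purely illustrative):
 *
 *   echo 100 > /sys/block/sda/queue/iosched/read_expire	# in milliseconds
 *   echo 32  > /sys/block/sda/queue/iosched/fifo_batch
 *   cat /sys/block/sda/queue/iosched/writes_starved
 *
 * The *_expire attributes are exposed in milliseconds (see the SHOW_JIFFIES
 * and STORE_JIFFIES macros below); the remaining attributes are plain
 * integers.
 */
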
41 enum dd_data_dir {
42 	DD_READ		= READ,
43 	DD_WRITE	= WRITE,
44 };
45 
46 enum { DD_DIR_COUNT = 2 };
47 
48 enum dd_prio {
49 	DD_RT_PRIO	= 0,
50 	DD_BE_PRIO	= 1,
51 	DD_IDLE_PRIO	= 2,
52 	DD_PRIO_MAX	= 2,
53 };
54 
55 enum { DD_PRIO_COUNT = 3 };
56 
57 /*
58  * I/O statistics per I/O priority. It is fine if these counters overflow.
59  * What matters is that these counters are at least as wide as
60  * log2(max_outstanding_requests).
61  */
62 struct io_stats_per_prio {
63 	uint32_t inserted;
64 	uint32_t merged;
65 	uint32_t dispatched;
66 	atomic_t completed;
67 };
68 
69 /*
70  * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
71  * present on both sort_list[] and fifo_list[].
72  */
73 struct dd_per_prio {
74 	struct list_head dispatch;
75 	struct rb_root sort_list[DD_DIR_COUNT];
76 	struct list_head fifo_list[DD_DIR_COUNT];
77 	/* Position of the most recently dispatched request. */
78 	sector_t latest_pos[DD_DIR_COUNT];
79 	struct io_stats_per_prio stats;
80 };
81 
82 struct deadline_data {
83 	/*
84 	 * run time data
85 	 */
86 
87 	struct dd_per_prio per_prio[DD_PRIO_COUNT];
88 
89 	/* Data direction of latest dispatched request. */
90 	enum dd_data_dir last_dir;
91 	unsigned int batching;		/* number of sequential requests made */
92 	unsigned int starved;		/* times reads have starved writes */
93 
94 	/*
95 	 * settings that change how the i/o scheduler behaves
96 	 */
97 	int fifo_expire[DD_DIR_COUNT];
98 	int fifo_batch;
99 	int writes_starved;
100 	int front_merges;
101 	u32 async_depth;
102 	int prio_aging_expire;
103 
104 	spinlock_t lock;
105 };
106 
107 /* Maps an I/O priority class to a deadline scheduler priority. */
108 static const enum dd_prio ioprio_class_to_prio[] = {
109 	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
110 	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
111 	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
112 	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
113 };
114 
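/*
 * Illustrative mapping: I/O issued under "ionice -c 1" carries
 * IOPRIO_CLASS_RT and therefore lands in per_prio[DD_RT_PRIO], "ionice -c 3"
 * maps to per_prio[DD_IDLE_PRIO], and I/O without an explicit priority
 * (IOPRIO_CLASS_NONE) is treated as best-effort (DD_BE_PRIO).
 */
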
115 static inline struct rb_root *
116 deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
117 {
118 	return &per_prio->sort_list[rq_data_dir(rq)];
119 }
120 
121 /*
122  * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
123  * request.
124  */
125 static u8 dd_rq_ioclass(struct request *rq)
126 {
127 	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
128 }
129 
130 /*
131  * Return the first request for which blk_rq_pos() >= @pos.
132  */
133 static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
134 				enum dd_data_dir data_dir, sector_t pos)
135 {
136 	struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
137 	struct request *rq, *res = NULL;
138 
139 	if (!node)
140 		return NULL;
141 
142 	rq = rb_entry_rq(node);
143 	while (node) {
144 		rq = rb_entry_rq(node);
145 		if (blk_rq_pos(rq) >= pos) {
146 			res = rq;
147 			node = node->rb_left;
148 		} else {
149 			node = node->rb_right;
150 		}
151 	}
152 	return res;
153 }
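
/*
 * Illustrative example for the lookup above (sector numbers are made up):
 * with requests starting at sectors 8, 64 and 128 in the tree, a lookup with
 * @pos == 65 returns the request at 128 and a lookup with @pos == 64 returns
 * the request at 64; in both cases the first request whose start sector is
 * >= @pos.
 */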
154 
155 static void
156 deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
157 {
158 	struct rb_root *root = deadline_rb_root(per_prio, rq);
159 
160 	elv_rb_add(root, rq);
161 }
162 
163 static inline void
164 deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
165 {
166 	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
167 }
168 
169 /*
170  * remove rq from rbtree and fifo.
171  */
172 static void deadline_remove_request(struct request_queue *q,
173 				    struct dd_per_prio *per_prio,
174 				    struct request *rq)
175 {
176 	list_del_init(&rq->queuelist);
177 
178 	/*
179 	 * The request might not be on the rbtree if this is an insert merge.
180 	 */
181 	if (!RB_EMPTY_NODE(&rq->rb_node))
182 		deadline_del_rq_rb(per_prio, rq);
183 
184 	elv_rqhash_del(q, rq);
185 	if (q->last_merge == rq)
186 		q->last_merge = NULL;
187 }
188 
189 static void dd_request_merged(struct request_queue *q, struct request *req,
190 			      enum elv_merge type)
191 {
192 	struct deadline_data *dd = q->elevator->elevator_data;
193 	const u8 ioprio_class = dd_rq_ioclass(req);
194 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
195 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
196 
197 	/*
198 	 * If the merge was a front merge, reposition the request in the rbtree.
199 	 */
200 	if (type == ELEVATOR_FRONT_MERGE) {
201 		elv_rb_del(deadline_rb_root(per_prio, req), req);
202 		deadline_add_rq_rb(per_prio, req);
203 	}
204 }
205 
206 /*
207  * Callback function that is invoked after @next has been merged into @req.
208  */
209 static void dd_merged_requests(struct request_queue *q, struct request *req,
210 			       struct request *next)
211 {
212 	struct deadline_data *dd = q->elevator->elevator_data;
213 	const u8 ioprio_class = dd_rq_ioclass(next);
214 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
215 
216 	lockdep_assert_held(&dd->lock);
217 
218 	dd->per_prio[prio].stats.merged++;
219 
220 	/*
221 	 * If next expires before req, assign its expire time to req and move
222 	 * req into next's position in the FIFO (next will be deleted).
223 	 */
224 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
225 		if (time_before((unsigned long)next->fifo_time,
226 				(unsigned long)req->fifo_time)) {
227 			list_move(&req->queuelist, &next->queuelist);
228 			req->fifo_time = next->fifo_time;
229 		}
230 	}
231 
232 	/*
233 	 * kill knowledge of next, this one is a goner
234 	 */
235 	deadline_remove_request(q, &dd->per_prio[prio], next);
236 }
237 
238 /*
239  * Remove an entry from the sort and FIFO lists in preparation for dispatching it.
240  */
241 static void
242 deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
243 		      struct request *rq)
244 {
245 	/*
246 	 * take it off the sort and fifo list
247 	 */
248 	deadline_remove_request(rq->q, per_prio, rq);
249 }
250 
251 /* Number of requests queued for a given priority level. */
252 static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
253 {
254 	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
255 
256 	lockdep_assert_held(&dd->lock);
257 
258 	return stats->inserted - atomic_read(&stats->completed);
259 }
260 
261 /*
262  * deadline_check_fifo returns true if and only if there are expired requests
263  * in the FIFO list. Requires !list_empty(&dd->fifo_list[data_dir]).
264  */
265 static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
266 				       enum dd_data_dir data_dir)
267 {
268 	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
269 
270 	return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
271 }
272 
273 /*
274  * For the specified data direction, return the next request to
275  * dispatch using arrival ordered lists.
276  */
277 static struct request *
278 deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
279 		      enum dd_data_dir data_dir)
280 {
281 	if (list_empty(&per_prio->fifo_list[data_dir]))
282 		return NULL;
283 
284 	return rq_entry_fifo(per_prio->fifo_list[data_dir].next);
285 }
286 
287 /*
288  * For the specified data direction, return the next request to
289  * dispatch using sector position sorted lists.
290  */
291 static struct request *
292 deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
293 		      enum dd_data_dir data_dir)
294 {
295 	return deadline_from_pos(per_prio, data_dir,
296 				 per_prio->latest_pos[data_dir]);
297 }
298 
299 /*
300  * Returns true if and only if @rq started after @latest_start where
301  * @latest_start is in jiffies.
302  */
303 static bool started_after(struct deadline_data *dd, struct request *rq,
304 			  unsigned long latest_start)
305 {
306 	unsigned long start_time = (unsigned long)rq->fifo_time;
307 
308 	start_time -= dd->fifo_expire[rq_data_dir(rq)];
309 
310 	return time_after(start_time, latest_start);
311 }
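
/*
 * Note on the arithmetic above: for requests on the FIFO lists,
 * dd_insert_request() sets rq->fifo_time to "insertion time +
 * dd->fifo_expire[dir]", so subtracting fifo_expire[dir] again recovers the
 * insertion time. Worked example (numbers illustrative, HZ == 1000 assumed):
 * a read inserted at jiffy 1000 gets fifo_time 1500 with the default
 * read_expire of HZ / 2; started_after(dd, rq, 1200) then compares 1000 with
 * 1200 and returns false, i.e. the request did not start after jiffy 1200.
 */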
312 
313 /*
314  * __dd_dispatch_request selects the best request according to
315  * read/write expire, fifo_batch, etc. and with a start time <= @latest_start.
316  */
317 static struct request *__dd_dispatch_request(struct deadline_data *dd,
318 					     struct dd_per_prio *per_prio,
319 					     unsigned long latest_start)
320 {
321 	struct request *rq, *next_rq;
322 	enum dd_data_dir data_dir;
323 	enum dd_prio prio;
324 	u8 ioprio_class;
325 
326 	lockdep_assert_held(&dd->lock);
327 
328 	if (!list_empty(&per_prio->dispatch)) {
329 		rq = list_first_entry(&per_prio->dispatch, struct request,
330 				      queuelist);
331 		if (started_after(dd, rq, latest_start))
332 			return NULL;
333 		list_del_init(&rq->queuelist);
334 		data_dir = rq_data_dir(rq);
335 		goto done;
336 	}
337 
338 	/*
339 	 * batches are currently reads XOR writes
340 	 */
341 	rq = deadline_next_request(dd, per_prio, dd->last_dir);
342 	if (rq && dd->batching < dd->fifo_batch) {
343 		/* we have a next request and are still entitled to batch */
344 		data_dir = rq_data_dir(rq);
345 		goto dispatch_request;
346 	}
347 
348 	/*
349 	 * at this point we are not running a batch. select the appropriate
350 	 * data direction (read / write)
351 	 */
352 
353 	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
354 		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
355 
356 		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
357 		    (dd->starved++ >= dd->writes_starved))
358 			goto dispatch_writes;
359 
360 		data_dir = DD_READ;
361 
362 		goto dispatch_find_request;
363 	}
364 
365 	/*
366 	 * Either there are no reads, or writes have been starved long enough.
367 	 */
368 
369 	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
370 dispatch_writes:
371 		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
372 
373 		dd->starved = 0;
374 
375 		data_dir = DD_WRITE;
376 
377 		goto dispatch_find_request;
378 	}
379 
380 	return NULL;
381 
382 dispatch_find_request:
383 	/*
384 	 * we are not running a batch, find best request for selected data_dir
385 	 */
386 	next_rq = deadline_next_request(dd, per_prio, data_dir);
387 	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
388 		/*
389 		 * A deadline has expired, the last request was in the other
390 		 * direction, or we have run out of higher-sectored requests.
391 		 * Start again from the request with the earliest expiry time.
392 		 */
393 		rq = deadline_fifo_request(dd, per_prio, data_dir);
394 	} else {
395 		/*
396 		 * The last req was the same dir and we have a next request in
397 		 * sort order. No expired requests so continue on from here.
398 		 */
399 		rq = next_rq;
400 	}
401 
402 	if (!rq)
403 		return NULL;
404 
405 	dd->last_dir = data_dir;
406 	dd->batching = 0;
407 
408 dispatch_request:
409 	if (started_after(dd, rq, latest_start))
410 		return NULL;
411 
412 	/*
413 	 * rq is the request selected for dispatch.
414 	 */
415 	dd->batching++;
416 	deadline_move_request(dd, per_prio, rq);
417 done:
418 	ioprio_class = dd_rq_ioclass(rq);
419 	prio = ioprio_class_to_prio[ioprio_class];
420 	dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
421 	dd->per_prio[prio].stats.dispatched++;
422 	rq->rq_flags |= RQF_STARTED;
423 	return rq;
424 }
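
/*
 * Rough sketch of the batching behavior above with the default settings
 * (fifo_batch == 16): once a data direction has been selected, up to 16
 * requests that follow each other in sector order are dispatched back to
 * back before the FIFO deadlines are consulted again, trading a bounded
 * amount of extra latency for sequential throughput.
 */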
425 
426 /*
427  * Dispatch a request with a priority other than DD_RT_PRIO if one was
428  * inserted more than prio_aging_expire jiffies ago; return NULL otherwise.
429  */
430 static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
431 						      unsigned long now)
432 {
433 	struct request *rq;
434 	enum dd_prio prio;
435 	int prio_cnt;
436 
437 	lockdep_assert_held(&dd->lock);
438 
439 	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
440 		   !!dd_queued(dd, DD_IDLE_PRIO);
441 	if (prio_cnt < 2)
442 		return NULL;
443 
444 	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
445 		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
446 					   now - dd->prio_aging_expire);
447 		if (rq)
448 			return rq;
449 	}
450 
451 	return NULL;
452 }
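
/*
 * Example with the default settings (illustrative): if both real-time and
 * best-effort requests are pending and a best-effort request has been queued
 * for more than prio_aging_expire (10 seconds), the function above dispatches
 * that aged request even though real-time requests are still waiting, which
 * prevents indefinite starvation of the lower priority classes.
 */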
453 
454 /*
455  * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
456  *
457  * One confusing aspect here is that we get called for a specific
458  * hardware queue, but we may return a request that is for a
459  * different hardware queue. This is because mq-deadline has shared
460  * state for all hardware queues, in terms of sorting, FIFOs, etc.
461  */
462 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
463 {
464 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
465 	const unsigned long now = jiffies;
466 	struct request *rq;
467 	enum dd_prio prio;
468 
469 	spin_lock(&dd->lock);
470 	rq = dd_dispatch_prio_aged_requests(dd, now);
471 	if (rq)
472 		goto unlock;
473 
474 	/*
475 	 * Next, dispatch requests in priority order. Ignore lower priority
476 	 * requests if any higher priority requests are pending.
477 	 */
478 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
479 		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
480 		if (rq || dd_queued(dd, prio))
481 			break;
482 	}
483 
484 unlock:
485 	spin_unlock(&dd->lock);
486 
487 	return rq;
488 }
489 
490 /*
491  * 'depth' is a number in the range 1..INT_MAX representing a number of
492  * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
493  * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
494  * Values larger than q->nr_requests have the same effect as q->nr_requests.
495  */
496 static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
497 {
498 	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
499 	const unsigned int nrr = hctx->queue->nr_requests;
500 
501 	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
502 }
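
/*
 * Worked example (all numbers illustrative): with q->nr_requests == 128,
 * bt->sb.shift == 6 (64 bits per sbitmap word) and qdepth == 32, the result
 * is ((32 << 6) + 127) / 128 == 16, i.e. allowing 32 of the 128 scheduler
 * tags corresponds to a shallow depth of 16 in the 1..64 range expected by
 * sbitmap_get_shallow().
 */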
503 
504 /*
505  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
506  * function is used by __blk_mq_get_tag().
507  */
508 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
509 {
510 	struct deadline_data *dd = data->q->elevator->elevator_data;
511 
512 	/* Do not throttle synchronous reads. */
513 	if (op_is_sync(opf) && !op_is_write(opf))
514 		return;
515 
516 	/*
517 	 * Throttle asynchronous requests and writes such that these requests
518 	 * do not block the allocation of synchronous requests.
519 	 */
520 	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
521 }
522 
523 /* Called by blk_mq_update_nr_requests(). */
524 static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
525 {
526 	struct request_queue *q = hctx->queue;
527 	struct deadline_data *dd = q->elevator->elevator_data;
528 	struct blk_mq_tags *tags = hctx->sched_tags;
529 
530 	dd->async_depth = q->nr_requests;
531 
532 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
533 }
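
/*
 * Note: with async_depth set to q->nr_requests, dd_to_word_depth() yields the
 * full per-word depth, so asynchronous requests are not throttled at all by
 * default; throttling only takes effect once a smaller value is written to
 * the async_depth sysfs attribute.
 */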
534 
535 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
536 static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
537 {
538 	dd_depth_updated(hctx);
539 	return 0;
540 }
541 
542 static void dd_exit_sched(struct elevator_queue *e)
543 {
544 	struct deadline_data *dd = e->elevator_data;
545 	enum dd_prio prio;
546 
547 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
548 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
549 		const struct io_stats_per_prio *stats = &per_prio->stats;
550 		uint32_t queued;
551 
552 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
553 		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
554 
555 		spin_lock(&dd->lock);
556 		queued = dd_queued(dd, prio);
557 		spin_unlock(&dd->lock);
558 
559 		WARN_ONCE(queued != 0,
560 			  "statistics for priority %d: i %u m %u d %u c %u\n",
561 			  prio, stats->inserted, stats->merged,
562 			  stats->dispatched, atomic_read(&stats->completed));
563 	}
564 
565 	kfree(dd);
566 }
567 
568 /*
569  * initialize elevator private data (deadline_data).
570  */
571 static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
572 {
573 	struct deadline_data *dd;
574 	struct elevator_queue *eq;
575 	enum dd_prio prio;
576 	int ret = -ENOMEM;
577 
578 	eq = elevator_alloc(q, e);
579 	if (!eq)
580 		return ret;
581 
582 	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
583 	if (!dd)
584 		goto put_eq;
585 
586 	eq->elevator_data = dd;
587 
588 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
589 		struct dd_per_prio *per_prio = &dd->per_prio[prio];
590 
591 		INIT_LIST_HEAD(&per_prio->dispatch);
592 		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
593 		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
594 		per_prio->sort_list[DD_READ] = RB_ROOT;
595 		per_prio->sort_list[DD_WRITE] = RB_ROOT;
596 	}
597 	dd->fifo_expire[DD_READ] = read_expire;
598 	dd->fifo_expire[DD_WRITE] = write_expire;
599 	dd->writes_starved = writes_starved;
600 	dd->front_merges = 1;
601 	dd->last_dir = DD_WRITE;
602 	dd->fifo_batch = fifo_batch;
603 	dd->prio_aging_expire = prio_aging_expire;
604 	spin_lock_init(&dd->lock);
605 
606 	/* We dispatch queue-wide instead of per hardware queue. */
607 	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
608 
609 	q->elevator = eq;
610 	return 0;
611 
612 put_eq:
613 	kobject_put(&eq->kobj);
614 	return ret;
615 }
616 
617 /*
618  * Try to merge @bio into an existing request. If @bio has been merged into
619  * an existing request, store the pointer to that request into *@rq.
620  */
621 static int dd_request_merge(struct request_queue *q, struct request **rq,
622 			    struct bio *bio)
623 {
624 	struct deadline_data *dd = q->elevator->elevator_data;
625 	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
626 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
627 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
628 	sector_t sector = bio_end_sector(bio);
629 	struct request *__rq;
630 
631 	if (!dd->front_merges)
632 		return ELEVATOR_NO_MERGE;
633 
634 	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
635 	if (__rq) {
636 		BUG_ON(sector != blk_rq_pos(__rq));
637 
638 		if (elv_bio_merge_ok(__rq, bio)) {
639 			*rq = __rq;
640 			if (blk_discard_mergable(__rq))
641 				return ELEVATOR_DISCARD_MERGE;
642 			return ELEVATOR_FRONT_MERGE;
643 		}
644 	}
645 
646 	return ELEVATOR_NO_MERGE;
647 }
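
/*
 * Illustrative example for the front-merge lookup above: the rbtree is keyed
 * on the start sector of each request and elv_rb_find() is passed
 * bio_end_sector(bio), so a bio covering sectors 100-107 (end sector 108) can
 * only front-merge with a queued request that starts exactly at sector 108
 * (the sector numbers are made up).
 */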
648 
649 /*
650  * Attempt to merge a bio into an existing request. This function is called
651  * before @bio is associated with a request.
652  */
653 static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
654 		unsigned int nr_segs)
655 {
656 	struct deadline_data *dd = q->elevator->elevator_data;
657 	struct request *free = NULL;
658 	bool ret;
659 
660 	spin_lock(&dd->lock);
661 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
662 	spin_unlock(&dd->lock);
663 
664 	if (free)
665 		blk_mq_free_request(free);
666 
667 	return ret;
668 }
669 
670 /*
671  * add rq to rbtree and fifo
672  */
673 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
674 			      blk_insert_t flags, struct list_head *free)
675 {
676 	struct request_queue *q = hctx->queue;
677 	struct deadline_data *dd = q->elevator->elevator_data;
678 	const enum dd_data_dir data_dir = rq_data_dir(rq);
679 	u16 ioprio = req_get_ioprio(rq);
680 	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
681 	struct dd_per_prio *per_prio;
682 	enum dd_prio prio;
683 
684 	lockdep_assert_held(&dd->lock);
685 
686 	prio = ioprio_class_to_prio[ioprio_class];
687 	per_prio = &dd->per_prio[prio];
688 	if (!rq->elv.priv[0])
689 		per_prio->stats.inserted++;
690 	rq->elv.priv[0] = per_prio;
691 
692 	if (blk_mq_sched_try_insert_merge(q, rq, free))
693 		return;
694 
695 	trace_block_rq_insert(rq);
696 
697 	if (flags & BLK_MQ_INSERT_AT_HEAD) {
698 		list_add(&rq->queuelist, &per_prio->dispatch);
699 		rq->fifo_time = jiffies;
700 	} else {
701 		struct list_head *insert_before;
702 
703 		deadline_add_rq_rb(per_prio, rq);
704 
705 		if (rq_mergeable(rq)) {
706 			elv_rqhash_add(q, rq);
707 			if (!q->last_merge)
708 				q->last_merge = rq;
709 		}
710 
711 		/*
712 		 * set expire time and add to fifo list
713 		 */
714 		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
715 		insert_before = &per_prio->fifo_list[data_dir];
716 		list_add_tail(&rq->queuelist, insert_before);
717 	}
718 }
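
/*
 * Example of the resulting FIFO deadlines with the defaults (illustrative):
 * a read inserted at jiffy J gets fifo_time J + read_expire (HZ / 2, i.e.
 * 0.5 s) and a write gets fifo_time J + write_expire (5 * HZ, i.e. 5 s);
 * deadline_check_fifo() reports the request as expired once that time has
 * passed.
 */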
719 
720 /*
721  * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
722  */
723 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
724 			       struct list_head *list,
725 			       blk_insert_t flags)
726 {
727 	struct request_queue *q = hctx->queue;
728 	struct deadline_data *dd = q->elevator->elevator_data;
729 	LIST_HEAD(free);
730 
731 	spin_lock(&dd->lock);
732 	while (!list_empty(list)) {
733 		struct request *rq;
734 
735 		rq = list_first_entry(list, struct request, queuelist);
736 		list_del_init(&rq->queuelist);
737 		dd_insert_request(hctx, rq, flags, &free);
738 	}
739 	spin_unlock(&dd->lock);
740 
741 	blk_mq_free_requests(&free);
742 }
743 
744 /* Callback from inside blk_mq_rq_ctx_init(). */
745 static void dd_prepare_request(struct request *rq)
746 {
747 	rq->elv.priv[0] = NULL;
748 }
749 
750 /*
751  * Callback from inside blk_mq_free_request().
752  */
753 static void dd_finish_request(struct request *rq)
754 {
755 	struct dd_per_prio *per_prio = rq->elv.priv[0];
756 
757 	/*
758 	 * The block layer core may call dd_finish_request() without having
759 	 * called dd_insert_requests(). Skip requests that bypassed I/O
760 	 * scheduling. See also blk_mq_request_bypass_insert().
761 	 */
762 	if (per_prio)
763 		atomic_inc(&per_prio->stats.completed);
764 }
765 
766 static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
767 {
768 	return !list_empty_careful(&per_prio->dispatch) ||
769 		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
770 		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
771 }
772 
773 static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
774 {
775 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
776 	enum dd_prio prio;
777 
778 	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
779 		if (dd_has_work_for_prio(&dd->per_prio[prio]))
780 			return true;
781 
782 	return false;
783 }
784 
785 /*
786  * sysfs parts below
787  */
788 #define SHOW_INT(__FUNC, __VAR)						\
789 static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
790 {									\
791 	struct deadline_data *dd = e->elevator_data;			\
792 									\
793 	return sysfs_emit(page, "%d\n", __VAR);				\
794 }
795 #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
796 SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
797 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
798 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
799 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
800 SHOW_INT(deadline_front_merges_show, dd->front_merges);
801 SHOW_INT(deadline_async_depth_show, dd->async_depth);
802 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
803 #undef SHOW_INT
804 #undef SHOW_JIFFIES
805 
806 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
807 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
808 {									\
809 	struct deadline_data *dd = e->elevator_data;			\
810 	int __data, __ret;						\
811 									\
812 	__ret = kstrtoint(page, 0, &__data);				\
813 	if (__ret < 0)							\
814 		return __ret;						\
815 	if (__data < (MIN))						\
816 		__data = (MIN);						\
817 	else if (__data > (MAX))					\
818 		__data = (MAX);						\
819 	*(__PTR) = __CONV(__data);					\
820 	return count;							\
821 }
822 #define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
823 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
824 #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
825 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
826 STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
827 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
828 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
829 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
830 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
831 STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
832 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
833 #undef STORE_FUNCTION
834 #undef STORE_INT
835 #undef STORE_JIFFIES
836 
837 #define DD_ATTR(name) \
838 	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
839 
840 static struct elv_fs_entry deadline_attrs[] = {
841 	DD_ATTR(read_expire),
842 	DD_ATTR(write_expire),
843 	DD_ATTR(writes_starved),
844 	DD_ATTR(front_merges),
845 	DD_ATTR(async_depth),
846 	DD_ATTR(fifo_batch),
847 	DD_ATTR(prio_aging_expire),
848 	__ATTR_NULL
849 };
850 
851 #ifdef CONFIG_BLK_DEBUG_FS
852 #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
853 static void *deadline_##name##_fifo_start(struct seq_file *m,		\
854 					  loff_t *pos)			\
855 	__acquires(&dd->lock)						\
856 {									\
857 	struct request_queue *q = m->private;				\
858 	struct deadline_data *dd = q->elevator->elevator_data;		\
859 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
860 									\
861 	spin_lock(&dd->lock);						\
862 	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
863 }									\
864 									\
865 static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
866 					 loff_t *pos)			\
867 {									\
868 	struct request_queue *q = m->private;				\
869 	struct deadline_data *dd = q->elevator->elevator_data;		\
870 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
871 									\
872 	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
873 }									\
874 									\
875 static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
876 	__releases(&dd->lock)						\
877 {									\
878 	struct request_queue *q = m->private;				\
879 	struct deadline_data *dd = q->elevator->elevator_data;		\
880 									\
881 	spin_unlock(&dd->lock);						\
882 }									\
883 									\
884 static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
885 	.start	= deadline_##name##_fifo_start,				\
886 	.next	= deadline_##name##_fifo_next,				\
887 	.stop	= deadline_##name##_fifo_stop,				\
888 	.show	= blk_mq_debugfs_rq_show,				\
889 };									\
890 									\
891 static int deadline_##name##_next_rq_show(void *data,			\
892 					  struct seq_file *m)		\
893 {									\
894 	struct request_queue *q = data;					\
895 	struct deadline_data *dd = q->elevator->elevator_data;		\
896 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
897 	struct request *rq;						\
898 									\
899 	rq = deadline_from_pos(per_prio, data_dir,			\
900 			       per_prio->latest_pos[data_dir]);		\
901 	if (rq)								\
902 		__blk_mq_debugfs_rq_show(m, rq);			\
903 	return 0;							\
904 }
905 
906 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
907 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
908 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
909 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
910 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
911 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
912 #undef DEADLINE_DEBUGFS_DDIR_ATTRS
913 
914 static int deadline_batching_show(void *data, struct seq_file *m)
915 {
916 	struct request_queue *q = data;
917 	struct deadline_data *dd = q->elevator->elevator_data;
918 
919 	seq_printf(m, "%u\n", dd->batching);
920 	return 0;
921 }
922 
923 static int deadline_starved_show(void *data, struct seq_file *m)
924 {
925 	struct request_queue *q = data;
926 	struct deadline_data *dd = q->elevator->elevator_data;
927 
928 	seq_printf(m, "%u\n", dd->starved);
929 	return 0;
930 }
931 
932 static int dd_async_depth_show(void *data, struct seq_file *m)
933 {
934 	struct request_queue *q = data;
935 	struct deadline_data *dd = q->elevator->elevator_data;
936 
937 	seq_printf(m, "%u\n", dd->async_depth);
938 	return 0;
939 }
940 
941 static int dd_queued_show(void *data, struct seq_file *m)
942 {
943 	struct request_queue *q = data;
944 	struct deadline_data *dd = q->elevator->elevator_data;
945 	u32 rt, be, idle;
946 
947 	spin_lock(&dd->lock);
948 	rt = dd_queued(dd, DD_RT_PRIO);
949 	be = dd_queued(dd, DD_BE_PRIO);
950 	idle = dd_queued(dd, DD_IDLE_PRIO);
951 	spin_unlock(&dd->lock);
952 
953 	seq_printf(m, "%u %u %u\n", rt, be, idle);
954 
955 	return 0;
956 }
957 
958 /* Number of requests owned by the block driver for a given priority. */
959 static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
960 {
961 	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
962 
963 	lockdep_assert_held(&dd->lock);
964 
965 	return stats->dispatched + stats->merged -
966 		atomic_read(&stats->completed);
967 }
968 
969 static int dd_owned_by_driver_show(void *data, struct seq_file *m)
970 {
971 	struct request_queue *q = data;
972 	struct deadline_data *dd = q->elevator->elevator_data;
973 	u32 rt, be, idle;
974 
975 	spin_lock(&dd->lock);
976 	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
977 	be = dd_owned_by_driver(dd, DD_BE_PRIO);
978 	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
979 	spin_unlock(&dd->lock);
980 
981 	seq_printf(m, "%u %u %u\n", rt, be, idle);
982 
983 	return 0;
984 }
985 
986 #define DEADLINE_DISPATCH_ATTR(prio)					\
987 static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
988 					     loff_t *pos)		\
989 	__acquires(&dd->lock)						\
990 {									\
991 	struct request_queue *q = m->private;				\
992 	struct deadline_data *dd = q->elevator->elevator_data;		\
993 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
994 									\
995 	spin_lock(&dd->lock);						\
996 	return seq_list_start(&per_prio->dispatch, *pos);		\
997 }									\
998 									\
999 static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
1000 					    void *v, loff_t *pos)	\
1001 {									\
1002 	struct request_queue *q = m->private;				\
1003 	struct deadline_data *dd = q->elevator->elevator_data;		\
1004 	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
1005 									\
1006 	return seq_list_next(v, &per_prio->dispatch, pos);		\
1007 }									\
1008 									\
1009 static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
1010 	__releases(&dd->lock)						\
1011 {									\
1012 	struct request_queue *q = m->private;				\
1013 	struct deadline_data *dd = q->elevator->elevator_data;		\
1014 									\
1015 	spin_unlock(&dd->lock);						\
1016 }									\
1017 									\
1018 static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
1019 	.start	= deadline_dispatch##prio##_start,			\
1020 	.next	= deadline_dispatch##prio##_next,			\
1021 	.stop	= deadline_dispatch##prio##_stop,			\
1022 	.show	= blk_mq_debugfs_rq_show,				\
1023 }
1024 
1025 DEADLINE_DISPATCH_ATTR(0);
1026 DEADLINE_DISPATCH_ATTR(1);
1027 DEADLINE_DISPATCH_ATTR(2);
1028 #undef DEADLINE_DISPATCH_ATTR
1029 
1030 #define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
1031 	{#name "_fifo_list", 0400,					\
1032 			.seq_ops = &deadline_##name##_fifo_seq_ops}
1033 #define DEADLINE_NEXT_RQ_ATTR(name)					\
1034 	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
1035 static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
1036 	DEADLINE_QUEUE_DDIR_ATTRS(read0),
1037 	DEADLINE_QUEUE_DDIR_ATTRS(write0),
1038 	DEADLINE_QUEUE_DDIR_ATTRS(read1),
1039 	DEADLINE_QUEUE_DDIR_ATTRS(write1),
1040 	DEADLINE_QUEUE_DDIR_ATTRS(read2),
1041 	DEADLINE_QUEUE_DDIR_ATTRS(write2),
1042 	DEADLINE_NEXT_RQ_ATTR(read0),
1043 	DEADLINE_NEXT_RQ_ATTR(write0),
1044 	DEADLINE_NEXT_RQ_ATTR(read1),
1045 	DEADLINE_NEXT_RQ_ATTR(write1),
1046 	DEADLINE_NEXT_RQ_ATTR(read2),
1047 	DEADLINE_NEXT_RQ_ATTR(write2),
1048 	{"batching", 0400, deadline_batching_show},
1049 	{"starved", 0400, deadline_starved_show},
1050 	{"async_depth", 0400, dd_async_depth_show},
1051 	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
1052 	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
1053 	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
1054 	{"owned_by_driver", 0400, dd_owned_by_driver_show},
1055 	{"queued", 0400, dd_queued_show},
1056 	{},
1057 };
1058 #undef DEADLINE_QUEUE_DDIR_ATTRS
1059 #endif
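
/*
 * When CONFIG_BLK_DEBUG_FS is enabled, the attributes above appear in the
 * scheduler's debugfs directory, e.g. (path illustrative, it depends on where
 * debugfs is mounted and on the device name):
 *
 *   /sys/kernel/debug/block/sda/sched/read0_fifo_list
 *   /sys/kernel/debug/block/sda/sched/batching
 *   /sys/kernel/debug/block/sda/sched/queued
 */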
1060 
1061 static struct elevator_type mq_deadline = {
1062 	.ops = {
1063 		.depth_updated		= dd_depth_updated,
1064 		.limit_depth		= dd_limit_depth,
1065 		.insert_requests	= dd_insert_requests,
1066 		.dispatch_request	= dd_dispatch_request,
1067 		.prepare_request	= dd_prepare_request,
1068 		.finish_request		= dd_finish_request,
1069 		.next_request		= elv_rb_latter_request,
1070 		.former_request		= elv_rb_former_request,
1071 		.bio_merge		= dd_bio_merge,
1072 		.request_merge		= dd_request_merge,
1073 		.requests_merged	= dd_merged_requests,
1074 		.request_merged		= dd_request_merged,
1075 		.has_work		= dd_has_work,
1076 		.init_sched		= dd_init_sched,
1077 		.exit_sched		= dd_exit_sched,
1078 		.init_hctx		= dd_init_hctx,
1079 	},
1080 
1081 #ifdef CONFIG_BLK_DEBUG_FS
1082 	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
1083 #endif
1084 	.elevator_attrs = deadline_attrs,
1085 	.elevator_name = "mq-deadline",
1086 	.elevator_alias = "deadline",
1087 	.elevator_owner = THIS_MODULE,
1088 };
1089 MODULE_ALIAS("mq-deadline-iosched");
1090 
1091 static int __init deadline_init(void)
1092 {
1093 	return elv_register(&mq_deadline);
1094 }
1095 
1096 static void __exit deadline_exit(void)
1097 {
1098 	elv_unregister(&mq_deadline);
1099 }
1100 
1101 module_init(deadline_init);
1102 module_exit(deadline_exit);
1103 
1104 MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
1105 MODULE_LICENSE("GPL");
1106 MODULE_DESCRIPTION("MQ deadline IO scheduler");
1107