// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */
struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (struct request) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next request in sector-sorted order; either or both entries may be NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	spinlock_t lock;
	spinlock_t zone_lock;
	struct list_head dispatch;
};

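/*
 * Return the sector-sorted rbtree that holds requests of the same data
 * direction as @rq (one tree for reads, one for writes).
 */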
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

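/*
 * Add @rq to the rbtree that keeps requests of its data direction sorted by
 * start sector.
 */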
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}

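/*
 * Remove @rq from the sort tree. If @rq is the cached next request for its
 * direction, advance the cache to its sector-order successor first.
 */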
static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(dd, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

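/*
 * Called after a bio has been merged into @req. A front merge changes the
 * start sector of @req, so the request must be repositioned in the sort tree.
 */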
static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

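/*
 * Called after @next has been merged into @req: let @req inherit the earlier
 * deadline of the two and drop @next from the scheduler.
 */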
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	if (list_empty(&dd->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	rq = dd->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * __dd_dispatch_request() selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
	struct request *rq, *next_rq;
	bool reads, writes;
	int data_dir;

	if (!list_empty(&dd->dispatch)) {
		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	reads = !list_empty(&dd->fifo_list[READ]);
	writes = !list_empty(&dd->fifo_list[WRITE]);

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, WRITE);
	if (!rq)
		rq = deadline_next_request(dd, READ);

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (deadline_fifo_request(dd, WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * either there are no reads, or writes have been starved long enough
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, data_dir);
	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);
done:
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	spin_lock(&dd->lock);
	rq = __dd_dispatch_request(dd);
	spin_unlock(&dd->lock);
	if (rq)
		atomic_dec(&rq->mq_hctx->elevator_queued);

	return rq;
}

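/*
 * Free the scheduler private data when the elevator is switched away from or
 * the queue is torn down. Both FIFO lists must be empty by this point.
 */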
static void dd_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);
	INIT_LIST_HEAD(&dd->dispatch);

	q->elevator = eq;
	return 0;
}

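/*
 * Check whether @bio can be front-merged into an already queued request by
 * looking up a request that starts exactly where @bio ends in the sort tree
 * for the bio's data direction.
 */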
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

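/*
 * Try to merge @bio into an already queued request before a new request is
 * allocated for it. dd->lock serializes the attempt against insertion and
 * dispatch; a request made redundant by the merge is freed afterwards.
 */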
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
		unsigned int nr_segs)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	if (blk_mq_sched_try_insert_merge(q, rq))
		return;

	blk_mq_sched_request_inserted(rq);

	if (at_head || blk_rq_is_passthrough(rq)) {
		if (at_head)
			list_add(&rq->queuelist, &dd->dispatch);
		else
			list_add_tail(&rq->queuelist, &dd->dispatch);
	} else {
		deadline_add_rq_rb(dd, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
	}
}

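/*
 * Insert a batch of requests under a single acquisition of dd->lock. The
 * per-hctx elevator_queued counter tracks how many requests the scheduler
 * currently owns (it is decremented again on dispatch).
 */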
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
		atomic_inc(&hctx->elevator_queued);
	}
	spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that .finish_request
 * method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (blk_queue_is_zoned(q)) {
		struct deadline_data *dd = q->elevator->elevator_data;
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&dd->fifo_list[WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

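/*
 * Tell blk-mq whether the scheduler still owns requests. The elevator_queued
 * counter provides a cheap early exit; otherwise the dispatch and FIFO lists
 * are checked without taking dd->lock.
 */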
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	if (!atomic_read(&hctx->elevator_queued))
		return false;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
 */
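/*
 * The attributes defined below appear under /sys/block/<disk>/queue/iosched/
 * once this scheduler is active, with the expire values shown in milliseconds
 * (the show/store helpers convert from/to jiffies). For example, assuming a
 * hypothetical disk named "sda":
 *
 *	echo mq-deadline > /sys/block/sda/queue/scheduler
 *	echo 100 > /sys/block/sda/queue/iosched/read_expire
 *	cat /sys/block/sda/queue/iosched/fifo_batch
 */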
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	deadline_var_store(&__data, (page));				\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};

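/*
 * debugfs attributes exposing the scheduler's internal state: the per
 * direction FIFO contents and cached next request, the batching and starved
 * counters, and the dispatch list. With CONFIG_BLK_DEBUG_FS enabled these are
 * typically found under /sys/kernel/debug/block/<disk>/sched/.
 */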
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)				\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&dd->fifo_list[ddir], *pos);		\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	return seq_list_next(v, &dd->fifo_list[ddir], pos);		\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct request *rq = dd->next_rq[ddir];				\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
	__releases(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
	.start	= deadline_dispatch_start,
	.next	= deadline_dispatch_next,
	.stop	= deadline_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)						\
	{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops},	\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read),
	DEADLINE_QUEUE_DDIR_ATTRS(write),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

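/*
 * Hook the scheduler into the blk-mq elevator framework. The
 * ELEVATOR_F_ZBD_SEQ_WRITE feature flag advertises that this scheduler can
 * preserve the sequential write ordering required by zoned block devices,
 * which is what the zone_lock/zone write locking logic above implements.
 */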
static struct elevator_type mq_deadline = {
	.ops = {
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_queue,
		.exit_sched		= dd_exit_queue,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");