/* xref: /linux/block/elevator.c (revision f37130533f68711fd6bae2c79950b8e72002bad6) */
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug-in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

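/*
 * Requests are hashed by the sector at which they end (rq_hash_key()),
 * so a bio that starts exactly where a request ends lands in the same
 * bucket and can be found for a back merge.  Worked example: a request
 * at sector 100 spanning 8 sectors has rq_hash_key() == 108; a bio with
 * bi_sector == 108 hashes via ELV_HASH_FN(108) to the same bucket and is
 * a back-merge candidate (see elv_rqhash_find() below).
 */
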
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * Can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

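/*
 * elevator_get() returns the named type with a module reference held, or
 * NULL.  With try_loading, a missing scheduler is requested from userspace
 * as "<name>-iosched" (e.g. "cfq" loads cfq-iosched.ko) and then looked up
 * again.  Callers pair a successful elevator_get() with elevator_put().
 */
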
static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

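/*
 * For example, booting with "elevator=deadline" on the kernel command
 * line copies "deadline" into chosen_elevator, which elevator_init() and
 * load_default_elevator_module() consult below.
 */
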
/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

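/*
 * Note on reference ownership: elevator_alloc() consumes the reference on
 * @e taken by elevator_get(); on failure it drops that reference itself
 * (see the err path below), so callers must not call elevator_put() again.
 */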
static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by the "elevator=" boot
	 * parameter or the kernel config option.  Don't try to load
	 * modules here: we may be running from an async context, and
	 * request_module() isn't allowed from async.
	 */
	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop", false);
		}
	}

	q->elevator = elevator_alloc(q, e);
	if (!q->elevator)
		return -ENOMEM;

	err = e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(elevator_init);

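/*
 * Selection order in elevator_init(): an explicit @name wins, then the
 * "elevator=" boot parameter, then CONFIG_DEFAULT_IOSCHED, and finally
 * the built-in noop scheduler as a last resort.  Typical callers (e.g.
 * blk_init_allocated_queue()) pass name == NULL to get that default chain.
 */
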
void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for insertion/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

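/*
 * How an elevator typically uses these helpers for a front-merge lookup --
 * a minimal sketch modeled on the deadline scheduler of this era (the
 * names dd and sort_list belong to that scheduler and are shown here only
 * for illustration):
 *
 *	sector_t sector = bio->bi_sector + bio_sectors(bio);
 *	struct request *__rq;
 *
 *	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
 *	if (__rq && elv_rq_merge_ok(__rq, bio))
 *		return ELEVATOR_FRONT_MERGE;	// bio ends where __rq begins
 *
 * Requests are keyed by blk_rq_pos() (their start sector), so looking up
 * the bio's end sector finds a request the bio can be front-merged into.
 */
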
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

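/*
 * Worked example of the boundary logic above: the dispatch list holds
 * requests at or above q->end_sector in ascending order, followed by
 * wrapped-around requests below it, also ascending.  With a boundary of
 * 1000, a new request at sector 1500 scans backwards from the tail, skips
 * over the wrapped (<1000) segment, and slots into the >=1000 segment in
 * sector order; a new request at sector 500 stops as soon as it reaches
 * the >=1000 segment or a request at or below 500.  This preserves
 * one-way "elevator" order from the current head position.
 */
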
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

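/*
 * elv_merge() returns ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE with
 * *req set to the request to merge into, or ELEVATOR_NO_MERGE.  The order
 * of attempts above is deliberate: the one-hit cache (q->last_merge) is
 * cheapest, the hash covers back merges, and only then is the scheduler's
 * own merge hook (e.g. an rbtree front-merge lookup) consulted.
 */
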
/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * The request has already been dequeued once, so decrement the
	 * in_flight count again.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are a scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
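		/* fall through: merge failed, treat as a plain sort insert */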
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

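/*
 * Locking contract: __elv_add_request() must be called with q->queue_lock
 * already held; elv_add_request() is the convenience wrapper that takes
 * and releases the lock (IRQ-safe) around it.
 */
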
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * The request has been released by the driver, so its I/O is done.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

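/*
 * A minimal registration sketch (hypothetical "example" module, modeled
 * on the noop scheduler of this era; only a few hooks shown):
 *
 *	static struct elevator_type elevator_example = {
 *		.ops = {
 *			.elevator_dispatch_fn	= example_dispatch,
 *			.elevator_add_req_fn	= example_add_request,
 *			.elevator_init_fn	= example_init_queue,
 *			.elevator_exit_fn	= example_exit_queue,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&elevator_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		elv_unregister(&elevator_example);
 *	}
 *
 * Built as a module named "example-iosched", it can then be loaded on
 * demand by elevator_get() above.
 */
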
void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to new_e io scheduler.  Be careful not to introduce deadlocks:
 * don't free the old io scheduler before we have allocated what we need
 * for the new one.  That way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	blk_queue_bypass_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = -ENOMEM;
	q->elevator = elevator_alloc(q, new_e);
	if (!q->elevator)
		goto fail_init;

	err = new_e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		goto fail_init;
	}

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	blk_queue_bypass_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	blk_queue_bypass_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);

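/*
 * elevator_change() is what backs the sysfs knob: writing a scheduler
 * name to /sys/block/<dev>/queue/scheduler ends up here via
 * elv_iosched_store() below.  Whitespace is stripped, so "echo deadline"
 * (with its trailing newline) works as expected.
 */
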
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}

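/*
 * Reading the sysfs file produces one space-separated line with the
 * active scheduler bracketed, e.g. "noop deadline [cfq]", or "none" for
 * queues without an elevator.
 */
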
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);