/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction, don't merge (started and otherwise
	 * unmergeable requests were already rejected by rq_mergeable())
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * same device and no special stuff set, merge is ok
	 */
	if (rq->rq_disk == bio->bi_bdev->bd_disk &&
	    !rq->waiting && !rq->special)
		return 1;

	return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

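/*
 * Classify the merge: a back merge appends the bio where the request
 * ends, a front merge has the bio ending exactly where the request
 * begins.
 */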
static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}

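/*
 * Look up a registered elevator type by name.  Caller must hold
 * elv_list_lock.
 */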
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e = NULL;
	struct list_head *entry;

	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);

		if (!strcmp(__e->elevator_name, name)) {
			e = __e;
			break;
		}
	}

	return e;
}

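/*
 * elevator_get() resolves a name to a type and pins the implementing
 * module; elevator_put() drops that module reference.
 */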
static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock_irq(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock_irq(&elv_list_lock);

	return e;
}

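/*
 * Bind a newly allocated elevator_queue to q and run the type's init
 * hook, if any.
 */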
static int elevator_attach(request_queue_t *q, struct elevator_type *e,
			   struct elevator_queue *eq)
{
	int ret = 0;

	memset(eq, 0, sizeof(*eq));
	eq->ops = &e->ops;
	eq->elevator_type = e;

	q->elevator = eq;

	if (eq->ops->elevator_init_fn)
		ret = eq->ops->elevator_init_fn(q, eq);

	return ret;
}

static char chosen_elevator[16];

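/*
 * Parse the "elevator=" boot parameter: the chosen name becomes the
 * default scheduler for queues initialized without an explicit name,
 * e.g. booting with "elevator=deadline".
 */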
static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 0;
}

__setup("elevator=", elevator_setup);

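/*
 * Pick and attach a scheduler for q.  Selection order: the caller's
 * name, the "elevator=" boot parameter, CONFIG_DEFAULT_IOSCHED, and
 * finally the built-in noop scheduler as a last resort.
 */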
int elevator_init(request_queue_t *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name && !(e = elevator_get(name)))
		return -EINVAL;

	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
		printk(KERN_ERR "I/O scheduler %s not found\n", chosen_elevator);

	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
		printk(KERN_ERR "Default I/O scheduler not found, using noop\n");
		e = elevator_get("noop");
	}

	eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
	if (!eq) {
		elevator_put(e);
		return -ENOMEM;
	}

	ret = elevator_attach(q, e, eq);
	if (ret) {
		kfree(eq);
		elevator_put(e);
	}

	return ret;
}

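/*
 * Tear down a scheduler instance: run its exit hook, drop the module
 * reference and free the queue-side state.
 */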
void elevator_exit(elevator_t *e)
{
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);

	elevator_put(e->elevator_type);
	e->elevator_type = NULL;
	kfree(e);
}

/*
 * Sort-insert rq into the dispatch queue of q: requests stay ordered by
 * sector, but never cross the end_sector boundary and never pass barrier
 * or already started requests.  Queue lock must be held on entry.  To be
 * used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;
	q->nr_sorted--;

	boundary = q->end_sector;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}

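/*
 * Find a request that bio may be merged into: try the cached last_merge
 * request first, then fall back to the scheduler's own merge scan.  On
 * success, *req points at the request to merge with.
 */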
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	int ret;

	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

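/*
 * A bio was merged into rq; let the scheduler update its bookkeeping
 * and remember rq as the candidate for the next merge attempt.
 */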
void elv_merged_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq);

	q->last_merge = rq;
}

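/*
 * rq and next were merged into a single request; next is going away,
 * so it no longer counts towards nr_sorted.
 */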
void elv_merge_requests(request_queue_t *q, struct request *rq,
			struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);
	q->nr_sorted--;

	q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
			e->ops->elevator_deactivate_req_fn(q, rq);
	}

	rq->flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

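/*
 * Force the scheduler to dispatch everything it is still holding back;
 * used for barriers and for elevator switching.
 */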
static void elv_drain_elevator(request_queue_t *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

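/*
 * Do the insertion 'where' dictates.  Queue lock must be held; kicks
 * the unplug machinery when enough requests have queued up.
 */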
void elv_insert(request_queue_t *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and is returning them now.  As
		 *   the queue wasn't empty before this request,
		 *   ll_rw_blk won't run the queue on return, resulting
		 *   in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->flags |= REQ_SORTED;
		q->nr_sorted++;
		if (q->last_merge == NULL && rq_mergeable(rq))
			q->last_merge = rq;
		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->flags |= REQ_SOFTBARRIER;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
		BUG();
	}

	if (blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

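/*
 * Insert rq after applying barrier and ordered-color fixups.  Caller
 * must hold the queue lock; elv_add_request() below is the locking
 * wrapper.
 */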
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->flags |= REQ_ORDERED_COLOR;

	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is a scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}

void elv_add_request(request_queue_t *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

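/*
 * Peek at the next request to hand to the driver, letting the ordered
 * flush machinery filter it and refilling the dispatch queue from the
 * scheduler whenever it runs empty.
 */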
static inline struct request *__elv_next_request(request_queue_t *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

struct request *elv_next_request(request_queue_t *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->flags & REQ_STARTED)) {
			elevator_t *e = q->elevator;

			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq) &&
			    e->ops->elevator_activate_req_fn)
				e->ops->elevator_activate_req_fn(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->flags |= REQ_STARTED;
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			int nr_bytes = rq->hard_nr_sectors << 9;

			if (!nr_bytes)
				nr_bytes = rq->data_len;

			blkdev_dequeue_request(rq);
			rq->flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
			       ret);
			break;
		}
	}

	return rq;
}

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}

int elv_queue_empty(request_queue_t *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}

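/*
 * Ask the scheduler for the request sorted after (latter) or before
 * (former) rq; used by the merge logic in ll_rw_blk.
 */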
struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

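/*
 * Hooks for allocating and freeing per-request scheduler private data,
 * called from the request allocation path.
 */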
int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		    gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw, bio);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);
		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
}

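/*
 * Register the "iosched" kobject under the queue's sysfs directory so
 * the scheduler can export its tunables.
 */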
int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;

	e->kobj.parent = kobject_get(&q->kobj);
	if (!e->kobj.parent)
		return -EBUSY;

	snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
	e->kobj.ktype = e->elevator_type->elevator_ktype;

	return kobject_register(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		elevator_t *e = q->elevator;
		kobject_unregister(&e->kobj);
		kobject_put(&q->kobj);
	}
}

int elv_register(struct elevator_type *e)
{
	spin_lock_irq(&elv_list_lock);
	if (elevator_find(e->elevator_name))
		BUG();
	list_add_tail(&e->list, &elv_list);
	spin_unlock_irq(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				printk(" (default)");
	printk("\n");
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

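/*
 * A scheduler module typically calls elv_register() from its init hook.
 * A minimal sketch, with hypothetical names, modeled on the in-tree
 * noop scheduler of this era:
 *
 *	static struct elevator_type elevator_foo = {
 *		.ops = {
 *			.elevator_dispatch_fn	= foo_dispatch,
 *			.elevator_add_req_fn	= foo_add_request,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&elevator_foo);
 *	}
 */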
void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate over every thread in the system and drop its
	 * scheduler-specific io contexts.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		struct io_context *ioc = p->io_context;
		if (ioc && ioc->cic) {
			ioc->cic->exit(ioc->cic);
			ioc->cic->dtor(ioc->cic);
			ioc->cic = NULL;
		}
		if (ioc && ioc->aic) {
			ioc->aic->exit(ioc->aic);
			ioc->aic->dtor(ioc->aic);
			ioc->aic = NULL;
		}
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	spin_lock_irq(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to new_e io scheduler.  Be careful not to introduce deadlocks:
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one.  This way we have a chance of going back to the
 * old one if the new one fails init for some reason.
 */
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;

	/*
	 * Allocate new elevator
	 */
	e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
	if (!e)
		goto error;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	spin_unlock_irq(q->queue_lock);

	/*
	 * unregister old elevator data
	 */
	elv_unregister_queue(q);
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	if (elevator_attach(q, new_e, e))
		goto fail;

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	e = NULL;
fail:
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	kfree(e);
error:
	elevator_put(new_e);
	printk(KERN_ERR "elevator: switch to %s failed\n", new_e->elevator_name);
}

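/*
 * sysfs store hook: switch a live queue to a different scheduler, e.g.
 *
 *	echo deadline > /sys/block/<dev>/queue/scheduler
 */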
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	size_t len;
	struct elevator_type *e;

	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
	len = strlen(elevator_name);

	if (len && elevator_name[len - 1] == '\n')
		elevator_name[len - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	elevator_switch(q, e);
	return count;
}

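/*
 * sysfs show hook: list every registered scheduler, with the active
 * one in square brackets.
 */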
ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct list_head *entry;
	int len = 0;

	/*
	 * elv_list is protected by elv_list_lock, not the queue lock
	 */
	spin_lock_irq(&elv_list_lock);
	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name + len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name + len, "%s ", __e->elevator_name);
	}
	spin_unlock_irq(&elv_list_lock);

	len += sprintf(name + len, "\n");
	return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);
837