xref: /linux/block/elevator.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * same device and no special stuff set, merge is ok
	 */
	if (rq->rq_disk == bio->bi_bdev->bd_disk &&
	    !rq->waiting && !rq->special)
		return 1;

	return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

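/*
 * Work out whether bio can be merged into __rq, and at which end: a
 * back merge if bio starts right where __rq ends, a front merge if
 * bio ends right where __rq starts.
 */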
static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * merging is allowed, now check whether the bio is contiguous
	 * with the request at either end
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}

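/*
 * Look up a registered elevator type by name.  Returns NULL if no
 * elevator of that name is registered.  Callers must hold
 * elv_list_lock to keep the list stable.
 */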
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e = NULL;
	struct list_head *entry;

	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);

		if (!strcmp(__e->elevator_name, name)) {
			e = __e;
			break;
		}
	}

	return e;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

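/*
 * Like elevator_find(), but also takes a reference on the elevator
 * module so it cannot be unloaded while in use.  Drop the reference
 * with elevator_put() when done.
 */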
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock_irq(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock_irq(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q, eq);
}

static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

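/*
 * Allocate and initialize an elevator queue for type e.  On allocation
 * failure the module reference taken by elevator_get() is dropped;
 * otherwise the object carries it until elevator_release().
 */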
static elevator_t *elevator_alloc(struct elevator_type *e)
{
	elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
	if (eq) {
		memset(eq, 0, sizeof(*eq));
		eq->ops = &e->ops;
		eq->elevator_type = e;
		kobject_init(&eq->kobj);
		snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
		eq->kobj.ktype = &elv_ktype;
		mutex_init(&eq->sysfs_lock);
	} else {
		elevator_put(e);
	}
	return eq;
}

static void elevator_release(struct kobject *kobj)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	elevator_put(e->elevator_type);
	kfree(e);
}

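/*
 * Pick and initialize an elevator for q.  Selection order: the name
 * passed in by the driver, then the boot-time "elevator=" choice,
 * then CONFIG_DEFAULT_IOSCHED, and finally noop as a last resort.
 */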
int elevator_init(request_queue_t *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name && !(e = elevator_get(name)))
		return -EINVAL;

	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
		printk(KERN_ERR "I/O scheduler %s not found\n", chosen_elevator);

	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
		printk(KERN_ERR "Default I/O scheduler not found, using no-op\n");
		e = elevator_get("noop");
	}

	eq = elevator_alloc(e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}

void elevator_exit(elevator_t *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

/*
 * Insert rq into the dispatch queue of q, sorted by sector but never
 * across a barrier, a started request, or the scheduling boundary.
 * Queue lock must be held on entry.  To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;
	q->nr_sorted--;

	boundary = q->end_sector;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}

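/*
 * See whether bio can be merged with a pending request.  The cached
 * q->last_merge hint is tried first; failing that, the io scheduler
 * is asked.  On a hit, *req is set to the request to merge with.
 */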
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	int ret;

	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
			     struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);
	q->nr_sorted--;

	q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
			e->ops->elevator_deactivate_req_fn(q, rq);
	}

	rq->flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

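/*
 * Force the io scheduler to dispatch everything it holds onto the
 * dispatch queue.  Complain (a bounded number of times) if it still
 * claims to have sorted requests afterwards.
 */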
static void elv_drain_elevator(request_queue_t *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

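/*
 * Insert rq at the position given by where: front or back of the
 * dispatch queue, sorted into the io scheduler, or requeued in
 * ordered-sequence position.  Queue lock must be held.
 */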
void elv_insert(request_queue_t *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	blk_add_trace_rq(q, rq, BLK_TA_INSERT);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->flags |= REQ_SORTED;
		q->nr_sorted++;
		if (q->last_merge == NULL && rq_mergeable(rq))
			q->last_merge = rq;
		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If an ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->flags |= REQ_SOFTBARRIER;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		/*
		 * most requeues happen because of a busy condition, don't
		 * force unplug of the queue for that case.
		 */
		unplug_it = 0;
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

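/*
 * Add rq to the queue, fixing up the insertion point for barriers and
 * for requests without elevator private data, and plugging the queue
 * first if asked to.  Queue lock must be held; elv_add_request() is
 * the locking wrapper.
 */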
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->flags |= REQ_ORDERED_COLOR;

	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is a scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}

void elv_add_request(request_queue_t *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

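/*
 * Grab the first request off the dispatch queue, asking the io
 * scheduler to refill it whenever it runs empty.  Each candidate goes
 * through blk_do_ordered(), which may withhold it while an ordered
 * sequence is in flight.  Returns NULL when nothing is dispatchable.
 */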
static inline struct request *__elv_next_request(request_queue_t *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

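/*
 * Return the next request for the driver to process, marking it
 * started, notifying the io scheduler, and running the prepare
 * function if one is set.  BLKPREP_DEFER leaves the request at the
 * head of the queue and returns NULL; BLKPREP_KILL ends it with an
 * error.
 */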
struct request *elv_next_request(request_queue_t *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->flags & REQ_STARTED)) {
			elevator_t *e = q->elevator;

			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify the IO scheduler.
			 */
			if (blk_sorted_rq(rq) &&
			    e->ops->elevator_activate_req_fn)
				e->ops->elevator_activate_req_fn(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->flags |= REQ_STARTED;
			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request at the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			int nr_bytes = rq->hard_nr_sectors << 9;

			if (!nr_bytes)
				nr_bytes = rq->data_len;

			blkdev_dequeue_request(rq);
			rq->flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
								ret);
			break;
		}
	}

	return rq;
}

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress
	 * at the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}

int elv_queue_empty(request_queue_t *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		    gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw, bio);

	return ELV_MQUEUE_MAY;
}

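/*
 * Called when the driver has completed rq: update the in-flight
 * count, notify the io scheduler, and restart the queue if an
 * ordered flush sequence was waiting for the drain to finish.
 */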
void elv_completed_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for the flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);
		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->show)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->store)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;
	int error;

	e->kobj.parent = &q->kobj;

	error = kobject_add(&e->kobj);
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

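/*
 * Register a new elevator type, making it selectable via the
 * "elevator=" boot parameter and per-queue sysfs switching.
 * Registering two elevators under the same name is a bug.
 *
 * A scheduler typically calls this from its module init; a minimal
 * sketch (names made up for illustration):
 *
 *	static struct elevator_type iosched_foo = {
 *		.ops		= { ... },
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&iosched_foo);
 *	}
 */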
int elv_register(struct elevator_type *e)
{
	spin_lock_irq(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock_irq(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				printk(" (default)");
	printk("\n");
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate over every thread in the system and let the elevator
	 * trim its io context.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock_irq(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler (elevator_exit drops
	 * the last reference to its kobject) and reattach the old one
	 * again, along with re-adding the sysfs dir.
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 0;
}

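/*
 * sysfs handler for writing the per-queue scheduler attribute: parse
 * the elevator name (stripping a trailing newline) and switch the
 * queue over to it, unless it is the active one already.
 */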
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	size_t len;
	struct elevator_type *e;

	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
	len = strlen(elevator_name);

	if (len && elevator_name[len - 1] == '\n')
		elevator_name[len - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
		       elevator_name);
	return count;
}

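/*
 * sysfs handler for reading the per-queue scheduler attribute: list
 * all registered elevators, with the active one in square brackets.
 */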
ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct list_head *entry;
	int len = 0;

	spin_lock_irq(q->queue_lock);
	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock_irq(q->queue_lock);

	len += sprintf(name+len, "\n");
	return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);