xref: /linux/block/elevator.c (revision b4d8d1a93c6ea042b29bb66fbb1cf6bc556c18f7)
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which broke elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>

#include <asm/uaccess.h>

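/*
 * All registered I/O scheduler types live on elv_list, protected by
 * elv_list_lock.
 */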
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction, don't merge (rq_mergeable() above
	 * already rejected started requests)
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * same device and no special stuff set, merge is ok
	 */
	if (rq->rq_disk == bio->bi_bdev->bd_disk &&
	    !rq->waiting && !rq->special)
		return 1;

	return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

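/*
 * Classify a potential merge: a back merge appends the bio after __rq,
 * a front merge prepends it.  Only exact sector adjacency qualifies.
 */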
static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * merging is permitted, so check whether the bio sits directly
	 * before or after __rq
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}

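/*
 * Look up a registered elevator type by name.  The caller must hold
 * elv_list_lock; a hit does not take a module reference, see
 * elevator_get() for that.
 */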
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e = NULL;
	struct list_head *entry;

	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);

		if (!strcmp(__e->elevator_name, name)) {
			e = __e;
			break;
		}
	}

	return e;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock_irq(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock_irq(&elv_list_lock);

	return e;
}

static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
{
	int ret = 0;

	q->elevator = eq;

	if (eq->ops->elevator_init_fn)
		ret = eq->ops->elevator_init_fn(q, eq);

	return ret;
}

static char chosen_elevator[16];

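/*
 * Parse the "elevator=" boot option; e.g. booting with
 * "elevator=deadline" makes deadline the boot-time default.
 */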
static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	/* returning 1 tells the boot code the option was consumed */
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(struct elevator_type *e)
{
	elevator_t *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (eq) {
		eq->ops = &e->ops;
		eq->elevator_type = e;
		kobject_init(&eq->kobj);
		snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
		eq->kobj.ktype = &elv_ktype;
		mutex_init(&eq->sysfs_lock);
	} else {
		elevator_put(e);
	}
	return eq;
}

static void elevator_release(struct kobject *kobj)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	elevator_put(e->elevator_type);
	kfree(e);
}

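/*
 * Pick and attach an I/O scheduler for a new queue: an explicit name
 * wins, then the "elevator=" boot option, then CONFIG_DEFAULT_IOSCHED,
 * with noop as the last resort.
 */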
int elevator_init(request_queue_t *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name && !(e = elevator_get(name)))
		return -EINVAL;

	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
		printk(KERN_ERR "I/O scheduler %s not found\n", chosen_elevator);

	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
		printk(KERN_ERR "Default I/O scheduler not found, using noop\n");
		e = elevator_get("noop");
	}

	eq = elevator_alloc(e);
	if (!eq)
		return -ENOMEM;

	ret = elevator_attach(q, eq);
	if (ret)
		kobject_put(&eq->kobj);

	return ret;
}

void elevator_exit(elevator_t *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

/*
 * Insert rq into the dispatch queue of q.  The queue lock must be held
 * on entry.  rq is sort-inserted in ascending sector order, treating
 * q->end_sector as the wrap boundary and never passing a started or
 * barrier request.  To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;
	q->nr_sorted--;

	boundary = q->end_sector;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}

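/*
 * See if bio can be merged with a pending request.  q->last_merge
 * caches the most recent merge target so the common sequential case
 * skips the full elevator lookup; on a hit, *req is set and
 * ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE is returned.
 */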
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	int ret;

	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
			struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);
	q->nr_sorted--;

	q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
			e->ops->elevator_deactivate_req_fn(q, rq);
	}

	rq->flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

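/*
 * Force the elevator to dispatch every request it is still holding
 * back.  Used for back/barrier insertion and while switching
 * schedulers; complains (at most ten times) if the elevator claims to
 * have dispatched everything while nr_sorted says otherwise.
 */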
static void elv_drain_elevator(request_queue_t *q)
{
	static int printed;

	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

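/*
 * Core insertion: place rq according to 'where'.  The caller must hold
 * the queue lock; elv_add_request() below is the irq-safe wrapper that
 * ends up here.
 */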
void elv_insert(request_queue_t *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back-inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->flags |= REQ_SORTED;
		q->nr_sorted++;
		if (q->last_merge == NULL && rq_mergeable(rq))
			q->last_merge = rq;
		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->flags |= REQ_SOFTBARRIER;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
		BUG();
	}

	if (blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->flags |= REQ_ORDERED_COLOR;

	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is a scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}

void elv_add_request(request_queue_t *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

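/*
 * Peek at the next request the driver may look at.  blk_do_ordered()
 * may veto or substitute the request to enforce an ordered (barrier)
 * sequence; when the dispatch list runs dry, the elevator gets one
 * chance per iteration to refill it.
 */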
static inline struct request *__elv_next_request(request_queue_t *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

struct request *elv_next_request(request_queue_t *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->flags & REQ_STARTED)) {
			elevator_t *e = q->elevator;

			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq) &&
			    e->ops->elevator_activate_req_fn)
				e->ops->elevator_activate_req_fn(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->flags |= REQ_STARTED;
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			int nr_bytes = rq->hard_nr_sectors << 9;

			if (!nr_bytes)
				nr_bytes = rq->data_len;

			blkdev_dequeue_request(rq);
			rq->flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__, ret);
			break;
		}
	}

	return rq;
}

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io in progress at the
	 * driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}

int elv_queue_empty(request_queue_t *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		    gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw, bio);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);
		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
}

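/*
 * sysfs plumbing for /sys/block/<dev>/queue/iosched: each attribute is
 * an elv_fs_entry.  show/store run under e->sysfs_lock and return
 * -ENOENT once elevator_exit() has cleared e->ops, so a late sysfs
 * access during a scheduler switch fails cleanly.
 */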
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->show)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->store)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;
	int error;

	e->kobj.parent = &q->kobj;

	error = kobject_add(&e->kobj);
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		elevator_t *e = q->elevator;
		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
	}
}

int elv_register(struct elevator_type *e)
{
	spin_lock_irq(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock_irq(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
	if (!strcmp(e->elevator_name, chosen_elevator) ||
	    (!*chosen_elevator &&
	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		printk(" (default)");
	printk("\n");
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

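/*
 * Example (a minimal sketch, not code from this file): an I/O scheduler
 * module fills in an elevator_type and registers it from its module
 * init, in the style of noop-iosched.c.  The my_* names below are
 * placeholders for the scheduler's own hooks.
 *
 *	static struct elevator_type elevator_mine = {
 *		.ops = {
 *			.elevator_dispatch_fn	= my_dispatch,
 *			.elevator_add_req_fn	= my_add_request,
 *		},
 *		.elevator_name	= "mine",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return elv_register(&elevator_mine);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		elv_unregister(&elevator_mine);
 *	}
 */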
void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate over every thread in the system and trim this
	 * elevator's state from its io context.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock_irq(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to the new_e io scheduler.  Be careful not to introduce
 * deadlocks: don't free the old io scheduler before we have allocated
 * what we need for the new one.  This way we have a chance of going
 * back to the old one if the new one fails init for some reason.
 */
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(new_e);
	if (!e)
		return 0;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	spin_unlock_irq(q->queue_lock);

	/*
	 * unregister old elevator data
	 */
	elv_unregister_queue(q);
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	if (elevator_attach(q, e))
		goto fail;

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	e = NULL;
fail:
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	if (e)
		kobject_put(&e->kobj);
	return 0;
}

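/*
 * Runtime scheduler switch: writes to /sys/block/<dev>/queue/scheduler,
 * e.g. "echo deadline > /sys/block/hda/queue/scheduler", end up here.
 */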
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	size_t len;
	struct elevator_type *e;

	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
	len = strlen(elevator_name);

	if (len && elevator_name[len - 1] == '\n')
		elevator_name[len - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
	return count;
}

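/*
 * Back reads of /sys/block/<dev>/queue/scheduler: list the registered
 * schedulers with the active one in brackets, e.g. "noop [cfq]".
 */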
ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct list_head *entry;
	int len = 0;

	/* elv_list is protected by elv_list_lock, not the queue lock */
	spin_lock_irq(&elv_list_lock);
	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock_irq(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);