xref: /linux/block/blk-core.c (revision 7265706c8fd57722f622f336ec110cb35f83e739)
1 /*
2  * Copyright (C) 1991, 1992 Linus Torvalds
3  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
4  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
5  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7  *	- July 2000
8  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
9  */
10 
11 /*
12  * This handles all read/write requests to block devices
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/highmem.h>
20 #include <linux/mm.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/string.h>
23 #include <linux/init.h>
24 #include <linux/completion.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
28 #include <linux/task_io_accounting_ops.h>
29 #include <linux/interrupt.h>
30 #include <linux/cpu.h>
31 #include <linux/blktrace_api.h>
32 #include <linux/fault-inject.h>
33 
34 #include "blk.h"
35 
36 static int __make_request(struct request_queue *q, struct bio *bio);
37 
38 /*
39  * For the allocated request tables
40  */
41 static struct kmem_cache *request_cachep;
42 
43 /*
44  * For queue allocation
45  */
46 struct kmem_cache *blk_requestq_cachep;
47 
48 /*
49  * Controlling structure for kblockd
50  */
51 static struct workqueue_struct *kblockd_workqueue;
52 
53 static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
54 
55 static void drive_stat_acct(struct request *rq, int new_io)
56 {
57 	struct hd_struct *part;
58 	int rw = rq_data_dir(rq);
59 
60 	if (!blk_fs_request(rq) || !rq->rq_disk)
61 		return;
62 
63 	part = get_part(rq->rq_disk, rq->sector);
64 	if (!new_io)
65 		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
66 	else {
67 		disk_round_stats(rq->rq_disk);
68 		rq->rq_disk->in_flight++;
69 		if (part) {
70 			part_round_stats(part);
71 			part->in_flight++;
72 		}
73 	}
74 }
75 
76 void blk_queue_congestion_threshold(struct request_queue *q)
77 {
78 	int nr;
79 
80 	nr = q->nr_requests - (q->nr_requests / 8) + 1;
81 	if (nr > q->nr_requests)
82 		nr = q->nr_requests;
83 	q->nr_congestion_on = nr;
84 
85 	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
86 	if (nr < 1)
87 		nr = 1;
88 	q->nr_congestion_off = nr;
89 }
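
/*
 * Worked example of the thresholds above: with the default
 * q->nr_requests of 128 (BLKDEV_MAX_RQ), this yields
 * nr_congestion_on = 128 - 16 + 1 = 113 and
 * nr_congestion_off = 128 - 16 - 8 - 1 = 103, i.e. roughly ten
 * requests of hysteresis between marking the queue congested and
 * clearing it again.
 */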
90 
91 /**
92  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
93  * @bdev:	device
94  *
95  * Locates the passed device's request queue and returns the address of its
96  * backing_dev_info
97  *
98  * Will return NULL if the request queue cannot be located.
99  */
100 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
101 {
102 	struct backing_dev_info *ret = NULL;
103 	struct request_queue *q = bdev_get_queue(bdev);
104 
105 	if (q)
106 		ret = &q->backing_dev_info;
107 	return ret;
108 }
109 EXPORT_SYMBOL(blk_get_backing_dev_info);
110 
111 void blk_rq_init(struct request_queue *q, struct request *rq)
112 {
113 	memset(rq, 0, sizeof(*rq));
114 
115 	INIT_LIST_HEAD(&rq->queuelist);
116 	INIT_LIST_HEAD(&rq->donelist);
117 	rq->q = q;
118 	rq->sector = rq->hard_sector = (sector_t) -1;
119 	INIT_HLIST_NODE(&rq->hash);
120 	RB_CLEAR_NODE(&rq->rb_node);
121 	rq->cmd = rq->__cmd;
122 	rq->tag = -1;
123 	rq->ref_count = 1;
124 }
125 EXPORT_SYMBOL(blk_rq_init);
126 
127 static void req_bio_endio(struct request *rq, struct bio *bio,
128 			  unsigned int nbytes, int error)
129 {
130 	struct request_queue *q = rq->q;
131 
132 	if (&q->bar_rq != rq) {
133 		if (error)
134 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
135 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
136 			error = -EIO;
137 
138 		if (unlikely(nbytes > bio->bi_size)) {
139 			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
140 			       __func__, nbytes, bio->bi_size);
141 			nbytes = bio->bi_size;
142 		}
143 
144 		bio->bi_size -= nbytes;
145 		bio->bi_sector += (nbytes >> 9);
146 
147 		if (bio_integrity(bio))
148 			bio_integrity_advance(bio, nbytes);
149 
150 		if (bio->bi_size == 0)
151 			bio_endio(bio, error);
152 	} else {
153 
154 		/*
155 		 * Okay, this is the barrier request in progress, just
156 		 * record the error.
157 		 */
158 		if (error && !q->orderr)
159 			q->orderr = error;
160 	}
161 }
162 
163 void blk_dump_rq_flags(struct request *rq, char *msg)
164 {
165 	int bit;
166 
167 	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
168 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
169 		rq->cmd_flags);
170 
171 	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
172 						(unsigned long long)rq->sector,
173 						rq->nr_sectors,
174 						rq->current_nr_sectors);
175 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
176 						rq->bio, rq->biotail,
177 						rq->buffer, rq->data,
178 						rq->data_len);
179 
180 	if (blk_pc_request(rq)) {
181 		printk(KERN_INFO "  cdb: ");
182 		for (bit = 0; bit < BLK_MAX_CDB; bit++)
183 			printk("%02x ", rq->cmd[bit]);
184 		printk("\n");
185 	}
186 }
187 EXPORT_SYMBOL(blk_dump_rq_flags);
188 
189 /*
190  * "plug" the device if there are no outstanding requests: this will
191  * force the transfer to start only after we have put all the requests
192  * on the list.
193  *
194  * This is called with interrupts off and no requests on the queue and
195  * with the queue lock held.
196  */
197 void blk_plug_device(struct request_queue *q)
198 {
199 	WARN_ON(!irqs_disabled());
200 
201 	/*
202 	 * don't plug a stopped queue, it must be paired with blk_start_queue()
203 	 * which will restart the queueing
204 	 */
205 	if (blk_queue_stopped(q))
206 		return;
207 
208 	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
209 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
210 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
211 	}
212 }
213 EXPORT_SYMBOL(blk_plug_device);
214 
215 /**
216  * blk_plug_device_unlocked - plug a device without queue lock held
217  * @q:    The &struct request_queue to plug
218  *
219  * Description:
220  *   Like blk_plug_device(), but grabs the queue lock and disables
221  *   interrupts.
222  **/
223 void blk_plug_device_unlocked(struct request_queue *q)
224 {
225 	unsigned long flags;
226 
227 	spin_lock_irqsave(q->queue_lock, flags);
228 	blk_plug_device(q);
229 	spin_unlock_irqrestore(q->queue_lock, flags);
230 }
231 EXPORT_SYMBOL(blk_plug_device_unlocked);
232 
233 /*
234  * remove the queue from the plugged list, if present. called with
235  * queue lock held and interrupts disabled.
236  */
237 int blk_remove_plug(struct request_queue *q)
238 {
239 	WARN_ON(!irqs_disabled());
240 
241 	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
242 		return 0;
243 
244 	del_timer(&q->unplug_timer);
245 	return 1;
246 }
247 EXPORT_SYMBOL(blk_remove_plug);
248 
249 /*
250  * remove the plug and let it rip..
251  */
252 void __generic_unplug_device(struct request_queue *q)
253 {
254 	if (unlikely(blk_queue_stopped(q)))
255 		return;
256 
257 	if (!blk_remove_plug(q))
258 		return;
259 
260 	q->request_fn(q);
261 }
262 EXPORT_SYMBOL(__generic_unplug_device);
263 
264 /**
265  * generic_unplug_device - fire a request queue
266  * @q:    The &struct request_queue in question
267  *
268  * Description:
269  *   Linux uses plugging to build bigger requests queues before letting
270  *   the device have at them. If a queue is plugged, the I/O scheduler
271  *   is still adding and merging requests on the queue. Once the queue
272  *   gets unplugged, the request_fn defined for the queue is invoked and
273  *   transfers started.
274  **/
275 void generic_unplug_device(struct request_queue *q)
276 {
277 	if (blk_queue_plugged(q)) {
278 		spin_lock_irq(q->queue_lock);
279 		__generic_unplug_device(q);
280 		spin_unlock_irq(q->queue_lock);
281 	}
282 }
283 EXPORT_SYMBOL(generic_unplug_device);
284 
285 static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
286 				   struct page *page)
287 {
288 	struct request_queue *q = bdi->unplug_io_data;
289 
290 	blk_unplug(q);
291 }
292 
293 void blk_unplug_work(struct work_struct *work)
294 {
295 	struct request_queue *q =
296 		container_of(work, struct request_queue, unplug_work);
297 
298 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
299 				q->rq.count[READ] + q->rq.count[WRITE]);
300 
301 	q->unplug_fn(q);
302 }
303 
304 void blk_unplug_timeout(unsigned long data)
305 {
306 	struct request_queue *q = (struct request_queue *)data;
307 
308 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
309 				q->rq.count[READ] + q->rq.count[WRITE]);
310 
311 	kblockd_schedule_work(&q->unplug_work);
312 }
313 
314 void blk_unplug(struct request_queue *q)
315 {
316 	/*
317 	 * devices don't necessarily have an ->unplug_fn defined
318 	 */
319 	if (q->unplug_fn) {
320 		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
321 					q->rq.count[READ] + q->rq.count[WRITE]);
322 
323 		q->unplug_fn(q);
324 	}
325 }
326 EXPORT_SYMBOL(blk_unplug);
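
/*
 * Putting the pieces above together: __make_request() plugs an empty
 * queue with blk_plug_device(), which arms q->unplug_timer.  When the
 * timer fires, blk_unplug_timeout() punts to kblockd, whose
 * blk_unplug_work() invokes q->unplug_fn - normally
 * generic_unplug_device(), as set up in blk_init_queue_node() below -
 * which in turn calls blk_remove_plug() and then q->request_fn() via
 * __generic_unplug_device().
 */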
327 
328 /**
329  * blk_start_queue - restart a previously stopped queue
330  * @q:    The &struct request_queue in question
331  *
332  * Description:
333  *   blk_start_queue() will clear the stop flag on the queue, and call
334  *   the request_fn for the queue if it was in a stopped state when
335  *   entered. Also see blk_stop_queue(). Queue lock must be held.
336  **/
337 void blk_start_queue(struct request_queue *q)
338 {
339 	WARN_ON(!irqs_disabled());
340 
341 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
342 
343 	/*
344 	 * one level of recursion is ok and is much faster than kicking
345 	 * the unplug handling
346 	 */
347 	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
348 		q->request_fn(q);
349 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
350 	} else {
351 		blk_plug_device(q);
352 		kblockd_schedule_work(&q->unplug_work);
353 	}
354 }
355 EXPORT_SYMBOL(blk_start_queue);
356 
357 /**
358  * blk_stop_queue - stop a queue
359  * @q:    The &struct request_queue in question
360  *
361  * Description:
362  *   The Linux block layer assumes that a block driver will consume all
363  *   entries on the request queue when the request_fn strategy is called.
364  *   Often this will not happen, because of hardware limitations (queue
365  *   depth settings). If a device driver gets a 'queue full' response,
366  *   or if it simply chooses not to queue more I/O at one point, it can
367  *   call this function to prevent the request_fn from being called until
368  *   the driver has signalled it's ready to go again. This happens by calling
369  *   blk_start_queue() to restart queue operations. Queue lock must be held.
370  **/
371 void blk_stop_queue(struct request_queue *q)
372 {
373 	blk_remove_plug(q);
374 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
375 }
376 EXPORT_SYMBOL(blk_stop_queue);
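
/*
 * A minimal sketch of the stop/start pattern described above, for a
 * hypothetical driver (my_request_fn() and my_hw_queue_full() are
 * illustrative, not real kernel interfaces).  Both calls happen with
 * q->queue_lock held:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		while (!my_hw_queue_full()) {
 *			struct request *rq = elv_next_request(q);
 *			if (!rq)
 *				return;
 *			... hand rq to the hardware ...
 *		}
 *		blk_stop_queue(q);	// hold off further request_fn calls
 *	}
 *
 * and later, from the completion path (queue lock held):
 *
 *	blk_start_queue(q);
 */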
377 
378 /**
379  * blk_sync_queue - cancel any pending callbacks on a queue
380  * @q: the queue
381  *
382  * Description:
383  *     The block layer may perform asynchronous callback activity
384  *     on a queue, such as calling the unplug function after a timeout.
385  *     A block device may call blk_sync_queue to ensure that any
386  *     such activity is cancelled, thus allowing it to release resources
387  *     that the callbacks might use. The caller must already have made sure
388  *     that its ->make_request_fn will not re-add plugging prior to calling
389  *     this function.
390  *
391  */
392 void blk_sync_queue(struct request_queue *q)
393 {
394 	del_timer_sync(&q->unplug_timer);
395 	kblockd_flush_work(&q->unplug_work);
396 }
397 EXPORT_SYMBOL(blk_sync_queue);
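
/*
 * Sketch of the teardown ordering this implies: a driver quiesces the
 * queue before releasing anything its callbacks may touch, e.g.
 *
 *	blk_sync_queue(q);	// unplug timer and kblockd work are gone
 *	... free driver data used by request_fn/unplug_fn ...
 *	blk_cleanup_queue(q);
 */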
398 
399 /**
400  * __blk_run_queue - run a single device queue
401  * @q:	The queue to run
402  */
403 void __blk_run_queue(struct request_queue *q)
404 {
405 	blk_remove_plug(q);
406 
407 	/*
408 	 * Only recurse once to avoid overrunning the stack, let the unplug
409 	 * handling reinvoke the handler shortly if we already got there.
410 	 */
411 	if (!elv_queue_empty(q)) {
412 		if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
413 			q->request_fn(q);
414 			queue_flag_clear(QUEUE_FLAG_REENTER, q);
415 		} else {
416 			blk_plug_device(q);
417 			kblockd_schedule_work(&q->unplug_work);
418 		}
419 	}
420 }
421 EXPORT_SYMBOL(__blk_run_queue);
422 
423 /**
424  * blk_run_queue - run a single device queue
425  * @q: The queue to run
426  */
427 void blk_run_queue(struct request_queue *q)
428 {
429 	unsigned long flags;
430 
431 	spin_lock_irqsave(q->queue_lock, flags);
432 	__blk_run_queue(q);
433 	spin_unlock_irqrestore(q->queue_lock, flags);
434 }
435 EXPORT_SYMBOL(blk_run_queue);
436 
437 void blk_put_queue(struct request_queue *q)
438 {
439 	kobject_put(&q->kobj);
440 }
441 
442 void blk_cleanup_queue(struct request_queue *q)
443 {
444 	mutex_lock(&q->sysfs_lock);
445 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
446 	mutex_unlock(&q->sysfs_lock);
447 
448 	if (q->elevator)
449 		elevator_exit(q->elevator);
450 
451 	blk_put_queue(q);
452 }
453 EXPORT_SYMBOL(blk_cleanup_queue);
454 
455 static int blk_init_free_list(struct request_queue *q)
456 {
457 	struct request_list *rl = &q->rq;
458 
459 	rl->count[READ] = rl->count[WRITE] = 0;
460 	rl->starved[READ] = rl->starved[WRITE] = 0;
461 	rl->elvpriv = 0;
462 	init_waitqueue_head(&rl->wait[READ]);
463 	init_waitqueue_head(&rl->wait[WRITE]);
464 
465 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
466 				mempool_free_slab, request_cachep, q->node);
467 
468 	if (!rl->rq_pool)
469 		return -ENOMEM;
470 
471 	return 0;
472 }
473 
474 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
475 {
476 	return blk_alloc_queue_node(gfp_mask, -1);
477 }
478 EXPORT_SYMBOL(blk_alloc_queue);
479 
480 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
481 {
482 	struct request_queue *q;
483 	int err;
484 
485 	q = kmem_cache_alloc_node(blk_requestq_cachep,
486 				gfp_mask | __GFP_ZERO, node_id);
487 	if (!q)
488 		return NULL;
489 
490 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
491 	q->backing_dev_info.unplug_io_data = q;
492 	err = bdi_init(&q->backing_dev_info);
493 	if (err) {
494 		kmem_cache_free(blk_requestq_cachep, q);
495 		return NULL;
496 	}
497 
498 	init_timer(&q->unplug_timer);
499 
500 	kobject_init(&q->kobj, &blk_queue_ktype);
501 
502 	mutex_init(&q->sysfs_lock);
503 	spin_lock_init(&q->__queue_lock);
504 
505 	return q;
506 }
507 EXPORT_SYMBOL(blk_alloc_queue_node);
508 
509 /**
510  * blk_init_queue  - prepare a request queue for use with a block device
511  * @rfn:  The function to be called to process requests that have been
512  *        placed on the queue.
513  * @lock: Request queue spin lock
514  *
515  * Description:
516  *    If a block device wishes to use the standard request handling procedures,
517  *    which sorts requests and coalesces adjacent requests, then it must
518  *    call blk_init_queue().  The function @rfn will be called when there
519  *    are requests on the queue that need to be processed.  If the device
520  *    supports plugging, then @rfn may not be called immediately when requests
521  *    are available on the queue, but may be called at some time later instead.
522  *    Plugged queues are generally unplugged when a buffer belonging to one
523  *    of the requests on the queue is needed, or due to memory pressure.
524  *
525  *    @rfn is not required, or even expected, to remove all requests off the
526  *    queue, but only as many as it can handle at a time.  If it does leave
527  *    requests on the queue, it is responsible for arranging that the requests
528  *    get dealt with eventually.
529  *
530  *    The queue spin lock must be held while manipulating the requests on the
531  *    request queue; this lock will be taken also from interrupt context, so irq
532  *    disabling is needed for it.
533  *
534  *    Function returns a pointer to the initialized request queue, or NULL if
535  *    it didn't succeed.
536  *
537  * Note:
538  *    blk_init_queue() must be paired with a blk_cleanup_queue() call
539  *    when the block device is deactivated (such as at module unload).
540  **/
541 
542 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
543 {
544 	return blk_init_queue_node(rfn, lock, -1);
545 }
546 EXPORT_SYMBOL(blk_init_queue);
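
/*
 * A minimal sketch of queue setup in a hypothetical driver (the my_*
 * names are illustrative), paired with blk_cleanup_queue() at unload
 * as the note above requires:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	my_disk->queue = q;
 *	add_disk(my_disk);
 *	...
 *	blk_cleanup_queue(q);		// at module unload
 */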
547 
548 struct request_queue *
549 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
550 {
551 	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
552 
553 	if (!q)
554 		return NULL;
555 
556 	q->node = node_id;
557 	if (blk_init_free_list(q)) {
558 		kmem_cache_free(blk_requestq_cachep, q);
559 		return NULL;
560 	}
561 
562 	/*
563 	 * if caller didn't supply a lock, they get per-queue locking with
564 	 * our embedded lock
565 	 */
566 	if (!lock)
567 		lock = &q->__queue_lock;
568 
569 	q->request_fn		= rfn;
570 	q->prep_rq_fn		= NULL;
571 	q->unplug_fn		= generic_unplug_device;
572 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
573 	q->queue_lock		= lock;
574 
575 	blk_queue_segment_boundary(q, 0xffffffff);
576 
577 	blk_queue_make_request(q, __make_request);
578 	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
579 
580 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
581 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
582 
583 	q->sg_reserved_size = INT_MAX;
584 
585 	/*
586 	 * all done
587 	 */
588 	if (!elevator_init(q, NULL)) {
589 		blk_queue_congestion_threshold(q);
590 		return q;
591 	}
592 
593 	blk_put_queue(q);
594 	return NULL;
595 }
596 EXPORT_SYMBOL(blk_init_queue_node);
597 
598 int blk_get_queue(struct request_queue *q)
599 {
600 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
601 		kobject_get(&q->kobj);
602 		return 0;
603 	}
604 
605 	return 1;
606 }
607 
608 static inline void blk_free_request(struct request_queue *q, struct request *rq)
609 {
610 	if (rq->cmd_flags & REQ_ELVPRIV)
611 		elv_put_request(q, rq);
612 	mempool_free(rq, q->rq.rq_pool);
613 }
614 
615 static struct request *
616 blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
617 {
618 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
619 
620 	if (!rq)
621 		return NULL;
622 
623 	blk_rq_init(q, rq);
624 
625 	/*
626 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
627 	 * see bio.h and blkdev.h
628 	 */
629 	rq->cmd_flags = rw | REQ_ALLOCED;
630 
631 	if (priv) {
632 		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
633 			mempool_free(rq, q->rq.rq_pool);
634 			return NULL;
635 		}
636 		rq->cmd_flags |= REQ_ELVPRIV;
637 	}
638 
639 	return rq;
640 }
641 
642 /*
643  * ioc_batching returns true if the ioc is a valid batching context and
644  * should be given priority access to a request.
645  */
646 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
647 {
648 	if (!ioc)
649 		return 0;
650 
651 	/*
652 	 * Make sure the process is able to allocate at least 1 request
653 	 * even if the batch times out, otherwise we could theoretically
654 	 * lose wakeups.
655 	 */
656 	return ioc->nr_batch_requests == q->nr_batching ||
657 		(ioc->nr_batch_requests > 0
658 		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
659 }
660 
661 /*
662  * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
663  * will cause the process to be a "batcher" on all queues in the system. This
664  * is the behaviour we want though - once it gets a wakeup it should be given
665  * a nice run.
666  */
667 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
668 {
669 	if (!ioc || ioc_batching(q, ioc))
670 		return;
671 
672 	ioc->nr_batch_requests = q->nr_batching;
673 	ioc->last_waited = jiffies;
674 }
675 
676 static void __freed_request(struct request_queue *q, int rw)
677 {
678 	struct request_list *rl = &q->rq;
679 
680 	if (rl->count[rw] < queue_congestion_off_threshold(q))
681 		blk_clear_queue_congested(q, rw);
682 
683 	if (rl->count[rw] + 1 <= q->nr_requests) {
684 		if (waitqueue_active(&rl->wait[rw]))
685 			wake_up(&rl->wait[rw]);
686 
687 		blk_clear_queue_full(q, rw);
688 	}
689 }
690 
691 /*
692  * A request has just been released.  Account for it, update the full and
693  * congestion status, wake up any waiters.   Called under q->queue_lock.
694  */
695 static void freed_request(struct request_queue *q, int rw, int priv)
696 {
697 	struct request_list *rl = &q->rq;
698 
699 	rl->count[rw]--;
700 	if (priv)
701 		rl->elvpriv--;
702 
703 	__freed_request(q, rw);
704 
705 	if (unlikely(rl->starved[rw ^ 1]))
706 		__freed_request(q, rw ^ 1);
707 }
708 
709 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
710 /*
711  * Get a free request, queue_lock must be held.
712  * Returns NULL on failure, with queue_lock held.
713  * Returns !NULL on success, with queue_lock *not held*.
714  */
715 static struct request *get_request(struct request_queue *q, int rw_flags,
716 				   struct bio *bio, gfp_t gfp_mask)
717 {
718 	struct request *rq = NULL;
719 	struct request_list *rl = &q->rq;
720 	struct io_context *ioc = NULL;
721 	const int rw = rw_flags & 0x01;
722 	int may_queue, priv;
723 
724 	may_queue = elv_may_queue(q, rw_flags);
725 	if (may_queue == ELV_MQUEUE_NO)
726 		goto rq_starved;
727 
728 	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
729 		if (rl->count[rw]+1 >= q->nr_requests) {
730 			ioc = current_io_context(GFP_ATOMIC, q->node);
731 			/*
732 			 * The queue will fill after this allocation, so set
733 			 * it as full, and mark this process as "batching".
734 			 * This process will be allowed to complete a batch of
735 			 * requests, others will be blocked.
736 			 */
737 			if (!blk_queue_full(q, rw)) {
738 				ioc_set_batching(q, ioc);
739 				blk_set_queue_full(q, rw);
740 			} else {
741 				if (may_queue != ELV_MQUEUE_MUST
742 						&& !ioc_batching(q, ioc)) {
743 					/*
744 					 * The queue is full and the allocating
745 					 * process is not a "batcher", and not
746 					 * exempted by the IO scheduler
747 					 */
748 					goto out;
749 				}
750 			}
751 		}
752 		blk_set_queue_congested(q, rw);
753 	}
754 
755 	/*
756 	 * Only allow batching queuers to allocate up to 50% over the defined
757 	 * limit of requests, otherwise we could have thousands of requests
758 	 * allocated with any setting of ->nr_requests
759 	 */
760 	if (rl->count[rw] >= (3 * q->nr_requests / 2))
761 		goto out;
762 
763 	rl->count[rw]++;
764 	rl->starved[rw] = 0;
765 
766 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
767 	if (priv)
768 		rl->elvpriv++;
769 
770 	spin_unlock_irq(q->queue_lock);
771 
772 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
773 	if (unlikely(!rq)) {
774 		/*
775 		 * Allocation failed presumably due to memory. Undo anything
776 		 * we might have messed up.
777 		 *
778 		 * Allocating task should really be put onto the front of the
779 		 * wait queue, but this is pretty rare.
780 		 */
781 		spin_lock_irq(q->queue_lock);
782 		freed_request(q, rw, priv);
783 
784 		/*
785 		 * in the very unlikely event that allocation failed and no
786 		 * requests for this direction were pending, mark us starved
787 		 * so that freeing of a request in the other direction will
788 		 * notice us. another possible fix would be to split the
789 		 * rq mempool into READ and WRITE
790 		 */
791 rq_starved:
792 		if (unlikely(rl->count[rw] == 0))
793 			rl->starved[rw] = 1;
794 
795 		goto out;
796 	}
797 
798 	/*
799 	 * ioc may be NULL here, and ioc_batching will be false. That's
800 	 * OK, if the queue is under the request limit then requests need
801 	 * not count toward the nr_batch_requests limit. There will always
802 	 * be some limit enforced by BLK_BATCH_TIME.
803 	 */
804 	if (ioc_batching(q, ioc))
805 		ioc->nr_batch_requests--;
806 
807 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
808 out:
809 	return rq;
810 }
811 
812 /*
813  * No available requests for this queue, unplug the device and wait for some
814  * requests to become available.
815  *
816  * Called with q->queue_lock held, and returns with it unlocked.
817  */
818 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
819 					struct bio *bio)
820 {
821 	const int rw = rw_flags & 0x01;
822 	struct request *rq;
823 
824 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
825 	while (!rq) {
826 		DEFINE_WAIT(wait);
827 		struct io_context *ioc;
828 		struct request_list *rl = &q->rq;
829 
830 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
831 				TASK_UNINTERRUPTIBLE);
832 
833 		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
834 
835 		__generic_unplug_device(q);
836 		spin_unlock_irq(q->queue_lock);
837 		io_schedule();
838 
839 		/*
840 		 * After sleeping, we become a "batching" process and
841 		 * will be able to allocate at least one request, and
842 		 * up to a big batch of them for a small period of time.
843 		 * See ioc_batching, ioc_set_batching
844 		 */
845 		ioc = current_io_context(GFP_NOIO, q->node);
846 		ioc_set_batching(q, ioc);
847 
848 		spin_lock_irq(q->queue_lock);
849 		finish_wait(&rl->wait[rw], &wait);
850 
851 		rq = get_request(q, rw_flags, bio, GFP_NOIO);
852 	}
853 
854 	return rq;
855 }
856 
857 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
858 {
859 	struct request *rq;
860 
861 	BUG_ON(rw != READ && rw != WRITE);
862 
863 	spin_lock_irq(q->queue_lock);
864 	if (gfp_mask & __GFP_WAIT) {
865 		rq = get_request_wait(q, rw, NULL);
866 	} else {
867 		rq = get_request(q, rw, NULL, gfp_mask);
868 		if (!rq)
869 			spin_unlock_irq(q->queue_lock);
870 	}
871 	/* q->queue_lock is unlocked at this point */
872 
873 	return rq;
874 }
875 EXPORT_SYMBOL(blk_get_request);
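
/*
 * Sketch: allocating and releasing a request directly.  With __GFP_WAIT
 * in the mask (e.g. GFP_KERNEL) the call blocks in get_request_wait()
 * above and cannot return NULL; with GFP_ATOMIC it may:
 *
 *	rq = blk_get_request(q, WRITE, GFP_ATOMIC);
 *	if (!rq)
 *		return -ENOMEM;
 *	... fill in and issue rq ...
 *	blk_put_request(rq);
 */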
876 
877 /**
878  * blk_start_queueing - initiate dispatch of requests to device
879  * @q:		request queue to kick into gear
880  *
881  * This is basically a helper to remove the need to know whether a queue
882  * is plugged or not if someone just wants to initiate dispatch of requests
883  * for this queue.
884  *
885  * The queue lock must be held with interrupts disabled.
886  */
887 void blk_start_queueing(struct request_queue *q)
888 {
889 	if (!blk_queue_plugged(q))
890 		q->request_fn(q);
891 	else
892 		__generic_unplug_device(q);
893 }
894 EXPORT_SYMBOL(blk_start_queueing);
895 
896 /**
897  * blk_requeue_request - put a request back on queue
898  * @q:		request queue where request should be inserted
899  * @rq:		request to be inserted
900  *
901  * Description:
902  *    Drivers often keep queueing requests until the hardware cannot accept
903  *    more, when that condition happens we need to put the request back
904  *    on the queue. Must be called with queue lock held.
905  */
906 void blk_requeue_request(struct request_queue *q, struct request *rq)
907 {
908 	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
909 
910 	if (blk_rq_tagged(rq))
911 		blk_queue_end_tag(q, rq);
912 
913 	elv_requeue_request(q, rq);
914 }
915 EXPORT_SYMBOL(blk_requeue_request);
916 
917 /**
918  * blk_insert_request - insert a special request in to a request queue
919  * @q:		request queue where request should be inserted
920  * @rq:		request to be inserted
921  * @at_head:	insert request at head or tail of queue
922  * @data:	private data
923  *
924  * Description:
925  *    Many block devices need to execute commands asynchronously, so they don't
926  *    block the whole kernel from preemption during request execution.  This is
927  *    accomplished normally by inserting artificial requests tagged as
928  *    REQ_SPECIAL in to the corresponding request queue, and letting them be
929  *    scheduled for actual execution by the request queue.
930  *
931  *    We have the option of inserting the head or the tail of the queue.
932  *    Typically we use the tail for new ioctls and so forth.  We use the head
933  *    of the queue for things like a QUEUE_FULL message from a device, or a
934  *    host that is unable to accept a particular command.
935  */
936 void blk_insert_request(struct request_queue *q, struct request *rq,
937 			int at_head, void *data)
938 {
939 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
940 	unsigned long flags;
941 
942 	/*
943 	 * tell I/O scheduler that this isn't a regular read/write (ie it
944 	 * must not attempt merges on this) and that it acts as a soft
945 	 * barrier
946 	 */
947 	rq->cmd_type = REQ_TYPE_SPECIAL;
948 	rq->cmd_flags |= REQ_SOFTBARRIER;
949 
950 	rq->special = data;
951 
952 	spin_lock_irqsave(q->queue_lock, flags);
953 
954 	/*
955 	 * If command is tagged, release the tag
956 	 */
957 	if (blk_rq_tagged(rq))
958 		blk_queue_end_tag(q, rq);
959 
960 	drive_stat_acct(rq, 1);
961 	__elv_add_request(q, rq, where, 0);
962 	blk_start_queueing(q);
963 	spin_unlock_irqrestore(q->queue_lock, flags);
964 }
965 EXPORT_SYMBOL(blk_insert_request);
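
/*
 * Sketch: queueing a driver-private command at the head of the queue,
 * as described above (my_cmd is an illustrative driver cookie; note
 * that blk_insert_request() itself marks the request REQ_TYPE_SPECIAL):
 *
 *	rq = blk_get_request(q, WRITE, __GFP_WAIT);
 *	blk_insert_request(q, rq, 1, my_cmd);
 */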
966 
967 /*
968  * add-request adds a request to the linked list.
969  * queue lock is held and interrupts disabled, as we muck with the
970  * request queue list.
971  */
972 static inline void add_request(struct request_queue *q, struct request *req)
973 {
974 	drive_stat_acct(req, 1);
975 
976 	/*
977 	 * elevator indicated where it wants this request to be
978 	 * inserted at elevator_merge time
979 	 */
980 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
981 }
982 
983 /*
984  * disk_round_stats()	- Round off the performance stats on a struct
985  * disk_stats.
986  *
987  * The average IO queue length and utilisation statistics are maintained
988  * by observing the current state of the queue length and the amount of
989  * time it has been in this state.
990  *
991  * Normally, that accounting is done on IO completion, but that can result
992  * in more than a second's worth of IO being accounted for within any one
993  * second, leading to >100% utilisation.  To deal with that, we call this
994  * function to do a round-off before returning the results when reading
995  * /proc/diskstats.  This accounts immediately for all queue usage up to
996  * the current jiffies and restarts the counters again.
997  */
998 void disk_round_stats(struct gendisk *disk)
999 {
1000 	unsigned long now = jiffies;
1001 
1002 	if (now == disk->stamp)
1003 		return;
1004 
1005 	if (disk->in_flight) {
1006 		__disk_stat_add(disk, time_in_queue,
1007 				disk->in_flight * (now - disk->stamp));
1008 		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
1009 	}
1010 	disk->stamp = now;
1011 }
1012 EXPORT_SYMBOL_GPL(disk_round_stats);
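
/*
 * Worked example of the accounting above: if two requests have been in
 * flight for the 10 jiffies since disk->stamp, this adds 2 * 10 = 20 to
 * time_in_queue (request-jiffies) and 10 to io_ticks (jiffies the disk
 * was busy), then restarts the interval at the current jiffies value.
 */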
1013 
1014 void part_round_stats(struct hd_struct *part)
1015 {
1016 	unsigned long now = jiffies;
1017 
1018 	if (now == part->stamp)
1019 		return;
1020 
1021 	if (part->in_flight) {
1022 		__part_stat_add(part, time_in_queue,
1023 				part->in_flight * (now - part->stamp));
1024 		__part_stat_add(part, io_ticks, (now - part->stamp));
1025 	}
1026 	part->stamp = now;
1027 }
1028 
1029 /*
1030  * queue lock must be held
1031  */
1032 void __blk_put_request(struct request_queue *q, struct request *req)
1033 {
1034 	if (unlikely(!q))
1035 		return;
1036 	if (unlikely(--req->ref_count))
1037 		return;
1038 
1039 	elv_completed_request(q, req);
1040 
1041 	/*
1042 	 * Request may not have originated from ll_rw_blk. If not,
1043 	 * it didn't come out of our reserved rq pools
1044 	 */
1045 	if (req->cmd_flags & REQ_ALLOCED) {
1046 		int rw = rq_data_dir(req);
1047 		int priv = req->cmd_flags & REQ_ELVPRIV;
1048 
1049 		BUG_ON(!list_empty(&req->queuelist));
1050 		BUG_ON(!hlist_unhashed(&req->hash));
1051 
1052 		blk_free_request(q, req);
1053 		freed_request(q, rw, priv);
1054 	}
1055 }
1056 EXPORT_SYMBOL_GPL(__blk_put_request);
1057 
1058 void blk_put_request(struct request *req)
1059 {
1060 	unsigned long flags;
1061 	struct request_queue *q = req->q;
1062 
1063 	spin_lock_irqsave(q->queue_lock, flags);
1064 	__blk_put_request(q, req);
1065 	spin_unlock_irqrestore(q->queue_lock, flags);
1066 }
1067 EXPORT_SYMBOL(blk_put_request);
1068 
1069 void init_request_from_bio(struct request *req, struct bio *bio)
1070 {
1071 	req->cmd_type = REQ_TYPE_FS;
1072 
1073 	/*
1074 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
1075 	 */
1076 	if (bio_rw_ahead(bio) || bio_failfast(bio))
1077 		req->cmd_flags |= REQ_FAILFAST;
1078 
1079 	/*
1080 	 * REQ_BARRIER implies no merging, but lets make it explicit
1081 	 */
1082 	if (unlikely(bio_barrier(bio)))
1083 		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
1084 
1085 	if (bio_sync(bio))
1086 		req->cmd_flags |= REQ_RW_SYNC;
1087 	if (bio_rw_meta(bio))
1088 		req->cmd_flags |= REQ_RW_META;
1089 
1090 	req->errors = 0;
1091 	req->hard_sector = req->sector = bio->bi_sector;
1092 	req->ioprio = bio_prio(bio);
1093 	req->start_time = jiffies;
1094 	blk_rq_bio_prep(req->q, req, bio);
1095 }
1096 
1097 static int __make_request(struct request_queue *q, struct bio *bio)
1098 {
1099 	struct request *req;
1100 	int el_ret, nr_sectors, barrier, err;
1101 	const unsigned short prio = bio_prio(bio);
1102 	const int sync = bio_sync(bio);
1103 	int rw_flags;
1104 
1105 	nr_sectors = bio_sectors(bio);
1106 
1107 	/*
1108 	 * low level driver can indicate that it wants pages above a
1109 	 * certain limit bounced to low memory (ie for highmem, or even
1110 	 * ISA dma in theory)
1111 	 */
1112 	blk_queue_bounce(q, &bio);
1113 
1114 	barrier = bio_barrier(bio);
1115 	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
1116 		err = -EOPNOTSUPP;
1117 		goto end_io;
1118 	}
1119 
1120 	spin_lock_irq(q->queue_lock);
1121 
1122 	if (unlikely(barrier) || elv_queue_empty(q))
1123 		goto get_rq;
1124 
1125 	el_ret = elv_merge(q, &req, bio);
1126 	switch (el_ret) {
1127 	case ELEVATOR_BACK_MERGE:
1128 		BUG_ON(!rq_mergeable(req));
1129 
1130 		if (!ll_back_merge_fn(q, req, bio))
1131 			break;
1132 
1133 		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
1134 
1135 		req->biotail->bi_next = bio;
1136 		req->biotail = bio;
1137 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1138 		req->ioprio = ioprio_best(req->ioprio, prio);
1139 		drive_stat_acct(req, 0);
1140 		if (!attempt_back_merge(q, req))
1141 			elv_merged_request(q, req, el_ret);
1142 		goto out;
1143 
1144 	case ELEVATOR_FRONT_MERGE:
1145 		BUG_ON(!rq_mergeable(req));
1146 
1147 		if (!ll_front_merge_fn(q, req, bio))
1148 			break;
1149 
1150 		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
1151 
1152 		bio->bi_next = req->bio;
1153 		req->bio = bio;
1154 
1155 		/*
1156 		 * may not be valid. if the low level driver said
1157 		 * it didn't need a bounce buffer then it better
1158 		 * not touch req->buffer either...
1159 		 */
1160 		req->buffer = bio_data(bio);
1161 		req->current_nr_sectors = bio_cur_sectors(bio);
1162 		req->hard_cur_sectors = req->current_nr_sectors;
1163 		req->sector = req->hard_sector = bio->bi_sector;
1164 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1165 		req->ioprio = ioprio_best(req->ioprio, prio);
1166 		drive_stat_acct(req, 0);
1167 		if (!attempt_front_merge(q, req))
1168 			elv_merged_request(q, req, el_ret);
1169 		goto out;
1170 
1171 	/* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
1172 	default:
1173 		;
1174 	}
1175 
1176 get_rq:
1177 	/*
1178 	 * This sync check and mask will be re-done in init_request_from_bio(),
1179 	 * but we need to set it earlier to expose the sync flag to the
1180 	 * rq allocator and io schedulers.
1181 	 */
1182 	rw_flags = bio_data_dir(bio);
1183 	if (sync)
1184 		rw_flags |= REQ_RW_SYNC;
1185 
1186 	/*
1187 	 * Grab a free request. This might sleep but cannot fail.
1188 	 * Returns with the queue unlocked.
1189 	 */
1190 	req = get_request_wait(q, rw_flags, bio);
1191 
1192 	/*
1193 	 * After dropping the lock and possibly sleeping here, our request
1194 	 * may now be mergeable after it had proven unmergeable (above).
1195 	 * We don't worry about that case for efficiency. It won't happen
1196 	 * often, and the elevators are able to handle it.
1197 	 */
1198 	init_request_from_bio(req, bio);
1199 
1200 	spin_lock_irq(q->queue_lock);
1201 	if (elv_queue_empty(q))
1202 		blk_plug_device(q);
1203 	add_request(q, req);
1204 out:
1205 	if (sync)
1206 		__generic_unplug_device(q);
1207 
1208 	spin_unlock_irq(q->queue_lock);
1209 	return 0;
1210 
1211 end_io:
1212 	bio_endio(bio, err);
1213 	return 0;
1214 }
1215 
1216 /*
1217  * If bio->bi_dev is a partition, remap the location
1218  */
1219 static inline void blk_partition_remap(struct bio *bio)
1220 {
1221 	struct block_device *bdev = bio->bi_bdev;
1222 
1223 	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1224 		struct hd_struct *p = bdev->bd_part;
1225 
1226 		bio->bi_sector += p->start_sect;
1227 		bio->bi_bdev = bdev->bd_contains;
1228 
1229 		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
1230 				    bdev->bd_dev, bio->bi_sector,
1231 				    bio->bi_sector - p->start_sect);
1232 	}
1233 }
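
/*
 * Example of the remap above: for a partition whose start_sect is 63, a
 * bio addressed to sector 100 of the partition is rewritten to absolute
 * sector 163 of the whole disk, and bi_bdev is repointed from the
 * partition to bd_contains.
 */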
1234 
1235 static void handle_bad_sector(struct bio *bio)
1236 {
1237 	char b[BDEVNAME_SIZE];
1238 
1239 	printk(KERN_INFO "attempt to access beyond end of device\n");
1240 	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1241 			bdevname(bio->bi_bdev, b),
1242 			bio->bi_rw,
1243 			(unsigned long long)bio->bi_sector + bio_sectors(bio),
1244 			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
1245 
1246 	set_bit(BIO_EOF, &bio->bi_flags);
1247 }
1248 
1249 #ifdef CONFIG_FAIL_MAKE_REQUEST
1250 
1251 static DECLARE_FAULT_ATTR(fail_make_request);
1252 
1253 static int __init setup_fail_make_request(char *str)
1254 {
1255 	return setup_fault_attr(&fail_make_request, str);
1256 }
1257 __setup("fail_make_request=", setup_fail_make_request);
1258 
1259 static int should_fail_request(struct bio *bio)
1260 {
1261 	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
1262 	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
1263 		return should_fail(&fail_make_request, bio->bi_size);
1264 
1265 	return 0;
1266 }
1267 
1268 static int __init fail_make_request_debugfs(void)
1269 {
1270 	return init_fault_attr_dentries(&fail_make_request,
1271 					"fail_make_request");
1272 }
1273 
1274 late_initcall(fail_make_request_debugfs);
1275 
1276 #else /* CONFIG_FAIL_MAKE_REQUEST */
1277 
1278 static inline int should_fail_request(struct bio *bio)
1279 {
1280 	return 0;
1281 }
1282 
1283 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1284 
1285 /*
1286  * Check whether this bio extends beyond the end of the device.
1287  */
1288 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1289 {
1290 	sector_t maxsector;
1291 
1292 	if (!nr_sectors)
1293 		return 0;
1294 
1295 	/* Test device or partition size, when known. */
1296 	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1297 	if (maxsector) {
1298 		sector_t sector = bio->bi_sector;
1299 
1300 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1301 			/*
1302 			 * This may well happen - the kernel calls bread()
1303 			 * without checking the size of the device, e.g., when
1304 			 * mounting a device.
1305 			 */
1306 			handle_bad_sector(bio);
1307 			return 1;
1308 		}
1309 	}
1310 
1311 	return 0;
1312 }
1313 
1314 /**
1315  * generic_make_request - hand a buffer to its device driver for I/O
1316  * @bio:  The bio describing the location in memory and on the device.
1317  *
1318  * generic_make_request() is used to make I/O requests of block
1319  * devices. It is passed a &struct bio, which describes the I/O that needs
1320  * to be done.
1321  *
1322  * generic_make_request() does not return any status.  The
1323  * success/failure status of the request, along with notification of
1324  * completion, is delivered asynchronously through the bio->bi_end_io
1325  * function described (one day) elsewhere.
1326  *
1327  * The caller of generic_make_request must make sure that bi_io_vec
1328  * are set to describe the memory buffer, and that bi_dev and bi_sector are
1329  * set to describe the device address, and the
1330  * bi_end_io and optionally bi_private are set to describe how
1331  * completion notification should be signaled.
1332  *
1333  * generic_make_request and the drivers it calls may use bi_next if this
1334  * bio happens to be merged with someone else, and may change bi_dev and
1335  * bi_sector for remaps as it sees fit.  So the values of these fields
1336  * should NOT be depended on after the call to generic_make_request.
1337  */
1338 static inline void __generic_make_request(struct bio *bio)
1339 {
1340 	struct request_queue *q;
1341 	sector_t old_sector;
1342 	int ret, nr_sectors = bio_sectors(bio);
1343 	dev_t old_dev;
1344 	int err = -EIO;
1345 
1346 	might_sleep();
1347 
1348 	if (bio_check_eod(bio, nr_sectors))
1349 		goto end_io;
1350 
1351 	/*
1352 	 * Resolve the mapping until finished. (drivers are
1353 	 * still free to implement/resolve their own stacking
1354 	 * by explicitly returning 0)
1355 	 *
1356 	 * NOTE: we don't repeat the blk_size check for each new device.
1357 	 * Stacking drivers are expected to know what they are doing.
1358 	 */
1359 	old_sector = -1;
1360 	old_dev = 0;
1361 	do {
1362 		char b[BDEVNAME_SIZE];
1363 
1364 		q = bdev_get_queue(bio->bi_bdev);
1365 		if (!q) {
1366 			printk(KERN_ERR
1367 			       "generic_make_request: Trying to access "
1368 				"nonexistent block-device %s (%Lu)\n",
1369 				bdevname(bio->bi_bdev, b),
1370 				(long long) bio->bi_sector);
1371 end_io:
1372 			bio_endio(bio, err);
1373 			break;
1374 		}
1375 
1376 		if (unlikely(nr_sectors > q->max_hw_sectors)) {
1377 			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1378 				bdevname(bio->bi_bdev, b),
1379 				bio_sectors(bio),
1380 				q->max_hw_sectors);
1381 			goto end_io;
1382 		}
1383 
1384 		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1385 			goto end_io;
1386 
1387 		if (should_fail_request(bio))
1388 			goto end_io;
1389 
1390 		/*
1391 		 * If this device has partitions, remap block n
1392 		 * of partition p to block n+start(p) of the disk.
1393 		 */
1394 		blk_partition_remap(bio);
1395 
1396 		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1397 			goto end_io;
1398 
1399 		if (old_sector != -1)
1400 			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
1401 					    old_sector);
1402 
1403 		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
1404 
1405 		old_sector = bio->bi_sector;
1406 		old_dev = bio->bi_bdev->bd_dev;
1407 
1408 		if (bio_check_eod(bio, nr_sectors))
1409 			goto end_io;
1410 		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
1411 			err = -EOPNOTSUPP;
1412 			goto end_io;
1413 		}
1414 
1415 		ret = q->make_request_fn(q, bio);
1416 	} while (ret);
1417 }
1418 
1419 /*
1420  * We only want one ->make_request_fn to be active at a time,
1421  * else stack usage with stacked devices could be a problem.
1422  * So use current->bio_{list,tail} to keep a list of requests
1423  * submitted by a make_request_fn function.
1424  * current->bio_tail is also used as a flag to say if
1425  * generic_make_request is currently active in this task or not.
1426  * If it is NULL, then no make_request is active.  If it is non-NULL,
1427  * then a make_request is active, and new requests should be added
1428  * at the tail
1429  */
1430 void generic_make_request(struct bio *bio)
1431 {
1432 	if (current->bio_tail) {
1433 		/* make_request is active */
1434 		*(current->bio_tail) = bio;
1435 		bio->bi_next = NULL;
1436 		current->bio_tail = &bio->bi_next;
1437 		return;
1438 	}
1439 	/* following loop may be a bit non-obvious, and so deserves some
1440 	 * explanation.
1441 	 * Before entering the loop, bio->bi_next is NULL (as all callers
1442 	 * ensure that) so we have a list with a single bio.
1443 	 * We pretend that we have just taken it off a longer list, so
1444 	 * we assign bio_list to the next (which is NULL) and bio_tail
1445 	 * to &bio_list, thus initialising the bio_list of new bios to be
1446 	 * added.  __generic_make_request may indeed add some more bios
1447 	 * through a recursive call to generic_make_request.  If it
1448 	 * did, we find a non-NULL value in bio_list and re-enter the loop
1449 	 * from the top.  In this case we really did just take the bio
1450 	 * off the top of the list (no pretending) and so fix up bio_list and
1451 	 * bio_tail or bi_next, and call into __generic_make_request again.
1452 	 *
1453 	 * The loop was structured like this to make only one call to
1454 	 * __generic_make_request (which is important as it is large and
1455 	 * inlined) and to keep the structure simple.
1456 	 */
1457 	BUG_ON(bio->bi_next);
1458 	do {
1459 		current->bio_list = bio->bi_next;
1460 		if (bio->bi_next == NULL)
1461 			current->bio_tail = &current->bio_list;
1462 		else
1463 			bio->bi_next = NULL;
1464 		__generic_make_request(bio);
1465 		bio = current->bio_list;
1466 	} while (bio);
1467 	current->bio_tail = NULL; /* deactivate */
1468 }
1469 EXPORT_SYMBOL(generic_make_request);
1470 
1471 /**
1472  * submit_bio - submit a bio to the block device layer for I/O
1473  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1474  * @bio: The &struct bio which describes the I/O
1475  *
1476  * submit_bio() is very similar in purpose to generic_make_request(), and
1477  * uses that function to do most of the work. Both are fairly rough
1478  * interfaces, @bio must be presetup and ready for I/O.
1479  *
1480  */
1481 void submit_bio(int rw, struct bio *bio)
1482 {
1483 	int count = bio_sectors(bio);
1484 
1485 	bio->bi_rw |= rw;
1486 
1487 	/*
1488 	 * If it's a regular read/write or a barrier with data attached,
1489 	 * go through the normal accounting stuff before submission.
1490 	 */
1491 	if (!bio_empty_barrier(bio)) {
1492 
1493 		BIO_BUG_ON(!bio->bi_size);
1494 		BIO_BUG_ON(!bio->bi_io_vec);
1495 
1496 		if (rw & WRITE) {
1497 			count_vm_events(PGPGOUT, count);
1498 		} else {
1499 			task_io_account_read(bio->bi_size);
1500 			count_vm_events(PGPGIN, count);
1501 		}
1502 
1503 		if (unlikely(block_dump)) {
1504 			char b[BDEVNAME_SIZE];
1505 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
1506 			current->comm, task_pid_nr(current),
1507 				(rw & WRITE) ? "WRITE" : "READ",
1508 				(unsigned long long)bio->bi_sector,
1509 				bdevname(bio->bi_bdev, b));
1510 		}
1511 	}
1512 
1513 	generic_make_request(bio);
1514 }
1515 EXPORT_SYMBOL(submit_bio);
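
/*
 * Sketch of a single-page read via submit_bio() (my_end_io and
 * my_cookie are illustrative; error handling omitted):
 *
 *	bio = bio_alloc(GFP_NOIO, 1);
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = my_cookie;
 *	submit_bio(READ, bio);
 */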
1516 
1517 /**
1518  * __end_that_request_first - end I/O on a request
1519  * @req:      the request being processed
1520  * @error:    0 for success, < 0 for error
1521  * @nr_bytes: number of bytes to complete
1522  *
1523  * Description:
1524  *     Ends I/O on a number of bytes attached to @req, and sets it up
1525  *     for the next range of segments (if any) in the cluster.
1526  *
1527  * Return:
1528  *     0 - we are done with this request, call end_that_request_last()
1529  *     1 - still buffers pending for this request
1530  **/
1531 static int __end_that_request_first(struct request *req, int error,
1532 				    int nr_bytes)
1533 {
1534 	int total_bytes, bio_nbytes, next_idx = 0;
1535 	struct bio *bio;
1536 
1537 	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
1538 
1539 	/*
1540 	 * for a REQ_BLOCK_PC request, we want to carry any eventual
1541 	 * sense key with us all the way through
1542 	 */
1543 	if (!blk_pc_request(req))
1544 		req->errors = 0;
1545 
1546 	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1547 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1548 				req->rq_disk ? req->rq_disk->disk_name : "?",
1549 				(unsigned long long)req->sector);
1550 	}
1551 
1552 	if (blk_fs_request(req) && req->rq_disk) {
1553 		struct hd_struct *part = get_part(req->rq_disk, req->sector);
1554 		const int rw = rq_data_dir(req);
1555 
1556 		all_stat_add(req->rq_disk, part, sectors[rw],
1557 				nr_bytes >> 9, req->sector);
1558 	}
1559 
1560 	total_bytes = bio_nbytes = 0;
1561 	while ((bio = req->bio) != NULL) {
1562 		int nbytes;
1563 
1564 		/*
1565 		 * For an empty barrier request, the low level driver must
1566 		 * store a potential error location in ->sector. We pass
1567 		 * that back up in ->bi_sector.
1568 		 */
1569 		if (blk_empty_barrier(req))
1570 			bio->bi_sector = req->sector;
1571 
1572 		if (nr_bytes >= bio->bi_size) {
1573 			req->bio = bio->bi_next;
1574 			nbytes = bio->bi_size;
1575 			req_bio_endio(req, bio, nbytes, error);
1576 			next_idx = 0;
1577 			bio_nbytes = 0;
1578 		} else {
1579 			int idx = bio->bi_idx + next_idx;
1580 
1581 			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
1582 				blk_dump_rq_flags(req, "__end_that");
1583 				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
1584 				       __func__, bio->bi_idx, bio->bi_vcnt);
1585 				break;
1586 			}
1587 
1588 			nbytes = bio_iovec_idx(bio, idx)->bv_len;
1589 			BIO_BUG_ON(nbytes > bio->bi_size);
1590 
1591 			/*
1592 			 * not a complete bvec done
1593 			 */
1594 			if (unlikely(nbytes > nr_bytes)) {
1595 				bio_nbytes += nr_bytes;
1596 				total_bytes += nr_bytes;
1597 				break;
1598 			}
1599 
1600 			/*
1601 			 * advance to the next vector
1602 			 */
1603 			next_idx++;
1604 			bio_nbytes += nbytes;
1605 		}
1606 
1607 		total_bytes += nbytes;
1608 		nr_bytes -= nbytes;
1609 
1610 		bio = req->bio;
1611 		if (bio) {
1612 			/*
1613 			 * end more in this run, or just return 'not-done'
1614 			 */
1615 			if (unlikely(nr_bytes <= 0))
1616 				break;
1617 		}
1618 	}
1619 
1620 	/*
1621 	 * completely done
1622 	 */
1623 	if (!req->bio)
1624 		return 0;
1625 
1626 	/*
1627 	 * if the request wasn't completed, update state
1628 	 */
1629 	if (bio_nbytes) {
1630 		req_bio_endio(req, bio, bio_nbytes, error);
1631 		bio->bi_idx += next_idx;
1632 		bio_iovec(bio)->bv_offset += nr_bytes;
1633 		bio_iovec(bio)->bv_len -= nr_bytes;
1634 	}
1635 
1636 	blk_recalc_rq_sectors(req, total_bytes >> 9);
1637 	blk_recalc_rq_segments(req);
1638 	return 1;
1639 }
1640 
1641 /*
1642  * splice the completion data to a local structure and hand off to
1643  * process_completion_queue() to complete the requests
1644  */
1645 static void blk_done_softirq(struct softirq_action *h)
1646 {
1647 	struct list_head *cpu_list, local_list;
1648 
1649 	local_irq_disable();
1650 	cpu_list = &__get_cpu_var(blk_cpu_done);
1651 	list_replace_init(cpu_list, &local_list);
1652 	local_irq_enable();
1653 
1654 	while (!list_empty(&local_list)) {
1655 		struct request *rq;
1656 
1657 		rq = list_entry(local_list.next, struct request, donelist);
1658 		list_del_init(&rq->donelist);
1659 		rq->q->softirq_done_fn(rq);
1660 	}
1661 }
1662 
1663 static int __cpuinit blk_cpu_notify(struct notifier_block *self,
1664 				    unsigned long action, void *hcpu)
1665 {
1666 	/*
1667 	 * If a CPU goes away, splice its entries to the current CPU
1668 	 * and trigger a run of the softirq
1669 	 */
1670 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1671 		int cpu = (unsigned long) hcpu;
1672 
1673 		local_irq_disable();
1674 		list_splice_init(&per_cpu(blk_cpu_done, cpu),
1675 				 &__get_cpu_var(blk_cpu_done));
1676 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
1677 		local_irq_enable();
1678 	}
1679 
1680 	return NOTIFY_OK;
1681 }
1682 
1683 
1684 static struct notifier_block blk_cpu_notifier __cpuinitdata = {
1685 	.notifier_call	= blk_cpu_notify,
1686 };
1687 
1688 /**
1689  * blk_complete_request - end I/O on a request
1690  * @req:      the request being processed
1691  *
1692  * Description:
1693  *     Ends all I/O on a request. It does not handle partial completions,
1694  *     unless the driver actually implements this in its completion callback
1695  *     through requeueing. The actual completion happens out-of-order,
1696  *     through a softirq handler. The user must have registered a completion
1697  *     callback through blk_queue_softirq_done().
1698  **/
1699 
1700 void blk_complete_request(struct request *req)
1701 {
1702 	struct list_head *cpu_list;
1703 	unsigned long flags;
1704 
1705 	BUG_ON(!req->q->softirq_done_fn);
1706 
1707 	local_irq_save(flags);
1708 
1709 	cpu_list = &__get_cpu_var(blk_cpu_done);
1710 	list_add_tail(&req->donelist, cpu_list);
1711 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
1712 
1713 	local_irq_restore(flags);
1714 }
1715 EXPORT_SYMBOL(blk_complete_request);
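
/*
 * Sketch of softirq completion from a hypothetical driver: register the
 * done callback once at init time, then complete from the interrupt
 * handler (my_softirq_done is illustrative):
 *
 *	blk_queue_softirq_done(q, my_softirq_done);	// at init
 *	...
 *	blk_complete_request(rq);	// from the IRQ handler
 */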
1716 
1717 /*
1718  * queue lock must be held
1719  */
1720 static void end_that_request_last(struct request *req, int error)
1721 {
1722 	struct gendisk *disk = req->rq_disk;
1723 
1724 	if (blk_rq_tagged(req))
1725 		blk_queue_end_tag(req->q, req);
1726 
1727 	if (blk_queued_rq(req))
1728 		blkdev_dequeue_request(req);
1729 
1730 	if (unlikely(laptop_mode) && blk_fs_request(req))
1731 		laptop_io_completion();
1732 
1733 	/*
1734 	 * Account IO completion.  bar_rq isn't accounted as a normal
1735 	 * IO on queueing nor completion.  Accounting the containing
1736 	 * request is enough.
1737 	 */
1738 	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
1739 		unsigned long duration = jiffies - req->start_time;
1740 		const int rw = rq_data_dir(req);
1741 		struct hd_struct *part = get_part(disk, req->sector);
1742 
1743 		__all_stat_inc(disk, part, ios[rw], req->sector);
1744 		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
1745 		disk_round_stats(disk);
1746 		disk->in_flight--;
1747 		if (part) {
1748 			part_round_stats(part);
1749 			part->in_flight--;
1750 		}
1751 	}
1752 
1753 	if (req->end_io)
1754 		req->end_io(req, error);
1755 	else {
1756 		if (blk_bidi_rq(req))
1757 			__blk_put_request(req->next_rq->q, req->next_rq);
1758 
1759 		__blk_put_request(req->q, req);
1760 	}
1761 }
1762 
1763 static inline void __end_request(struct request *rq, int uptodate,
1764 				 unsigned int nr_bytes)
1765 {
1766 	int error = 0;
1767 
1768 	if (uptodate <= 0)
1769 		error = uptodate ? uptodate : -EIO;
1770 
1771 	__blk_end_request(rq, error, nr_bytes);
1772 }
1773 
1774 /**
1775  * blk_rq_bytes - Returns bytes left to complete in the entire request
1776  * @rq: the request being processed
1777  **/
1778 unsigned int blk_rq_bytes(struct request *rq)
1779 {
1780 	if (blk_fs_request(rq))
1781 		return rq->hard_nr_sectors << 9;
1782 
1783 	return rq->data_len;
1784 }
1785 EXPORT_SYMBOL_GPL(blk_rq_bytes);
1786 
1787 /**
1788  * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
1789  * @rq: the request being processed
1790  **/
1791 unsigned int blk_rq_cur_bytes(struct request *rq)
1792 {
1793 	if (blk_fs_request(rq))
1794 		return rq->current_nr_sectors << 9;
1795 
1796 	if (rq->bio)
1797 		return rq->bio->bi_size;
1798 
1799 	return rq->data_len;
1800 }
1801 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
1802 
1803 /**
1804  * end_queued_request - end all I/O on a queued request
1805  * @rq:		the request being processed
1806  * @uptodate:	error value or 0/1 uptodate flag
1807  *
1808  * Description:
1809  *     Ends all I/O on a request, and removes it from the block layer queues.
1810  *     Not suitable for normal IO completion, unless the driver still has
1811  *     the request attached to the block layer.
1812  *
1813  **/
1814 void end_queued_request(struct request *rq, int uptodate)
1815 {
1816 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1817 }
1818 EXPORT_SYMBOL(end_queued_request);
1819 
1820 /**
1821  * end_dequeued_request - end all I/O on a dequeued request
1822  * @rq:		the request being processed
1823  * @uptodate:	error value or 0/1 uptodate flag
1824  *
1825  * Description:
1826  *     Ends all I/O on a request. The request must already have been
1827  *     dequeued using blkdev_dequeue_request(), as is normally the case
1828  *     for most drivers.
1829  *
1830  **/
1831 void end_dequeued_request(struct request *rq, int uptodate)
1832 {
1833 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1834 }
1835 EXPORT_SYMBOL(end_dequeued_request);
1836 
1837 
1838 /**
1839  * end_request - end I/O on the current segment of the request
1840  * @req:	the request being processed
1841  * @uptodate:	error value or 0/1 uptodate flag
1842  *
1843  * Description:
1844  *     Ends I/O on the current segment of a request. If that is the only
1845  *     remaining segment, the request is also completed and freed.
1846  *
1847  *     This is a remnant of how older block drivers handled IO completions.
1848  *     Modern drivers typically end IO on the full request in one go, unless
1849  *     they have a residual value to account for. For that case this function
1850  *     isn't really useful, unless the residual just happens to be the
1851  *     full current segment. In other words, don't use this function in new
1852  *     code. Either use end_request_completely(), or the
1853  *     end_that_request_chunk() (along with end_that_request_last()) for
1854  *     partial completions.
1855  *
1856  **/
1857 void end_request(struct request *req, int uptodate)
1858 {
1859 	__end_request(req, uptodate, req->hard_cur_sectors << 9);
1860 }
1861 EXPORT_SYMBOL(end_request);
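
/*
 * Example (hypothetical legacy-style driver, for illustration only): the
 * one-segment-at-a-time pattern end_request() was written for.  The
 * request stays at the head of the queue until its last segment is done.
 * mydrv_xfer() is a made-up routine that moves one segment to or from
 * the hardware and returns 1 on success, 0 on failure.
 *
 *	static void mydrv_request(struct request_queue *q)
 *	{
 *		struct request *req;
 *
 *		while ((req = elv_next_request(q)) != NULL)
 *			end_request(req, mydrv_xfer(req));
 *	}
 */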

/**
 * blk_end_io - Generic end_io function to complete a request.
 * @rq:           the request being processed
 * @error:        0 for success, < 0 for error
 * @nr_bytes:     number of bytes to complete @rq
 * @bidi_bytes:   number of bytes to complete @rq->next_rq
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non-zero, this helper returns without
 *                completing the request.
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     If @rq has bytes left over, it is set up for the next range of segments.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - this request is not freed yet; it still has pending buffers.
 **/
static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
		      unsigned int bidi_bytes,
		      int (drv_callback)(struct request *))
{
	struct request_queue *q = rq->q;
	unsigned long flags = 0UL;

	if (blk_fs_request(rq) || blk_pc_request(rq)) {
		if (__end_that_request_first(rq, error, nr_bytes))
			return 1;

		/* Bidi request must be completed as a whole */
		if (blk_bidi_rq(rq) &&
		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
			return 1;
	}

	/* Special feature for tricky drivers */
	if (drv_callback && drv_callback(rq))
		return 1;

	add_disk_randomness(rq->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	end_that_request_last(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}

/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has bytes left over, it is set up for the next range of segments.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - still buffers pending for this request
 **/
int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_io(rq, error, nr_bytes, 0, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_request);
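
/*
 * Example (hypothetical interrupt handler, for illustration only):
 * blk_end_request() takes the queue lock itself, so it is the variant
 * to use when the lock is not already held.  mydrv_port and its members
 * are made-up driver state.
 *
 *	static void mydrv_irq_done(struct mydrv_port *port)
 *	{
 *		struct request *rq = port->current_rq;
 *		int error = port->hw_error ? -EIO : 0;
 *
 *		if (blk_end_request(rq, error, blk_rq_bytes(rq)) == 0)
 *			mydrv_start_next(port);
 *	}
 */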

/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with the queue lock held, unlike blk_end_request().
 *
 * Return:
 *     0 - we are done with this request
 *     1 - still buffers pending for this request
 **/
int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	if (blk_fs_request(rq) || blk_pc_request(rq)) {
		if (__end_that_request_first(rq, error, nr_bytes))
			return 1;
	}

	add_disk_randomness(rq->rq_disk);

	end_that_request_last(rq, error);

	return 0;
}
EXPORT_SYMBOL_GPL(__blk_end_request);
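
/*
 * Example (hypothetical request_fn, for illustration only): inside the
 * request_fn the queue lock is already held, so the variant that does
 * not take the lock itself must be used.  mydrv_queue_to_hw() is a
 * made-up routine that hands the request to the hardware and returns 0
 * on failure.
 *
 *	static void mydrv_request(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			blkdev_dequeue_request(rq);
 *			if (!mydrv_queue_to_hw(rq))
 *				__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
 *		}
 *	}
 */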

/**
 * blk_end_bidi_request - Helper function for drivers to complete bidi request.
 * @rq:         the bidi request being processed
 * @error:      0 for success, < 0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - still buffers pending for this request
 **/
int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
			 unsigned int bidi_bytes)
{
	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_bidi_request);
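
/*
 * Example (hypothetical, for illustration only): a bidirectional
 * request carries its second data direction in @rq->next_rq, and both
 * halves must be finished together, e.g.:
 *
 *	blk_end_bidi_request(rq, error, blk_rq_bytes(rq),
 *			     blk_rq_bytes(rq->next_rq));
 */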

/**
 * blk_end_request_callback - Special helper function for tricky drivers
 * @rq:           the request being processed
 * @error:        0 for success, < 0 for error
 * @nr_bytes:     number of bytes to complete
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non-zero, this helper returns without
 *                completing the request.
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has bytes left over, it is set up for the next range of segments.
 *
 *     This special helper function is used only by existing tricky drivers
 *     (e.g. cdrom_newpc_intr() in ide-cd).
 *     This interface will be removed when such drivers are rewritten.
 *     Don't use this interface in new code.
 *
 * Return:
 *     0 - we are done with this request
 *     1 - this request is not freed yet:
 *         it still has pending buffers, or
 *         the driver doesn't want to finish it yet.
 **/
int blk_end_request_callback(struct request *rq, int error,
			     unsigned int nr_bytes,
			     int (drv_callback)(struct request *))
{
	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
}
EXPORT_SYMBOL_GPL(blk_end_request_callback);

/*
 * Fill in a request from a bio: transfer direction, segment counts,
 * sector and byte counts, the data buffer and the owning disk.
 */
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
	rq->cmd_flags |= (bio->bi_rw & 3);

	rq->nr_phys_segments = bio_phys_segments(q, bio);
	rq->nr_hw_segments = bio_hw_segments(q, bio);
	rq->current_nr_sectors = bio_cur_sectors(bio);
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
	rq->buffer = bio_data(bio);
	rq->data_len = bio->bi_size;

	rq->bio = rq->biotail = bio;

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);
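
/*
 * Example (hypothetical, for illustration only): punting work that must
 * not run in the current, possibly atomic, context to the kblockd
 * thread.  mydev and mydrv_unplug_fn() are made up.
 *
 *	INIT_WORK(&mydev->unplug_work, mydrv_unplug_fn);
 *	kblockd_schedule_work(&mydev->unplug_work);
 */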

void kblockd_flush_work(struct work_struct *work)
{
	cancel_work_sync(work);
}
EXPORT_SYMBOL(kblockd_flush_work);

int __init blk_dev_init(void)
{
	int i;

	kblockd_workqueue = create_workqueue("kblockd");
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);

	return 0;
}