xref: /linux/block/blk-core.c (revision f7511d5f66f01fc451747b24e79f3ada7a3af9af)
1 /*
2  * Copyright (C) 1991, 1992 Linus Torvalds
3  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
4  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
5  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7  *	- July 2000
8  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
9  */
10 
11 /*
12  * This handles all read/write requests to block devices
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/highmem.h>
20 #include <linux/mm.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/string.h>
23 #include <linux/init.h>
24 #include <linux/completion.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
28 #include <linux/task_io_accounting_ops.h>
29 #include <linux/interrupt.h>
30 #include <linux/cpu.h>
31 #include <linux/blktrace_api.h>
32 #include <linux/fault-inject.h>
33 
34 #include "blk.h"
35 
36 static int __make_request(struct request_queue *q, struct bio *bio);
37 
38 /*
39  * For the allocated request tables
40  */
41 static struct kmem_cache *request_cachep;
42 
43 /*
44  * For queue allocation
45  */
46 struct kmem_cache *blk_requestq_cachep;
47 
48 /*
49  * Controlling structure to kblockd
50  */
51 static struct workqueue_struct *kblockd_workqueue;
52 
53 static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
54 
55 static void drive_stat_acct(struct request *rq, int new_io)
56 {
57 	int rw = rq_data_dir(rq);
58 
59 	if (!blk_fs_request(rq) || !rq->rq_disk)
60 		return;
61 
62 	if (!new_io) {
63 		__all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
64 	} else {
65 		struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
66 		disk_round_stats(rq->rq_disk);
67 		rq->rq_disk->in_flight++;
68 		if (part) {
69 			part_round_stats(part);
70 			part->in_flight++;
71 		}
72 	}
73 }
74 
75 void blk_queue_congestion_threshold(struct request_queue *q)
76 {
77 	int nr;
78 
79 	nr = q->nr_requests - (q->nr_requests / 8) + 1;
80 	if (nr > q->nr_requests)
81 		nr = q->nr_requests;
82 	q->nr_congestion_on = nr;
83 
84 	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
85 	if (nr < 1)
86 		nr = 1;
87 	q->nr_congestion_off = nr;
88 }
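
/*
 * Worked example (illustrative, assuming the common default of
 * q->nr_requests == 128): the hysteresis above gives
 * nr_congestion_on  = 128 - 16 + 1 = 113 and
 * nr_congestion_off = 128 - 16 - 8 - 1 = 103, so the queue is flagged
 * congested once 113 requests are allocated and the flag is only
 * cleared again when the queue drains below 103.
 */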
89 
90 /**
91  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
92  * @bdev:	device
93  *
94  * Locates the passed device's request queue and returns the address of its
95  * backing_dev_info
96  *
97  * Will return NULL if the request queue cannot be located.
98  */
99 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
100 {
101 	struct backing_dev_info *ret = NULL;
102 	struct request_queue *q = bdev_get_queue(bdev);
103 
104 	if (q)
105 		ret = &q->backing_dev_info;
106 	return ret;
107 }
108 EXPORT_SYMBOL(blk_get_backing_dev_info);
109 
110 void blk_rq_init(struct request_queue *q, struct request *rq)
111 {
112 	memset(rq, 0, sizeof(*rq));
113 
114 	INIT_LIST_HEAD(&rq->queuelist);
115 	INIT_LIST_HEAD(&rq->donelist);
116 	rq->q = q;
117 	rq->sector = rq->hard_sector = (sector_t) -1;
118 	INIT_HLIST_NODE(&rq->hash);
119 	RB_CLEAR_NODE(&rq->rb_node);
120 	rq->cmd = rq->__cmd;
121 	rq->tag = -1;
122 	rq->ref_count = 1;
123 }
124 EXPORT_SYMBOL(blk_rq_init);
125 
126 static void req_bio_endio(struct request *rq, struct bio *bio,
127 			  unsigned int nbytes, int error)
128 {
129 	struct request_queue *q = rq->q;
130 
131 	if (&q->bar_rq != rq) {
132 		if (error)
133 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
134 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
135 			error = -EIO;
136 
137 		if (unlikely(nbytes > bio->bi_size)) {
138 			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
139 			       __func__, nbytes, bio->bi_size);
140 			nbytes = bio->bi_size;
141 		}
142 
143 		bio->bi_size -= nbytes;
144 		bio->bi_sector += (nbytes >> 9);
145 		if (bio->bi_size == 0)
146 			bio_endio(bio, error);
147 	} else {
148 
149 		/*
150 		 * Okay, this is the barrier request in progress, just
151 		 * record the error;
152 		 */
153 		if (error && !q->orderr)
154 			q->orderr = error;
155 	}
156 }
157 
158 void blk_dump_rq_flags(struct request *rq, char *msg)
159 {
160 	int bit;
161 
162 	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
163 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
164 		rq->cmd_flags);
165 
166 	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
167 						(unsigned long long)rq->sector,
168 						rq->nr_sectors,
169 						rq->current_nr_sectors);
170 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
171 						rq->bio, rq->biotail,
172 						rq->buffer, rq->data,
173 						rq->data_len);
174 
175 	if (blk_pc_request(rq)) {
176 		printk(KERN_INFO "  cdb: ");
177 		for (bit = 0; bit < BLK_MAX_CDB; bit++)
178 			printk("%02x ", rq->cmd[bit]);
179 		printk("\n");
180 	}
181 }
182 EXPORT_SYMBOL(blk_dump_rq_flags);
183 
184 /*
185  * "plug" the device if there are no outstanding requests: this will
186  * force the transfer to start only after we have put all the requests
187  * on the list.
188  *
189  * This is called with interrupts off and no requests on the queue and
190  * with the queue lock held.
191  */
192 void blk_plug_device(struct request_queue *q)
193 {
194 	WARN_ON(!irqs_disabled());
195 
196 	/*
197 	 * don't plug a stopped queue, it must be paired with blk_start_queue()
198 	 * which will restart the queueing
199 	 */
200 	if (blk_queue_stopped(q))
201 		return;
202 
203 	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
204 		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
205 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
206 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
207 	}
208 }
209 EXPORT_SYMBOL(blk_plug_device);
210 
211 /*
212  * remove the queue from the plugged list, if present. Called with
213  * queue lock held and interrupts disabled.
214  */
215 int blk_remove_plug(struct request_queue *q)
216 {
217 	WARN_ON(!irqs_disabled());
218 
219 	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
220 		return 0;
221 
222 	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
223 	del_timer(&q->unplug_timer);
224 	return 1;
225 }
226 EXPORT_SYMBOL(blk_remove_plug);
227 
228 /*
229  * remove the plug and let it rip..
230  */
231 void __generic_unplug_device(struct request_queue *q)
232 {
233 	if (unlikely(blk_queue_stopped(q)))
234 		return;
235 
236 	if (!blk_remove_plug(q))
237 		return;
238 
239 	q->request_fn(q);
240 }
241 EXPORT_SYMBOL(__generic_unplug_device);
242 
243 /**
244  * generic_unplug_device - fire a request queue
245  * @q:    The &struct request_queue in question
246  *
247  * Description:
248  *   Linux uses plugging to build up bigger requests on the queue before letting
249  *   the device have at them. If a queue is plugged, the I/O scheduler
250  *   is still adding and merging requests on the queue. Once the queue
251  *   gets unplugged, the request_fn defined for the queue is invoked and
252  *   transfers started.
253  **/
254 void generic_unplug_device(struct request_queue *q)
255 {
256 	spin_lock_irq(q->queue_lock);
257 	__generic_unplug_device(q);
258 	spin_unlock_irq(q->queue_lock);
259 }
260 EXPORT_SYMBOL(generic_unplug_device);
261 
262 static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
263 				   struct page *page)
264 {
265 	struct request_queue *q = bdi->unplug_io_data;
266 
267 	blk_unplug(q);
268 }
269 
270 void blk_unplug_work(struct work_struct *work)
271 {
272 	struct request_queue *q =
273 		container_of(work, struct request_queue, unplug_work);
274 
275 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
276 				q->rq.count[READ] + q->rq.count[WRITE]);
277 
278 	q->unplug_fn(q);
279 }
280 
281 void blk_unplug_timeout(unsigned long data)
282 {
283 	struct request_queue *q = (struct request_queue *)data;
284 
285 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
286 				q->rq.count[READ] + q->rq.count[WRITE]);
287 
288 	kblockd_schedule_work(&q->unplug_work);
289 }
290 
291 void blk_unplug(struct request_queue *q)
292 {
293 	/*
294 	 * devices don't necessarily have an ->unplug_fn defined
295 	 */
296 	if (q->unplug_fn) {
297 		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
298 					q->rq.count[READ] + q->rq.count[WRITE]);
299 
300 		q->unplug_fn(q);
301 	}
302 }
303 EXPORT_SYMBOL(blk_unplug);
304 
305 /**
306  * blk_start_queue - restart a previously stopped queue
307  * @q:    The &struct request_queue in question
308  *
309  * Description:
310  *   blk_start_queue() will clear the stop flag on the queue, and call
311  *   the request_fn for the queue if it was in a stopped state when
312  *   entered. Also see blk_stop_queue(). Queue lock must be held.
313  **/
314 void blk_start_queue(struct request_queue *q)
315 {
316 	WARN_ON(!irqs_disabled());
317 
318 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
319 
320 	/*
321 	 * one level of recursion is ok and is much faster than kicking
322 	 * the unplug handling
323 	 */
324 	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
325 		queue_flag_set(QUEUE_FLAG_REENTER, q);
326 		q->request_fn(q);
327 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
328 	} else {
329 		blk_plug_device(q);
330 		kblockd_schedule_work(&q->unplug_work);
331 	}
332 }
333 EXPORT_SYMBOL(blk_start_queue);
334 
335 /**
336  * blk_stop_queue - stop a queue
337  * @q:    The &struct request_queue in question
338  *
339  * Description:
340  *   The Linux block layer assumes that a block driver will consume all
341  *   entries on the request queue when the request_fn strategy is called.
342  *   Often this will not happen, because of hardware limitations (queue
343  *   depth settings). If a device driver gets a 'queue full' response,
344  *   or if it simply chooses not to queue more I/O at one point, it can
345  *   call this function to prevent the request_fn from being called until
346  *   the driver has signalled it's ready to go again. This happens by calling
347  *   blk_start_queue() to restart queue operations. Queue lock must be held.
348  **/
349 void blk_stop_queue(struct request_queue *q)
350 {
351 	blk_remove_plug(q);
352 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
353 }
354 EXPORT_SYMBOL(blk_stop_queue);
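
/*
 * Illustrative sketch, not part of this file (all my_* names are
 * hypothetical): a driver whose hardware queue fills up can stop the
 * queue and restart it once the hardware has room again:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (my_hw_queue_full()) {
 *				blk_stop_queue(q);
 *				break;
 *			}
 *			blkdev_dequeue_request(rq);
 *			my_issue(rq);
 *		}
 *	}
 *
 * and later, from the completion path, with the queue lock held and
 * interrupts disabled:
 *
 *	blk_start_queue(q);
 */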
355 
356 /**
357  * blk_sync_queue - cancel any pending callbacks on a queue
358  * @q: the queue
359  *
360  * Description:
361  *     The block layer may perform asynchronous callback activity
362  *     on a queue, such as calling the unplug function after a timeout.
363  *     A block device may call blk_sync_queue to ensure that any
364  *     such activity is cancelled, thus allowing it to release resources
365  *     that the callbacks might use. The caller must already have made sure
366  *     that its ->make_request_fn will not re-add plugging prior to calling
367  *     this function.
368  *
369  */
370 void blk_sync_queue(struct request_queue *q)
371 {
372 	del_timer_sync(&q->unplug_timer);
373 	kblockd_flush_work(&q->unplug_work);
374 }
375 EXPORT_SYMBOL(blk_sync_queue);
376 
377 /**
378  * __blk_run_queue - run a single device queue
379  * @q:	The queue to run
380  */
381 void __blk_run_queue(struct request_queue *q)
382 {
383 	blk_remove_plug(q);
384 
385 	/*
386 	 * Only recurse once to avoid overrunning the stack, let the unplug
387 	 * handling reinvoke the handler shortly if we already got there.
388 	 */
389 	if (!elv_queue_empty(q)) {
390 		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
391 			queue_flag_set(QUEUE_FLAG_REENTER, q);
392 			q->request_fn(q);
393 			queue_flag_clear(QUEUE_FLAG_REENTER, q);
394 		} else {
395 			blk_plug_device(q);
396 			kblockd_schedule_work(&q->unplug_work);
397 		}
398 	}
399 }
400 EXPORT_SYMBOL(__blk_run_queue);
401 
402 /**
403  * blk_run_queue - run a single device queue
404  * @q: The queue to run
405  */
406 void blk_run_queue(struct request_queue *q)
407 {
408 	unsigned long flags;
409 
410 	spin_lock_irqsave(q->queue_lock, flags);
411 	__blk_run_queue(q);
412 	spin_unlock_irqrestore(q->queue_lock, flags);
413 }
414 EXPORT_SYMBOL(blk_run_queue);
415 
416 void blk_put_queue(struct request_queue *q)
417 {
418 	kobject_put(&q->kobj);
419 }
420 
421 void blk_cleanup_queue(struct request_queue *q)
422 {
423 	mutex_lock(&q->sysfs_lock);
424 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
425 	mutex_unlock(&q->sysfs_lock);
426 
427 	if (q->elevator)
428 		elevator_exit(q->elevator);
429 
430 	blk_put_queue(q);
431 }
432 EXPORT_SYMBOL(blk_cleanup_queue);
433 
434 static int blk_init_free_list(struct request_queue *q)
435 {
436 	struct request_list *rl = &q->rq;
437 
438 	rl->count[READ] = rl->count[WRITE] = 0;
439 	rl->starved[READ] = rl->starved[WRITE] = 0;
440 	rl->elvpriv = 0;
441 	init_waitqueue_head(&rl->wait[READ]);
442 	init_waitqueue_head(&rl->wait[WRITE]);
443 
444 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
445 				mempool_free_slab, request_cachep, q->node);
446 
447 	if (!rl->rq_pool)
448 		return -ENOMEM;
449 
450 	return 0;
451 }
452 
453 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
454 {
455 	return blk_alloc_queue_node(gfp_mask, -1);
456 }
457 EXPORT_SYMBOL(blk_alloc_queue);
458 
459 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
460 {
461 	struct request_queue *q;
462 	int err;
463 
464 	q = kmem_cache_alloc_node(blk_requestq_cachep,
465 				gfp_mask | __GFP_ZERO, node_id);
466 	if (!q)
467 		return NULL;
468 
469 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
470 	q->backing_dev_info.unplug_io_data = q;
471 	err = bdi_init(&q->backing_dev_info);
472 	if (err) {
473 		kmem_cache_free(blk_requestq_cachep, q);
474 		return NULL;
475 	}
476 
477 	init_timer(&q->unplug_timer);
478 
479 	kobject_init(&q->kobj, &blk_queue_ktype);
480 
481 	mutex_init(&q->sysfs_lock);
482 
483 	return q;
484 }
485 EXPORT_SYMBOL(blk_alloc_queue_node);
486 
487 /**
488  * blk_init_queue  - prepare a request queue for use with a block device
489  * @rfn:  The function to be called to process requests that have been
490  *        placed on the queue.
491  * @lock: Request queue spin lock
492  *
493  * Description:
494  *    If a block device wishes to use the standard request handling procedures,
495  *    which sorts requests and coalesces adjacent requests, then it must
496  *    call blk_init_queue().  The function @rfn will be called when there
497  *    are requests on the queue that need to be processed.  If the device
498  *    supports plugging, then @rfn may not be called immediately when requests
499  *    are available on the queue, but may be called at some time later instead.
500  *    Plugged queues are generally unplugged when a buffer belonging to one
501  *    of the requests on the queue is needed, or due to memory pressure.
502  *
503  *    @rfn is not required, or even expected, to remove all requests off the
504  *    queue, but only as many as it can handle at a time.  If it does leave
505  *    requests on the queue, it is responsible for arranging that the requests
506  *    get dealt with eventually.
507  *
508  *    The queue spin lock must be held while manipulating the requests on the
509  *    request queue; this lock will be taken also from interrupt context, so irq
510  *    disabling is needed for it.
511  *
512  *    Function returns a pointer to the initialized request queue, or NULL if
513  *    it didn't succeed.
514  *
515  * Note:
516  *    blk_init_queue() must be paired with a blk_cleanup_queue() call
517  *    when the block device is deactivated (such as at module unload).
518  **/
519 
520 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
521 {
522 	return blk_init_queue_node(rfn, lock, -1);
523 }
524 EXPORT_SYMBOL(blk_init_queue);
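
/*
 * Minimal usage sketch (illustrative; my_* names are hypothetical),
 * pairing blk_init_queue() with blk_cleanup_queue() as the Note above
 * requires:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_max_sectors(q, 255);
 *	...
 *	blk_cleanup_queue(q);	(on module unload)
 */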
525 
526 struct request_queue *
527 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
528 {
529 	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
530 
531 	if (!q)
532 		return NULL;
533 
534 	q->node = node_id;
535 	if (blk_init_free_list(q)) {
536 		kmem_cache_free(blk_requestq_cachep, q);
537 		return NULL;
538 	}
539 
540 	/*
541 	 * if caller didn't supply a lock, they get per-queue locking with
542 	 * our embedded lock
543 	 */
544 	if (!lock) {
545 		spin_lock_init(&q->__queue_lock);
546 		lock = &q->__queue_lock;
547 	}
548 
549 	q->request_fn		= rfn;
550 	q->prep_rq_fn		= NULL;
551 	q->unplug_fn		= generic_unplug_device;
552 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
553 	q->queue_lock		= lock;
554 
555 	blk_queue_segment_boundary(q, 0xffffffff);
556 
557 	blk_queue_make_request(q, __make_request);
558 	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
559 
560 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
561 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
562 
563 	q->sg_reserved_size = INT_MAX;
564 
565 	/*
566 	 * all done
567 	 */
568 	if (!elevator_init(q, NULL)) {
569 		blk_queue_congestion_threshold(q);
570 		return q;
571 	}
572 
573 	blk_put_queue(q);
574 	return NULL;
575 }
576 EXPORT_SYMBOL(blk_init_queue_node);
577 
578 int blk_get_queue(struct request_queue *q)
579 {
580 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
581 		kobject_get(&q->kobj);
582 		return 0;
583 	}
584 
585 	return 1;
586 }
587 
588 static inline void blk_free_request(struct request_queue *q, struct request *rq)
589 {
590 	if (rq->cmd_flags & REQ_ELVPRIV)
591 		elv_put_request(q, rq);
592 	mempool_free(rq, q->rq.rq_pool);
593 }
594 
595 static struct request *
596 blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
597 {
598 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
599 
600 	if (!rq)
601 		return NULL;
602 
603 	blk_rq_init(q, rq);
604 
605 	/*
606 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
607 	 * see bio.h and blkdev.h
608 	 */
609 	rq->cmd_flags = rw | REQ_ALLOCED;
610 
611 	if (priv) {
612 		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
613 			mempool_free(rq, q->rq.rq_pool);
614 			return NULL;
615 		}
616 		rq->cmd_flags |= REQ_ELVPRIV;
617 	}
618 
619 	return rq;
620 }
621 
622 /*
623  * ioc_batching returns true if the ioc is a valid batching io_context and
624  * should be given priority access to a request.
625  */
626 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
627 {
628 	if (!ioc)
629 		return 0;
630 
631 	/*
632 	 * Make sure the process is able to allocate at least 1 request
633 	 * even if the batch times out, otherwise we could theoretically
634 	 * lose wakeups.
635 	 */
636 	return ioc->nr_batch_requests == q->nr_batching ||
637 		(ioc->nr_batch_requests > 0
638 		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
639 }
640 
641 /*
642  * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
643  * will cause the process to be a "batcher" on all queues in the system. This
644  * is the behaviour we want though - once it gets a wakeup it should be given
645  * a nice run.
646  */
647 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
648 {
649 	if (!ioc || ioc_batching(q, ioc))
650 		return;
651 
652 	ioc->nr_batch_requests = q->nr_batching;
653 	ioc->last_waited = jiffies;
654 }
655 
656 static void __freed_request(struct request_queue *q, int rw)
657 {
658 	struct request_list *rl = &q->rq;
659 
660 	if (rl->count[rw] < queue_congestion_off_threshold(q))
661 		blk_clear_queue_congested(q, rw);
662 
663 	if (rl->count[rw] + 1 <= q->nr_requests) {
664 		if (waitqueue_active(&rl->wait[rw]))
665 			wake_up(&rl->wait[rw]);
666 
667 		blk_clear_queue_full(q, rw);
668 	}
669 }
670 
671 /*
672  * A request has just been released.  Account for it, update the full and
673  * congestion status, wake up any waiters.   Called under q->queue_lock.
674  */
675 static void freed_request(struct request_queue *q, int rw, int priv)
676 {
677 	struct request_list *rl = &q->rq;
678 
679 	rl->count[rw]--;
680 	if (priv)
681 		rl->elvpriv--;
682 
683 	__freed_request(q, rw);
684 
685 	if (unlikely(rl->starved[rw ^ 1]))
686 		__freed_request(q, rw ^ 1);
687 }
688 
689 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
690 /*
691  * Get a free request, queue_lock must be held.
692  * Returns NULL on failure, with queue_lock held.
693  * Returns !NULL on success, with queue_lock *not held*.
694  */
695 static struct request *get_request(struct request_queue *q, int rw_flags,
696 				   struct bio *bio, gfp_t gfp_mask)
697 {
698 	struct request *rq = NULL;
699 	struct request_list *rl = &q->rq;
700 	struct io_context *ioc = NULL;
701 	const int rw = rw_flags & 0x01;
702 	int may_queue, priv;
703 
704 	may_queue = elv_may_queue(q, rw_flags);
705 	if (may_queue == ELV_MQUEUE_NO)
706 		goto rq_starved;
707 
708 	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
709 		if (rl->count[rw]+1 >= q->nr_requests) {
710 			ioc = current_io_context(GFP_ATOMIC, q->node);
711 			/*
712 			 * The queue will fill after this allocation, so set
713 			 * it as full, and mark this process as "batching".
714 			 * This process will be allowed to complete a batch of
715 			 * requests, others will be blocked.
716 			 */
717 			if (!blk_queue_full(q, rw)) {
718 				ioc_set_batching(q, ioc);
719 				blk_set_queue_full(q, rw);
720 			} else {
721 				if (may_queue != ELV_MQUEUE_MUST
722 						&& !ioc_batching(q, ioc)) {
723 					/*
724 					 * The queue is full and the allocating
725 					 * process is not a "batcher", and not
726 					 * exempted by the IO scheduler
727 					 */
728 					goto out;
729 				}
730 			}
731 		}
732 		blk_set_queue_congested(q, rw);
733 	}
734 
735 	/*
736 	 * Only allow batching queuers to allocate up to 50% over the defined
737 	 * limit of requests, otherwise we could have thousands of requests
738 	 * allocated with any setting of ->nr_requests
739 	 */
740 	if (rl->count[rw] >= (3 * q->nr_requests / 2))
741 		goto out;
742 
743 	rl->count[rw]++;
744 	rl->starved[rw] = 0;
745 
746 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
747 	if (priv)
748 		rl->elvpriv++;
749 
750 	spin_unlock_irq(q->queue_lock);
751 
752 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
753 	if (unlikely(!rq)) {
754 		/*
755 		 * Allocation failed presumably due to memory. Undo anything
756 		 * we might have messed up.
757 		 *
758 		 * Allocating task should really be put onto the front of the
759 		 * wait queue, but this is pretty rare.
760 		 */
761 		spin_lock_irq(q->queue_lock);
762 		freed_request(q, rw, priv);
763 
764 		/*
765 		 * In the very unlikely event that allocation failed and no
766 		 * requests for this direction were pending, mark us starved
767 		 * so that freeing of a request in the other direction will
768 		 * notice us. Another possible fix would be to split the
769 		 * rq mempool into READ and WRITE
770 		 */
771 rq_starved:
772 		if (unlikely(rl->count[rw] == 0))
773 			rl->starved[rw] = 1;
774 
775 		goto out;
776 	}
777 
778 	/*
779 	 * ioc may be NULL here, and ioc_batching will be false. That's
780 	 * OK, if the queue is under the request limit then requests need
781 	 * not count toward the nr_batch_requests limit. There will always
782 	 * be some limit enforced by BLK_BATCH_TIME.
783 	 */
784 	if (ioc_batching(q, ioc))
785 		ioc->nr_batch_requests--;
786 
787 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
788 out:
789 	return rq;
790 }
791 
792 /*
793  * No available requests for this queue, unplug the device and wait for some
794  * requests to become available.
795  *
796  * Called with q->queue_lock held, and returns with it unlocked.
797  */
798 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
799 					struct bio *bio)
800 {
801 	const int rw = rw_flags & 0x01;
802 	struct request *rq;
803 
804 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
805 	while (!rq) {
806 		DEFINE_WAIT(wait);
807 		struct request_list *rl = &q->rq;
808 
809 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
810 				TASK_UNINTERRUPTIBLE);
811 
812 		rq = get_request(q, rw_flags, bio, GFP_NOIO);
813 
814 		if (!rq) {
815 			struct io_context *ioc;
816 
817 			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
818 
819 			__generic_unplug_device(q);
820 			spin_unlock_irq(q->queue_lock);
821 			io_schedule();
822 
823 			/*
824 			 * After sleeping, we become a "batching" process and
825 			 * will be able to allocate at least one request, and
826 			 * up to a big batch of them for a small period of time.
827 			 * See ioc_batching, ioc_set_batching
828 			 */
829 			ioc = current_io_context(GFP_NOIO, q->node);
830 			ioc_set_batching(q, ioc);
831 
832 			spin_lock_irq(q->queue_lock);
833 		}
834 		finish_wait(&rl->wait[rw], &wait);
835 	}
836 
837 	return rq;
838 }
839 
840 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
841 {
842 	struct request *rq;
843 
844 	BUG_ON(rw != READ && rw != WRITE);
845 
846 	spin_lock_irq(q->queue_lock);
847 	if (gfp_mask & __GFP_WAIT) {
848 		rq = get_request_wait(q, rw, NULL);
849 	} else {
850 		rq = get_request(q, rw, NULL, gfp_mask);
851 		if (!rq)
852 			spin_unlock_irq(q->queue_lock);
853 	}
854 	/* q->queue_lock is unlocked at this point */
855 
856 	return rq;
857 }
858 EXPORT_SYMBOL(blk_get_request);
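
/*
 * Usage note with a small sketch (illustrative): with __GFP_WAIT set in
 * the mask, the call sleeps until a request is available and cannot
 * return NULL; without it, NULL must be handled:
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);	(never NULL here)
 *
 *	rq = blk_get_request(q, WRITE, GFP_ATOMIC);
 *	if (!rq)
 *		return -EBUSY;	(queue full, try again later)
 *
 * The request is released with blk_put_request() when the caller is done.
 */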
859 
860 /**
861  * blk_start_queueing - initiate dispatch of requests to device
862  * @q:		request queue to kick into gear
863  *
864  * This is basically a helper to remove the need to know whether a queue
865  * is plugged or not if someone just wants to initiate dispatch of requests
866  * for this queue.
867  *
868  * The queue lock must be held with interrupts disabled.
869  */
870 void blk_start_queueing(struct request_queue *q)
871 {
872 	if (!blk_queue_plugged(q))
873 		q->request_fn(q);
874 	else
875 		__generic_unplug_device(q);
876 }
877 EXPORT_SYMBOL(blk_start_queueing);
878 
879 /**
880  * blk_requeue_request - put a request back on queue
881  * @q:		request queue where request should be inserted
882  * @rq:		request to be inserted
883  *
884  * Description:
885  *    Drivers often keep queueing requests until the hardware cannot accept
886  *    more, when that condition happens we need to put the request back
887  *    on the queue. Must be called with queue lock held.
888  */
889 void blk_requeue_request(struct request_queue *q, struct request *rq)
890 {
891 	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
892 
893 	if (blk_rq_tagged(rq))
894 		blk_queue_end_tag(q, rq);
895 
896 	elv_requeue_request(q, rq);
897 }
898 EXPORT_SYMBOL(blk_requeue_request);
899 
900 /**
901  * blk_insert_request - insert a special request in to a request queue
902  * @q:		request queue where request should be inserted
903  * @rq:		request to be inserted
904  * @at_head:	insert request at head or tail of queue
905  * @data:	private data
906  *
907  * Description:
908  *    Many block devices need to execute commands asynchronously, so they don't
909  *    block the whole kernel from preemption during request execution.  This is
910  *    accomplished normally by inserting artificial requests tagged as
911  *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them be
912  *    scheduled for actual execution by the request queue.
913  *
914  *    We have the option of inserting at the head or the tail of the queue.
915  *    Typically we use the tail for new ioctls and so forth.  We use the head
916  *    of the queue for things like a QUEUE_FULL message from a device, or a
917  *    host that is unable to accept a particular command.
918  */
919 void blk_insert_request(struct request_queue *q, struct request *rq,
920 			int at_head, void *data)
921 {
922 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
923 	unsigned long flags;
924 
925 	/*
926 	 * tell I/O scheduler that this isn't a regular read/write (ie it
927 	 * must not attempt merges on this) and that it acts as a soft
928 	 * barrier
929 	 */
930 	rq->cmd_type = REQ_TYPE_SPECIAL;
931 	rq->cmd_flags |= REQ_SOFTBARRIER;
932 
933 	rq->special = data;
934 
935 	spin_lock_irqsave(q->queue_lock, flags);
936 
937 	/*
938 	 * If command is tagged, release the tag
939 	 */
940 	if (blk_rq_tagged(rq))
941 		blk_queue_end_tag(q, rq);
942 
943 	drive_stat_acct(rq, 1);
944 	__elv_add_request(q, rq, where, 0);
945 	blk_start_queueing(q);
946 	spin_unlock_irqrestore(q->queue_lock, flags);
947 }
948 EXPORT_SYMBOL(blk_insert_request);
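
/*
 * Illustrative sketch (my_* names are hypothetical): the typical pattern
 * is to allocate a request with blk_get_request(), mark it up and feed it
 * to blk_insert_request(), which queues it and kicks the queue:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	rq->end_io = my_special_done;
 *	blk_insert_request(q, rq, 1, my_cmd);	(at head, with driver data)
 *
 * blk_insert_request() sets REQ_TYPE_SPECIAL and ->special itself, per
 * the code above.
 */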
949 
950 /*
951  * add-request adds a request to the linked list.
952  * queue lock is held and interrupts disabled, as we muck with the
953  * request queue list.
954  */
955 static inline void add_request(struct request_queue *q, struct request *req)
956 {
957 	drive_stat_acct(req, 1);
958 
959 	/*
960 	 * elevator indicated where it wants this request to be
961 	 * inserted at elevator_merge time
962 	 */
963 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
964 }
965 
966 /*
967  * disk_round_stats()	- Round off the performance stats on a struct
968  * disk_stats.
969  *
970  * The average IO queue length and utilisation statistics are maintained
971  * by observing the current state of the queue length and the amount of
972  * time it has been in this state.
973  *
974  * Normally, that accounting is done on IO completion, but that can result
975  * in more than a second's worth of IO being accounted for within any one
976  * second, leading to >100% utilisation.  To deal with that, we call this
977  * function to do a round-off before returning the results when reading
978  * /proc/diskstats.  This accounts immediately for all queue usage up to
979  * the current jiffies and restarts the counters again.
980  */
981 void disk_round_stats(struct gendisk *disk)
982 {
983 	unsigned long now = jiffies;
984 
985 	if (now == disk->stamp)
986 		return;
987 
988 	if (disk->in_flight) {
989 		__disk_stat_add(disk, time_in_queue,
990 				disk->in_flight * (now - disk->stamp));
991 		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
992 	}
993 	disk->stamp = now;
994 }
995 EXPORT_SYMBOL_GPL(disk_round_stats);
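
/*
 * Worked example (illustrative numbers): if disk->in_flight is 3 and
 * 10 jiffies have elapsed since disk->stamp, the function above adds
 * 3 * 10 = 30 to time_in_queue (request-weighted queue time, feeding the
 * average queue depth) and 10 to io_ticks (wall time the disk was busy
 * at all, feeding the utilisation figure), then restamps.
 */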
996 
997 void part_round_stats(struct hd_struct *part)
998 {
999 	unsigned long now = jiffies;
1000 
1001 	if (now == part->stamp)
1002 		return;
1003 
1004 	if (part->in_flight) {
1005 		__part_stat_add(part, time_in_queue,
1006 				part->in_flight * (now - part->stamp));
1007 		__part_stat_add(part, io_ticks, (now - part->stamp));
1008 	}
1009 	part->stamp = now;
1010 }
1011 
1012 /*
1013  * queue lock must be held
1014  */
1015 void __blk_put_request(struct request_queue *q, struct request *req)
1016 {
1017 	if (unlikely(!q))
1018 		return;
1019 	if (unlikely(--req->ref_count))
1020 		return;
1021 
1022 	elv_completed_request(q, req);
1023 
1024 	/*
1025 	 * Request may not have originated from ll_rw_blk. If not,
1026 	 * it didn't come out of our reserved rq pools
1027 	 */
1028 	if (req->cmd_flags & REQ_ALLOCED) {
1029 		int rw = rq_data_dir(req);
1030 		int priv = req->cmd_flags & REQ_ELVPRIV;
1031 
1032 		BUG_ON(!list_empty(&req->queuelist));
1033 		BUG_ON(!hlist_unhashed(&req->hash));
1034 
1035 		blk_free_request(q, req);
1036 		freed_request(q, rw, priv);
1037 	}
1038 }
1039 EXPORT_SYMBOL_GPL(__blk_put_request);
1040 
1041 void blk_put_request(struct request *req)
1042 {
1043 	unsigned long flags;
1044 	struct request_queue *q = req->q;
1045 
1046 	/*
1047 	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
1048 	 * following if (q) test.
1049 	 */
1050 	if (q) {
1051 		spin_lock_irqsave(q->queue_lock, flags);
1052 		__blk_put_request(q, req);
1053 		spin_unlock_irqrestore(q->queue_lock, flags);
1054 	}
1055 }
1056 EXPORT_SYMBOL(blk_put_request);
1057 
1058 void init_request_from_bio(struct request *req, struct bio *bio)
1059 {
1060 	req->cmd_type = REQ_TYPE_FS;
1061 
1062 	/*
1063 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
1064 	 */
1065 	if (bio_rw_ahead(bio) || bio_failfast(bio))
1066 		req->cmd_flags |= REQ_FAILFAST;
1067 
1068 	/*
1069 	 * REQ_HARDBARRIER implies no merging, but let's make it explicit
1070 	 */
1071 	if (unlikely(bio_barrier(bio)))
1072 		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
1073 
1074 	if (bio_sync(bio))
1075 		req->cmd_flags |= REQ_RW_SYNC;
1076 	if (bio_rw_meta(bio))
1077 		req->cmd_flags |= REQ_RW_META;
1078 
1079 	req->errors = 0;
1080 	req->hard_sector = req->sector = bio->bi_sector;
1081 	req->ioprio = bio_prio(bio);
1082 	req->start_time = jiffies;
1083 	blk_rq_bio_prep(req->q, req, bio);
1084 }
1085 
1086 static int __make_request(struct request_queue *q, struct bio *bio)
1087 {
1088 	struct request *req;
1089 	int el_ret, nr_sectors, barrier, err;
1090 	const unsigned short prio = bio_prio(bio);
1091 	const int sync = bio_sync(bio);
1092 	int rw_flags;
1093 
1094 	nr_sectors = bio_sectors(bio);
1095 
1096 	/*
1097 	 * low level driver can indicate that it wants pages above a
1098 	 * certain limit bounced to low memory (ie for highmem, or even
1099 	 * ISA dma in theory)
1100 	 */
1101 	blk_queue_bounce(q, &bio);
1102 
1103 	barrier = bio_barrier(bio);
1104 	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
1105 		err = -EOPNOTSUPP;
1106 		goto end_io;
1107 	}
1108 
1109 	spin_lock_irq(q->queue_lock);
1110 
1111 	if (unlikely(barrier) || elv_queue_empty(q))
1112 		goto get_rq;
1113 
1114 	el_ret = elv_merge(q, &req, bio);
1115 	switch (el_ret) {
1116 	case ELEVATOR_BACK_MERGE:
1117 		BUG_ON(!rq_mergeable(req));
1118 
1119 		if (!ll_back_merge_fn(q, req, bio))
1120 			break;
1121 
1122 		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
1123 
1124 		req->biotail->bi_next = bio;
1125 		req->biotail = bio;
1126 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1127 		req->ioprio = ioprio_best(req->ioprio, prio);
1128 		drive_stat_acct(req, 0);
1129 		if (!attempt_back_merge(q, req))
1130 			elv_merged_request(q, req, el_ret);
1131 		goto out;
1132 
1133 	case ELEVATOR_FRONT_MERGE:
1134 		BUG_ON(!rq_mergeable(req));
1135 
1136 		if (!ll_front_merge_fn(q, req, bio))
1137 			break;
1138 
1139 		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
1140 
1141 		bio->bi_next = req->bio;
1142 		req->bio = bio;
1143 
1144 		/*
1145 		 * may not be valid. If the low level driver said
1146 		 * it didn't need a bounce buffer then it better
1147 		 * not touch req->buffer either...
1148 		 */
1149 		req->buffer = bio_data(bio);
1150 		req->current_nr_sectors = bio_cur_sectors(bio);
1151 		req->hard_cur_sectors = req->current_nr_sectors;
1152 		req->sector = req->hard_sector = bio->bi_sector;
1153 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1154 		req->ioprio = ioprio_best(req->ioprio, prio);
1155 		drive_stat_acct(req, 0);
1156 		if (!attempt_front_merge(q, req))
1157 			elv_merged_request(q, req, el_ret);
1158 		goto out;
1159 
1160 	/* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
1161 	default:
1162 		;
1163 	}
1164 
1165 get_rq:
1166 	/*
1167 	 * This sync check and mask will be re-done in init_request_from_bio(),
1168 	 * but we need to set it earlier to expose the sync flag to the
1169 	 * rq allocator and io schedulers.
1170 	 */
1171 	rw_flags = bio_data_dir(bio);
1172 	if (sync)
1173 		rw_flags |= REQ_RW_SYNC;
1174 
1175 	/*
1176 	 * Grab a free request. This might sleep but cannot fail.
1177 	 * Returns with the queue unlocked.
1178 	 */
1179 	req = get_request_wait(q, rw_flags, bio);
1180 
1181 	/*
1182 	 * After dropping the lock and possibly sleeping here, our request
1183 	 * may now be mergeable after it had proven unmergeable (above).
1184 	 * We don't worry about that case for efficiency. It won't happen
1185 	 * often, and the elevators are able to handle it.
1186 	 */
1187 	init_request_from_bio(req, bio);
1188 
1189 	spin_lock_irq(q->queue_lock);
1190 	if (elv_queue_empty(q))
1191 		blk_plug_device(q);
1192 	add_request(q, req);
1193 out:
1194 	if (sync)
1195 		__generic_unplug_device(q);
1196 
1197 	spin_unlock_irq(q->queue_lock);
1198 	return 0;
1199 
1200 end_io:
1201 	bio_endio(bio, err);
1202 	return 0;
1203 }
1204 
1205 /*
1206  * If bio->bi_bdev is a partition, remap the location
1207  */
1208 static inline void blk_partition_remap(struct bio *bio)
1209 {
1210 	struct block_device *bdev = bio->bi_bdev;
1211 
1212 	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1213 		struct hd_struct *p = bdev->bd_part;
1214 
1215 		bio->bi_sector += p->start_sect;
1216 		bio->bi_bdev = bdev->bd_contains;
1217 
1218 		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
1219 				    bdev->bd_dev, bio->bi_sector,
1220 				    bio->bi_sector - p->start_sect);
1221 	}
1222 }
1223 
1224 static void handle_bad_sector(struct bio *bio)
1225 {
1226 	char b[BDEVNAME_SIZE];
1227 
1228 	printk(KERN_INFO "attempt to access beyond end of device\n");
1229 	printk(KERN_INFO "%s: rw=%lu, want=%Lu, limit=%Lu\n",
1230 			bdevname(bio->bi_bdev, b),
1231 			bio->bi_rw,
1232 			(unsigned long long)bio->bi_sector + bio_sectors(bio),
1233 			(unsigned long long)(bio->bi_bdev->bd_inode->i_size >> 9));
1234 
1235 	set_bit(BIO_EOF, &bio->bi_flags);
1236 }
1237 
1238 #ifdef CONFIG_FAIL_MAKE_REQUEST
1239 
1240 static DECLARE_FAULT_ATTR(fail_make_request);
1241 
1242 static int __init setup_fail_make_request(char *str)
1243 {
1244 	return setup_fault_attr(&fail_make_request, str);
1245 }
1246 __setup("fail_make_request=", setup_fail_make_request);
1247 
1248 static int should_fail_request(struct bio *bio)
1249 {
1250 	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
1251 	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
1252 		return should_fail(&fail_make_request, bio->bi_size);
1253 
1254 	return 0;
1255 }
1256 
1257 static int __init fail_make_request_debugfs(void)
1258 {
1259 	return init_fault_attr_dentries(&fail_make_request,
1260 					"fail_make_request");
1261 }
1262 
1263 late_initcall(fail_make_request_debugfs);
1264 
1265 #else /* CONFIG_FAIL_MAKE_REQUEST */
1266 
1267 static inline int should_fail_request(struct bio *bio)
1268 {
1269 	return 0;
1270 }
1271 
1272 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1273 
1274 /*
1275  * Check whether this bio extends beyond the end of the device.
1276  */
1277 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1278 {
1279 	sector_t maxsector;
1280 
1281 	if (!nr_sectors)
1282 		return 0;
1283 
1284 	/* Test device or partition size, when known. */
1285 	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1286 	if (maxsector) {
1287 		sector_t sector = bio->bi_sector;
1288 
1289 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1290 			/*
1291 			 * This may well happen - the kernel calls bread()
1292 			 * without checking the size of the device, e.g., when
1293 			 * mounting a device.
1294 			 */
1295 			handle_bad_sector(bio);
1296 			return 1;
1297 		}
1298 	}
1299 
1300 	return 0;
1301 }
1302 
1303 /**
1304  * generic_make_request - hand a buffer to its device driver for I/O
1305  * @bio:  The bio describing the location in memory and on the device.
1306  *
1307  * generic_make_request() is used to make I/O requests of block
1308  * devices. It is passed a &struct bio, which describes the I/O that needs
1309  * to be done.
1310  *
1311  * generic_make_request() does not return any status.  The
1312  * success/failure status of the request, along with notification of
1313  * completion, is delivered asynchronously through the bio->bi_end_io
1314  * function described (one day) elsewhere.
1315  *
1316  * The caller of generic_make_request must make sure that bi_io_vec
1317  * are set to describe the memory buffer, and that bi_bdev and bi_sector are
1318  * set to describe the device address, and the
1319  * bi_end_io and optionally bi_private are set to describe how
1320  * completion notification should be signaled.
1321  *
1322  * generic_make_request and the drivers it calls may use bi_next if this
1323  * bio happens to be merged with someone else, and may change bi_bdev and
1324  * bi_sector for remaps as it sees fit.  So the values of these fields
1325  * should NOT be depended on after the call to generic_make_request.
1326  */
1327 static inline void __generic_make_request(struct bio *bio)
1328 {
1329 	struct request_queue *q;
1330 	sector_t old_sector;
1331 	int ret, nr_sectors = bio_sectors(bio);
1332 	dev_t old_dev;
1333 	int err = -EIO;
1334 
1335 	might_sleep();
1336 
1337 	if (bio_check_eod(bio, nr_sectors))
1338 		goto end_io;
1339 
1340 	/*
1341 	 * Resolve the mapping until finished. (drivers are
1342 	 * still free to implement/resolve their own stacking
1343 	 * by explicitly returning 0)
1344 	 *
1345 	 * NOTE: we don't repeat the blk_size check for each new device.
1346 	 * Stacking drivers are expected to know what they are doing.
1347 	 */
1348 	old_sector = -1;
1349 	old_dev = 0;
1350 	do {
1351 		char b[BDEVNAME_SIZE];
1352 
1353 		q = bdev_get_queue(bio->bi_bdev);
1354 		if (!q) {
1355 			printk(KERN_ERR
1356 			       "generic_make_request: Trying to access "
1357 				"nonexistent block-device %s (%Lu)\n",
1358 				bdevname(bio->bi_bdev, b),
1359 				(unsigned long long) bio->bi_sector);
1360 end_io:
1361 			bio_endio(bio, err);
1362 			break;
1363 		}
1364 
1365 		if (unlikely(nr_sectors > q->max_hw_sectors)) {
1366 			printk(KERN_ERR "bio too big for device %s (%u > %u)\n",
1367 				bdevname(bio->bi_bdev, b),
1368 				bio_sectors(bio),
1369 				q->max_hw_sectors);
1370 			goto end_io;
1371 		}
1372 
1373 		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1374 			goto end_io;
1375 
1376 		if (should_fail_request(bio))
1377 			goto end_io;
1378 
1379 		/*
1380 		 * If this device has partitions, remap block n
1381 		 * of partition p to block n+start(p) of the disk.
1382 		 */
1383 		blk_partition_remap(bio);
1384 
1385 		if (old_sector != -1)
1386 			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
1387 					    old_sector);
1388 
1389 		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
1390 
1391 		old_sector = bio->bi_sector;
1392 		old_dev = bio->bi_bdev->bd_dev;
1393 
1394 		if (bio_check_eod(bio, nr_sectors))
1395 			goto end_io;
1396 		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
1397 			err = -EOPNOTSUPP;
1398 			goto end_io;
1399 		}
1400 
1401 		ret = q->make_request_fn(q, bio);
1402 	} while (ret);
1403 }
1404 
1405 /*
1406  * We only want one ->make_request_fn to be active at a time,
1407  * else stack usage with stacked devices could be a problem.
1408  * So use current->bio_{list,tail} to keep a list of requests
1409  * submitted by a make_request_fn function.
1410  * current->bio_tail is also used as a flag to say if
1411  * generic_make_request is currently active in this task or not.
1412  * If it is NULL, then no make_request is active.  If it is non-NULL,
1413  * then a make_request is active, and new requests should be added
1414  * at the tail
1415  */
1416 void generic_make_request(struct bio *bio)
1417 {
1418 	if (current->bio_tail) {
1419 		/* make_request is active */
1420 		*(current->bio_tail) = bio;
1421 		bio->bi_next = NULL;
1422 		current->bio_tail = &bio->bi_next;
1423 		return;
1424 	}
1425 	/* following loop may be a bit non-obvious, and so deserves some
1426 	 * explanation.
1427 	 * Before entering the loop, bio->bi_next is NULL (as all callers
1428 	 * ensure that) so we have a list with a single bio.
1429 	 * We pretend that we have just taken it off a longer list, so
1430 	 * we assign bio_list to the next (which is NULL) and bio_tail
1431 	 * to &bio_list, thus initialising the bio_list of new bios to be
1432 	 * added.  __generic_make_request may indeed add some more bios
1433 	 * through a recursive call to generic_make_request.  If it
1434 	 * did, we find a non-NULL value in bio_list and re-enter the loop
1435 	 * from the top.  In this case we really did just take the bio
1436 	 * off the top of the list (no pretending) and so fix up bio_list and
1437 	 * bio_tail or bi_next, and call into __generic_make_request again.
1438 	 *
1439 	 * The loop was structured like this to make only one call to
1440 	 * __generic_make_request (which is important as it is large and
1441 	 * inlined) and to keep the structure simple.
1442 	 */
1443 	BUG_ON(bio->bi_next);
1444 	do {
1445 		current->bio_list = bio->bi_next;
1446 		if (bio->bi_next == NULL)
1447 			current->bio_tail = &current->bio_list;
1448 		else
1449 			bio->bi_next = NULL;
1450 		__generic_make_request(bio);
1451 		bio = current->bio_list;
1452 	} while (bio);
1453 	current->bio_tail = NULL; /* deactivate */
1454 }
1455 EXPORT_SYMBOL(generic_make_request);
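
/*
 * Worked trace of the loop above (illustrative): suppose a stacking
 * driver's make_request_fn, while handling bio P, submits two child
 * bios B1 and B2:
 *
 *	generic_make_request(P)
 *	  bio_tail is NULL, so P is handled directly
 *	  __generic_make_request(P)
 *	    driver calls generic_make_request(B1) and (B2); bio_tail is
 *	    now non-NULL, so both are merely appended to current->bio_list
 *	  loop picks up B1, then B2, calling __generic_make_request on each
 *	  list empty -> bio_tail reset to NULL
 *
 * Recursion depth therefore stays at one regardless of stacking depth.
 */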
1456 
1457 /**
1458  * submit_bio - submit a bio to the block device layer for I/O
1459  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1460  * @bio: The &struct bio which describes the I/O
1461  *
1462  * submit_bio() is very similar in purpose to generic_make_request(), and
1463  * uses that function to do most of the work. Both are fairly rough
1464  * interfaces, @bio must be presetup and ready for I/O.
1465  *
1466  */
1467 void submit_bio(int rw, struct bio *bio)
1468 {
1469 	int count = bio_sectors(bio);
1470 
1471 	bio->bi_rw |= rw;
1472 
1473 	/*
1474 	 * If it's a regular read/write or a barrier with data attached,
1475 	 * go through the normal accounting stuff before submission.
1476 	 */
1477 	if (!bio_empty_barrier(bio)) {
1478 
1479 		BIO_BUG_ON(!bio->bi_size);
1480 		BIO_BUG_ON(!bio->bi_io_vec);
1481 
1482 		if (rw & WRITE) {
1483 			count_vm_events(PGPGOUT, count);
1484 		} else {
1485 			task_io_account_read(bio->bi_size);
1486 			count_vm_events(PGPGIN, count);
1487 		}
1488 
1489 		if (unlikely(block_dump)) {
1490 			char b[BDEVNAME_SIZE];
1491 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
1492 				current->comm, task_pid_nr(current),
1493 				(rw & WRITE) ? "WRITE" : "READ",
1494 				(unsigned long long)bio->bi_sector,
1495 				bdevname(bio->bi_bdev, b));
1496 		}
1497 	}
1498 
1499 	generic_make_request(bio);
1500 }
1501 EXPORT_SYMBOL(submit_bio);
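
/*
 * Illustrative sketch (bdev, sector, page, my_end_io and my_cookie are
 * assumed to exist in the caller): building and submitting a
 * single-page read:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = my_cookie;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(READ, bio);
 *
 * Success or failure is reported asynchronously through my_end_io(), as
 * the generic_make_request() documentation above describes.
 */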
1502 
1503 /**
1504  * __end_that_request_first - end I/O on a request
1505  * @req:      the request being processed
1506  * @error:    0 for success, < 0 for error
1507  * @nr_bytes: number of bytes to complete
1508  *
1509  * Description:
1510  *     Ends I/O on a number of bytes attached to @req, and sets it up
1511  *     for the next range of segments (if any) in the cluster.
1512  *
1513  * Return:
1514  *     0 - we are done with this request, call end_that_request_last()
1515  *     1 - still buffers pending for this request
1516  **/
1517 static int __end_that_request_first(struct request *req, int error,
1518 				    int nr_bytes)
1519 {
1520 	int total_bytes, bio_nbytes, next_idx = 0;
1521 	struct bio *bio;
1522 
1523 	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
1524 
1525 	/*
1526 	 * for a REQ_BLOCK_PC request, we want to carry any eventual
1527 	 * sense key with us all the way through
1528 	 */
1529 	if (!blk_pc_request(req))
1530 		req->errors = 0;
1531 
1532 	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1533 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1534 				req->rq_disk ? req->rq_disk->disk_name : "?",
1535 				(unsigned long long)req->sector);
1536 	}
1537 
1538 	if (blk_fs_request(req) && req->rq_disk) {
1539 		const int rw = rq_data_dir(req);
1540 
1541 		all_stat_add(req->rq_disk, sectors[rw],
1542 			     nr_bytes >> 9, req->sector);
1543 	}
1544 
1545 	total_bytes = bio_nbytes = 0;
1546 	while ((bio = req->bio) != NULL) {
1547 		int nbytes;
1548 
1549 		/*
1550 		 * For an empty barrier request, the low level driver must
1551 		 * store a potential error location in ->sector. We pass
1552 		 * that back up in ->bi_sector.
1553 		 */
1554 		if (blk_empty_barrier(req))
1555 			bio->bi_sector = req->sector;
1556 
1557 		if (nr_bytes >= bio->bi_size) {
1558 			req->bio = bio->bi_next;
1559 			nbytes = bio->bi_size;
1560 			req_bio_endio(req, bio, nbytes, error);
1561 			next_idx = 0;
1562 			bio_nbytes = 0;
1563 		} else {
1564 			int idx = bio->bi_idx + next_idx;
1565 
1566 			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
1567 				blk_dump_rq_flags(req, "__end_that");
1568 				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
1569 						__func__, bio->bi_idx,
1570 						bio->bi_vcnt);
1571 				break;
1572 			}
1573 
1574 			nbytes = bio_iovec_idx(bio, idx)->bv_len;
1575 			BIO_BUG_ON(nbytes > bio->bi_size);
1576 
1577 			/*
1578 			 * not a complete bvec done
1579 			 */
1580 			if (unlikely(nbytes > nr_bytes)) {
1581 				bio_nbytes += nr_bytes;
1582 				total_bytes += nr_bytes;
1583 				break;
1584 			}
1585 
1586 			/*
1587 			 * advance to the next vector
1588 			 */
1589 			next_idx++;
1590 			bio_nbytes += nbytes;
1591 		}
1592 
1593 		total_bytes += nbytes;
1594 		nr_bytes -= nbytes;
1595 
1596 		bio = req->bio;
1597 		if (bio) {
1598 			/*
1599 			 * end more in this run, or just return 'not-done'
1600 			 */
1601 			if (unlikely(nr_bytes <= 0))
1602 				break;
1603 		}
1604 	}
1605 
1606 	/*
1607 	 * completely done
1608 	 */
1609 	if (!req->bio)
1610 		return 0;
1611 
1612 	/*
1613 	 * if the request wasn't completed, update state
1614 	 */
1615 	if (bio_nbytes) {
1616 		req_bio_endio(req, bio, bio_nbytes, error);
1617 		bio->bi_idx += next_idx;
1618 		bio_iovec(bio)->bv_offset += nr_bytes;
1619 		bio_iovec(bio)->bv_len -= nr_bytes;
1620 	}
1621 
1622 	blk_recalc_rq_sectors(req, total_bytes >> 9);
1623 	blk_recalc_rq_segments(req);
1624 	return 1;
1625 }
1626 
1627 /*
1628  * splice the completion data to a local structure and complete the
1629  * requests from there, via each queue's ->softirq_done_fn
1630  */
1631 static void blk_done_softirq(struct softirq_action *h)
1632 {
1633 	struct list_head *cpu_list, local_list;
1634 
1635 	local_irq_disable();
1636 	cpu_list = &__get_cpu_var(blk_cpu_done);
1637 	list_replace_init(cpu_list, &local_list);
1638 	local_irq_enable();
1639 
1640 	while (!list_empty(&local_list)) {
1641 		struct request *rq;
1642 
1643 		rq = list_entry(local_list.next, struct request, donelist);
1644 		list_del_init(&rq->donelist);
1645 		rq->q->softirq_done_fn(rq);
1646 	}
1647 }
1648 
1649 static int __cpuinit blk_cpu_notify(struct notifier_block *self,
1650 				    unsigned long action, void *hcpu)
1651 {
1652 	/*
1653 	 * If a CPU goes away, splice its entries to the current CPU
1654 	 * and trigger a run of the softirq
1655 	 */
1656 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1657 		int cpu = (unsigned long) hcpu;
1658 
1659 		local_irq_disable();
1660 		list_splice_init(&per_cpu(blk_cpu_done, cpu),
1661 				 &__get_cpu_var(blk_cpu_done));
1662 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
1663 		local_irq_enable();
1664 	}
1665 
1666 	return NOTIFY_OK;
1667 }
1668 
1669 
1670 static struct notifier_block blk_cpu_notifier __cpuinitdata = {
1671 	.notifier_call	= blk_cpu_notify,
1672 };
1673 
1674 /**
1675  * blk_complete_request - end I/O on a request
1676  * @req:      the request being processed
1677  *
1678  * Description:
1679  *     Ends all I/O on a request. It does not handle partial completions,
1680  *     unless the driver actually implements this in its completion callback
1681  *     through requeueing. The actual completion happens out-of-order,
1682  *     through a softirq handler. The user must have registered a completion
1683  *     callback through blk_queue_softirq_done().
1684  **/
1685 
1686 void blk_complete_request(struct request *req)
1687 {
1688 	struct list_head *cpu_list;
1689 	unsigned long flags;
1690 
1691 	BUG_ON(!req->q->softirq_done_fn);
1692 
1693 	local_irq_save(flags);
1694 
1695 	cpu_list = &__get_cpu_var(blk_cpu_done);
1696 	list_add_tail(&req->donelist, cpu_list);
1697 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
1698 
1699 	local_irq_restore(flags);
1700 }
1701 EXPORT_SYMBOL(blk_complete_request);
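
/*
 * Illustrative usage (hypothetical my_* driver names): register a softirq
 * completion handler once at setup time, then defer the bulk of the
 * completion work out of the hard IRQ handler:
 *
 *	blk_queue_softirq_done(q, my_softirq_done);	(at init)
 *
 *	static irqreturn_t my_isr(int irq, void *data)
 *	{
 *		struct request *rq = my_fetch_completed(data);
 *
 *		blk_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 *
 * my_softirq_done(rq) then runs from BLOCK_SOFTIRQ context and typically
 * ends the request with blk_end_request().
 */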
1702 
1703 /*
1704  * queue lock must be held
1705  */
1706 static void end_that_request_last(struct request *req, int error)
1707 {
1708 	struct gendisk *disk = req->rq_disk;
1709 
1710 	if (blk_rq_tagged(req))
1711 		blk_queue_end_tag(req->q, req);
1712 
1713 	if (blk_queued_rq(req))
1714 		blkdev_dequeue_request(req);
1715 
1716 	if (unlikely(laptop_mode) && blk_fs_request(req))
1717 		laptop_io_completion();
1718 
1719 	/*
1720 	 * Account IO completion.  bar_rq isn't accounted as a normal
1721 	 * IO on queueing nor completion.  Accounting the containing
1722 	 * request is enough.
1723 	 */
1724 	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
1725 		unsigned long duration = jiffies - req->start_time;
1726 		const int rw = rq_data_dir(req);
1727 		struct hd_struct *part = get_part(disk, req->sector);
1728 
1729 		__all_stat_inc(disk, ios[rw], req->sector);
1730 		__all_stat_add(disk, ticks[rw], duration, req->sector);
1731 		disk_round_stats(disk);
1732 		disk->in_flight--;
1733 		if (part) {
1734 			part_round_stats(part);
1735 			part->in_flight--;
1736 		}
1737 	}
1738 
1739 	if (req->end_io)
1740 		req->end_io(req, error);
1741 	else {
1742 		if (blk_bidi_rq(req))
1743 			__blk_put_request(req->next_rq->q, req->next_rq);
1744 
1745 		__blk_put_request(req->q, req);
1746 	}
1747 }
1748 
1749 static inline void __end_request(struct request *rq, int uptodate,
1750 				 unsigned int nr_bytes)
1751 {
1752 	int error = 0;
1753 
1754 	if (uptodate <= 0)
1755 		error = uptodate ? uptodate : -EIO;
1756 
1757 	__blk_end_request(rq, error, nr_bytes);
1758 }
1759 
1760 /**
1761  * blk_rq_bytes - Returns bytes left to complete in the entire request
1762  * @rq: the request being processed
1763  **/
1764 unsigned int blk_rq_bytes(struct request *rq)
1765 {
1766 	if (blk_fs_request(rq))
1767 		return rq->hard_nr_sectors << 9;
1768 
1769 	return rq->data_len;
1770 }
1771 EXPORT_SYMBOL_GPL(blk_rq_bytes);
1772 
1773 /**
1774  * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
1775  * @rq: the request being processed
1776  **/
1777 unsigned int blk_rq_cur_bytes(struct request *rq)
1778 {
1779 	if (blk_fs_request(rq))
1780 		return rq->current_nr_sectors << 9;
1781 
1782 	if (rq->bio)
1783 		return rq->bio->bi_size;
1784 
1785 	return rq->data_len;
1786 }
1787 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
1788 
1789 /**
1790  * end_queued_request - end all I/O on a queued request
1791  * @rq:		the request being processed
1792  * @uptodate:	error value or 0/1 uptodate flag
1793  *
1794  * Description:
1795  *     Ends all I/O on a request, and removes it from the block layer queues.
1796  *     Not suitable for normal IO completion, unless the driver still has
1797  *     the request attached to the block layer.
1798  *
1799  **/
1800 void end_queued_request(struct request *rq, int uptodate)
1801 {
1802 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1803 }
1804 EXPORT_SYMBOL(end_queued_request);
1805 
1806 /**
1807  * end_dequeued_request - end all I/O on a dequeued request
1808  * @rq:		the request being processed
1809  * @uptodate:	error value or 0/1 uptodate flag
1810  *
1811  * Description:
1812  *     Ends all I/O on a request. The request must already have been
1813  *     dequeued using blkdev_dequeue_request(), as is normally the case
1814  *     for most drivers.
1815  *
1816  **/
1817 void end_dequeued_request(struct request *rq, int uptodate)
1818 {
1819 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1820 }
1821 EXPORT_SYMBOL(end_dequeued_request);
1822 
1823 
1824 /**
1825  * end_request - end I/O on the current segment of the request
1826  * @req:	the request being processed
1827  * @uptodate:	error value or 0/1 uptodate flag
1828  *
1829  * Description:
1830  *     Ends I/O on the current segment of a request. If that is the only
1831  *     remaining segment, the request is also completed and freed.
1832  *
1833  *     This is a remnant of how older block drivers handled IO completions.
1834  *     Modern drivers typically end IO on the full request in one go, unless
1835  *     they have a residual value to account for. For that case this function
1836  *     isn't really useful, unless the residual just happens to be the
1837  *     full current segment. In other words, don't use this function in new
1838  *     code. Either use end_request_completely(), or the
1839  *     end_that_request_chunk() (along with end_that_request_last()) for
1840  *     partial completions.
1841  *
1842  **/
1843 void end_request(struct request *req, int uptodate)
1844 {
1845 	__end_request(req, uptodate, req->hard_cur_sectors << 9);
1846 }
1847 EXPORT_SYMBOL(end_request);
1848 
1849 /**
1850  * blk_end_io - Generic end_io function to complete a request.
1851  * @rq:           the request being processed
1852  * @error:        0 for success, < 0 for error
1853  * @nr_bytes:     number of bytes to complete @rq
1854  * @bidi_bytes:   number of bytes to complete @rq->next_rq
1855  * @drv_callback: function called between completion of bios in the request
1856  *                and completion of the request.
1857  *                If the callback returns non-zero, this helper returns without
1858  *                completion of the request.
1859  *
1860  * Description:
1861  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1862  *     If @rq has bytes left over, it is set up for the next range of segments.
1863  *
1864  * Return:
1865  *     0 - we are done with this request
1866  *     1 - this request is not freed yet, it still has pending buffers.
1867  **/
1868 static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1869 		      unsigned int bidi_bytes,
1870 		      int (drv_callback)(struct request *))
1871 {
1872 	struct request_queue *q = rq->q;
1873 	unsigned long flags = 0UL;
1874 
1875 	if (blk_fs_request(rq) || blk_pc_request(rq)) {
1876 		if (__end_that_request_first(rq, error, nr_bytes))
1877 			return 1;
1878 
1879 		/* Bidi request must be completed as a whole */
1880 		if (blk_bidi_rq(rq) &&
1881 		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
1882 			return 1;
1883 	}
1884 
1885 	/* Special feature for tricky drivers */
1886 	if (drv_callback && drv_callback(rq))
1887 		return 1;
1888 
1889 	add_disk_randomness(rq->rq_disk);
1890 
1891 	spin_lock_irqsave(q->queue_lock, flags);
1892 	end_that_request_last(rq, error);
1893 	spin_unlock_irqrestore(q->queue_lock, flags);
1894 
1895 	return 0;
1896 }
1897 
1898 /**
1899  * blk_end_request - Helper function for drivers to complete the request.
1900  * @rq:       the request being processed
1901  * @error:    0 for success, < 0 for error
1902  * @nr_bytes: number of bytes to complete
1903  *
1904  * Description:
1905  *     Ends I/O on a number of bytes attached to @rq.
1906  *     If @rq has bytes left over, it is set up for the next range of segments.
1907  *
1908  * Return:
1909  *     0 - we are done with this request
1910  *     1 - still buffers pending for this request
1911  **/
1912 int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1913 {
1914 	return blk_end_io(rq, error, nr_bytes, 0, NULL);
1915 }
1916 EXPORT_SYMBOL_GPL(blk_end_request);
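
/*
 * Example (illustrative sketch): from a context that does not hold the queue
 * lock (the helper takes it internally), a driver can finish the whole
 * request in one call:
 *
 *	if (blk_end_request(rq, error, blk_rq_bytes(rq)))
 *		(request not done yet, buffers still pending)
 */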
1917 
1918 /**
1919  * __blk_end_request - Helper function for drivers to complete the request.
1920  * @rq:       the request being processed
1921  * @error:    0 for success, < 0 for error
1922  * @nr_bytes: number of bytes to complete
1923  *
1924  * Description:
1925  *     Must be called with queue lock held unlike blk_end_request().
1926  *
1927  * Return:
1928  *     0 - we are done with this request
1929  *     1 - still buffers pending for this request
1930  **/
1931 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1932 {
1933 	if (blk_fs_request(rq) || blk_pc_request(rq)) {
1934 		if (__end_that_request_first(rq, error, nr_bytes))
1935 			return 1;
1936 	}
1937 
1938 	add_disk_randomness(rq->rq_disk);
1939 
1940 	end_that_request_last(rq, error);
1941 
1942 	return 0;
1943 }
1944 EXPORT_SYMBOL_GPL(__blk_end_request);
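
/*
 * Example (illustrative sketch): from a context that already holds
 * q->queue_lock, for instance the driver's request function, use this
 * variant so the lock is not taken twice:
 *
 *	spin_lock_irq(q->queue_lock);	(or the lock is already held)
 *	__blk_end_request(rq, error, blk_rq_bytes(rq));
 *	spin_unlock_irq(q->queue_lock);
 */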
1945 
1946 /**
1947  * blk_end_bidi_request - Helper function for drivers to complete bidi request.
1948  * @rq:         the bidi request being processed
1949  * @error:      0 for success, < 0 for error
1950  * @nr_bytes:   number of bytes to complete @rq
1951  * @bidi_bytes: number of bytes to complete @rq->next_rq
1952  *
1953  * Description:
1954  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1955  *
1956  * Return:
1957  *     0 - we are done with this request
1958  *     1 - still buffers pending for this request
1959  **/
1960 int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
1961 			 unsigned int bidi_bytes)
1962 {
1963 	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
1964 }
1965 EXPORT_SYMBOL_GPL(blk_end_bidi_request);
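
/*
 * Example (illustrative sketch): a driver handling a bidirectional request,
 * i.e. one with a valid @rq->next_rq as used by some SCSI commands, completes
 * both directions in one call:
 *
 *	blk_end_bidi_request(rq, error, blk_rq_bytes(rq),
 *			     blk_rq_bytes(rq->next_rq));
 */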
1966 
1967 /**
1968  * blk_end_request_callback - Special helper function for tricky drivers
1969  * @rq:           the request being processed
1970  * @error:        0 for success, < 0 for error
1971  * @nr_bytes:     number of bytes to complete
1972  * @drv_callback: function called between completion of bios in the request
1973  *                and completion of the request.
1974  *                If the callback returns non-zero, this helper returns without
1975  *                completion of the request.
1976  *
1977  * Description:
1978  *     Ends I/O on a number of bytes attached to @rq.
1979  *     If @rq has bytes left over, it is set up for the next range of segments.
1980  *
1981  *     This special helper function is used only for existing tricky drivers
1982  *     (e.g. cdrom_newpc_intr() of ide-cd).
1983  *     This interface will be removed once such drivers have been rewritten;
1984  *     don't use it anywhere else.
1985  *
1986  * Return:
1987  *     0 - we are done with this request
1988  *     1 - this request is not freed yet.
1989  *         this request still has pending buffers or
1990  *         the driver doesn't want to finish this request yet.
1991  **/
1992 int blk_end_request_callback(struct request *rq, int error,
1993 			     unsigned int nr_bytes,
1994 			     int (drv_callback)(struct request *))
1995 {
1996 	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
1997 }
1998 EXPORT_SYMBOL_GPL(blk_end_request_callback);
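
/*
 * Example (illustrative only; the interface above is deprecated): a callback
 * that returns non-zero makes blk_end_io() stop before end_that_request_last(),
 * so the request stays alive for the driver to finish later. "nbytes" is a
 * placeholder for however many bytes the driver has transferred:
 *
 *	static int keep_request(struct request *rq)
 *	{
 *		return 1;	(the driver will complete rq itself later)
 *	}
 *
 *	blk_end_request_callback(rq, 0, nbytes, keep_request);
 */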
1999 
2000 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2001 		     struct bio *bio)
2002 {
2003 	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
2004 	rq->cmd_flags |= (bio->bi_rw & 3);
2005 
2006 	rq->nr_phys_segments = bio_phys_segments(q, bio);
2007 	rq->nr_hw_segments = bio_hw_segments(q, bio);
2008 	rq->current_nr_sectors = bio_cur_sectors(bio);
2009 	rq->hard_cur_sectors = rq->current_nr_sectors;
2010 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2011 	rq->buffer = bio_data(bio);
2012 	rq->data_len = bio->bi_size;
2013 
2014 	rq->bio = rq->biotail = bio;
2015 
2016 	if (bio->bi_bdev)
2017 		rq->rq_disk = bio->bi_bdev->bd_disk;
2018 }
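
/*
 * Example (illustrative sketch): internal callers, for instance
 * blk_rq_map_kern(), pair a freshly allocated request with an already
 * set-up bio roughly like this:
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	blk_rq_bio_prep(q, rq, bio);
 */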
2019 
2020 int kblockd_schedule_work(struct work_struct *work)
2021 {
2022 	return queue_work(kblockd_workqueue, work);
2023 }
2024 EXPORT_SYMBOL(kblockd_schedule_work);
2025 
2026 void kblockd_flush_work(struct work_struct *work)
2027 {
2028 	cancel_work_sync(work);
2029 }
2030 EXPORT_SYMBOL(kblockd_flush_work);
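
/*
 * Example (illustrative sketch, "my_unplug_fn" is a hypothetical handler):
 * block-layer internals defer work to the kblockd thread using a plain
 * work_struct:
 *
 *	static void my_unplug_fn(struct work_struct *work) { ... }
 *	static DECLARE_WORK(my_work, my_unplug_fn);
 *
 *	kblockd_schedule_work(&my_work);
 *	...
 *	kblockd_flush_work(&my_work);	(wait until it has run)
 */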
2031 
2032 int __init blk_dev_init(void)
2033 {
2034 	int i;
2035 
2036 	kblockd_workqueue = create_workqueue("kblockd");
2037 	if (!kblockd_workqueue)
2038 		panic("Failed to create kblockd\n");
2039 
2040 	request_cachep = kmem_cache_create("blkdev_requests",
2041 			sizeof(struct request), 0, SLAB_PANIC, NULL);
2042 
2043 	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2044 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2045 
2046 	for_each_possible_cpu(i)
2047 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
2048 
2049 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
2050 	register_hotcpu_notifier(&blk_cpu_notifier);
2051 
2052 	return 0;
2053 }
2054 
2055