xref: /linux/block/blk-core.c (revision ed3174d93c342b8b2eeba6bbd124707d55304a7b)
1 /*
2  * Copyright (C) 1991, 1992 Linus Torvalds
3  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
4  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
5  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7  *	-  July2000
8  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
9  */
10 
11 /*
12  * This handles all read/write requests to block devices
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/highmem.h>
20 #include <linux/mm.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/string.h>
23 #include <linux/init.h>
24 #include <linux/completion.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
28 #include <linux/task_io_accounting_ops.h>
29 #include <linux/interrupt.h>
30 #include <linux/cpu.h>
31 #include <linux/blktrace_api.h>
32 #include <linux/fault-inject.h>
33 
34 #include "blk.h"
35 
36 static int __make_request(struct request_queue *q, struct bio *bio);
37 
38 /*
39  * For the allocated request tables
40  */
41 struct kmem_cache *request_cachep;
42 
43 /*
44  * For queue allocation
45  */
46 struct kmem_cache *blk_requestq_cachep;
47 
48 /*
49  * Controlling structure to kblockd
50  */
51 static struct workqueue_struct *kblockd_workqueue;
52 
53 static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
54 
55 static void drive_stat_acct(struct request *rq, int new_io)
56 {
57 	int rw = rq_data_dir(rq);
58 
59 	if (!blk_fs_request(rq) || !rq->rq_disk)
60 		return;
61 
62 	if (!new_io) {
63 		__all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
64 	} else {
65 		struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
66 		disk_round_stats(rq->rq_disk);
67 		rq->rq_disk->in_flight++;
68 		if (part) {
69 			part_round_stats(part);
70 			part->in_flight++;
71 		}
72 	}
73 }
74 
75 void blk_queue_congestion_threshold(struct request_queue *q)
76 {
77 	int nr;
78 
79 	nr = q->nr_requests - (q->nr_requests / 8) + 1;
80 	if (nr > q->nr_requests)
81 		nr = q->nr_requests;
82 	q->nr_congestion_on = nr;
83 
84 	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
85 	if (nr < 1)
86 		nr = 1;
87 	q->nr_congestion_off = nr;
88 }
89 
90 /**
91  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
92  * @bdev:	device
93  *
94  * Locates the passed device's request queue and returns the address of its
95  * backing_dev_info
96  *
97  * Will return NULL if the request queue cannot be located.
98  */
99 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
100 {
101 	struct backing_dev_info *ret = NULL;
102 	struct request_queue *q = bdev_get_queue(bdev);
103 
104 	if (q)
105 		ret = &q->backing_dev_info;
106 	return ret;
107 }
108 EXPORT_SYMBOL(blk_get_backing_dev_info);
109 
110 /*
111  * We can't just memset() the structure, since the allocation path
112  * already stored some information in the request.
113  */
114 void rq_init(struct request_queue *q, struct request *rq)
115 {
116 	INIT_LIST_HEAD(&rq->queuelist);
117 	INIT_LIST_HEAD(&rq->donelist);
118 	rq->q = q;
119 	rq->sector = rq->hard_sector = (sector_t) -1;
120 	rq->nr_sectors = rq->hard_nr_sectors = 0;
121 	rq->current_nr_sectors = rq->hard_cur_sectors = 0;
122 	rq->bio = rq->biotail = NULL;
123 	INIT_HLIST_NODE(&rq->hash);
124 	RB_CLEAR_NODE(&rq->rb_node);
125 	rq->rq_disk = NULL;
126 	rq->nr_phys_segments = 0;
127 	rq->nr_hw_segments = 0;
128 	rq->ioprio = 0;
129 	rq->special = NULL;
130 	rq->buffer = NULL;
131 	rq->tag = -1;
132 	rq->errors = 0;
133 	rq->ref_count = 1;
134 	rq->cmd_len = 0;
135 	memset(rq->cmd, 0, sizeof(rq->cmd));
136 	rq->data_len = 0;
137 	rq->sense_len = 0;
138 	rq->data = NULL;
139 	rq->sense = NULL;
140 	rq->end_io = NULL;
141 	rq->end_io_data = NULL;
142 	rq->next_rq = NULL;
143 }
144 
145 static void req_bio_endio(struct request *rq, struct bio *bio,
146 			  unsigned int nbytes, int error)
147 {
148 	struct request_queue *q = rq->q;
149 
150 	if (&q->bar_rq != rq) {
151 		if (error)
152 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
153 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
154 			error = -EIO;
155 
156 		if (unlikely(nbytes > bio->bi_size)) {
157 			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
158 			       __FUNCTION__, nbytes, bio->bi_size);
159 			nbytes = bio->bi_size;
160 		}
161 
162 		bio->bi_size -= nbytes;
163 		bio->bi_sector += (nbytes >> 9);
164 		if (bio->bi_size == 0)
165 			bio_endio(bio, error);
166 	} else {
167 
168 		/*
169 		 * Okay, this is the barrier request in progress, just
170 		 * record the error;
171 		 */
172 		if (error && !q->orderr)
173 			q->orderr = error;
174 	}
175 }
176 
177 void blk_dump_rq_flags(struct request *rq, char *msg)
178 {
179 	int bit;
180 
181 	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
182 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
183 		rq->cmd_flags);
184 
185 	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
186 						(unsigned long long)rq->sector,
187 						rq->nr_sectors,
188 						rq->current_nr_sectors);
189 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
190 						rq->bio, rq->biotail,
191 						rq->buffer, rq->data,
192 						rq->data_len);
193 
194 	if (blk_pc_request(rq)) {
195 		printk(KERN_INFO "  cdb: ");
196 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
197 			printk("%02x ", rq->cmd[bit]);
198 		printk("\n");
199 	}
200 }
201 EXPORT_SYMBOL(blk_dump_rq_flags);
202 
203 /*
204  * "plug" the device if there are no outstanding requests: this will
205  * force the transfer to start only after we have put all the requests
206  * on the list.
207  *
208  * This is called with interrupts off and no requests on the queue and
209  * with the queue lock held.
210  */
211 void blk_plug_device(struct request_queue *q)
212 {
213 	WARN_ON(!irqs_disabled());
214 
215 	/*
216 	 * don't plug a stopped queue, it must be paired with blk_start_queue()
217 	 * which will restart the queueing
218 	 */
219 	if (blk_queue_stopped(q))
220 		return;
221 
222 	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
223 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
224 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
225 	}
226 }
227 EXPORT_SYMBOL(blk_plug_device);
228 
229 /*
230  * remove the queue from the plugged list, if present. called with
231  * queue lock held and interrupts disabled.
232  */
233 int blk_remove_plug(struct request_queue *q)
234 {
235 	WARN_ON(!irqs_disabled());
236 
237 	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
238 		return 0;
239 
240 	del_timer(&q->unplug_timer);
241 	return 1;
242 }
243 EXPORT_SYMBOL(blk_remove_plug);
244 
245 /*
246  * remove the plug and let it rip..
247  */
248 void __generic_unplug_device(struct request_queue *q)
249 {
250 	if (unlikely(blk_queue_stopped(q)))
251 		return;
252 
253 	if (!blk_remove_plug(q))
254 		return;
255 
256 	q->request_fn(q);
257 }
258 EXPORT_SYMBOL(__generic_unplug_device);
259 
260 /**
261  * generic_unplug_device - fire a request queue
262  * @q:    The &struct request_queue in question
263  *
264  * Description:
265  *   Linux uses plugging to build bigger requests queues before letting
266  *   the device have at them. If a queue is plugged, the I/O scheduler
267  *   is still adding and merging requests on the queue. Once the queue
268  *   gets unplugged, the request_fn defined for the queue is invoked and
269  *   transfers started.
270  **/
271 void generic_unplug_device(struct request_queue *q)
272 {
273 	spin_lock_irq(q->queue_lock);
274 	__generic_unplug_device(q);
275 	spin_unlock_irq(q->queue_lock);
276 }
277 EXPORT_SYMBOL(generic_unplug_device);
278 
279 static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
280 				   struct page *page)
281 {
282 	struct request_queue *q = bdi->unplug_io_data;
283 
284 	blk_unplug(q);
285 }
286 
287 void blk_unplug_work(struct work_struct *work)
288 {
289 	struct request_queue *q =
290 		container_of(work, struct request_queue, unplug_work);
291 
292 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
293 				q->rq.count[READ] + q->rq.count[WRITE]);
294 
295 	q->unplug_fn(q);
296 }
297 
298 void blk_unplug_timeout(unsigned long data)
299 {
300 	struct request_queue *q = (struct request_queue *)data;
301 
302 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
303 				q->rq.count[READ] + q->rq.count[WRITE]);
304 
305 	kblockd_schedule_work(&q->unplug_work);
306 }
307 
308 void blk_unplug(struct request_queue *q)
309 {
310 	/*
311 	 * devices don't necessarily have an ->unplug_fn defined
312 	 */
313 	if (q->unplug_fn) {
314 		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
315 					q->rq.count[READ] + q->rq.count[WRITE]);
316 
317 		q->unplug_fn(q);
318 	}
319 }
320 EXPORT_SYMBOL(blk_unplug);
321 
322 /**
323  * blk_start_queue - restart a previously stopped queue
324  * @q:    The &struct request_queue in question
325  *
326  * Description:
327  *   blk_start_queue() will clear the stop flag on the queue, and call
328  *   the request_fn for the queue if it was in a stopped state when
329  *   entered. Also see blk_stop_queue(). Queue lock must be held.
330  **/
331 void blk_start_queue(struct request_queue *q)
332 {
333 	WARN_ON(!irqs_disabled());
334 
335 	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
336 
337 	/*
338 	 * one level of recursion is ok and is much faster than kicking
339 	 * the unplug handling
340 	 */
341 	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
342 		q->request_fn(q);
343 		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
344 	} else {
345 		blk_plug_device(q);
346 		kblockd_schedule_work(&q->unplug_work);
347 	}
348 }
349 EXPORT_SYMBOL(blk_start_queue);
350 
351 /**
352  * blk_stop_queue - stop a queue
353  * @q:    The &struct request_queue in question
354  *
355  * Description:
356  *   The Linux block layer assumes that a block driver will consume all
357  *   entries on the request queue when the request_fn strategy is called.
358  *   Often this will not happen, because of hardware limitations (queue
359  *   depth settings). If a device driver gets a 'queue full' response,
360  *   or if it simply chooses not to queue more I/O at one point, it can
361  *   call this function to prevent the request_fn from being called until
362  *   the driver has signalled it's ready to go again. This happens by calling
363  *   blk_start_queue() to restart queue operations. Queue lock must be held.
364  **/
365 void blk_stop_queue(struct request_queue *q)
366 {
367 	blk_remove_plug(q);
368 	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
369 }
370 EXPORT_SYMBOL(blk_stop_queue);
371 
372 /**
373  * blk_sync_queue - cancel any pending callbacks on a queue
374  * @q: the queue
375  *
376  * Description:
377  *     The block layer may perform asynchronous callback activity
378  *     on a queue, such as calling the unplug function after a timeout.
379  *     A block device may call blk_sync_queue to ensure that any
380  *     such activity is cancelled, thus allowing it to release resources
381  *     that the callbacks might use. The caller must already have made sure
382  *     that its ->make_request_fn will not re-add plugging prior to calling
383  *     this function.
384  *
385  */
386 void blk_sync_queue(struct request_queue *q)
387 {
388 	del_timer_sync(&q->unplug_timer);
389 	kblockd_flush_work(&q->unplug_work);
390 }
391 EXPORT_SYMBOL(blk_sync_queue);
392 
393 /**
394  * blk_run_queue - run a single device queue
395  * @q:	The queue to run
396  */
397 void blk_run_queue(struct request_queue *q)
398 {
399 	unsigned long flags;
400 
401 	spin_lock_irqsave(q->queue_lock, flags);
402 	blk_remove_plug(q);
403 
404 	/*
405 	 * Only recurse once to avoid overrunning the stack, let the unplug
406 	 * handling reinvoke the handler shortly if we already got there.
407 	 */
408 	if (!elv_queue_empty(q)) {
409 		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
410 			q->request_fn(q);
411 			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
412 		} else {
413 			blk_plug_device(q);
414 			kblockd_schedule_work(&q->unplug_work);
415 		}
416 	}
417 
418 	spin_unlock_irqrestore(q->queue_lock, flags);
419 }
420 EXPORT_SYMBOL(blk_run_queue);
421 
422 void blk_put_queue(struct request_queue *q)
423 {
424 	kobject_put(&q->kobj);
425 }
426 EXPORT_SYMBOL(blk_put_queue);
427 
428 void blk_cleanup_queue(struct request_queue *q)
429 {
430 	mutex_lock(&q->sysfs_lock);
431 	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
432 	mutex_unlock(&q->sysfs_lock);
433 
434 	if (q->elevator)
435 		elevator_exit(q->elevator);
436 
437 	blk_put_queue(q);
438 }
439 EXPORT_SYMBOL(blk_cleanup_queue);
440 
441 static int blk_init_free_list(struct request_queue *q)
442 {
443 	struct request_list *rl = &q->rq;
444 
445 	rl->count[READ] = rl->count[WRITE] = 0;
446 	rl->starved[READ] = rl->starved[WRITE] = 0;
447 	rl->elvpriv = 0;
448 	init_waitqueue_head(&rl->wait[READ]);
449 	init_waitqueue_head(&rl->wait[WRITE]);
450 
451 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
452 				mempool_free_slab, request_cachep, q->node);
453 
454 	if (!rl->rq_pool)
455 		return -ENOMEM;
456 
457 	return 0;
458 }
459 
460 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
461 {
462 	return blk_alloc_queue_node(gfp_mask, -1);
463 }
464 EXPORT_SYMBOL(blk_alloc_queue);
465 
466 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
467 {
468 	struct request_queue *q;
469 	int err;
470 
471 	q = kmem_cache_alloc_node(blk_requestq_cachep,
472 				gfp_mask | __GFP_ZERO, node_id);
473 	if (!q)
474 		return NULL;
475 
476 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
477 	q->backing_dev_info.unplug_io_data = q;
478 	err = bdi_init(&q->backing_dev_info);
479 	if (err) {
480 		kmem_cache_free(blk_requestq_cachep, q);
481 		return NULL;
482 	}
483 
484 	init_timer(&q->unplug_timer);
485 
486 	kobject_init(&q->kobj, &blk_queue_ktype);
487 
488 	mutex_init(&q->sysfs_lock);
489 
490 	return q;
491 }
492 EXPORT_SYMBOL(blk_alloc_queue_node);
493 
494 /**
495  * blk_init_queue  - prepare a request queue for use with a block device
496  * @rfn:  The function to be called to process requests that have been
497  *        placed on the queue.
498  * @lock: Request queue spin lock
499  *
500  * Description:
501  *    If a block device wishes to use the standard request handling procedures,
502  *    which sorts requests and coalesces adjacent requests, then it must
503  *    call blk_init_queue().  The function @rfn will be called when there
504  *    are requests on the queue that need to be processed.  If the device
505  *    supports plugging, then @rfn may not be called immediately when requests
506  *    are available on the queue, but may be called at some time later instead.
507  *    Plugged queues are generally unplugged when a buffer belonging to one
508  *    of the requests on the queue is needed, or due to memory pressure.
509  *
510  *    @rfn is not required, or even expected, to remove all requests off the
511  *    queue, but only as many as it can handle at a time.  If it does leave
512  *    requests on the queue, it is responsible for arranging that the requests
513  *    get dealt with eventually.
514  *
515  *    The queue spin lock must be held while manipulating the requests on the
516  *    request queue; this lock will be taken also from interrupt context, so irq
517  *    disabling is needed for it.
518  *
519  *    Function returns a pointer to the initialized request queue, or NULL if
520  *    it didn't succeed.
521  *
522  * Note:
523  *    blk_init_queue() must be paired with a blk_cleanup_queue() call
524  *    when the block device is deactivated (such as at module unload).
525  **/
526 
527 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
528 {
529 	return blk_init_queue_node(rfn, lock, -1);
530 }
531 EXPORT_SYMBOL(blk_init_queue);
532 
533 struct request_queue *
534 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
535 {
536 	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
537 
538 	if (!q)
539 		return NULL;
540 
541 	q->node = node_id;
542 	if (blk_init_free_list(q)) {
543 		kmem_cache_free(blk_requestq_cachep, q);
544 		return NULL;
545 	}
546 
547 	/*
548 	 * if caller didn't supply a lock, they get per-queue locking with
549 	 * our embedded lock
550 	 */
551 	if (!lock) {
552 		spin_lock_init(&q->__queue_lock);
553 		lock = &q->__queue_lock;
554 	}
555 
556 	q->request_fn		= rfn;
557 	q->prep_rq_fn		= NULL;
558 	q->unplug_fn		= generic_unplug_device;
559 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
560 	q->queue_lock		= lock;
561 
562 	blk_queue_segment_boundary(q, 0xffffffff);
563 
564 	blk_queue_make_request(q, __make_request);
565 	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
566 
567 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
568 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
569 
570 	q->sg_reserved_size = INT_MAX;
571 
572 	/*
573 	 * all done
574 	 */
575 	if (!elevator_init(q, NULL)) {
576 		blk_queue_congestion_threshold(q);
577 		return q;
578 	}
579 
580 	blk_put_queue(q);
581 	return NULL;
582 }
583 EXPORT_SYMBOL(blk_init_queue_node);
584 
585 int blk_get_queue(struct request_queue *q)
586 {
587 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
588 		kobject_get(&q->kobj);
589 		return 0;
590 	}
591 
592 	return 1;
593 }
594 EXPORT_SYMBOL(blk_get_queue);
595 
596 static inline void blk_free_request(struct request_queue *q, struct request *rq)
597 {
598 	if (rq->cmd_flags & REQ_ELVPRIV)
599 		elv_put_request(q, rq);
600 	mempool_free(rq, q->rq.rq_pool);
601 }
602 
603 static struct request *
604 blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
605 {
606 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
607 
608 	if (!rq)
609 		return NULL;
610 
611 	/*
612 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
613 	 * see bio.h and blkdev.h
614 	 */
615 	rq->cmd_flags = rw | REQ_ALLOCED;
616 
617 	if (priv) {
618 		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
619 			mempool_free(rq, q->rq.rq_pool);
620 			return NULL;
621 		}
622 		rq->cmd_flags |= REQ_ELVPRIV;
623 	}
624 
625 	return rq;
626 }
627 
628 /*
629  * ioc_batching returns true if the ioc is a valid batching request and
630  * should be given priority access to a request.
631  */
632 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
633 {
634 	if (!ioc)
635 		return 0;
636 
637 	/*
638 	 * Make sure the process is able to allocate at least 1 request
639 	 * even if the batch times out, otherwise we could theoretically
640 	 * lose wakeups.
641 	 */
642 	return ioc->nr_batch_requests == q->nr_batching ||
643 		(ioc->nr_batch_requests > 0
644 		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
645 }
646 
647 /*
648  * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
649  * will cause the process to be a "batcher" on all queues in the system. This
650  * is the behaviour we want though - once it gets a wakeup it should be given
651  * a nice run.
652  */
653 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
654 {
655 	if (!ioc || ioc_batching(q, ioc))
656 		return;
657 
658 	ioc->nr_batch_requests = q->nr_batching;
659 	ioc->last_waited = jiffies;
660 }
661 
662 static void __freed_request(struct request_queue *q, int rw)
663 {
664 	struct request_list *rl = &q->rq;
665 
666 	if (rl->count[rw] < queue_congestion_off_threshold(q))
667 		blk_clear_queue_congested(q, rw);
668 
669 	if (rl->count[rw] + 1 <= q->nr_requests) {
670 		if (waitqueue_active(&rl->wait[rw]))
671 			wake_up(&rl->wait[rw]);
672 
673 		blk_clear_queue_full(q, rw);
674 	}
675 }
676 
677 /*
678  * A request has just been released.  Account for it, update the full and
679  * congestion status, wake up any waiters.   Called under q->queue_lock.
680  */
681 static void freed_request(struct request_queue *q, int rw, int priv)
682 {
683 	struct request_list *rl = &q->rq;
684 
685 	rl->count[rw]--;
686 	if (priv)
687 		rl->elvpriv--;
688 
689 	__freed_request(q, rw);
690 
691 	if (unlikely(rl->starved[rw ^ 1]))
692 		__freed_request(q, rw ^ 1);
693 }
694 
695 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
696 /*
697  * Get a free request, queue_lock must be held.
698  * Returns NULL on failure, with queue_lock held.
699  * Returns !NULL on success, with queue_lock *not held*.
700  */
701 static struct request *get_request(struct request_queue *q, int rw_flags,
702 				   struct bio *bio, gfp_t gfp_mask)
703 {
704 	struct request *rq = NULL;
705 	struct request_list *rl = &q->rq;
706 	struct io_context *ioc = NULL;
707 	const int rw = rw_flags & 0x01;
708 	int may_queue, priv;
709 
710 	may_queue = elv_may_queue(q, rw_flags);
711 	if (may_queue == ELV_MQUEUE_NO)
712 		goto rq_starved;
713 
714 	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
715 		if (rl->count[rw]+1 >= q->nr_requests) {
716 			ioc = current_io_context(GFP_ATOMIC, q->node);
717 			/*
718 			 * The queue will fill after this allocation, so set
719 			 * it as full, and mark this process as "batching".
720 			 * This process will be allowed to complete a batch of
721 			 * requests, others will be blocked.
722 			 */
723 			if (!blk_queue_full(q, rw)) {
724 				ioc_set_batching(q, ioc);
725 				blk_set_queue_full(q, rw);
726 			} else {
727 				if (may_queue != ELV_MQUEUE_MUST
728 						&& !ioc_batching(q, ioc)) {
729 					/*
730 					 * The queue is full and the allocating
731 					 * process is not a "batcher", and not
732 					 * exempted by the IO scheduler
733 					 */
734 					goto out;
735 				}
736 			}
737 		}
738 		blk_set_queue_congested(q, rw);
739 	}
740 
741 	/*
742 	 * Only allow batching queuers to allocate up to 50% over the defined
743 	 * limit of requests, otherwise we could have thousands of requests
744 	 * allocated with any setting of ->nr_requests
745 	 */
746 	if (rl->count[rw] >= (3 * q->nr_requests / 2))
747 		goto out;
748 
749 	rl->count[rw]++;
750 	rl->starved[rw] = 0;
751 
752 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
753 	if (priv)
754 		rl->elvpriv++;
755 
756 	spin_unlock_irq(q->queue_lock);
757 
758 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
759 	if (unlikely(!rq)) {
760 		/*
761 		 * Allocation failed presumably due to memory. Undo anything
762 		 * we might have messed up.
763 		 *
764 		 * Allocating task should really be put onto the front of the
765 		 * wait queue, but this is pretty rare.
766 		 */
767 		spin_lock_irq(q->queue_lock);
768 		freed_request(q, rw, priv);
769 
770 		/*
771 		 * in the very unlikely event that allocation failed and no
772 		 * requests for this direction was pending, mark us starved
773 		 * so that freeing of a request in the other direction will
774 		 * notice us. another possible fix would be to split the
775 		 * rq mempool into READ and WRITE
776 		 */
777 rq_starved:
778 		if (unlikely(rl->count[rw] == 0))
779 			rl->starved[rw] = 1;
780 
781 		goto out;
782 	}
783 
784 	/*
785 	 * ioc may be NULL here, and ioc_batching will be false. That's
786 	 * OK, if the queue is under the request limit then requests need
787 	 * not count toward the nr_batch_requests limit. There will always
788 	 * be some limit enforced by BLK_BATCH_TIME.
789 	 */
790 	if (ioc_batching(q, ioc))
791 		ioc->nr_batch_requests--;
792 
793 	rq_init(q, rq);
794 
795 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
796 out:
797 	return rq;
798 }
799 
800 /*
801  * No available requests for this queue, unplug the device and wait for some
802  * requests to become available.
803  *
804  * Called with q->queue_lock held, and returns with it unlocked.
805  */
806 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
807 					struct bio *bio)
808 {
809 	const int rw = rw_flags & 0x01;
810 	struct request *rq;
811 
812 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
813 	while (!rq) {
814 		DEFINE_WAIT(wait);
815 		struct request_list *rl = &q->rq;
816 
817 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
818 				TASK_UNINTERRUPTIBLE);
819 
820 		rq = get_request(q, rw_flags, bio, GFP_NOIO);
821 
822 		if (!rq) {
823 			struct io_context *ioc;
824 
825 			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
826 
827 			__generic_unplug_device(q);
828 			spin_unlock_irq(q->queue_lock);
829 			io_schedule();
830 
831 			/*
832 			 * After sleeping, we become a "batching" process and
833 			 * will be able to allocate at least one request, and
834 			 * up to a big batch of them for a small period time.
835 			 * See ioc_batching, ioc_set_batching
836 			 */
837 			ioc = current_io_context(GFP_NOIO, q->node);
838 			ioc_set_batching(q, ioc);
839 
840 			spin_lock_irq(q->queue_lock);
841 		}
842 		finish_wait(&rl->wait[rw], &wait);
843 	}
844 
845 	return rq;
846 }
847 
848 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
849 {
850 	struct request *rq;
851 
852 	BUG_ON(rw != READ && rw != WRITE);
853 
854 	spin_lock_irq(q->queue_lock);
855 	if (gfp_mask & __GFP_WAIT) {
856 		rq = get_request_wait(q, rw, NULL);
857 	} else {
858 		rq = get_request(q, rw, NULL, gfp_mask);
859 		if (!rq)
860 			spin_unlock_irq(q->queue_lock);
861 	}
862 	/* q->queue_lock is unlocked at this point */
863 
864 	return rq;
865 }
866 EXPORT_SYMBOL(blk_get_request);
867 
868 /**
869  * blk_start_queueing - initiate dispatch of requests to device
870  * @q:		request queue to kick into gear
871  *
872  * This is basically a helper to remove the need to know whether a queue
873  * is plugged or not if someone just wants to initiate dispatch of requests
874  * for this queue.
875  *
876  * The queue lock must be held with interrupts disabled.
877  */
878 void blk_start_queueing(struct request_queue *q)
879 {
880 	if (!blk_queue_plugged(q))
881 		q->request_fn(q);
882 	else
883 		__generic_unplug_device(q);
884 }
885 EXPORT_SYMBOL(blk_start_queueing);
886 
887 /**
888  * blk_requeue_request - put a request back on queue
889  * @q:		request queue where request should be inserted
890  * @rq:		request to be inserted
891  *
892  * Description:
893  *    Drivers often keep queueing requests until the hardware cannot accept
894  *    more, when that condition happens we need to put the request back
895  *    on the queue. Must be called with queue lock held.
896  */
897 void blk_requeue_request(struct request_queue *q, struct request *rq)
898 {
899 	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
900 
901 	if (blk_rq_tagged(rq))
902 		blk_queue_end_tag(q, rq);
903 
904 	elv_requeue_request(q, rq);
905 }
906 EXPORT_SYMBOL(blk_requeue_request);
907 
908 /**
909  * blk_insert_request - insert a special request in to a request queue
910  * @q:		request queue where request should be inserted
911  * @rq:		request to be inserted
912  * @at_head:	insert request at head or tail of queue
913  * @data:	private data
914  *
915  * Description:
916  *    Many block devices need to execute commands asynchronously, so they don't
917  *    block the whole kernel from preemption during request execution.  This is
918  *    accomplished normally by inserting aritficial requests tagged as
919  *    REQ_SPECIAL in to the corresponding request queue, and letting them be
920  *    scheduled for actual execution by the request queue.
921  *
922  *    We have the option of inserting the head or the tail of the queue.
923  *    Typically we use the tail for new ioctls and so forth.  We use the head
924  *    of the queue for things like a QUEUE_FULL message from a device, or a
925  *    host that is unable to accept a particular command.
926  */
927 void blk_insert_request(struct request_queue *q, struct request *rq,
928 			int at_head, void *data)
929 {
930 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
931 	unsigned long flags;
932 
933 	/*
934 	 * tell I/O scheduler that this isn't a regular read/write (ie it
935 	 * must not attempt merges on this) and that it acts as a soft
936 	 * barrier
937 	 */
938 	rq->cmd_type = REQ_TYPE_SPECIAL;
939 	rq->cmd_flags |= REQ_SOFTBARRIER;
940 
941 	rq->special = data;
942 
943 	spin_lock_irqsave(q->queue_lock, flags);
944 
945 	/*
946 	 * If command is tagged, release the tag
947 	 */
948 	if (blk_rq_tagged(rq))
949 		blk_queue_end_tag(q, rq);
950 
951 	drive_stat_acct(rq, 1);
952 	__elv_add_request(q, rq, where, 0);
953 	blk_start_queueing(q);
954 	spin_unlock_irqrestore(q->queue_lock, flags);
955 }
956 EXPORT_SYMBOL(blk_insert_request);
957 
958 /*
959  * add-request adds a request to the linked list.
960  * queue lock is held and interrupts disabled, as we muck with the
961  * request queue list.
962  */
963 static inline void add_request(struct request_queue *q, struct request *req)
964 {
965 	drive_stat_acct(req, 1);
966 
967 	/*
968 	 * elevator indicated where it wants this request to be
969 	 * inserted at elevator_merge time
970 	 */
971 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
972 }
973 
974 /*
975  * disk_round_stats()	- Round off the performance stats on a struct
976  * disk_stats.
977  *
978  * The average IO queue length and utilisation statistics are maintained
979  * by observing the current state of the queue length and the amount of
980  * time it has been in this state for.
981  *
982  * Normally, that accounting is done on IO completion, but that can result
983  * in more than a second's worth of IO being accounted for within any one
984  * second, leading to >100% utilisation.  To deal with that, we call this
985  * function to do a round-off before returning the results when reading
986  * /proc/diskstats.  This accounts immediately for all queue usage up to
987  * the current jiffies and restarts the counters again.
988  */
989 void disk_round_stats(struct gendisk *disk)
990 {
991 	unsigned long now = jiffies;
992 
993 	if (now == disk->stamp)
994 		return;
995 
996 	if (disk->in_flight) {
997 		__disk_stat_add(disk, time_in_queue,
998 				disk->in_flight * (now - disk->stamp));
999 		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
1000 	}
1001 	disk->stamp = now;
1002 }
1003 EXPORT_SYMBOL_GPL(disk_round_stats);
1004 
1005 void part_round_stats(struct hd_struct *part)
1006 {
1007 	unsigned long now = jiffies;
1008 
1009 	if (now == part->stamp)
1010 		return;
1011 
1012 	if (part->in_flight) {
1013 		__part_stat_add(part, time_in_queue,
1014 				part->in_flight * (now - part->stamp));
1015 		__part_stat_add(part, io_ticks, (now - part->stamp));
1016 	}
1017 	part->stamp = now;
1018 }
1019 
1020 /*
1021  * queue lock must be held
1022  */
1023 void __blk_put_request(struct request_queue *q, struct request *req)
1024 {
1025 	if (unlikely(!q))
1026 		return;
1027 	if (unlikely(--req->ref_count))
1028 		return;
1029 
1030 	elv_completed_request(q, req);
1031 
1032 	/*
1033 	 * Request may not have originated from ll_rw_blk. if not,
1034 	 * it didn't come out of our reserved rq pools
1035 	 */
1036 	if (req->cmd_flags & REQ_ALLOCED) {
1037 		int rw = rq_data_dir(req);
1038 		int priv = req->cmd_flags & REQ_ELVPRIV;
1039 
1040 		BUG_ON(!list_empty(&req->queuelist));
1041 		BUG_ON(!hlist_unhashed(&req->hash));
1042 
1043 		blk_free_request(q, req);
1044 		freed_request(q, rw, priv);
1045 	}
1046 }
1047 EXPORT_SYMBOL_GPL(__blk_put_request);
1048 
1049 void blk_put_request(struct request *req)
1050 {
1051 	unsigned long flags;
1052 	struct request_queue *q = req->q;
1053 
1054 	/*
1055 	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
1056 	 * following if (q) test.
1057 	 */
1058 	if (q) {
1059 		spin_lock_irqsave(q->queue_lock, flags);
1060 		__blk_put_request(q, req);
1061 		spin_unlock_irqrestore(q->queue_lock, flags);
1062 	}
1063 }
1064 EXPORT_SYMBOL(blk_put_request);
1065 
1066 void init_request_from_bio(struct request *req, struct bio *bio)
1067 {
1068 	req->cmd_type = REQ_TYPE_FS;
1069 
1070 	/*
1071 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
1072 	 */
1073 	if (bio_rw_ahead(bio) || bio_failfast(bio))
1074 		req->cmd_flags |= REQ_FAILFAST;
1075 
1076 	/*
1077 	 * REQ_BARRIER implies no merging, but lets make it explicit
1078 	 */
1079 	if (unlikely(bio_barrier(bio)))
1080 		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
1081 
1082 	if (bio_sync(bio))
1083 		req->cmd_flags |= REQ_RW_SYNC;
1084 	if (bio_rw_meta(bio))
1085 		req->cmd_flags |= REQ_RW_META;
1086 
1087 	req->errors = 0;
1088 	req->hard_sector = req->sector = bio->bi_sector;
1089 	req->ioprio = bio_prio(bio);
1090 	req->start_time = jiffies;
1091 	blk_rq_bio_prep(req->q, req, bio);
1092 }
1093 
1094 static int __make_request(struct request_queue *q, struct bio *bio)
1095 {
1096 	struct request *req;
1097 	int el_ret, nr_sectors, barrier, err;
1098 	const unsigned short prio = bio_prio(bio);
1099 	const int sync = bio_sync(bio);
1100 	int rw_flags;
1101 
1102 	nr_sectors = bio_sectors(bio);
1103 
1104 	/*
1105 	 * low level driver can indicate that it wants pages above a
1106 	 * certain limit bounced to low memory (ie for highmem, or even
1107 	 * ISA dma in theory)
1108 	 */
1109 	blk_queue_bounce(q, &bio);
1110 
1111 	barrier = bio_barrier(bio);
1112 	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
1113 		err = -EOPNOTSUPP;
1114 		goto end_io;
1115 	}
1116 
1117 	spin_lock_irq(q->queue_lock);
1118 
1119 	if (unlikely(barrier) || elv_queue_empty(q))
1120 		goto get_rq;
1121 
1122 	el_ret = elv_merge(q, &req, bio);
1123 	switch (el_ret) {
1124 	case ELEVATOR_BACK_MERGE:
1125 		BUG_ON(!rq_mergeable(req));
1126 
1127 		if (!ll_back_merge_fn(q, req, bio))
1128 			break;
1129 
1130 		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
1131 
1132 		req->biotail->bi_next = bio;
1133 		req->biotail = bio;
1134 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1135 		req->ioprio = ioprio_best(req->ioprio, prio);
1136 		drive_stat_acct(req, 0);
1137 		if (!attempt_back_merge(q, req))
1138 			elv_merged_request(q, req, el_ret);
1139 		goto out;
1140 
1141 	case ELEVATOR_FRONT_MERGE:
1142 		BUG_ON(!rq_mergeable(req));
1143 
1144 		if (!ll_front_merge_fn(q, req, bio))
1145 			break;
1146 
1147 		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
1148 
1149 		bio->bi_next = req->bio;
1150 		req->bio = bio;
1151 
1152 		/*
1153 		 * may not be valid. if the low level driver said
1154 		 * it didn't need a bounce buffer then it better
1155 		 * not touch req->buffer either...
1156 		 */
1157 		req->buffer = bio_data(bio);
1158 		req->current_nr_sectors = bio_cur_sectors(bio);
1159 		req->hard_cur_sectors = req->current_nr_sectors;
1160 		req->sector = req->hard_sector = bio->bi_sector;
1161 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1162 		req->ioprio = ioprio_best(req->ioprio, prio);
1163 		drive_stat_acct(req, 0);
1164 		if (!attempt_front_merge(q, req))
1165 			elv_merged_request(q, req, el_ret);
1166 		goto out;
1167 
1168 	/* ELV_NO_MERGE: elevator says don't/can't merge. */
1169 	default:
1170 		;
1171 	}
1172 
1173 get_rq:
1174 	/*
1175 	 * This sync check and mask will be re-done in init_request_from_bio(),
1176 	 * but we need to set it earlier to expose the sync flag to the
1177 	 * rq allocator and io schedulers.
1178 	 */
1179 	rw_flags = bio_data_dir(bio);
1180 	if (sync)
1181 		rw_flags |= REQ_RW_SYNC;
1182 
1183 	/*
1184 	 * Grab a free request. This is might sleep but can not fail.
1185 	 * Returns with the queue unlocked.
1186 	 */
1187 	req = get_request_wait(q, rw_flags, bio);
1188 
1189 	/*
1190 	 * After dropping the lock and possibly sleeping here, our request
1191 	 * may now be mergeable after it had proven unmergeable (above).
1192 	 * We don't worry about that case for efficiency. It won't happen
1193 	 * often, and the elevators are able to handle it.
1194 	 */
1195 	init_request_from_bio(req, bio);
1196 
1197 	spin_lock_irq(q->queue_lock);
1198 	if (elv_queue_empty(q))
1199 		blk_plug_device(q);
1200 	add_request(q, req);
1201 out:
1202 	if (sync)
1203 		__generic_unplug_device(q);
1204 
1205 	spin_unlock_irq(q->queue_lock);
1206 	return 0;
1207 
1208 end_io:
1209 	bio_endio(bio, err);
1210 	return 0;
1211 }
1212 
1213 /*
1214  * If bio->bi_dev is a partition, remap the location
1215  */
1216 static inline void blk_partition_remap(struct bio *bio)
1217 {
1218 	struct block_device *bdev = bio->bi_bdev;
1219 
1220 	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1221 		struct hd_struct *p = bdev->bd_part;
1222 
1223 		bio->bi_sector += p->start_sect;
1224 		bio->bi_bdev = bdev->bd_contains;
1225 
1226 		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
1227 				    bdev->bd_dev, bio->bi_sector,
1228 				    bio->bi_sector - p->start_sect);
1229 	}
1230 }
1231 
1232 static void handle_bad_sector(struct bio *bio)
1233 {
1234 	char b[BDEVNAME_SIZE];
1235 
1236 	printk(KERN_INFO "attempt to access beyond end of device\n");
1237 	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1238 			bdevname(bio->bi_bdev, b),
1239 			bio->bi_rw,
1240 			(unsigned long long)bio->bi_sector + bio_sectors(bio),
1241 			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
1242 
1243 	set_bit(BIO_EOF, &bio->bi_flags);
1244 }
1245 
1246 #ifdef CONFIG_FAIL_MAKE_REQUEST
1247 
1248 static DECLARE_FAULT_ATTR(fail_make_request);
1249 
1250 static int __init setup_fail_make_request(char *str)
1251 {
1252 	return setup_fault_attr(&fail_make_request, str);
1253 }
1254 __setup("fail_make_request=", setup_fail_make_request);
1255 
1256 static int should_fail_request(struct bio *bio)
1257 {
1258 	if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
1259 	    (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
1260 		return should_fail(&fail_make_request, bio->bi_size);
1261 
1262 	return 0;
1263 }
1264 
1265 static int __init fail_make_request_debugfs(void)
1266 {
1267 	return init_fault_attr_dentries(&fail_make_request,
1268 					"fail_make_request");
1269 }
1270 
1271 late_initcall(fail_make_request_debugfs);
1272 
1273 #else /* CONFIG_FAIL_MAKE_REQUEST */
1274 
1275 static inline int should_fail_request(struct bio *bio)
1276 {
1277 	return 0;
1278 }
1279 
1280 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1281 
1282 /*
1283  * Check whether this bio extends beyond the end of the device.
1284  */
1285 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1286 {
1287 	sector_t maxsector;
1288 
1289 	if (!nr_sectors)
1290 		return 0;
1291 
1292 	/* Test device or partition size, when known. */
1293 	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1294 	if (maxsector) {
1295 		sector_t sector = bio->bi_sector;
1296 
1297 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1298 			/*
1299 			 * This may well happen - the kernel calls bread()
1300 			 * without checking the size of the device, e.g., when
1301 			 * mounting a device.
1302 			 */
1303 			handle_bad_sector(bio);
1304 			return 1;
1305 		}
1306 	}
1307 
1308 	return 0;
1309 }
1310 
1311 /**
1312  * generic_make_request: hand a buffer to its device driver for I/O
1313  * @bio:  The bio describing the location in memory and on the device.
1314  *
1315  * generic_make_request() is used to make I/O requests of block
1316  * devices. It is passed a &struct bio, which describes the I/O that needs
1317  * to be done.
1318  *
1319  * generic_make_request() does not return any status.  The
1320  * success/failure status of the request, along with notification of
1321  * completion, is delivered asynchronously through the bio->bi_end_io
1322  * function described (one day) else where.
1323  *
1324  * The caller of generic_make_request must make sure that bi_io_vec
1325  * are set to describe the memory buffer, and that bi_dev and bi_sector are
1326  * set to describe the device address, and the
1327  * bi_end_io and optionally bi_private are set to describe how
1328  * completion notification should be signaled.
1329  *
1330  * generic_make_request and the drivers it calls may use bi_next if this
1331  * bio happens to be merged with someone else, and may change bi_dev and
1332  * bi_sector for remaps as it sees fit.  So the values of these fields
1333  * should NOT be depended on after the call to generic_make_request.
1334  */
1335 static inline void __generic_make_request(struct bio *bio)
1336 {
1337 	struct request_queue *q;
1338 	sector_t old_sector;
1339 	int ret, nr_sectors = bio_sectors(bio);
1340 	dev_t old_dev;
1341 	int err = -EIO;
1342 
1343 	might_sleep();
1344 
1345 	if (bio_check_eod(bio, nr_sectors))
1346 		goto end_io;
1347 
1348 	/*
1349 	 * Resolve the mapping until finished. (drivers are
1350 	 * still free to implement/resolve their own stacking
1351 	 * by explicitly returning 0)
1352 	 *
1353 	 * NOTE: we don't repeat the blk_size check for each new device.
1354 	 * Stacking drivers are expected to know what they are doing.
1355 	 */
1356 	old_sector = -1;
1357 	old_dev = 0;
1358 	do {
1359 		char b[BDEVNAME_SIZE];
1360 
1361 		q = bdev_get_queue(bio->bi_bdev);
1362 		if (!q) {
1363 			printk(KERN_ERR
1364 			       "generic_make_request: Trying to access "
1365 				"nonexistent block-device %s (%Lu)\n",
1366 				bdevname(bio->bi_bdev, b),
1367 				(long long) bio->bi_sector);
1368 end_io:
1369 			bio_endio(bio, err);
1370 			break;
1371 		}
1372 
1373 		if (unlikely(nr_sectors > q->max_hw_sectors)) {
1374 			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1375 				bdevname(bio->bi_bdev, b),
1376 				bio_sectors(bio),
1377 				q->max_hw_sectors);
1378 			goto end_io;
1379 		}
1380 
1381 		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1382 			goto end_io;
1383 
1384 		if (should_fail_request(bio))
1385 			goto end_io;
1386 
1387 		/*
1388 		 * If this device has partitions, remap block n
1389 		 * of partition p to block n+start(p) of the disk.
1390 		 */
1391 		blk_partition_remap(bio);
1392 
1393 		if (old_sector != -1)
1394 			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
1395 					    old_sector);
1396 
1397 		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
1398 
1399 		old_sector = bio->bi_sector;
1400 		old_dev = bio->bi_bdev->bd_dev;
1401 
1402 		if (bio_check_eod(bio, nr_sectors))
1403 			goto end_io;
1404 		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
1405 			err = -EOPNOTSUPP;
1406 			goto end_io;
1407 		}
1408 
1409 		ret = q->make_request_fn(q, bio);
1410 	} while (ret);
1411 }
1412 
1413 /*
1414  * We only want one ->make_request_fn to be active at a time,
1415  * else stack usage with stacked devices could be a problem.
1416  * So use current->bio_{list,tail} to keep a list of requests
1417  * submited by a make_request_fn function.
1418  * current->bio_tail is also used as a flag to say if
1419  * generic_make_request is currently active in this task or not.
1420  * If it is NULL, then no make_request is active.  If it is non-NULL,
1421  * then a make_request is active, and new requests should be added
1422  * at the tail
1423  */
1424 void generic_make_request(struct bio *bio)
1425 {
1426 	if (current->bio_tail) {
1427 		/* make_request is active */
1428 		*(current->bio_tail) = bio;
1429 		bio->bi_next = NULL;
1430 		current->bio_tail = &bio->bi_next;
1431 		return;
1432 	}
1433 	/* following loop may be a bit non-obvious, and so deserves some
1434 	 * explanation.
1435 	 * Before entering the loop, bio->bi_next is NULL (as all callers
1436 	 * ensure that) so we have a list with a single bio.
1437 	 * We pretend that we have just taken it off a longer list, so
1438 	 * we assign bio_list to the next (which is NULL) and bio_tail
1439 	 * to &bio_list, thus initialising the bio_list of new bios to be
1440 	 * added.  __generic_make_request may indeed add some more bios
1441 	 * through a recursive call to generic_make_request.  If it
1442 	 * did, we find a non-NULL value in bio_list and re-enter the loop
1443 	 * from the top.  In this case we really did just take the bio
1444 	 * of the top of the list (no pretending) and so fixup bio_list and
1445 	 * bio_tail or bi_next, and call into __generic_make_request again.
1446 	 *
1447 	 * The loop was structured like this to make only one call to
1448 	 * __generic_make_request (which is important as it is large and
1449 	 * inlined) and to keep the structure simple.
1450 	 */
1451 	BUG_ON(bio->bi_next);
1452 	do {
1453 		current->bio_list = bio->bi_next;
1454 		if (bio->bi_next == NULL)
1455 			current->bio_tail = &current->bio_list;
1456 		else
1457 			bio->bi_next = NULL;
1458 		__generic_make_request(bio);
1459 		bio = current->bio_list;
1460 	} while (bio);
1461 	current->bio_tail = NULL; /* deactivate */
1462 }
1463 EXPORT_SYMBOL(generic_make_request);
1464 
1465 /**
1466  * submit_bio: submit a bio to the block device layer for I/O
1467  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1468  * @bio: The &struct bio which describes the I/O
1469  *
1470  * submit_bio() is very similar in purpose to generic_make_request(), and
1471  * uses that function to do most of the work. Both are fairly rough
1472  * interfaces, @bio must be presetup and ready for I/O.
1473  *
1474  */
1475 void submit_bio(int rw, struct bio *bio)
1476 {
1477 	int count = bio_sectors(bio);
1478 
1479 	bio->bi_rw |= rw;
1480 
1481 	/*
1482 	 * If it's a regular read/write or a barrier with data attached,
1483 	 * go through the normal accounting stuff before submission.
1484 	 */
1485 	if (!bio_empty_barrier(bio)) {
1486 
1487 		BIO_BUG_ON(!bio->bi_size);
1488 		BIO_BUG_ON(!bio->bi_io_vec);
1489 
1490 		if (rw & WRITE) {
1491 			count_vm_events(PGPGOUT, count);
1492 		} else {
1493 			task_io_account_read(bio->bi_size);
1494 			count_vm_events(PGPGIN, count);
1495 		}
1496 
1497 		if (unlikely(block_dump)) {
1498 			char b[BDEVNAME_SIZE];
1499 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
1500 			current->comm, task_pid_nr(current),
1501 				(rw & WRITE) ? "WRITE" : "READ",
1502 				(unsigned long long)bio->bi_sector,
1503 				bdevname(bio->bi_bdev, b));
1504 		}
1505 	}
1506 
1507 	generic_make_request(bio);
1508 }
1509 EXPORT_SYMBOL(submit_bio);
1510 
1511 /**
1512  * __end_that_request_first - end I/O on a request
1513  * @req:      the request being processed
1514  * @error:    0 for success, < 0 for error
1515  * @nr_bytes: number of bytes to complete
1516  *
1517  * Description:
1518  *     Ends I/O on a number of bytes attached to @req, and sets it up
1519  *     for the next range of segments (if any) in the cluster.
1520  *
1521  * Return:
1522  *     0 - we are done with this request, call end_that_request_last()
1523  *     1 - still buffers pending for this request
1524  **/
1525 static int __end_that_request_first(struct request *req, int error,
1526 				    int nr_bytes)
1527 {
1528 	int total_bytes, bio_nbytes, next_idx = 0;
1529 	struct bio *bio;
1530 
1531 	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
1532 
1533 	/*
1534 	 * for a REQ_BLOCK_PC request, we want to carry any eventual
1535 	 * sense key with us all the way through
1536 	 */
1537 	if (!blk_pc_request(req))
1538 		req->errors = 0;
1539 
1540 	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1541 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1542 				req->rq_disk ? req->rq_disk->disk_name : "?",
1543 				(unsigned long long)req->sector);
1544 	}
1545 
1546 	if (blk_fs_request(req) && req->rq_disk) {
1547 		const int rw = rq_data_dir(req);
1548 
1549 		all_stat_add(req->rq_disk, sectors[rw],
1550 			     nr_bytes >> 9, req->sector);
1551 	}
1552 
1553 	total_bytes = bio_nbytes = 0;
1554 	while ((bio = req->bio) != NULL) {
1555 		int nbytes;
1556 
1557 		/*
1558 		 * For an empty barrier request, the low level driver must
1559 		 * store a potential error location in ->sector. We pass
1560 		 * that back up in ->bi_sector.
1561 		 */
1562 		if (blk_empty_barrier(req))
1563 			bio->bi_sector = req->sector;
1564 
1565 		if (nr_bytes >= bio->bi_size) {
1566 			req->bio = bio->bi_next;
1567 			nbytes = bio->bi_size;
1568 			req_bio_endio(req, bio, nbytes, error);
1569 			next_idx = 0;
1570 			bio_nbytes = 0;
1571 		} else {
1572 			int idx = bio->bi_idx + next_idx;
1573 
1574 			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
1575 				blk_dump_rq_flags(req, "__end_that");
1576 				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
1577 						__FUNCTION__, bio->bi_idx,
1578 						bio->bi_vcnt);
1579 				break;
1580 			}
1581 
1582 			nbytes = bio_iovec_idx(bio, idx)->bv_len;
1583 			BIO_BUG_ON(nbytes > bio->bi_size);
1584 
1585 			/*
1586 			 * not a complete bvec done
1587 			 */
1588 			if (unlikely(nbytes > nr_bytes)) {
1589 				bio_nbytes += nr_bytes;
1590 				total_bytes += nr_bytes;
1591 				break;
1592 			}
1593 
1594 			/*
1595 			 * advance to the next vector
1596 			 */
1597 			next_idx++;
1598 			bio_nbytes += nbytes;
1599 		}
1600 
1601 		total_bytes += nbytes;
1602 		nr_bytes -= nbytes;
1603 
1604 		bio = req->bio;
1605 		if (bio) {
1606 			/*
1607 			 * end more in this run, or just return 'not-done'
1608 			 */
1609 			if (unlikely(nr_bytes <= 0))
1610 				break;
1611 		}
1612 	}
1613 
1614 	/*
1615 	 * completely done
1616 	 */
1617 	if (!req->bio)
1618 		return 0;
1619 
1620 	/*
1621 	 * if the request wasn't completed, update state
1622 	 */
1623 	if (bio_nbytes) {
1624 		req_bio_endio(req, bio, bio_nbytes, error);
1625 		bio->bi_idx += next_idx;
1626 		bio_iovec(bio)->bv_offset += nr_bytes;
1627 		bio_iovec(bio)->bv_len -= nr_bytes;
1628 	}
1629 
1630 	blk_recalc_rq_sectors(req, total_bytes >> 9);
1631 	blk_recalc_rq_segments(req);
1632 	return 1;
1633 }
1634 
1635 /*
1636  * splice the completion data to a local structure and hand off to
1637  * process_completion_queue() to complete the requests
1638  */
1639 static void blk_done_softirq(struct softirq_action *h)
1640 {
1641 	struct list_head *cpu_list, local_list;
1642 
1643 	local_irq_disable();
1644 	cpu_list = &__get_cpu_var(blk_cpu_done);
1645 	list_replace_init(cpu_list, &local_list);
1646 	local_irq_enable();
1647 
1648 	while (!list_empty(&local_list)) {
1649 		struct request *rq;
1650 
1651 		rq = list_entry(local_list.next, struct request, donelist);
1652 		list_del_init(&rq->donelist);
1653 		rq->q->softirq_done_fn(rq);
1654 	}
1655 }
1656 
1657 static int __cpuinit blk_cpu_notify(struct notifier_block *self,
1658 				    unsigned long action, void *hcpu)
1659 {
1660 	/*
1661 	 * If a CPU goes away, splice its entries to the current CPU
1662 	 * and trigger a run of the softirq
1663 	 */
1664 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1665 		int cpu = (unsigned long) hcpu;
1666 
1667 		local_irq_disable();
1668 		list_splice_init(&per_cpu(blk_cpu_done, cpu),
1669 				 &__get_cpu_var(blk_cpu_done));
1670 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
1671 		local_irq_enable();
1672 	}
1673 
1674 	return NOTIFY_OK;
1675 }
1676 
1677 
1678 static struct notifier_block blk_cpu_notifier __cpuinitdata = {
1679 	.notifier_call	= blk_cpu_notify,
1680 };
1681 
1682 /**
1683  * blk_complete_request - end I/O on a request
1684  * @req:      the request being processed
1685  *
1686  * Description:
1687  *     Ends all I/O on a request. It does not handle partial completions,
1688  *     unless the driver actually implements this in its completion callback
1689  *     through requeueing. The actual completion happens out-of-order,
1690  *     through a softirq handler. The user must have registered a completion
1691  *     callback through blk_queue_softirq_done().
1692  **/
1693 
1694 void blk_complete_request(struct request *req)
1695 {
1696 	struct list_head *cpu_list;
1697 	unsigned long flags;
1698 
1699 	BUG_ON(!req->q->softirq_done_fn);
1700 
1701 	local_irq_save(flags);
1702 
1703 	cpu_list = &__get_cpu_var(blk_cpu_done);
1704 	list_add_tail(&req->donelist, cpu_list);
1705 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
1706 
1707 	local_irq_restore(flags);
1708 }
1709 EXPORT_SYMBOL(blk_complete_request);
1710 
1711 /*
1712  * queue lock must be held
1713  */
1714 static void end_that_request_last(struct request *req, int error)
1715 {
1716 	struct gendisk *disk = req->rq_disk;
1717 
1718 	if (blk_rq_tagged(req))
1719 		blk_queue_end_tag(req->q, req);
1720 
1721 	if (blk_queued_rq(req))
1722 		blkdev_dequeue_request(req);
1723 
1724 	if (unlikely(laptop_mode) && blk_fs_request(req))
1725 		laptop_io_completion();
1726 
1727 	/*
1728 	 * Account IO completion.  bar_rq isn't accounted as a normal
1729 	 * IO on queueing nor completion.  Accounting the containing
1730 	 * request is enough.
1731 	 */
1732 	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
1733 		unsigned long duration = jiffies - req->start_time;
1734 		const int rw = rq_data_dir(req);
1735 		struct hd_struct *part = get_part(disk, req->sector);
1736 
1737 		__all_stat_inc(disk, ios[rw], req->sector);
1738 		__all_stat_add(disk, ticks[rw], duration, req->sector);
1739 		disk_round_stats(disk);
1740 		disk->in_flight--;
1741 		if (part) {
1742 			part_round_stats(part);
1743 			part->in_flight--;
1744 		}
1745 	}
1746 
1747 	if (req->end_io)
1748 		req->end_io(req, error);
1749 	else {
1750 		if (blk_bidi_rq(req))
1751 			__blk_put_request(req->next_rq->q, req->next_rq);
1752 
1753 		__blk_put_request(req->q, req);
1754 	}
1755 }
1756 
1757 static inline void __end_request(struct request *rq, int uptodate,
1758 				 unsigned int nr_bytes)
1759 {
1760 	int error = 0;
1761 
1762 	if (uptodate <= 0)
1763 		error = uptodate ? uptodate : -EIO;
1764 
1765 	__blk_end_request(rq, error, nr_bytes);
1766 }
1767 
1768 /**
1769  * blk_rq_bytes - Returns bytes left to complete in the entire request
1770  **/
1771 unsigned int blk_rq_bytes(struct request *rq)
1772 {
1773 	if (blk_fs_request(rq))
1774 		return rq->hard_nr_sectors << 9;
1775 
1776 	return rq->data_len;
1777 }
1778 EXPORT_SYMBOL_GPL(blk_rq_bytes);
1779 
1780 /**
1781  * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
1782  **/
1783 unsigned int blk_rq_cur_bytes(struct request *rq)
1784 {
1785 	if (blk_fs_request(rq))
1786 		return rq->current_nr_sectors << 9;
1787 
1788 	if (rq->bio)
1789 		return rq->bio->bi_size;
1790 
1791 	return rq->data_len;
1792 }
1793 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
1794 
1795 /**
1796  * end_queued_request - end all I/O on a queued request
1797  * @rq:		the request being processed
1798  * @uptodate:	error value or 0/1 uptodate flag
1799  *
1800  * Description:
1801  *     Ends all I/O on a request, and removes it from the block layer queues.
1802  *     Not suitable for normal IO completion, unless the driver still has
1803  *     the request attached to the block layer.
1804  *
1805  **/
1806 void end_queued_request(struct request *rq, int uptodate)
1807 {
1808 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1809 }
1810 EXPORT_SYMBOL(end_queued_request);
1811 
1812 /**
1813  * end_dequeued_request - end all I/O on a dequeued request
1814  * @rq:		the request being processed
1815  * @uptodate:	error value or 0/1 uptodate flag
1816  *
1817  * Description:
1818  *     Ends all I/O on a request. The request must already have been
1819  *     dequeued using blkdev_dequeue_request(), as is normally the case
1820  *     for most drivers.
1821  *
1822  **/
1823 void end_dequeued_request(struct request *rq, int uptodate)
1824 {
1825 	__end_request(rq, uptodate, blk_rq_bytes(rq));
1826 }
1827 EXPORT_SYMBOL(end_dequeued_request);
1828 
1829 
1830 /**
1831  * end_request - end I/O on the current segment of the request
1832  * @req:	the request being processed
1833  * @uptodate:	error value or 0/1 uptodate flag
1834  *
1835  * Description:
1836  *     Ends I/O on the current segment of a request. If that is the only
1837  *     remaining segment, the request is also completed and freed.
1838  *
1839  *     This is a remnant of how older block drivers handled IO completions.
1840  *     Modern drivers typically end IO on the full request in one go, unless
1841  *     they have a residual value to account for. For that case this function
1842  *     isn't really useful, unless the residual just happens to be the
1843  *     full current segment. In other words, don't use this function in new
1844  *     code. Either use end_request_completely(), or the
1845  *     end_that_request_chunk() (along with end_that_request_last()) for
1846  *     partial completions.
1847  *
1848  **/
1849 void end_request(struct request *req, int uptodate)
1850 {
1851 	__end_request(req, uptodate, req->hard_cur_sectors << 9);
1852 }
1853 EXPORT_SYMBOL(end_request);
1854 
1855 /**
1856  * blk_end_io - Generic end_io function to complete a request.
1857  * @rq:           the request being processed
1858  * @error:        0 for success, < 0 for error
1859  * @nr_bytes:     number of bytes to complete @rq
1860  * @bidi_bytes:   number of bytes to complete @rq->next_rq
1861  * @drv_callback: function called between completion of bios in the request
1862  *                and completion of the request.
1863  *                If the callback returns non 0, this helper returns without
1864  *                completion of the request.
1865  *
1866  * Description:
1867  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1868  *     If @rq has leftover, sets it up for the next range of segments.
1869  *
1870  * Return:
1871  *     0 - we are done with this request
1872  *     1 - this request is not freed yet, it still has pending buffers.
1873  **/
1874 static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1875 		      unsigned int bidi_bytes,
1876 		      int (drv_callback)(struct request *))
1877 {
1878 	struct request_queue *q = rq->q;
1879 	unsigned long flags = 0UL;
1880 
1881 	if (blk_fs_request(rq) || blk_pc_request(rq)) {
1882 		if (__end_that_request_first(rq, error, nr_bytes))
1883 			return 1;
1884 
1885 		/* Bidi request must be completed as a whole */
1886 		if (blk_bidi_rq(rq) &&
1887 		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
1888 			return 1;
1889 	}
1890 
1891 	/* Special feature for tricky drivers */
1892 	if (drv_callback && drv_callback(rq))
1893 		return 1;
1894 
1895 	add_disk_randomness(rq->rq_disk);
1896 
1897 	spin_lock_irqsave(q->queue_lock, flags);
1898 	end_that_request_last(rq, error);
1899 	spin_unlock_irqrestore(q->queue_lock, flags);
1900 
1901 	return 0;
1902 }
1903 
1904 /**
1905  * blk_end_request - Helper function for drivers to complete the request.
1906  * @rq:       the request being processed
1907  * @error:    0 for success, < 0 for error
1908  * @nr_bytes: number of bytes to complete
1909  *
1910  * Description:
1911  *     Ends I/O on a number of bytes attached to @rq.
1912  *     If @rq has leftover, sets it up for the next range of segments.
1913  *
1914  * Return:
1915  *     0 - we are done with this request
1916  *     1 - still buffers pending for this request
1917  **/
1918 int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1919 {
1920 	return blk_end_io(rq, error, nr_bytes, 0, NULL);
1921 }
1922 EXPORT_SYMBOL_GPL(blk_end_request);
1923 
1924 /**
1925  * __blk_end_request - Helper function for drivers to complete the request.
1926  * @rq:       the request being processed
1927  * @error:    0 for success, < 0 for error
1928  * @nr_bytes: number of bytes to complete
1929  *
1930  * Description:
1931  *     Must be called with queue lock held unlike blk_end_request().
1932  *
1933  * Return:
1934  *     0 - we are done with this request
1935  *     1 - still buffers pending for this request
1936  **/
1937 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1938 {
1939 	if (blk_fs_request(rq) || blk_pc_request(rq)) {
1940 		if (__end_that_request_first(rq, error, nr_bytes))
1941 			return 1;
1942 	}
1943 
1944 	add_disk_randomness(rq->rq_disk);
1945 
1946 	end_that_request_last(rq, error);
1947 
1948 	return 0;
1949 }
1950 EXPORT_SYMBOL_GPL(__blk_end_request);
1951 
1952 /**
1953  * blk_end_bidi_request - Helper function for drivers to complete bidi request.
1954  * @rq:         the bidi request being processed
1955  * @error:      0 for success, < 0 for error
1956  * @nr_bytes:   number of bytes to complete @rq
1957  * @bidi_bytes: number of bytes to complete @rq->next_rq
1958  *
1959  * Description:
1960  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1961  *
1962  * Return:
1963  *     0 - we are done with this request
1964  *     1 - still buffers pending for this request
1965  **/
1966 int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
1967 			 unsigned int bidi_bytes)
1968 {
1969 	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
1970 }
1971 EXPORT_SYMBOL_GPL(blk_end_bidi_request);
1972 
1973 /**
1974  * blk_end_request_callback - Special helper function for tricky drivers
1975  * @rq:           the request being processed
1976  * @error:        0 for success, < 0 for error
1977  * @nr_bytes:     number of bytes to complete
1978  * @drv_callback: function called between completion of bios in the request
1979  *                and completion of the request.
1980  *                If the callback returns non 0, this helper returns without
1981  *                completion of the request.
1982  *
1983  * Description:
1984  *     Ends I/O on a number of bytes attached to @rq.
1985  *     If @rq has leftover, sets it up for the next range of segments.
1986  *
1987  *     This special helper function is used only for existing tricky drivers.
1988  *     (e.g. cdrom_newpc_intr() of ide-cd)
1989  *     This interface will be removed when such drivers are rewritten.
1990  *     Don't use this interface in other places anymore.
1991  *
1992  * Return:
1993  *     0 - we are done with this request
1994  *     1 - this request is not freed yet.
1995  *         this request still has pending buffers or
1996  *         the driver doesn't want to finish this request yet.
1997  **/
1998 int blk_end_request_callback(struct request *rq, int error,
1999 			     unsigned int nr_bytes,
2000 			     int (drv_callback)(struct request *))
2001 {
2002 	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
2003 }
2004 EXPORT_SYMBOL_GPL(blk_end_request_callback);
2005 
2006 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2007 		     struct bio *bio)
2008 {
2009 	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
2010 	rq->cmd_flags |= (bio->bi_rw & 3);
2011 
2012 	rq->nr_phys_segments = bio_phys_segments(q, bio);
2013 	rq->nr_hw_segments = bio_hw_segments(q, bio);
2014 	rq->current_nr_sectors = bio_cur_sectors(bio);
2015 	rq->hard_cur_sectors = rq->current_nr_sectors;
2016 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2017 	rq->buffer = bio_data(bio);
2018 	rq->data_len = bio->bi_size;
2019 
2020 	rq->bio = rq->biotail = bio;
2021 
2022 	if (bio->bi_bdev)
2023 		rq->rq_disk = bio->bi_bdev->bd_disk;
2024 }
2025 
2026 int kblockd_schedule_work(struct work_struct *work)
2027 {
2028 	return queue_work(kblockd_workqueue, work);
2029 }
2030 EXPORT_SYMBOL(kblockd_schedule_work);
2031 
2032 void kblockd_flush_work(struct work_struct *work)
2033 {
2034 	cancel_work_sync(work);
2035 }
2036 EXPORT_SYMBOL(kblockd_flush_work);
2037 
2038 int __init blk_dev_init(void)
2039 {
2040 	int i;
2041 
2042 	kblockd_workqueue = create_workqueue("kblockd");
2043 	if (!kblockd_workqueue)
2044 		panic("Failed to create kblockd\n");
2045 
2046 	request_cachep = kmem_cache_create("blkdev_requests",
2047 			sizeof(struct request), 0, SLAB_PANIC, NULL);
2048 
2049 	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2050 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2051 
2052 	for_each_possible_cpu(i)
2053 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
2054 
2055 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
2056 	register_hotcpu_notifier(&blk_cpu_notifier);
2057 
2058 	return 0;
2059 }
2060 
2061