xref: /linux/block/blk-core.c (revision b34bce45530ca897aea35915e0e42eb3c8047b52)
1 /*
2  * Copyright (C) 1991, 1992 Linus Torvalds
3  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
4  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
5  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7  *	- July 2000
8  * bio rewrite, highmem i/o, etc., Jens Axboe <axboe@suse.de> - May 2001
9  */
10 
11 /*
12  * This handles all read/write requests to block devices
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/highmem.h>
20 #include <linux/mm.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/string.h>
23 #include <linux/init.h>
24 #include <linux/completion.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
28 #include <linux/task_io_accounting_ops.h>
29 #include <linux/fault-inject.h>
30 
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/block.h>
33 
34 #include "blk.h"
35 
36 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
37 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
38 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
39 
40 static int __make_request(struct request_queue *q, struct bio *bio);
41 
42 /*
43  * For the allocated request tables
44  */
45 static struct kmem_cache *request_cachep;
46 
47 /*
48  * For queue allocation
49  */
50 struct kmem_cache *blk_requestq_cachep;
51 
52 /*
53  * Controlling structure to kblockd
54  */
55 static struct workqueue_struct *kblockd_workqueue;
56 
57 static void drive_stat_acct(struct request *rq, int new_io)
58 {
59 	struct hd_struct *part;
60 	int rw = rq_data_dir(rq);
61 	int cpu;
62 
63 	if (!blk_do_io_stat(rq))
64 		return;
65 
66 	cpu = part_stat_lock();
67 	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
68 
69 	if (!new_io)
70 		part_stat_inc(cpu, part, merges[rw]);
71 	else {
72 		part_round_stats(cpu, part);
73 		part_inc_in_flight(part, rw);
74 	}
75 
76 	part_stat_unlock();
77 }
78 
79 void blk_queue_congestion_threshold(struct request_queue *q)
80 {
81 	int nr;
82 
83 	nr = q->nr_requests - (q->nr_requests / 8) + 1;
84 	if (nr > q->nr_requests)
85 		nr = q->nr_requests;
86 	q->nr_congestion_on = nr;
87 
88 	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
89 	if (nr < 1)
90 		nr = 1;
91 	q->nr_congestion_off = nr;
92 }
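
/*
 * Worked example: with the common default of q->nr_requests = 128, the
 * arithmetic above gives
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * so the queue is marked congested once 113 requests are allocated, and
 * the mark is cleared again when it drains below 103.
 */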
93 
94 /**
95  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
96  * @bdev:	device
97  *
98  * Locates the passed device's request queue and returns the address of its
99  * backing_dev_info
100  *
101  * Will return NULL if the request queue cannot be located.
102  */
103 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
104 {
105 	struct backing_dev_info *ret = NULL;
106 	struct request_queue *q = bdev_get_queue(bdev);
107 
108 	if (q)
109 		ret = &q->backing_dev_info;
110 	return ret;
111 }
112 EXPORT_SYMBOL(blk_get_backing_dev_info);
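
/*
 * Example (illustrative sketch): an upper layer holding a bdev reference
 * can query the device's read-ahead window like this:
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *	if (bdi)
 *		ra_pages = bdi->ra_pages;
 */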
113 
114 void blk_rq_init(struct request_queue *q, struct request *rq)
115 {
116 	memset(rq, 0, sizeof(*rq));
117 
118 	INIT_LIST_HEAD(&rq->queuelist);
119 	INIT_LIST_HEAD(&rq->timeout_list);
120 	rq->cpu = -1;
121 	rq->q = q;
122 	rq->__sector = (sector_t) -1;
123 	INIT_HLIST_NODE(&rq->hash);
124 	RB_CLEAR_NODE(&rq->rb_node);
125 	rq->cmd = rq->__cmd;
126 	rq->cmd_len = BLK_MAX_CDB;
127 	rq->tag = -1;
128 	rq->ref_count = 1;
129 	rq->start_time = jiffies;
130 	set_start_time_ns(rq);
131 }
132 EXPORT_SYMBOL(blk_rq_init);
133 
134 static void req_bio_endio(struct request *rq, struct bio *bio,
135 			  unsigned int nbytes, int error)
136 {
137 	struct request_queue *q = rq->q;
138 
139 	if (&q->bar_rq != rq) {
140 		if (error)
141 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
142 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
143 			error = -EIO;
144 
145 		if (unlikely(nbytes > bio->bi_size)) {
146 			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
147 			       __func__, nbytes, bio->bi_size);
148 			nbytes = bio->bi_size;
149 		}
150 
151 		if (unlikely(rq->cmd_flags & REQ_QUIET))
152 			set_bit(BIO_QUIET, &bio->bi_flags);
153 
154 		bio->bi_size -= nbytes;
155 		bio->bi_sector += (nbytes >> 9);
156 
157 		if (bio_integrity(bio))
158 			bio_integrity_advance(bio, nbytes);
159 
160 		if (bio->bi_size == 0)
161 			bio_endio(bio, error);
162 	} else {
163 
164 		/*
165 		 * Okay, this is the barrier request in progress, just
166 		 * record the error.
167 		 */
168 		if (error && !q->orderr)
169 			q->orderr = error;
170 	}
171 }
172 
173 void blk_dump_rq_flags(struct request *rq, char *msg)
174 {
175 	int bit;
176 
177 	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
178 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
179 		rq->cmd_flags);
180 
181 	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
182 	       (unsigned long long)blk_rq_pos(rq),
183 	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
184 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
185 	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
186 
187 	if (blk_pc_request(rq)) {
188 		printk(KERN_INFO "  cdb: ");
189 		for (bit = 0; bit < BLK_MAX_CDB; bit++)
190 			printk("%02x ", rq->cmd[bit]);
191 		printk("\n");
192 	}
193 }
194 EXPORT_SYMBOL(blk_dump_rq_flags);
195 
196 /*
197  * "plug" the device if there are no outstanding requests: this will
198  * force the transfer to start only after we have put all the requests
199  * on the list.
200  *
201  * This is called with interrupts off and no requests on the queue and
202  * with the queue lock held.
203  */
204 void blk_plug_device(struct request_queue *q)
205 {
206 	WARN_ON(!irqs_disabled());
207 
208 	/*
209 	 * Don't plug a stopped queue: it must be paired with blk_start_queue(),
210 	 * which will restart the queueing.
211 	 */
212 	if (blk_queue_stopped(q))
213 		return;
214 
215 	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
216 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
217 		trace_block_plug(q);
218 	}
219 }
220 EXPORT_SYMBOL(blk_plug_device);
221 
222 /**
223  * blk_plug_device_unlocked - plug a device without queue lock held
224  * @q:    The &struct request_queue to plug
225  *
226  * Description:
227  *   Like blk_plug_device(), but grabs the queue lock and disables
228  *   interrupts.
229  **/
230 void blk_plug_device_unlocked(struct request_queue *q)
231 {
232 	unsigned long flags;
233 
234 	spin_lock_irqsave(q->queue_lock, flags);
235 	blk_plug_device(q);
236 	spin_unlock_irqrestore(q->queue_lock, flags);
237 }
238 EXPORT_SYMBOL(blk_plug_device_unlocked);
239 
240 /*
241  * remove the queue from the plugged list, if present. called with
242  * queue lock held and interrupts disabled.
243  */
244 int blk_remove_plug(struct request_queue *q)
245 {
246 	WARN_ON(!irqs_disabled());
247 
248 	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
249 		return 0;
250 
251 	del_timer(&q->unplug_timer);
252 	return 1;
253 }
254 EXPORT_SYMBOL(blk_remove_plug);
255 
256 /*
257  * remove the plug and let it rip..
258  */
259 void __generic_unplug_device(struct request_queue *q)
260 {
261 	if (unlikely(blk_queue_stopped(q)))
262 		return;
263 	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
264 		return;
265 
266 	q->request_fn(q);
267 }
268 
269 /**
270  * generic_unplug_device - fire a request queue
271  * @q:    The &struct request_queue in question
272  *
273  * Description:
274  *   Linux uses plugging to build bigger request queues before letting
275  *   the device have at them. If a queue is plugged, the I/O scheduler
276  *   is still adding and merging requests on the queue. Once the queue
277  *   gets unplugged, the request_fn defined for the queue is invoked and
278  *   transfers started.
279  **/
280 void generic_unplug_device(struct request_queue *q)
281 {
282 	if (blk_queue_plugged(q)) {
283 		spin_lock_irq(q->queue_lock);
284 		__generic_unplug_device(q);
285 		spin_unlock_irq(q->queue_lock);
286 	}
287 }
288 EXPORT_SYMBOL(generic_unplug_device);
289 
290 static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
291 				   struct page *page)
292 {
293 	struct request_queue *q = bdi->unplug_io_data;
294 
295 	blk_unplug(q);
296 }
297 
298 void blk_unplug_work(struct work_struct *work)
299 {
300 	struct request_queue *q =
301 		container_of(work, struct request_queue, unplug_work);
302 
303 	trace_block_unplug_io(q);
304 	q->unplug_fn(q);
305 }
306 
307 void blk_unplug_timeout(unsigned long data)
308 {
309 	struct request_queue *q = (struct request_queue *)data;
310 
311 	trace_block_unplug_timer(q);
312 	kblockd_schedule_work(q, &q->unplug_work);
313 }
314 
315 void blk_unplug(struct request_queue *q)
316 {
317 	/*
318 	 * devices don't necessarily have an ->unplug_fn defined
319 	 */
320 	if (q->unplug_fn) {
321 		trace_block_unplug_io(q);
322 		q->unplug_fn(q);
323 	}
324 }
325 EXPORT_SYMBOL(blk_unplug);
326 
327 /**
328  * blk_start_queue - restart a previously stopped queue
329  * @q:    The &struct request_queue in question
330  *
331  * Description:
332  *   blk_start_queue() will clear the stop flag on the queue, and call
333  *   the request_fn for the queue if it was in a stopped state when
334  *   entered. Also see blk_stop_queue(). Queue lock must be held.
335  **/
336 void blk_start_queue(struct request_queue *q)
337 {
338 	WARN_ON(!irqs_disabled());
339 
340 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
341 	__blk_run_queue(q);
342 }
343 EXPORT_SYMBOL(blk_start_queue);
344 
345 /**
346  * blk_stop_queue - stop a queue
347  * @q:    The &struct request_queue in question
348  *
349  * Description:
350  *   The Linux block layer assumes that a block driver will consume all
351  *   entries on the request queue when the request_fn strategy is called.
352  *   Often this will not happen, because of hardware limitations (queue
353  *   depth settings). If a device driver gets a 'queue full' response,
354  *   or if it simply chooses not to queue more I/O at one point, it can
355  *   call this function to prevent the request_fn from being called until
356  *   the driver has signalled it's ready to go again. This happens by calling
357  *   blk_start_queue() to restart queue operations. Queue lock must be held.
358  **/
359 void blk_stop_queue(struct request_queue *q)
360 {
361 	blk_remove_plug(q);
362 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
363 }
364 EXPORT_SYMBOL(blk_stop_queue);
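
/*
 * Example (illustrative sketch): the two calls are typically paired, with
 * q->queue_lock held in both paths. In the request_fn, on a 'queue full'
 * response from the hardware:
 *
 *	blk_stop_queue(q);
 *
 * and later, from the command completion interrupt handler:
 *
 *	blk_start_queue(q);
 */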
365 
366 /**
367  * blk_sync_queue - cancel any pending callbacks on a queue
368  * @q: the queue
369  *
370  * Description:
371  *     The block layer may perform asynchronous callback activity
372  *     on a queue, such as calling the unplug function after a timeout.
373  *     A block device may call blk_sync_queue to ensure that any
374  *     such activity is cancelled, thus allowing it to release resources
375  *     that the callbacks might use. The caller must already have made sure
376  *     that its ->make_request_fn will not re-add plugging prior to calling
377  *     this function.
378  *
379  */
380 void blk_sync_queue(struct request_queue *q)
381 {
382 	del_timer_sync(&q->unplug_timer);
383 	del_timer_sync(&q->timeout);
384 	cancel_work_sync(&q->unplug_work);
385 }
386 EXPORT_SYMBOL(blk_sync_queue);
387 
388 /**
389  * __blk_run_queue - run a single device queue
390  * @q:	The queue to run
391  *
392  * Description:
393  *    See blk_run_queue(). This variant must be called with the queue lock
394  *    held and interrupts disabled.
395  *
396  */
397 void __blk_run_queue(struct request_queue *q)
398 {
399 	blk_remove_plug(q);
400 
401 	if (unlikely(blk_queue_stopped(q)))
402 		return;
403 
404 	if (elv_queue_empty(q))
405 		return;
406 
407 	/*
408 	 * Only recurse once to avoid overrunning the stack; let the unplug
409 	 * handling reinvoke the handler shortly if we already got there.
410 	 */
411 	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
412 		q->request_fn(q);
413 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
414 	} else {
415 		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
416 		kblockd_schedule_work(q, &q->unplug_work);
417 	}
418 }
419 EXPORT_SYMBOL(__blk_run_queue);
420 
421 /**
422  * blk_run_queue - run a single device queue
423  * @q: The queue to run
424  *
425  * Description:
426  *    Invoke request handling on this queue, if it has pending work to do.
427  *    May be used to restart queueing when a request has completed.
428  */
429 void blk_run_queue(struct request_queue *q)
430 {
431 	unsigned long flags;
432 
433 	spin_lock_irqsave(q->queue_lock, flags);
434 	__blk_run_queue(q);
435 	spin_unlock_irqrestore(q->queue_lock, flags);
436 }
437 EXPORT_SYMBOL(blk_run_queue);
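
/*
 * Example (illustrative sketch): a driver completion path that has just
 * freed a hardware slot can finish the request and kick the queue again:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	__blk_end_request_all(rq, error);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *	blk_run_queue(q);
 */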
438 
439 void blk_put_queue(struct request_queue *q)
440 {
441 	kobject_put(&q->kobj);
442 }
443 
444 void blk_cleanup_queue(struct request_queue *q)
445 {
446 	/*
447 	 * We know we have process context here, so we can be a little
448 	 * cautious and ensure that pending block actions on this device
449 	 * are done before moving on. Going into this function, we should
450 	 * not have processes doing IO to this device.
451 	 */
452 	blk_sync_queue(q);
453 
454 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
455 	mutex_lock(&q->sysfs_lock);
456 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
457 	mutex_unlock(&q->sysfs_lock);
458 
459 	if (q->elevator)
460 		elevator_exit(q->elevator);
461 
462 	blk_put_queue(q);
463 }
464 EXPORT_SYMBOL(blk_cleanup_queue);
465 
466 static int blk_init_free_list(struct request_queue *q)
467 {
468 	struct request_list *rl = &q->rq;
469 
470 	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
471 	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
472 	rl->elvpriv = 0;
473 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
474 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
475 
476 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
477 				mempool_free_slab, request_cachep, q->node);
478 
479 	if (!rl->rq_pool)
480 		return -ENOMEM;
481 
482 	return 0;
483 }
484 
485 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
486 {
487 	return blk_alloc_queue_node(gfp_mask, -1);
488 }
489 EXPORT_SYMBOL(blk_alloc_queue);
490 
491 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
492 {
493 	struct request_queue *q;
494 	int err;
495 
496 	q = kmem_cache_alloc_node(blk_requestq_cachep,
497 				gfp_mask | __GFP_ZERO, node_id);
498 	if (!q)
499 		return NULL;
500 
501 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
502 	q->backing_dev_info.unplug_io_data = q;
503 	q->backing_dev_info.ra_pages =
504 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
505 	q->backing_dev_info.state = 0;
506 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
507 	q->backing_dev_info.name = "block";
508 
509 	err = bdi_init(&q->backing_dev_info);
510 	if (err) {
511 		kmem_cache_free(blk_requestq_cachep, q);
512 		return NULL;
513 	}
514 
515 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
516 		    laptop_mode_timer_fn, (unsigned long) q);
517 	init_timer(&q->unplug_timer);
518 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
519 	INIT_LIST_HEAD(&q->timeout_list);
520 	INIT_WORK(&q->unplug_work, blk_unplug_work);
521 
522 	kobject_init(&q->kobj, &blk_queue_ktype);
523 
524 	mutex_init(&q->sysfs_lock);
525 	spin_lock_init(&q->__queue_lock);
526 
527 	return q;
528 }
529 EXPORT_SYMBOL(blk_alloc_queue_node);
530 
531 /**
532  * blk_init_queue  - prepare a request queue for use with a block device
533  * @rfn:  The function to be called to process requests that have been
534  *        placed on the queue.
535  * @lock: Request queue spin lock
536  *
537  * Description:
538  *    If a block device wishes to use the standard request handling procedures,
539  *    which sorts requests and coalesces adjacent requests, then it must
540  *    call blk_init_queue().  The function @rfn will be called when there
541  *    are requests on the queue that need to be processed.  If the device
542  *    supports plugging, then @rfn may not be called immediately when requests
543  *    are available on the queue, but may be called at some time later instead.
544  *    Plugged queues are generally unplugged when a buffer belonging to one
545  *    of the requests on the queue is needed, or due to memory pressure.
546  *
547  *    @rfn is not required, or even expected, to remove all requests from the
548  *    queue, but only as many as it can handle at a time.  If it does leave
549  *    requests on the queue, it is responsible for arranging that the requests
550  *    get dealt with eventually.
551  *
552  *    The queue spin lock must be held while manipulating the requests on the
553  *    request queue; this lock is also taken from interrupt context, so irq
554  *    disabling is needed when holding it.
555  *
556  *    Function returns a pointer to the initialized request queue, or %NULL if
557  *    it didn't succeed.
558  *
559  * Note:
560  *    blk_init_queue() must be paired with a blk_cleanup_queue() call
561  *    when the block device is deactivated (such as at module unload).
562  **/
563 
564 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
565 {
566 	return blk_init_queue_node(rfn, lock, -1);
567 }
568 EXPORT_SYMBOL(blk_init_queue);
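
/*
 * Example (illustrative sketch; mydev and mydev_request_fn are
 * hypothetical): a minimal driver init path looks like
 *
 *	spin_lock_init(&mydev->lock);
 *	mydev->queue = blk_init_queue(mydev_request_fn, &mydev->lock);
 *	if (!mydev->queue)
 *		return -ENOMEM;
 *
 * with a matching blk_cleanup_queue(mydev->queue) on teardown.
 */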
569 
570 struct request_queue *
571 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
572 {
573 	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
574 
575 	return blk_init_allocated_queue_node(q, rfn, lock, node_id);
576 }
577 EXPORT_SYMBOL(blk_init_queue_node);
578 
579 struct request_queue *
580 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
581 			 spinlock_t *lock)
582 {
583 	return blk_init_allocated_queue_node(q, rfn, lock, -1);
584 }
585 EXPORT_SYMBOL(blk_init_allocated_queue);
586 
587 struct request_queue *
588 blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
589 			      spinlock_t *lock, int node_id)
590 {
591 	if (!q)
592 		return NULL;
593 
594 	q->node = node_id;
595 	if (blk_init_free_list(q)) {
596 		kmem_cache_free(blk_requestq_cachep, q);
597 		return NULL;
598 	}
599 
600 	q->request_fn		= rfn;
601 	q->prep_rq_fn		= NULL;
602 	q->unplug_fn		= generic_unplug_device;
603 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
604 	q->queue_lock		= lock;
605 
606 	/*
607 	 * This also sets hw/phys segments, boundary and size
608 	 */
609 	blk_queue_make_request(q, __make_request);
610 
611 	q->sg_reserved_size = INT_MAX;
612 
613 	/*
614 	 * all done
615 	 */
616 	if (!elevator_init(q, NULL)) {
617 		blk_queue_congestion_threshold(q);
618 		return q;
619 	}
620 
621 	blk_put_queue(q);
622 	return NULL;
623 }
624 EXPORT_SYMBOL(blk_init_allocated_queue_node);
625 
626 int blk_get_queue(struct request_queue *q)
627 {
628 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
629 		kobject_get(&q->kobj);
630 		return 0;
631 	}
632 
633 	return 1;
634 }
635 
636 static inline void blk_free_request(struct request_queue *q, struct request *rq)
637 {
638 	if (rq->cmd_flags & REQ_ELVPRIV)
639 		elv_put_request(q, rq);
640 	mempool_free(rq, q->rq.rq_pool);
641 }
642 
643 static struct request *
644 blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
645 {
646 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
647 
648 	if (!rq)
649 		return NULL;
650 
651 	blk_rq_init(q, rq);
652 
653 	rq->cmd_flags = flags | REQ_ALLOCED;
654 
655 	if (priv) {
656 		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
657 			mempool_free(rq, q->rq.rq_pool);
658 			return NULL;
659 		}
660 		rq->cmd_flags |= REQ_ELVPRIV;
661 	}
662 
663 	return rq;
664 }
665 
666 /*
667  * ioc_batching returns true if the ioc is a valid batching context and
668  * should be given priority access to a request.
669  */
670 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
671 {
672 	if (!ioc)
673 		return 0;
674 
675 	/*
676 	 * Make sure the process is able to allocate at least 1 request
677 	 * even if the batch times out, otherwise we could theoretically
678 	 * lose wakeups.
679 	 */
680 	return ioc->nr_batch_requests == q->nr_batching ||
681 		(ioc->nr_batch_requests > 0
682 		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
683 }
684 
685 /*
686  * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
687  * will cause the process to be a "batcher" on all queues in the system. This
688  * is the behaviour we want though - once it gets a wakeup it should be given
689  * a nice run.
690  */
691 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
692 {
693 	if (!ioc || ioc_batching(q, ioc))
694 		return;
695 
696 	ioc->nr_batch_requests = q->nr_batching;
697 	ioc->last_waited = jiffies;
698 }
699 
700 static void __freed_request(struct request_queue *q, int sync)
701 {
702 	struct request_list *rl = &q->rq;
703 
704 	if (rl->count[sync] < queue_congestion_off_threshold(q))
705 		blk_clear_queue_congested(q, sync);
706 
707 	if (rl->count[sync] + 1 <= q->nr_requests) {
708 		if (waitqueue_active(&rl->wait[sync]))
709 			wake_up(&rl->wait[sync]);
710 
711 		blk_clear_queue_full(q, sync);
712 	}
713 }
714 
715 /*
716  * A request has just been released.  Account for it, update the full and
717  * congestion status, wake up any waiters.   Called under q->queue_lock.
718  */
719 static void freed_request(struct request_queue *q, int sync, int priv)
720 {
721 	struct request_list *rl = &q->rq;
722 
723 	rl->count[sync]--;
724 	if (priv)
725 		rl->elvpriv--;
726 
727 	__freed_request(q, sync);
728 
729 	if (unlikely(rl->starved[sync ^ 1]))
730 		__freed_request(q, sync ^ 1);
731 }
732 
733 /*
734  * Get a free request, queue_lock must be held.
735  * Returns NULL on failure, with queue_lock held.
736  * Returns !NULL on success, with queue_lock *not held*.
737  */
738 static struct request *get_request(struct request_queue *q, int rw_flags,
739 				   struct bio *bio, gfp_t gfp_mask)
740 {
741 	struct request *rq = NULL;
742 	struct request_list *rl = &q->rq;
743 	struct io_context *ioc = NULL;
744 	const bool is_sync = rw_is_sync(rw_flags) != 0;
745 	int may_queue, priv;
746 
747 	may_queue = elv_may_queue(q, rw_flags);
748 	if (may_queue == ELV_MQUEUE_NO)
749 		goto rq_starved;
750 
751 	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
752 		if (rl->count[is_sync]+1 >= q->nr_requests) {
753 			ioc = current_io_context(GFP_ATOMIC, q->node);
754 			/*
755 			 * The queue will fill after this allocation, so set
756 			 * it as full, and mark this process as "batching".
757 			 * This process will be allowed to complete a batch of
758 			 * requests, others will be blocked.
759 			 */
760 			if (!blk_queue_full(q, is_sync)) {
761 				ioc_set_batching(q, ioc);
762 				blk_set_queue_full(q, is_sync);
763 			} else {
764 				if (may_queue != ELV_MQUEUE_MUST
765 						&& !ioc_batching(q, ioc)) {
766 					/*
767 					 * The queue is full and the allocating
768 					 * process is not a "batcher", and not
769 					 * exempted by the IO scheduler
770 					 */
771 					goto out;
772 				}
773 			}
774 		}
775 		blk_set_queue_congested(q, is_sync);
776 	}
777 
778 	/*
779 	 * Only allow batching queuers to allocate up to 50% over the defined
780 	 * limit of requests, otherwise we could have thousands of requests
781 	 * allocated with any setting of ->nr_requests
782 	 */
783 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
784 		goto out;
785 
786 	rl->count[is_sync]++;
787 	rl->starved[is_sync] = 0;
788 
789 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
790 	if (priv)
791 		rl->elvpriv++;
792 
793 	if (blk_queue_io_stat(q))
794 		rw_flags |= REQ_IO_STAT;
795 	spin_unlock_irq(q->queue_lock);
796 
797 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
798 	if (unlikely(!rq)) {
799 		/*
800 		 * Allocation failed presumably due to memory. Undo anything
801 		 * we might have messed up.
802 		 *
803 		 * The allocating task should really be put onto the front of the
804 		 * wait queue, but this is pretty rare.
805 		 */
806 		spin_lock_irq(q->queue_lock);
807 		freed_request(q, is_sync, priv);
808 
809 		/*
810 		 * In the very unlikely event that allocation failed and no
811 		 * requests for this direction were pending, mark us starved
812 		 * so that freeing of a request in the other direction will
813 		 * notice us. Another possible fix would be to split the
814 		 * rq mempool into READ and WRITE.
815 		 */
816 rq_starved:
817 		if (unlikely(rl->count[is_sync] == 0))
818 			rl->starved[is_sync] = 1;
819 
820 		goto out;
821 	}
822 
823 	/*
824 	 * ioc may be NULL here, and ioc_batching will be false. That's
825 	 * OK, if the queue is under the request limit then requests need
826 	 * not count toward the nr_batch_requests limit. There will always
827 	 * be some limit enforced by BLK_BATCH_TIME.
828 	 */
829 	if (ioc_batching(q, ioc))
830 		ioc->nr_batch_requests--;
831 
832 	trace_block_getrq(q, bio, rw_flags & 1);
833 out:
834 	return rq;
835 }
836 
837 /*
838  * No available requests for this queue, unplug the device and wait for some
839  * requests to become available.
840  *
841  * Called with q->queue_lock held, and returns with it unlocked.
842  */
843 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
844 					struct bio *bio)
845 {
846 	const bool is_sync = rw_is_sync(rw_flags) != 0;
847 	struct request *rq;
848 
849 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
850 	while (!rq) {
851 		DEFINE_WAIT(wait);
852 		struct io_context *ioc;
853 		struct request_list *rl = &q->rq;
854 
855 		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
856 				TASK_UNINTERRUPTIBLE);
857 
858 		trace_block_sleeprq(q, bio, rw_flags & 1);
859 
860 		__generic_unplug_device(q);
861 		spin_unlock_irq(q->queue_lock);
862 		io_schedule();
863 
864 		/*
865 		 * After sleeping, we become a "batching" process and
866 		 * will be able to allocate at least one request, and
867 		 * up to a big batch of them for a small period of time.
868 		 * See ioc_batching, ioc_set_batching
869 		 */
870 		ioc = current_io_context(GFP_NOIO, q->node);
871 		ioc_set_batching(q, ioc);
872 
873 		spin_lock_irq(q->queue_lock);
874 		finish_wait(&rl->wait[is_sync], &wait);
875 
876 		rq = get_request(q, rw_flags, bio, GFP_NOIO);
877 	}
878 
879 	return rq;
880 }
881 
882 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
883 {
884 	struct request *rq;
885 
886 	BUG_ON(rw != READ && rw != WRITE);
887 
888 	spin_lock_irq(q->queue_lock);
889 	if (gfp_mask & __GFP_WAIT) {
890 		rq = get_request_wait(q, rw, NULL);
891 	} else {
892 		rq = get_request(q, rw, NULL, gfp_mask);
893 		if (!rq)
894 			spin_unlock_irq(q->queue_lock);
895 	}
896 	/* q->queue_lock is unlocked at this point */
897 
898 	return rq;
899 }
900 EXPORT_SYMBOL(blk_get_request);
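
/*
 * Example (illustrative sketch): allocating a request for a pass-through
 * command. GFP_KERNEL includes __GFP_WAIT, so the allocation may sleep
 * but will not return NULL:
 *
 *	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd_len = ...;	fill in rq->cmd, map data, etc.
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */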
901 
902 /**
903  * blk_make_request - given a bio, allocate a corresponding struct request.
904  * @q: target request queue
905  * @bio:  The bio describing the memory mappings that will be submitted for IO.
906  *        It may be a chained-bio properly constructed by block/bio layer.
907  * @gfp_mask: gfp flags to be used for memory allocation
908  *
909  * blk_make_request is the parallel of generic_make_request for BLOCK_PC
910  * type commands, where the struct request needs to be further initialized by
911  * the caller. It is passed a &struct bio, which describes the memory info of
912  * the I/O transfer.
913  *
914  * The caller of blk_make_request must make sure that bi_io_vec
915  * are set to describe the memory buffers, and that bio_data_dir() will return
916  * the needed direction of the request (and that all bios in the passed
917  * bio-chain are properly set accordingly).
918  *
919  * If called under non-sleepable conditions, mapped bio buffers must not
920  * need bouncing, by calling the appropriate masked or flagged allocator,
921  * suitable for the target device. Otherwise the call to blk_queue_bounce will
922  * BUG.
923  *
924  * WARNING: When allocating/cloning a bio-chain, careful consideration should be
925  * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
926  * anything but the first bio in the chain. Otherwise you risk waiting for IO
927  * completion of a bio that hasn't been submitted yet, thus resulting in a
928  * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
929  * of bio_alloc(), as that avoids the mempool deadlock.
930  * If possible a big IO should be split into smaller parts when allocation
931  * fails. Partial allocation should not be an error, or you risk a live-lock.
932  */
933 struct request *blk_make_request(struct request_queue *q, struct bio *bio,
934 				 gfp_t gfp_mask)
935 {
936 	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
937 
938 	if (unlikely(!rq))
939 		return ERR_PTR(-ENOMEM);
940 
941 	for_each_bio(bio) {
942 		struct bio *bounce_bio = bio;
943 		int ret;
944 
945 		blk_queue_bounce(q, &bounce_bio);
946 		ret = blk_rq_append_bio(q, rq, bounce_bio);
947 		if (unlikely(ret)) {
948 			blk_put_request(rq);
949 			return ERR_PTR(ret);
950 		}
951 	}
952 
953 	return rq;
954 }
955 EXPORT_SYMBOL(blk_make_request);
956 
957 /**
958  * blk_requeue_request - put a request back on queue
959  * @q:		request queue where request should be inserted
960  * @rq:		request to be inserted
961  *
962  * Description:
963  *    Drivers often keep queueing requests until the hardware cannot accept
964  *    more; when that condition happens, we need to put the request back
965  *    on the queue. Must be called with queue lock held.
966  */
967 void blk_requeue_request(struct request_queue *q, struct request *rq)
968 {
969 	blk_delete_timer(rq);
970 	blk_clear_rq_complete(rq);
971 	trace_block_rq_requeue(q, rq);
972 
973 	if (blk_rq_tagged(rq))
974 		blk_queue_end_tag(q, rq);
975 
976 	BUG_ON(blk_queued_rq(rq));
977 
978 	elv_requeue_request(q, rq);
979 }
980 EXPORT_SYMBOL(blk_requeue_request);
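
/*
 * Example (illustrative sketch; mydev_* helpers are hypothetical): a
 * request_fn that backs off while the hardware is busy:
 *
 *	while ((rq = blk_fetch_request(q)) != NULL) {
 *		if (mydev_hw_busy(mydev)) {
 *			blk_requeue_request(q, rq);
 *			blk_stop_queue(q);
 *			break;
 *		}
 *		mydev_issue(mydev, rq);
 *	}
 */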
981 
982 /**
983  * blk_insert_request - insert a special request into a request queue
984  * @q:		request queue where request should be inserted
985  * @rq:		request to be inserted
986  * @at_head:	insert request at head or tail of queue
987  * @data:	private data
988  *
989  * Description:
990  *    Many block devices need to execute commands asynchronously, so they don't
991  *    block the whole kernel from preemption during request execution.  This is
992  *    accomplished normally by inserting artificial requests tagged as
993  *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
994  *    be scheduled for actual execution by the request queue.
995  *
996  *    We have the option of inserting at the head or the tail of the queue.
997  *    Typically we use the tail for new ioctls and so forth.  We use the head
998  *    of the queue for things like a QUEUE_FULL message from a device, or a
999  *    host that is unable to accept a particular command.
1000  */
1001 void blk_insert_request(struct request_queue *q, struct request *rq,
1002 			int at_head, void *data)
1003 {
1004 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
1005 	unsigned long flags;
1006 
1007 	/*
1008 	 * tell I/O scheduler that this isn't a regular read/write (ie it
1009 	 * must not attempt merges on this) and that it acts as a soft
1010 	 * barrier
1011 	 */
1012 	rq->cmd_type = REQ_TYPE_SPECIAL;
1013 
1014 	rq->special = data;
1015 
1016 	spin_lock_irqsave(q->queue_lock, flags);
1017 
1018 	/*
1019 	 * If command is tagged, release the tag
1020 	 */
1021 	if (blk_rq_tagged(rq))
1022 		blk_queue_end_tag(q, rq);
1023 
1024 	drive_stat_acct(rq, 1);
1025 	__elv_add_request(q, rq, where, 0);
1026 	__blk_run_queue(q);
1027 	spin_unlock_irqrestore(q->queue_lock, flags);
1028 }
1029 EXPORT_SYMBOL(blk_insert_request);
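
/*
 * Example (illustrative sketch; my_cmd is hypothetical): a driver pushing
 * a private command ahead of normal I/O:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	blk_insert_request(q, rq, 1, my_cmd);
 *
 * where my_cmd is whatever the driver's request_fn expects to find in
 * rq->special.
 */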
1030 
1031 /*
1032  * add-request adds a request to the linked list.
1033  * queue lock is held and interrupts disabled, as we muck with the
1034  * request queue list.
1035  */
1036 static inline void add_request(struct request_queue *q, struct request *req)
1037 {
1038 	drive_stat_acct(req, 1);
1039 
1040 	/*
1041 	 * elevator indicated where it wants this request to be
1042 	 * inserted at elevator_merge time
1043 	 */
1044 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
1045 }
1046 
1047 static void part_round_stats_single(int cpu, struct hd_struct *part,
1048 				    unsigned long now)
1049 {
1050 	if (now == part->stamp)
1051 		return;
1052 
1053 	if (part_in_flight(part)) {
1054 		__part_stat_add(cpu, part, time_in_queue,
1055 				part_in_flight(part) * (now - part->stamp));
1056 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1057 	}
1058 	part->stamp = now;
1059 }
1060 
1061 /**
1062  * part_round_stats() - Round off the performance stats on a struct disk_stats.
1063  * @cpu: cpu number for stats access
1064  * @part: target partition
1065  *
1066  * The average IO queue length and utilisation statistics are maintained
1067  * by observing the current state of the queue length and the amount of
1068  * time it has been in this state for.
1069  *
1070  * Normally, that accounting is done on IO completion, but that can result
1071  * in more than a second's worth of IO being accounted for within any one
1072  * second, leading to >100% utilisation.  To deal with that, we call this
1073  * function to do a round-off before returning the results when reading
1074  * /proc/diskstats.  This accounts immediately for all queue usage up to
1075  * the current jiffies and restarts the counters again.
1076  */
1077 void part_round_stats(int cpu, struct hd_struct *part)
1078 {
1079 	unsigned long now = jiffies;
1080 
1081 	if (part->partno)
1082 		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1083 	part_round_stats_single(cpu, part, now);
1084 }
1085 EXPORT_SYMBOL_GPL(part_round_stats);
1086 
1087 /*
1088  * queue lock must be held
1089  */
1090 void __blk_put_request(struct request_queue *q, struct request *req)
1091 {
1092 	if (unlikely(!q))
1093 		return;
1094 	if (unlikely(--req->ref_count))
1095 		return;
1096 
1097 	elv_completed_request(q, req);
1098 
1099 	/* this is a bio leak */
1100 	WARN_ON(req->bio != NULL);
1101 
1102 	/*
1103 	 * The request may not have originated from ll_rw_blk. If not,
1104 	 * it didn't come out of our reserved rq pools.
1105 	 */
1106 	if (req->cmd_flags & REQ_ALLOCED) {
1107 		int is_sync = rq_is_sync(req) != 0;
1108 		int priv = req->cmd_flags & REQ_ELVPRIV;
1109 
1110 		BUG_ON(!list_empty(&req->queuelist));
1111 		BUG_ON(!hlist_unhashed(&req->hash));
1112 
1113 		blk_free_request(q, req);
1114 		freed_request(q, is_sync, priv);
1115 	}
1116 }
1117 EXPORT_SYMBOL_GPL(__blk_put_request);
1118 
1119 void blk_put_request(struct request *req)
1120 {
1121 	unsigned long flags;
1122 	struct request_queue *q = req->q;
1123 
1124 	spin_lock_irqsave(q->queue_lock, flags);
1125 	__blk_put_request(q, req);
1126 	spin_unlock_irqrestore(q->queue_lock, flags);
1127 }
1128 EXPORT_SYMBOL(blk_put_request);
1129 
1130 void init_request_from_bio(struct request *req, struct bio *bio)
1131 {
1132 	req->cpu = bio->bi_comp_cpu;
1133 	req->cmd_type = REQ_TYPE_FS;
1134 
1135 	/*
1136 	 * Inherit FAILFAST from bio (for read-ahead, and explicit
1137 	 * FAILFAST).  FAILFAST flags are identical for req and bio.
1138 	 */
1139 	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
1140 		req->cmd_flags |= REQ_FAILFAST_MASK;
1141 	else
1142 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
1143 
1144 	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
1145 		req->cmd_flags |= REQ_DISCARD;
1146 		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
1147 			req->cmd_flags |= REQ_SOFTBARRIER;
1148 	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
1149 		req->cmd_flags |= REQ_HARDBARRIER;
1150 
1151 	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
1152 		req->cmd_flags |= REQ_RW_SYNC;
1153 	if (bio_rw_flagged(bio, BIO_RW_META))
1154 		req->cmd_flags |= REQ_RW_META;
1155 	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
1156 		req->cmd_flags |= REQ_NOIDLE;
1157 
1158 	req->errors = 0;
1159 	req->__sector = bio->bi_sector;
1160 	req->ioprio = bio_prio(bio);
1161 	blk_rq_bio_prep(req->q, req, bio);
1162 }
1163 
1164 /*
1165  * Only disable plugging for non-rotational devices if they do tagging
1166  * as well; otherwise we do need the proper merging.
1167  */
1168 static inline bool queue_should_plug(struct request_queue *q)
1169 {
1170 	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
1171 }
1172 
1173 static int __make_request(struct request_queue *q, struct bio *bio)
1174 {
1175 	struct request *req;
1176 	int el_ret;
1177 	unsigned int bytes = bio->bi_size;
1178 	const unsigned short prio = bio_prio(bio);
1179 	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
1180 	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
1181 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1182 	int rw_flags;
1183 
1184 	if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
1185 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
1186 		bio_endio(bio, -EOPNOTSUPP);
1187 		return 0;
1188 	}
1189 	/*
1190 	 * low level driver can indicate that it wants pages above a
1191 	 * certain limit bounced to low memory (ie for highmem, or even
1192 	 * ISA dma in theory)
1193 	 */
1194 	blk_queue_bounce(q, &bio);
1195 
1196 	spin_lock_irq(q->queue_lock);
1197 
1198 	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
1199 		goto get_rq;
1200 
1201 	el_ret = elv_merge(q, &req, bio);
1202 	switch (el_ret) {
1203 	case ELEVATOR_BACK_MERGE:
1204 		BUG_ON(!rq_mergeable(req));
1205 
1206 		if (!ll_back_merge_fn(q, req, bio))
1207 			break;
1208 
1209 		trace_block_bio_backmerge(q, bio);
1210 
1211 		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1212 			blk_rq_set_mixed_merge(req);
1213 
1214 		req->biotail->bi_next = bio;
1215 		req->biotail = bio;
1216 		req->__data_len += bytes;
1217 		req->ioprio = ioprio_best(req->ioprio, prio);
1218 		if (!blk_rq_cpu_valid(req))
1219 			req->cpu = bio->bi_comp_cpu;
1220 		drive_stat_acct(req, 0);
1221 		elv_bio_merged(q, req, bio);
1222 		if (!attempt_back_merge(q, req))
1223 			elv_merged_request(q, req, el_ret);
1224 		goto out;
1225 
1226 	case ELEVATOR_FRONT_MERGE:
1227 		BUG_ON(!rq_mergeable(req));
1228 
1229 		if (!ll_front_merge_fn(q, req, bio))
1230 			break;
1231 
1232 		trace_block_bio_frontmerge(q, bio);
1233 
1234 		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
1235 			blk_rq_set_mixed_merge(req);
1236 			req->cmd_flags &= ~REQ_FAILFAST_MASK;
1237 			req->cmd_flags |= ff;
1238 		}
1239 
1240 		bio->bi_next = req->bio;
1241 		req->bio = bio;
1242 
1243 		/*
1244 		 * may not be valid. If the low level driver said
1245 		 * it didn't need a bounce buffer then it had better
1246 		 * not touch req->buffer either...
1247 		 */
1248 		req->buffer = bio_data(bio);
1249 		req->__sector = bio->bi_sector;
1250 		req->__data_len += bytes;
1251 		req->ioprio = ioprio_best(req->ioprio, prio);
1252 		if (!blk_rq_cpu_valid(req))
1253 			req->cpu = bio->bi_comp_cpu;
1254 		drive_stat_acct(req, 0);
1255 		elv_bio_merged(q, req, bio);
1256 		if (!attempt_front_merge(q, req))
1257 			elv_merged_request(q, req, el_ret);
1258 		goto out;
1259 
1260 	/* ELV_NO_MERGE: elevator says don't/can't merge. */
1261 	default:
1262 		;
1263 	}
1264 
1265 get_rq:
1266 	/*
1267 	 * This sync check and mask will be re-done in init_request_from_bio(),
1268 	 * but we need to set it earlier to expose the sync flag to the
1269 	 * rq allocator and io schedulers.
1270 	 */
1271 	rw_flags = bio_data_dir(bio);
1272 	if (sync)
1273 		rw_flags |= REQ_RW_SYNC;
1274 
1275 	/*
1276 	 * Grab a free request. This might sleep but cannot fail.
1277 	 * Returns with the queue unlocked.
1278 	 */
1279 	req = get_request_wait(q, rw_flags, bio);
1280 
1281 	/*
1282 	 * After dropping the lock and possibly sleeping here, our request
1283 	 * may now be mergeable after it had proven unmergeable (above).
1284 	 * We don't worry about that case for efficiency. It won't happen
1285 	 * often, and the elevators are able to handle it.
1286 	 */
1287 	init_request_from_bio(req, bio);
1288 
1289 	spin_lock_irq(q->queue_lock);
1290 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
1291 	    bio_flagged(bio, BIO_CPU_AFFINE))
1292 		req->cpu = blk_cpu_to_group(smp_processor_id());
1293 	if (queue_should_plug(q) && elv_queue_empty(q))
1294 		blk_plug_device(q);
1295 	add_request(q, req);
1296 out:
1297 	if (unplug || !queue_should_plug(q))
1298 		__generic_unplug_device(q);
1299 	spin_unlock_irq(q->queue_lock);
1300 	return 0;
1301 }
1302 
1303 /*
1304  * If bio->bi_bdev is a partition, remap the location
1305  */
1306 static inline void blk_partition_remap(struct bio *bio)
1307 {
1308 	struct block_device *bdev = bio->bi_bdev;
1309 
1310 	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1311 		struct hd_struct *p = bdev->bd_part;
1312 
1313 		bio->bi_sector += p->start_sect;
1314 		bio->bi_bdev = bdev->bd_contains;
1315 
1316 		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
1317 				    bdev->bd_dev,
1318 				    bio->bi_sector - p->start_sect);
1319 	}
1320 }
1321 
1322 static void handle_bad_sector(struct bio *bio)
1323 {
1324 	char b[BDEVNAME_SIZE];
1325 
1326 	printk(KERN_INFO "attempt to access beyond end of device\n");
1327 	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1328 			bdevname(bio->bi_bdev, b),
1329 			bio->bi_rw,
1330 			(unsigned long long)bio->bi_sector + bio_sectors(bio),
1331 			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
1332 
1333 	set_bit(BIO_EOF, &bio->bi_flags);
1334 }
1335 
1336 #ifdef CONFIG_FAIL_MAKE_REQUEST
1337 
1338 static DECLARE_FAULT_ATTR(fail_make_request);
1339 
1340 static int __init setup_fail_make_request(char *str)
1341 {
1342 	return setup_fault_attr(&fail_make_request, str);
1343 }
1344 __setup("fail_make_request=", setup_fail_make_request);
1345 
1346 static int should_fail_request(struct bio *bio)
1347 {
1348 	struct hd_struct *part = bio->bi_bdev->bd_part;
1349 
1350 	if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
1351 		return should_fail(&fail_make_request, bio->bi_size);
1352 
1353 	return 0;
1354 }
1355 
1356 static int __init fail_make_request_debugfs(void)
1357 {
1358 	return init_fault_attr_dentries(&fail_make_request,
1359 					"fail_make_request");
1360 }
1361 
1362 late_initcall(fail_make_request_debugfs);
1363 
1364 #else /* CONFIG_FAIL_MAKE_REQUEST */
1365 
1366 static inline int should_fail_request(struct bio *bio)
1367 {
1368 	return 0;
1369 }
1370 
1371 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1372 
1373 /*
1374  * Check whether this bio extends beyond the end of the device.
1375  */
1376 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1377 {
1378 	sector_t maxsector;
1379 
1380 	if (!nr_sectors)
1381 		return 0;
1382 
1383 	/* Test device or partition size, when known. */
1384 	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1385 	if (maxsector) {
1386 		sector_t sector = bio->bi_sector;
1387 
1388 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1389 			/*
1390 			 * This may well happen - the kernel calls bread()
1391 			 * without checking the size of the device, e.g., when
1392 			 * mounting a device.
1393 			 */
1394 			handle_bad_sector(bio);
1395 			return 1;
1396 		}
1397 	}
1398 
1399 	return 0;
1400 }
1401 
1402 /**
1403  * generic_make_request - hand a buffer to its device driver for I/O
1404  * @bio:  The bio describing the location in memory and on the device.
1405  *
1406  * generic_make_request() is used to make I/O requests of block
1407  * devices. It is passed a &struct bio, which describes the I/O that needs
1408  * to be done.
1409  *
1410  * generic_make_request() does not return any status.  The
1411  * success/failure status of the request, along with notification of
1412  * completion, is delivered asynchronously through the bio->bi_end_io
1413  * function described (one day) elsewhere.
1414  *
1415  * The caller of generic_make_request must make sure that bi_io_vec
1416  * are set to describe the memory buffer, and that bi_bdev and bi_sector are
1417  * set to describe the device address, and that the
1418  * bi_end_io and optionally bi_private are set to describe how
1419  * completion notification should be signaled.
1420  *
1421  * generic_make_request and the drivers it calls may use bi_next if this
1422  * bio happens to be merged with someone else, and may change bi_bdev and
1423  * bi_sector for remaps as it sees fit.  So the values of these fields
1424  * should NOT be depended on after the call to generic_make_request.
1425  */
1426 static inline void __generic_make_request(struct bio *bio)
1427 {
1428 	struct request_queue *q;
1429 	sector_t old_sector;
1430 	int ret, nr_sectors = bio_sectors(bio);
1431 	dev_t old_dev;
1432 	int err = -EIO;
1433 
1434 	might_sleep();
1435 
1436 	if (bio_check_eod(bio, nr_sectors))
1437 		goto end_io;
1438 
1439 	/*
1440 	 * Resolve the mapping until finished. (drivers are
1441 	 * still free to implement/resolve their own stacking
1442 	 * by explicitly returning 0)
1443 	 *
1444 	 * NOTE: we don't repeat the blk_size check for each new device.
1445 	 * Stacking drivers are expected to know what they are doing.
1446 	 */
1447 	old_sector = -1;
1448 	old_dev = 0;
1449 	do {
1450 		char b[BDEVNAME_SIZE];
1451 
1452 		q = bdev_get_queue(bio->bi_bdev);
1453 		if (unlikely(!q)) {
1454 			printk(KERN_ERR
1455 			       "generic_make_request: Trying to access "
1456 				"nonexistent block-device %s (%Lu)\n",
1457 				bdevname(bio->bi_bdev, b),
1458 				(long long) bio->bi_sector);
1459 			goto end_io;
1460 		}
1461 
1462 		if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
1463 			     nr_sectors > queue_max_hw_sectors(q))) {
1464 			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1465 			       bdevname(bio->bi_bdev, b),
1466 			       bio_sectors(bio),
1467 			       queue_max_hw_sectors(q));
1468 			goto end_io;
1469 		}
1470 
1471 		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1472 			goto end_io;
1473 
1474 		if (should_fail_request(bio))
1475 			goto end_io;
1476 
1477 		/*
1478 		 * If this device has partitions, remap block n
1479 		 * of partition p to block n+start(p) of the disk.
1480 		 */
1481 		blk_partition_remap(bio);
1482 
1483 		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1484 			goto end_io;
1485 
1486 		if (old_sector != -1)
1487 			trace_block_remap(q, bio, old_dev, old_sector);
1488 
1489 		old_sector = bio->bi_sector;
1490 		old_dev = bio->bi_bdev->bd_dev;
1491 
1492 		if (bio_check_eod(bio, nr_sectors))
1493 			goto end_io;
1494 
1495 		if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
1496 		    !blk_queue_discard(q)) {
1497 			err = -EOPNOTSUPP;
1498 			goto end_io;
1499 		}
1500 
1501 		trace_block_bio_queue(q, bio);
1502 
1503 		ret = q->make_request_fn(q, bio);
1504 	} while (ret);
1505 
1506 	return;
1507 
1508 end_io:
1509 	bio_endio(bio, err);
1510 }
1511 
1512 /*
1513  * We only want one ->make_request_fn to be active at a time,
1514  * else stack usage with stacked devices could be a problem.
1515  * So use current->bio_list to keep a list of requests
1516  * submitted by a make_request_fn function.
1517  * current->bio_list is also used as a flag to say if
1518  * generic_make_request is currently active in this task or not.
1519  * If it is NULL, then no make_request is active.  If it is non-NULL,
1520  * then a make_request is active, and new requests should be added
1521  * at the tail.
1522  */
1523 void generic_make_request(struct bio *bio)
1524 {
1525 	struct bio_list bio_list_on_stack;
1526 
1527 	if (current->bio_list) {
1528 		/* make_request is active */
1529 		bio_list_add(current->bio_list, bio);
1530 		return;
1531 	}
1532 	/* following loop may be a bit non-obvious, and so deserves some
1533 	 * explanation.
1534 	 * Before entering the loop, bio->bi_next is NULL (as all callers
1535 	 * ensure that) so we have a list with a single bio.
1536 	 * We pretend that we have just taken it off a longer list, so
1537 	 * we assign bio_list to a pointer to the bio_list_on_stack,
1538 	 * thus initialising the bio_list of new bios to be
1539 	 * added.  __generic_make_request may indeed add some more bios
1540 	 * through a recursive call to generic_make_request.  If it
1541 	 * did, we find a non-NULL value in bio_list and re-enter the loop
1542 	 * from the top.  In this case we really did just take the bio
1543 	 * off the top of the list (no pretending) and so remove it from
1544 	 * bio_list, and call into __generic_make_request again.
1545 	 *
1546 	 * The loop was structured like this to make only one call to
1547 	 * __generic_make_request (which is important as it is large and
1548 	 * inlined) and to keep the structure simple.
1549 	 */
1550 	BUG_ON(bio->bi_next);
1551 	bio_list_init(&bio_list_on_stack);
1552 	current->bio_list = &bio_list_on_stack;
1553 	do {
1554 		__generic_make_request(bio);
1555 		bio = bio_list_pop(current->bio_list);
1556 	} while (bio);
1557 	current->bio_list = NULL; /* deactivate */
1558 }
1559 EXPORT_SYMBOL(generic_make_request);
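
/*
 * Example (illustrative sketch; my_end_io and my_ctx are hypothetical):
 * the minimal setup described above, for a one-page read:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = my_ctx;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(READ, bio);
 *
 * submit_bio() (below) sets bi_rw and then calls generic_make_request().
 */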
1560 
1561 /**
1562  * submit_bio - submit a bio to the block device layer for I/O
1563  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1564  * @bio: The &struct bio which describes the I/O
1565  *
1566  * submit_bio() is very similar in purpose to generic_make_request(), and
1567  * uses that function to do most of the work. Both are fairly rough
1568  * interfaces; @bio must be presetup and ready for I/O.
1569  *
1570  */
1571 void submit_bio(int rw, struct bio *bio)
1572 {
1573 	int count = bio_sectors(bio);
1574 
1575 	bio->bi_rw |= rw;
1576 
1577 	/*
1578 	 * If it's a regular read/write or a barrier with data attached,
1579 	 * go through the normal accounting stuff before submission.
1580 	 */
1581 	if (bio_has_data(bio)) {
1582 		if (rw & WRITE) {
1583 			count_vm_events(PGPGOUT, count);
1584 		} else {
1585 			task_io_account_read(bio->bi_size);
1586 			count_vm_events(PGPGIN, count);
1587 		}
1588 
1589 		if (unlikely(block_dump)) {
1590 			char b[BDEVNAME_SIZE];
1591 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
1592 			current->comm, task_pid_nr(current),
1593 				(rw & WRITE) ? "WRITE" : "READ",
1594 				(unsigned long long)bio->bi_sector,
1595 				bdevname(bio->bi_bdev, b));
1596 		}
1597 	}
1598 
1599 	generic_make_request(bio);
1600 }
1601 EXPORT_SYMBOL(submit_bio);
1602 
1603 /**
1604  * blk_rq_check_limits - Helper function to check a request for the queue limit
1605  * @q:  the queue
1606  * @rq: the request being checked
1607  *
1608  * Description:
1609  *    @rq may have been made based on weaker limitations of upper-level queues
1610  *    in request stacking drivers, and it may violate the limitation of @q.
1611  *    Since the block layer and the underlying device driver trust @rq
1612  *    after it is inserted to @q, it should be checked against @q before
1613  *    the insertion using this generic function.
1614  *
1615  *    This function should also be useful for request stacking drivers
1616  *    in some cases below, so export this function.
1617  *    Request stacking drivers like request-based dm may change the queue
1618  *    limits while requests are in the queue (e.g. dm's table swapping).
1619  *    Such request stacking drivers should check those requests against
1620  *    the new queue limits again when they dispatch those requests,
1621  *    although such checks are also done against the old queue limits
1622  *    when submitting requests.
1623  */
1624 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1625 {
1626 	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
1627 	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
1628 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
1629 		return -EIO;
1630 	}
1631 
1632 	/*
1633 	 * queue's settings related to segment counting like q->bounce_pfn
1634 	 * may differ from that of other stacking queues.
1635 	 * Recalculate it to check the request correctly on this queue's
1636 	 * limitation.
1637 	 */
1638 	blk_recalc_rq_segments(rq);
1639 	if (rq->nr_phys_segments > queue_max_segments(q)) {
1640 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1641 		return -EIO;
1642 	}
1643 
1644 	return 0;
1645 }
1646 EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1647 
1648 /**
1649  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1650  * @q:  the queue to submit the request
1651  * @rq: the request being queued
1652  */
1653 int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1654 {
1655 	unsigned long flags;
1656 
1657 	if (blk_rq_check_limits(q, rq))
1658 		return -EIO;
1659 
1660 #ifdef CONFIG_FAIL_MAKE_REQUEST
1661 	if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
1662 	    should_fail(&fail_make_request, blk_rq_bytes(rq)))
1663 		return -EIO;
1664 #endif
1665 
1666 	spin_lock_irqsave(q->queue_lock, flags);
1667 
1668 	/*
1669 	 * The request being submitted must be dequeued before calling this
1670 	 * function, because it will be linked to another request_queue.
1671 	 */
1672 	BUG_ON(blk_queued_rq(rq));
1673 
1674 	drive_stat_acct(rq, 1);
1675 	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
1676 
1677 	spin_unlock_irqrestore(q->queue_lock, flags);
1678 
1679 	return 0;
1680 }
1681 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
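
/*
 * Example (illustrative sketch; my_clone_request() is a hypothetical
 * helper): a request-based stacking driver dispatching to an underlying
 * queue:
 *
 *	clone = my_clone_request(rq);
 *	if (blk_insert_cloned_request(bottom_q, clone))
 *		fail the original rq with -EIO
 */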
1682 
1683 /**
1684  * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1685  * @rq: request to examine
1686  *
1687  * Description:
1688  *     A request could be a merge of IOs which require different failure
1689  *     handling.  This function determines the number of bytes which
1690  *     can be failed from the beginning of the request without
1691  *     crossing into an area which needs to be retried further.
1692  *
1693  * Return:
1694  *     The number of bytes to fail.
1695  *
1696  * Context:
1697  *     queue_lock must be held.
1698  */
1699 unsigned int blk_rq_err_bytes(const struct request *rq)
1700 {
1701 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1702 	unsigned int bytes = 0;
1703 	struct bio *bio;
1704 
1705 	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1706 		return blk_rq_bytes(rq);
1707 
1708 	/*
1709 	 * Currently the only 'mixing' which can happen is between
1710 	 * different failfast types.  We can safely fail portions
1711 	 * which have all the failfast bits that the first one has -
1712 	 * the ones which are at least as eager to fail as the first
1713 	 * one.
1714 	 */
1715 	for (bio = rq->bio; bio; bio = bio->bi_next) {
1716 		if ((bio->bi_rw & ff) != ff)
1717 			break;
1718 		bytes += bio->bi_size;
1719 	}
1720 
1721 	/* this could lead to infinite loop */
1722 	BUG_ON(blk_rq_bytes(rq) && !bytes);
1723 	return bytes;
1724 }
1725 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1726 
1727 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1728 {
1729 	if (blk_do_io_stat(req)) {
1730 		const int rw = rq_data_dir(req);
1731 		struct hd_struct *part;
1732 		int cpu;
1733 
1734 		cpu = part_stat_lock();
1735 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
1736 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1737 		part_stat_unlock();
1738 	}
1739 }
1740 
1741 static void blk_account_io_done(struct request *req)
1742 {
1743 	/*
1744 	 * Account IO completion.  bar_rq isn't accounted as a normal
1745 	 * IO on either queueing or completion.  Accounting the containing
1746 	 * request is enough.
1747 	 */
1748 	if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
1749 		unsigned long duration = jiffies - req->start_time;
1750 		const int rw = rq_data_dir(req);
1751 		struct hd_struct *part;
1752 		int cpu;
1753 
1754 		cpu = part_stat_lock();
1755 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
1756 
1757 		part_stat_inc(cpu, part, ios[rw]);
1758 		part_stat_add(cpu, part, ticks[rw], duration);
1759 		part_round_stats(cpu, part);
1760 		part_dec_in_flight(part, rw);
1761 
1762 		part_stat_unlock();
1763 	}
1764 }
1765 
1766 /**
1767  * blk_peek_request - peek at the top of a request queue
1768  * @q: request queue to peek at
1769  *
1770  * Description:
1771  *     Return the request at the top of @q.  The returned request
1772  *     should be started using blk_start_request() before LLD starts
1773  *     processing it.
1774  *
1775  * Return:
1776  *     Pointer to the request at the top of @q if available.  Null
1777  *     otherwise.
1778  *
1779  * Context:
1780  *     queue_lock must be held.
1781  */
1782 struct request *blk_peek_request(struct request_queue *q)
1783 {
1784 	struct request *rq;
1785 	int ret;
1786 
1787 	while ((rq = __elv_next_request(q)) != NULL) {
1788 		if (!(rq->cmd_flags & REQ_STARTED)) {
1789 			/*
1790 			 * This is the first time the device driver
1791 			 * sees this request (possibly after
1792 			 * requeueing).  Notify IO scheduler.
1793 			 */
1794 			if (blk_sorted_rq(rq))
1795 				elv_activate_rq(q, rq);
1796 
1797 			/*
1798 			 * Just mark it as started even if we don't start
1799 			 * it; a request that has been delayed should
1800 			 * not be passed by new incoming requests.
1801 			 */
1802 			rq->cmd_flags |= REQ_STARTED;
1803 			trace_block_rq_issue(q, rq);
1804 		}
1805 
1806 		if (!q->boundary_rq || q->boundary_rq == rq) {
1807 			q->end_sector = rq_end_sector(rq);
1808 			q->boundary_rq = NULL;
1809 		}
1810 
1811 		if (rq->cmd_flags & REQ_DONTPREP)
1812 			break;
1813 
1814 		if (q->dma_drain_size && blk_rq_bytes(rq)) {
1815 			/*
1816 			 * make sure space for the drain appears.  We
1817 			 * know we can do this because max_hw_segments
1818 			 * has been adjusted to be one fewer than the
1819 			 * device can handle
1820 			 */
1821 			rq->nr_phys_segments++;
1822 		}
1823 
1824 		if (!q->prep_rq_fn)
1825 			break;
1826 
1827 		ret = q->prep_rq_fn(q, rq);
1828 		if (ret == BLKPREP_OK) {
1829 			break;
1830 		} else if (ret == BLKPREP_DEFER) {
1831 			/*
1832 			 * The request may have been (partially) prepped.
1833 			 * We need to keep this request at the front to
1834 			 * avoid resource deadlock.  REQ_STARTED will
1835 			 * prevent other fs requests from passing this one.
1836 			 */
1837 			if (q->dma_drain_size && blk_rq_bytes(rq) &&
1838 			    !(rq->cmd_flags & REQ_DONTPREP)) {
1839 				/*
1840 				 * remove the space for the drain we added
1841 				 * so that we don't add it again
1842 				 */
1843 				--rq->nr_phys_segments;
1844 			}
1845 
1846 			rq = NULL;
1847 			break;
1848 		} else if (ret == BLKPREP_KILL) {
1849 			rq->cmd_flags |= REQ_QUIET;
1850 			/*
1851 			 * Mark this request as started so we don't trigger
1852 			 * any debug logic in the end I/O path.
1853 			 */
1854 			blk_start_request(rq);
1855 			__blk_end_request_all(rq, -EIO);
1856 		} else {
1857 			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
1858 			break;
1859 		}
1860 	}
1861 
1862 	return rq;
1863 }
1864 EXPORT_SYMBOL(blk_peek_request);
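
/*
 * Illustrative sketch, not part of the original file: a driver that
 * must check resource availability before committing to a request can
 * peek first and only start the request once it can service it.
 * my_request_fn() and my_resources_available() are hypothetical.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq = blk_peek_request(q);
 *
 *		if (!rq || !my_resources_available(q))
 *			return;	(rq stays queued and will be retried)
 *		blk_start_request(rq);
 *		...
 *	}
 */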
1865 
1866 void blk_dequeue_request(struct request *rq)
1867 {
1868 	struct request_queue *q = rq->q;
1869 
1870 	BUG_ON(list_empty(&rq->queuelist));
1871 	BUG_ON(ELV_ON_HASH(rq));
1872 
1873 	list_del_init(&rq->queuelist);
1874 
1875 	/*
1876 	 * the time frame between a request being removed from the lists
1877 	 * and when it is freed is accounted as IO that is in progress on
1878 	 * the driver side.
1879 	 */
1880 	if (blk_account_rq(rq)) {
1881 		q->in_flight[rq_is_sync(rq)]++;
1882 		set_io_start_time_ns(rq);
1883 	}
1884 }
1885 
1886 /**
1887  * blk_start_request - start request processing on the driver
1888  * @req: request to dequeue
1889  *
1890  * Description:
1891  *     Dequeue @req and start timeout timer on it.  This hands off the
1892  *     request to the driver.
1893  *
1894  *     Block internal functions which don't want to start the timer should
1895  *     call blk_dequeue_request().
1896  *
1897  * Context:
1898  *     queue_lock must be held.
1899  */
1900 void blk_start_request(struct request *req)
1901 {
1902 	blk_dequeue_request(req);
1903 
1904 	/*
1905 	 * We are now handing the request to the hardware, initialize
1906 	 * resid_len to full count and add the timeout handler.
1907 	 */
1908 	req->resid_len = blk_rq_bytes(req);
1909 	if (unlikely(blk_bidi_rq(req)))
1910 		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
1911 
1912 	blk_add_timer(req);
1913 }
1914 EXPORT_SYMBOL(blk_start_request);
1915 
1916 /**
1917  * blk_fetch_request - fetch a request from a request queue
1918  * @q: request queue to fetch a request from
1919  *
1920  * Description:
1921  *     Return the request at the top of @q.  The request is started on
1922  *     return and LLD can start processing it immediately.
1923  *
1924  * Return:
1925  *     Pointer to the request at the top of @q if available.  Null
1926  *     otherwise.
1927  *
1928  * Context:
1929  *     queue_lock must be held.
1930  */
1931 struct request *blk_fetch_request(struct request_queue *q)
1932 {
1933 	struct request *rq;
1934 
1935 	rq = blk_peek_request(q);
1936 	if (rq)
1937 		blk_start_request(rq);
1938 	return rq;
1939 }
1940 EXPORT_SYMBOL(blk_fetch_request);
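
/*
 * Illustrative sketch, not part of the original file: the canonical
 * LLD request_fn loop built on blk_fetch_request().  my_submit_to_hw()
 * is a hypothetical driver helper; completion happens later through
 * blk_end_request() and friends.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			my_submit_to_hw(rq);
 *	}
 */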
1941 
1942 /**
1943  * blk_update_request - Special helper function for request stacking drivers
1944  * @req:      the request being processed
1945  * @error:    %0 for success, < %0 for error
1946  * @nr_bytes: number of bytes to complete @req
1947  *
1948  * Description:
1949  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
1950  *     the request structure even if @req doesn't have any bytes left over.
1951  *     If @req has leftover bytes, sets it up for the next range of segments.
1952  *
1953  *     This special helper function is only for request stacking drivers
1954  *     (e.g. request-based dm) so that they can handle partial completion.
1955  *     Actual device drivers should use blk_end_request instead.
1956  *
1957  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1958  *     a %false return from this function.
1959  *
1960  * Return:
1961  *     %false - this request doesn't have any more data
1962  *     %true  - this request has more data
1963  **/
1964 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
1965 {
1966 	int total_bytes, bio_nbytes, next_idx = 0;
1967 	struct bio *bio;
1968 
1969 	if (!req->bio)
1970 		return false;
1971 
1972 	trace_block_rq_complete(req->q, req);
1973 
1974 	/*
1975 	 * For fs requests, rq is just carrier of independent bio's
1976 	 * and each partial completion should be handled separately.
1977 	 * Reset per-request error on each partial completion.
1978 	 *
1979 	 * TODO: tj: This is too subtle.  It would be better to let
1980 	 * low level drivers do what they see fit.
1981 	 */
1982 	if (blk_fs_request(req))
1983 		req->errors = 0;
1984 
1985 	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1986 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1987 				req->rq_disk ? req->rq_disk->disk_name : "?",
1988 				(unsigned long long)blk_rq_pos(req));
1989 	}
1990 
1991 	blk_account_io_completion(req, nr_bytes);
1992 
1993 	total_bytes = bio_nbytes = 0;
1994 	while ((bio = req->bio) != NULL) {
1995 		int nbytes;
1996 
1997 		if (nr_bytes >= bio->bi_size) {
1998 			req->bio = bio->bi_next;
1999 			nbytes = bio->bi_size;
2000 			req_bio_endio(req, bio, nbytes, error);
2001 			next_idx = 0;
2002 			bio_nbytes = 0;
2003 		} else {
2004 			int idx = bio->bi_idx + next_idx;
2005 
2006 			if (unlikely(idx >= bio->bi_vcnt)) {
2007 				blk_dump_rq_flags(req, "__end_that");
2008 				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
2009 				       __func__, idx, bio->bi_vcnt);
2010 				break;
2011 			}
2012 
2013 			nbytes = bio_iovec_idx(bio, idx)->bv_len;
2014 			BIO_BUG_ON(nbytes > bio->bi_size);
2015 
2016 			/*
2017 			 * this bvec was not completed in full
2018 			 */
2019 			if (unlikely(nbytes > nr_bytes)) {
2020 				bio_nbytes += nr_bytes;
2021 				total_bytes += nr_bytes;
2022 				break;
2023 			}
2024 
2025 			/*
2026 			 * advance to the next vector
2027 			 */
2028 			next_idx++;
2029 			bio_nbytes += nbytes;
2030 		}
2031 
2032 		total_bytes += nbytes;
2033 		nr_bytes -= nbytes;
2034 
2035 		bio = req->bio;
2036 		if (bio) {
2037 			/*
2038 			 * end more in this run, or just return 'not-done'
2039 			 */
2040 			if (unlikely(nr_bytes <= 0))
2041 				break;
2042 		}
2043 	}
2044 
2045 	/*
2046 	 * completely done
2047 	 */
2048 	if (!req->bio) {
2049 		/*
2050 		 * Reset counters so that the request stacking driver
2051 		 * can find how many bytes remain in the request
2052 		 * later.
2053 		 */
2054 		req->__data_len = 0;
2055 		return false;
2056 	}
2057 
2058 	/*
2059 	 * if the request wasn't completed, update state
2060 	 */
2061 	if (bio_nbytes) {
2062 		req_bio_endio(req, bio, bio_nbytes, error);
2063 		bio->bi_idx += next_idx;
2064 		bio_iovec(bio)->bv_offset += nr_bytes;
2065 		bio_iovec(bio)->bv_len -= nr_bytes;
2066 	}
2067 
2068 	req->__data_len -= total_bytes;
2069 	req->buffer = bio_data(req->bio);
2070 
2071 	/* update sector only for requests with clear definition of sector */
2072 	if (blk_fs_request(req) || blk_discard_rq(req))
2073 		req->__sector += total_bytes >> 9;
2074 
2075 	/* mixed attributes always follow the first bio */
2076 	if (req->cmd_flags & REQ_MIXED_MERGE) {
2077 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
2078 		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2079 	}
2080 
2081 	/*
2082 	 * If the total number of bytes is less than the first segment
2083 	 * size, something has gone terribly wrong.
2084 	 */
2085 	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2086 		printk(KERN_ERR "blk: request botched\n");
2087 		req->__data_len = blk_rq_cur_bytes(req);
2088 	}
2089 
2090 	/* recalculate the number of segments */
2091 	blk_recalc_rq_segments(req);
2092 
2093 	return true;
2094 }
2095 EXPORT_SYMBOL_GPL(blk_update_request);
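
/*
 * Illustrative sketch, not part of the original file: a request
 * stacking driver consuming a partial completion reported by a lower
 * device.  my_lower_bytes_transferred() is hypothetical; locking is
 * omitted.
 *
 *	bytes = my_lower_bytes_transferred(clone);
 *	if (blk_update_request(clone, error, bytes))
 *		(clone has leftover and was set up for the next range
 *		 of segments; resubmit the remainder)
 *	else
 *		(clone is fully done; finish it through the stacking
 *		 driver's own completion path)
 */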
2096 
2097 static bool blk_update_bidi_request(struct request *rq, int error,
2098 				    unsigned int nr_bytes,
2099 				    unsigned int bidi_bytes)
2100 {
2101 	if (blk_update_request(rq, error, nr_bytes))
2102 		return true;
2103 
2104 	/* Bidi request must be completed as a whole */
2105 	if (unlikely(blk_bidi_rq(rq)) &&
2106 	    blk_update_request(rq->next_rq, error, bidi_bytes))
2107 		return true;
2108 
2109 	add_disk_randomness(rq->rq_disk);
2110 
2111 	return false;
2112 }
2113 
2114 /*
2115  * queue lock must be held
2116  */
2117 static void blk_finish_request(struct request *req, int error)
2118 {
2119 	if (blk_rq_tagged(req))
2120 		blk_queue_end_tag(req->q, req);
2121 
2122 	BUG_ON(blk_queued_rq(req));
2123 
2124 	if (unlikely(laptop_mode) && blk_fs_request(req))
2125 		laptop_io_completion(&req->q->backing_dev_info);
2126 
2127 	blk_delete_timer(req);
2128 
2129 	blk_account_io_done(req);
2130 
2131 	if (req->end_io)
2132 		req->end_io(req, error);
2133 	else {
2134 		if (blk_bidi_rq(req))
2135 			__blk_put_request(req->next_rq->q, req->next_rq);
2136 
2137 		__blk_put_request(req->q, req);
2138 	}
2139 }
2140 
2141 /**
2142  * blk_end_bidi_request - Complete a bidi request
2143  * @rq:         the request to complete
2144  * @error:      %0 for success, < %0 for error
2145  * @nr_bytes:   number of bytes to complete @rq
2146  * @bidi_bytes: number of bytes to complete @rq->next_rq
2147  *
2148  * Description:
2149  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2150  *     Drivers that support bidi can safely call this function for any
2151  *     type of request, bidi or uni.  In the latter case @bidi_bytes is
2152  *     just ignored.
2153  *
2154  * Return:
2155  *     %false - we are done with this request
2156  *     %true  - still buffers pending for this request
2157  **/
2158 static bool blk_end_bidi_request(struct request *rq, int error,
2159 				 unsigned int nr_bytes, unsigned int bidi_bytes)
2160 {
2161 	struct request_queue *q = rq->q;
2162 	unsigned long flags;
2163 
2164 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2165 		return true;
2166 
2167 	spin_lock_irqsave(q->queue_lock, flags);
2168 	blk_finish_request(rq, error);
2169 	spin_unlock_irqrestore(q->queue_lock, flags);
2170 
2171 	return false;
2172 }
2173 
2174 /**
2175  * __blk_end_bidi_request - Complete a bidi request with queue lock held
2176  * @rq:         the request to complete
2177  * @error:      %0 for success, < %0 for error
2178  * @nr_bytes:   number of bytes to complete @rq
2179  * @bidi_bytes: number of bytes to complete @rq->next_rq
2180  *
2181  * Description:
2182  *     Identical to blk_end_bidi_request() except that the queue lock is
2183  *     assumed to be held on entry and remains so on return.
2184  *
2185  * Return:
2186  *     %false - we are done with this request
2187  *     %true  - still buffers pending for this request
2188  **/
2189 static bool __blk_end_bidi_request(struct request *rq, int error,
2190 				   unsigned int nr_bytes, unsigned int bidi_bytes)
2191 {
2192 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2193 		return true;
2194 
2195 	blk_finish_request(rq, error);
2196 
2197 	return false;
2198 }
2199 
2200 /**
2201  * blk_end_request - Helper function for drivers to complete the request.
2202  * @rq:       the request being processed
2203  * @error:    %0 for success, < %0 for error
2204  * @nr_bytes: number of bytes to complete
2205  *
2206  * Description:
2207  *     Ends I/O on a number of bytes attached to @rq.
2208  *     If @rq has leftover, sets it up for the next range of segments.
2209  *
2210  * Return:
2211  *     %false - we are done with this request
2212  *     %true  - still buffers pending for this request
2213  **/
2214 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2215 {
2216 	return blk_end_bidi_request(rq, error, nr_bytes, 0);
2217 }
2218 EXPORT_SYMBOL(blk_end_request);
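
/*
 * Illustrative sketch, not part of the original file: a typical
 * completion path run without the queue lock held, e.g. from a
 * threaded interrupt handler.  struct my_dev, dev->rq and bytes_done
 * are hypothetical driver state.
 *
 *	static void my_complete(struct my_dev *dev, int error,
 *				unsigned int bytes_done)
 *	{
 *		if (!blk_end_request(dev->rq, error, bytes_done))
 *			dev->rq = NULL;	(the request is fully done)
 *	}
 */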
2219 
2220 /**
2221  * blk_end_request_all - Helper function for drivers to finish the request.
2222  * @rq: the request to finish
2223  * @error: %0 for success, < %0 for error
2224  *
2225  * Description:
2226  *     Completely finish @rq.
2227  */
2228 void blk_end_request_all(struct request *rq, int error)
2229 {
2230 	bool pending;
2231 	unsigned int bidi_bytes = 0;
2232 
2233 	if (unlikely(blk_bidi_rq(rq)))
2234 		bidi_bytes = blk_rq_bytes(rq->next_rq);
2235 
2236 	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2237 	BUG_ON(pending);
2238 }
2239 EXPORT_SYMBOL(blk_end_request_all);
2240 
2241 /**
2242  * blk_end_request_cur - Helper function to finish the current request chunk.
2243  * @rq: the request to finish the current chunk for
2244  * @error: %0 for success, < %0 for error
2245  *
2246  * Description:
2247  *     Complete the current consecutively mapped chunk from @rq.
2248  *
2249  * Return:
2250  *     %false - we are done with this request
2251  *     %true  - still buffers pending for this request
2252  */
2253 bool blk_end_request_cur(struct request *rq, int error)
2254 {
2255 	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2256 }
2257 EXPORT_SYMBOL(blk_end_request_cur);
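
/*
 * Illustrative sketch, not part of the original file: PIO-style
 * drivers that move one contiguous chunk at a time can complete the
 * request chunk by chunk.  my_pio_transfer() is hypothetical;
 * rq->buffer and blk_rq_cur_bytes() describe the current chunk.
 *
 *	do {
 *		my_pio_transfer(dev, rq->buffer, blk_rq_cur_bytes(rq));
 *	} while (blk_end_request_cur(rq, 0));
 */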
2258 
2259 /**
2260  * blk_end_request_err - Finish a request till the next failure boundary.
2261  * @rq: the request to finish till the next failure boundary for
2262  * @error: must be negative errno
2263  *
2264  * Description:
2265  *     Complete @rq till the next failure boundary.
2266  *
2267  * Return:
2268  *     %false - we are done with this request
2269  *     %true  - still buffers pending for this request
2270  */
2271 bool blk_end_request_err(struct request *rq, int error)
2272 {
2273 	WARN_ON(error >= 0);
2274 	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2275 }
2276 EXPORT_SYMBOL_GPL(blk_end_request_err);
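
/*
 * Illustrative sketch, not part of the original file: on a medium
 * error a driver can fail a mixed-merge request only up to the next
 * failure boundary (see blk_rq_err_bytes() above) and leave the rest
 * to be retried.  my_media_error() is hypothetical.
 *
 *	if (my_media_error(dev))
 *		pending = blk_end_request_err(rq, -EIO);
 */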
2277 
2278 /**
2279  * __blk_end_request - Helper function for drivers to complete the request.
2280  * @rq:       the request being processed
2281  * @error:    %0 for success, < %0 for error
2282  * @nr_bytes: number of bytes to complete
2283  *
2284  * Description:
2285  *     Must be called with the queue lock held, unlike blk_end_request().
2286  *
2287  * Return:
2288  *     %false - we are done with this request
2289  *     %true  - still buffers pending for this request
2290  **/
2291 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2292 {
2293 	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2294 }
2295 EXPORT_SYMBOL(__blk_end_request);
2296 
2297 /**
2298  * __blk_end_request_all - Helper function for drivers to finish the request.
2299  * @rq: the request to finish
2300  * @error: %0 for success, < %0 for error
2301  *
2302  * Description:
2303  *     Completely finish @rq.  Must be called with queue lock held.
2304  */
2305 void __blk_end_request_all(struct request *rq, int error)
2306 {
2307 	bool pending;
2308 	unsigned int bidi_bytes = 0;
2309 
2310 	if (unlikely(blk_bidi_rq(rq)))
2311 		bidi_bytes = blk_rq_bytes(rq->next_rq);
2312 
2313 	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2314 	BUG_ON(pending);
2315 }
2316 EXPORT_SYMBOL(__blk_end_request_all);
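
/*
 * Illustrative sketch, not part of the original file: with the queue
 * lock already held, a request can be failed outright the same way
 * the BLKPREP_KILL path in blk_peek_request() does it:
 *
 *	blk_start_request(rq);
 *	__blk_end_request_all(rq, -EIO);
 */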
2317 
2318 /**
2319  * __blk_end_request_cur - Helper function to finish the current request chunk.
2320  * @rq: the request to finish the current chunk for
2321  * @error: %0 for success, < %0 for error
2322  *
2323  * Description:
2324  *     Complete the current consecutively mapped chunk from @rq.  Must
2325  *     be called with queue lock held.
2326  *
2327  * Return:
2328  *     %false - we are done with this request
2329  *     %true  - still buffers pending for this request
2330  */
2331 bool __blk_end_request_cur(struct request *rq, int error)
2332 {
2333 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2334 }
2335 EXPORT_SYMBOL(__blk_end_request_cur);
2336 
2337 /**
2338  * __blk_end_request_err - Finish a request till the next failure boundary.
2339  * @rq: the request to finish till the next failure boundary for
2340  * @error: must be negative errno
2341  *
2342  * Description:
2343  *     Complete @rq till the next failure boundary.  Must be called
2344  *     with queue lock held.
2345  *
2346  * Return:
2347  *     %false - we are done with this request
2348  *     %true  - still buffers pending for this request
2349  */
2350 bool __blk_end_request_err(struct request *rq, int error)
2351 {
2352 	WARN_ON(error >= 0);
2353 	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2354 }
2355 EXPORT_SYMBOL_GPL(__blk_end_request_err);
2356 
2357 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2358 		     struct bio *bio)
2359 {
2360 	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2361 	rq->cmd_flags |= bio->bi_rw & REQ_RW;
2362 
2363 	if (bio_has_data(bio)) {
2364 		rq->nr_phys_segments = bio_phys_segments(q, bio);
2365 		rq->buffer = bio_data(bio);
2366 	}
2367 	rq->__data_len = bio->bi_size;
2368 	rq->bio = rq->biotail = bio;
2369 
2370 	if (bio->bi_bdev)
2371 		rq->rq_disk = bio->bi_bdev->bd_disk;
2372 }
2373 
2374 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2375 /**
2376  * rq_flush_dcache_pages - Helper function to flush all pages in a request
2377  * @rq: the request to be flushed
2378  *
2379  * Description:
2380  *     Flush all pages in @rq.
2381  */
2382 void rq_flush_dcache_pages(struct request *rq)
2383 {
2384 	struct req_iterator iter;
2385 	struct bio_vec *bvec;
2386 
2387 	rq_for_each_segment(bvec, rq, iter)
2388 		flush_dcache_page(bvec->bv_page);
2389 }
2390 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2391 #endif
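
/*
 * Illustrative sketch, not part of the original file: on such
 * architectures a driver flushes the D-cache after filling a read
 * request's pages and before completing it, so that user mappings
 * observe the new data.  my_fill_rq_pages() is hypothetical.
 *
 *	my_fill_rq_pages(dev, rq);
 *	rq_flush_dcache_pages(rq);
 *	__blk_end_request_all(rq, 0);
 */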
2392 
2393 /**
2394  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2395  * @q : the queue of the device being checked
2396  *
2397  * Description:
2398  *    Check if underlying low-level drivers of a device are busy.
2399  *    If the drivers want to export their busy state, they must set their own
2400  *    exporting function using blk_queue_lld_busy() first.
2401  *
2402  *    Basically, this function is used only by request stacking drivers
2403  *    to stop dispatching requests to underlying devices when underlying
2404  *    devices are busy.  This behavior allows more I/O merging on the queue
2405  *    of the request stacking driver and prevents I/O throughput regressions
2406  *    under bursty I/O load.
2407  *
2408  * Return:
2409  *    0 - Not busy (The request stacking driver should dispatch request)
2410  *    1 - Busy (The request stacking driver should stop dispatching request)
2411  */
2412 int blk_lld_busy(struct request_queue *q)
2413 {
2414 	if (q->lld_busy_fn)
2415 		return q->lld_busy_fn(q);
2416 
2417 	return 0;
2418 }
2419 EXPORT_SYMBOL_GPL(blk_lld_busy);
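
/*
 * Illustrative sketch, not part of the original file: a low-level
 * driver exports its busy state with blk_queue_lld_busy(), and a
 * request stacking driver polls blk_lld_busy() before dispatching.
 * my_dev_busy() is hypothetical.
 *
 *	static int my_lld_busy_fn(struct request_queue *q)
 *	{
 *		return my_dev_busy(q->queuedata);
 *	}
 *	...
 *	blk_queue_lld_busy(q, my_lld_busy_fn);
 *
 * and in the stacking driver's dispatch path:
 *
 *	if (blk_lld_busy(lower_q))
 *		(hold back dispatching for now)
 */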
2420 
2421 /**
2422  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2423  * @rq: the clone request to be cleaned up
2424  *
2425  * Description:
2426  *     Free all bios in @rq for a cloned request.
2427  */
2428 void blk_rq_unprep_clone(struct request *rq)
2429 {
2430 	struct bio *bio;
2431 
2432 	while ((bio = rq->bio) != NULL) {
2433 		rq->bio = bio->bi_next;
2434 
2435 		bio_put(bio);
2436 	}
2437 }
2438 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2439 
2440 /*
2441  * Copy attributes of the original request to the clone request.
2442  * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2443  */
2444 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2445 {
2446 	dst->cpu = src->cpu;
2447 	dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
2448 	dst->cmd_type = src->cmd_type;
2449 	dst->__sector = blk_rq_pos(src);
2450 	dst->__data_len = blk_rq_bytes(src);
2451 	dst->nr_phys_segments = src->nr_phys_segments;
2452 	dst->ioprio = src->ioprio;
2453 	dst->extra_len = src->extra_len;
2454 }
2455 
2456 /**
2457  * blk_rq_prep_clone - Helper function to setup clone request
2458  * @rq: the request to be setup
2459  * @rq_src: original request to be cloned
2460  * @bs: bio_set that bios for clone are allocated from
2461  * @gfp_mask: memory allocation mask for bio
2462  * @bio_ctr: setup function to be called for each clone bio.
2463  *           Returns %0 for success, non %0 for failure.
2464  * @data: private data to be passed to @bio_ctr
2465  *
2466  * Description:
2467  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2468  *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2469  *     are not copied, and copying such parts is the caller's responsibility.
2470  *     Also, pages which the original bios are pointing to are not copied
2471  *     and the cloned bios just point to the same pages.
2472  *     So the cloned bios must be completed before the original bios, which
2473  *     the caller must complete @rq before @rq_src.
2474  */
2475 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2476 		      struct bio_set *bs, gfp_t gfp_mask,
2477 		      int (*bio_ctr)(struct bio *, struct bio *, void *),
2478 		      void *data)
2479 {
2480 	struct bio *bio, *bio_src;
2481 
2482 	if (!bs)
2483 		bs = fs_bio_set;
2484 
2485 	blk_rq_init(NULL, rq);
2486 
2487 	__rq_for_each_bio(bio_src, rq_src) {
2488 		bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2489 		if (!bio)
2490 			goto free_and_out;
2491 
2492 		__bio_clone(bio, bio_src);
2493 
2494 		if (bio_integrity(bio_src) &&
2495 		    bio_integrity_clone(bio, bio_src, gfp_mask, bs))
2496 			goto free_and_out;
2497 
2498 		if (bio_ctr && bio_ctr(bio, bio_src, data))
2499 			goto free_and_out;
2500 
2501 		if (rq->bio) {
2502 			rq->biotail->bi_next = bio;
2503 			rq->biotail = bio;
2504 		} else
2505 			rq->bio = rq->biotail = bio;
2506 	}
2507 
2508 	__blk_rq_prep_clone(rq, rq_src);
2509 
2510 	return 0;
2511 
2512 free_and_out:
2513 	if (bio)
2514 		bio_free(bio, bs);
2515 	blk_rq_unprep_clone(rq);
2516 
2517 	return -ENOMEM;
2518 }
2519 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
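
/*
 * Illustrative sketch, not part of the original file: a request
 * stacking driver (e.g. request-based dm) clones an incoming request
 * before remapping it to a lower device.  my_bs, my_bio_ctr(),
 * my_data and my_clone_end_io() are hypothetical; @bio_ctr may also
 * be NULL.
 *
 *	if (blk_rq_prep_clone(clone, rq, my_bs, GFP_ATOMIC,
 *			      my_bio_ctr, my_data) < 0)
 *		(handle the allocation failure)
 *	clone->end_io = my_clone_end_io;
 *	blk_insert_cloned_request(lower_q, clone);
 */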
2520 
2521 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2522 {
2523 	return queue_work(kblockd_workqueue, work);
2524 }
2525 EXPORT_SYMBOL(kblockd_schedule_work);
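
/*
 * Illustrative sketch, not part of the original file: deferring queue
 * work to kblockd from a context that cannot run it directly.
 * my_work and my_work_fn() are hypothetical.
 *
 *	INIT_WORK(&my_work, my_work_fn);
 *	...
 *	kblockd_schedule_work(q, &my_work);
 */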
2526 
2527 int __init blk_dev_init(void)
2528 {
2529 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
2530 			sizeof(((struct request *)0)->cmd_flags));
2531 
2532 	kblockd_workqueue = create_workqueue("kblockd");
2533 	if (!kblockd_workqueue)
2534 		panic("Failed to create kblockd\n");
2535 
2536 	request_cachep = kmem_cache_create("blkdev_requests",
2537 			sizeof(struct request), 0, SLAB_PANIC, NULL);
2538 
2539 	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2540 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2541 
2542 	return 0;
2543 }
2544