1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> 7 * - July2000 8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 9 */ 10 11 /* 12 * This handles all read/write requests to block devices 13 */ 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/backing-dev.h> 17 #include <linux/bio.h> 18 #include <linux/blkdev.h> 19 #include <linux/highmem.h> 20 #include <linux/mm.h> 21 #include <linux/kernel_stat.h> 22 #include <linux/string.h> 23 #include <linux/init.h> 24 #include <linux/completion.h> 25 #include <linux/slab.h> 26 #include <linux/swap.h> 27 #include <linux/writeback.h> 28 #include <linux/task_io_accounting_ops.h> 29 #include <linux/fault-inject.h> 30 #include <linux/list_sort.h> 31 32 #define CREATE_TRACE_POINTS 33 #include <trace/events/block.h> 34 35 #include "blk.h" 36 37 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); 38 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); 39 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 40 41 static int __make_request(struct request_queue *q, struct bio *bio); 42 43 /* 44 * For the allocated request tables 45 */ 46 static struct kmem_cache *request_cachep; 47 48 /* 49 * For queue allocation 50 */ 51 struct kmem_cache *blk_requestq_cachep; 52 53 /* 54 * Controlling structure to kblockd 55 */ 56 static struct workqueue_struct *kblockd_workqueue; 57 58 static void drive_stat_acct(struct request *rq, int new_io) 59 { 60 struct hd_struct *part; 61 int rw = rq_data_dir(rq); 62 int cpu; 63 64 if (!blk_do_io_stat(rq)) 65 return; 66 67 cpu = part_stat_lock(); 68 69 if (!new_io) { 70 part = rq->part; 71 part_stat_inc(cpu, part, merges[rw]); 72 } else { 73 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 74 if (!hd_struct_try_get(part)) { 75 /* 76 * The partition is already being removed, 77 * the request will be accounted on the disk only 78 * 79 * We take a reference on disk->part0 although that 80 * partition will never be deleted, so we can treat 81 * it as any other partition. 82 */ 83 part = &rq->rq_disk->part0; 84 hd_struct_get(part); 85 } 86 part_round_stats(cpu, part); 87 part_inc_in_flight(part, rw); 88 rq->part = part; 89 } 90 91 part_stat_unlock(); 92 } 93 94 void blk_queue_congestion_threshold(struct request_queue *q) 95 { 96 int nr; 97 98 nr = q->nr_requests - (q->nr_requests / 8) + 1; 99 if (nr > q->nr_requests) 100 nr = q->nr_requests; 101 q->nr_congestion_on = nr; 102 103 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; 104 if (nr < 1) 105 nr = 1; 106 q->nr_congestion_off = nr; 107 } 108 109 /** 110 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info 111 * @bdev: device 112 * 113 * Locates the passed device's request queue and returns the address of its 114 * backing_dev_info 115 * 116 * Will return NULL if the request queue cannot be located. 
117 */ 118 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) 119 { 120 struct backing_dev_info *ret = NULL; 121 struct request_queue *q = bdev_get_queue(bdev); 122 123 if (q) 124 ret = &q->backing_dev_info; 125 return ret; 126 } 127 EXPORT_SYMBOL(blk_get_backing_dev_info); 128 129 void blk_rq_init(struct request_queue *q, struct request *rq) 130 { 131 memset(rq, 0, sizeof(*rq)); 132 133 INIT_LIST_HEAD(&rq->queuelist); 134 INIT_LIST_HEAD(&rq->timeout_list); 135 rq->cpu = -1; 136 rq->q = q; 137 rq->__sector = (sector_t) -1; 138 INIT_HLIST_NODE(&rq->hash); 139 RB_CLEAR_NODE(&rq->rb_node); 140 rq->cmd = rq->__cmd; 141 rq->cmd_len = BLK_MAX_CDB; 142 rq->tag = -1; 143 rq->ref_count = 1; 144 rq->start_time = jiffies; 145 set_start_time_ns(rq); 146 rq->part = NULL; 147 } 148 EXPORT_SYMBOL(blk_rq_init); 149 150 static void req_bio_endio(struct request *rq, struct bio *bio, 151 unsigned int nbytes, int error) 152 { 153 if (error) 154 clear_bit(BIO_UPTODATE, &bio->bi_flags); 155 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 156 error = -EIO; 157 158 if (unlikely(nbytes > bio->bi_size)) { 159 printk(KERN_ERR "%s: want %u bytes done, %u left\n", 160 __func__, nbytes, bio->bi_size); 161 nbytes = bio->bi_size; 162 } 163 164 if (unlikely(rq->cmd_flags & REQ_QUIET)) 165 set_bit(BIO_QUIET, &bio->bi_flags); 166 167 bio->bi_size -= nbytes; 168 bio->bi_sector += (nbytes >> 9); 169 170 if (bio_integrity(bio)) 171 bio_integrity_advance(bio, nbytes); 172 173 /* don't actually finish bio if it's part of flush sequence */ 174 if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) 175 bio_endio(bio, error); 176 } 177 178 void blk_dump_rq_flags(struct request *rq, char *msg) 179 { 180 int bit; 181 182 printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, 183 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 184 rq->cmd_flags); 185 186 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 187 (unsigned long long)blk_rq_pos(rq), 188 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 189 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 190 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 191 192 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 193 printk(KERN_INFO " cdb: "); 194 for (bit = 0; bit < BLK_MAX_CDB; bit++) 195 printk("%02x ", rq->cmd[bit]); 196 printk("\n"); 197 } 198 } 199 EXPORT_SYMBOL(blk_dump_rq_flags); 200 201 static void blk_delay_work(struct work_struct *work) 202 { 203 struct request_queue *q; 204 205 q = container_of(work, struct request_queue, delay_work.work); 206 spin_lock_irq(q->queue_lock); 207 __blk_run_queue(q); 208 spin_unlock_irq(q->queue_lock); 209 } 210 211 /** 212 * blk_delay_queue - restart queueing after defined interval 213 * @q: The &struct request_queue in question 214 * @msecs: Delay in msecs 215 * 216 * Description: 217 * Sometimes queueing needs to be postponed for a little while, to allow 218 * resources to come back. This function will make sure that queueing is 219 * restarted around the specified time. 220 */ 221 void blk_delay_queue(struct request_queue *q, unsigned long msecs) 222 { 223 queue_delayed_work(kblockd_workqueue, &q->delay_work, 224 msecs_to_jiffies(msecs)); 225 } 226 EXPORT_SYMBOL(blk_delay_queue); 227 228 /** 229 * blk_start_queue - restart a previously stopped queue 230 * @q: The &struct request_queue in question 231 * 232 * Description: 233 * blk_start_queue() will clear the stop flag on the queue, and call 234 * the request_fn for the queue if it was in a stopped state when 235 * entered. Also see blk_stop_queue(). 
Queue lock must be held. 236 **/ 237 void blk_start_queue(struct request_queue *q) 238 { 239 WARN_ON(!irqs_disabled()); 240 241 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 242 __blk_run_queue(q); 243 } 244 EXPORT_SYMBOL(blk_start_queue); 245 246 /** 247 * blk_stop_queue - stop a queue 248 * @q: The &struct request_queue in question 249 * 250 * Description: 251 * The Linux block layer assumes that a block driver will consume all 252 * entries on the request queue when the request_fn strategy is called. 253 * Often this will not happen, because of hardware limitations (queue 254 * depth settings). If a device driver gets a 'queue full' response, 255 * or if it simply chooses not to queue more I/O at one point, it can 256 * call this function to prevent the request_fn from being called until 257 * the driver has signalled it's ready to go again. This happens by calling 258 * blk_start_queue() to restart queue operations. Queue lock must be held. 259 **/ 260 void blk_stop_queue(struct request_queue *q) 261 { 262 __cancel_delayed_work(&q->delay_work); 263 queue_flag_set(QUEUE_FLAG_STOPPED, q); 264 } 265 EXPORT_SYMBOL(blk_stop_queue); 266 267 /** 268 * blk_sync_queue - cancel any pending callbacks on a queue 269 * @q: the queue 270 * 271 * Description: 272 * The block layer may perform asynchronous callback activity 273 * on a queue, such as calling the unplug function after a timeout. 274 * A block device may call blk_sync_queue to ensure that any 275 * such activity is cancelled, thus allowing it to release resources 276 * that the callbacks might use. The caller must already have made sure 277 * that its ->make_request_fn will not re-add plugging prior to calling 278 * this function. 279 * 280 * This function does not cancel any asynchronous activity arising 281 * out of elevator or throttling code. That would require elevaotor_exit() 282 * and blk_throtl_exit() to be called with queue lock initialized. 283 * 284 */ 285 void blk_sync_queue(struct request_queue *q) 286 { 287 del_timer_sync(&q->timeout); 288 cancel_delayed_work_sync(&q->delay_work); 289 } 290 EXPORT_SYMBOL(blk_sync_queue); 291 292 /** 293 * __blk_run_queue - run a single device queue 294 * @q: The queue to run 295 * 296 * Description: 297 * See @blk_run_queue. This variant must be called with the queue lock 298 * held and interrupts disabled. 299 */ 300 void __blk_run_queue(struct request_queue *q) 301 { 302 if (unlikely(blk_queue_stopped(q))) 303 return; 304 305 q->request_fn(q); 306 } 307 EXPORT_SYMBOL(__blk_run_queue); 308 309 /** 310 * blk_run_queue_async - run a single device queue in workqueue context 311 * @q: The queue to run 312 * 313 * Description: 314 * Tells kblockd to perform the equivalent of @blk_run_queue on behalf 315 * of us. 316 */ 317 void blk_run_queue_async(struct request_queue *q) 318 { 319 if (likely(!blk_queue_stopped(q))) { 320 __cancel_delayed_work(&q->delay_work); 321 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); 322 } 323 } 324 EXPORT_SYMBOL(blk_run_queue_async); 325 326 /** 327 * blk_run_queue - run a single device queue 328 * @q: The queue to run 329 * 330 * Description: 331 * Invoke request handling on this queue, if it has pending work to do. 332 * May be used to restart queueing when a request has completed. 
333 */ 334 void blk_run_queue(struct request_queue *q) 335 { 336 unsigned long flags; 337 338 spin_lock_irqsave(q->queue_lock, flags); 339 __blk_run_queue(q); 340 spin_unlock_irqrestore(q->queue_lock, flags); 341 } 342 EXPORT_SYMBOL(blk_run_queue); 343 344 void blk_put_queue(struct request_queue *q) 345 { 346 kobject_put(&q->kobj); 347 } 348 349 /* 350 * Note: If a driver supplied the queue lock, it should not zap that lock 351 * unexpectedly as some queue cleanup components like elevator_exit() and 352 * blk_throtl_exit() need queue lock. 353 */ 354 void blk_cleanup_queue(struct request_queue *q) 355 { 356 /* 357 * We know we have process context here, so we can be a little 358 * cautious and ensure that pending block actions on this device 359 * are done before moving on. Going into this function, we should 360 * not have processes doing IO to this device. 361 */ 362 blk_sync_queue(q); 363 364 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); 365 mutex_lock(&q->sysfs_lock); 366 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); 367 mutex_unlock(&q->sysfs_lock); 368 369 if (q->elevator) 370 elevator_exit(q->elevator); 371 372 blk_throtl_exit(q); 373 374 blk_put_queue(q); 375 } 376 EXPORT_SYMBOL(blk_cleanup_queue); 377 378 static int blk_init_free_list(struct request_queue *q) 379 { 380 struct request_list *rl = &q->rq; 381 382 if (unlikely(rl->rq_pool)) 383 return 0; 384 385 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; 386 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; 387 rl->elvpriv = 0; 388 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); 389 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); 390 391 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, 392 mempool_free_slab, request_cachep, q->node); 393 394 if (!rl->rq_pool) 395 return -ENOMEM; 396 397 return 0; 398 } 399 400 struct request_queue *blk_alloc_queue(gfp_t gfp_mask) 401 { 402 return blk_alloc_queue_node(gfp_mask, -1); 403 } 404 EXPORT_SYMBOL(blk_alloc_queue); 405 406 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 407 { 408 struct request_queue *q; 409 int err; 410 411 q = kmem_cache_alloc_node(blk_requestq_cachep, 412 gfp_mask | __GFP_ZERO, node_id); 413 if (!q) 414 return NULL; 415 416 q->backing_dev_info.ra_pages = 417 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 418 q->backing_dev_info.state = 0; 419 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 420 q->backing_dev_info.name = "block"; 421 422 err = bdi_init(&q->backing_dev_info); 423 if (err) { 424 kmem_cache_free(blk_requestq_cachep, q); 425 return NULL; 426 } 427 428 if (blk_throtl_init(q)) { 429 kmem_cache_free(blk_requestq_cachep, q); 430 return NULL; 431 } 432 433 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, 434 laptop_mode_timer_fn, (unsigned long) q); 435 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); 436 INIT_LIST_HEAD(&q->timeout_list); 437 INIT_LIST_HEAD(&q->flush_queue[0]); 438 INIT_LIST_HEAD(&q->flush_queue[1]); 439 INIT_LIST_HEAD(&q->flush_data_in_flight); 440 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); 441 442 kobject_init(&q->kobj, &blk_queue_ktype); 443 444 mutex_init(&q->sysfs_lock); 445 spin_lock_init(&q->__queue_lock); 446 447 /* 448 * By default initialize queue_lock to internal lock and driver can 449 * override it later if need be. 
450 */ 451 q->queue_lock = &q->__queue_lock; 452 453 return q; 454 } 455 EXPORT_SYMBOL(blk_alloc_queue_node); 456 457 /** 458 * blk_init_queue - prepare a request queue for use with a block device 459 * @rfn: The function to be called to process requests that have been 460 * placed on the queue. 461 * @lock: Request queue spin lock 462 * 463 * Description: 464 * If a block device wishes to use the standard request handling procedures, 465 * which sorts requests and coalesces adjacent requests, then it must 466 * call blk_init_queue(). The function @rfn will be called when there 467 * are requests on the queue that need to be processed. If the device 468 * supports plugging, then @rfn may not be called immediately when requests 469 * are available on the queue, but may be called at some time later instead. 470 * Plugged queues are generally unplugged when a buffer belonging to one 471 * of the requests on the queue is needed, or due to memory pressure. 472 * 473 * @rfn is not required, or even expected, to remove all requests off the 474 * queue, but only as many as it can handle at a time. If it does leave 475 * requests on the queue, it is responsible for arranging that the requests 476 * get dealt with eventually. 477 * 478 * The queue spin lock must be held while manipulating the requests on the 479 * request queue; this lock will be taken also from interrupt context, so irq 480 * disabling is needed for it. 481 * 482 * Function returns a pointer to the initialized request queue, or %NULL if 483 * it didn't succeed. 484 * 485 * Note: 486 * blk_init_queue() must be paired with a blk_cleanup_queue() call 487 * when the block device is deactivated (such as at module unload). 488 **/ 489 490 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 491 { 492 return blk_init_queue_node(rfn, lock, -1); 493 } 494 EXPORT_SYMBOL(blk_init_queue); 495 496 struct request_queue * 497 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 498 { 499 struct request_queue *uninit_q, *q; 500 501 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); 502 if (!uninit_q) 503 return NULL; 504 505 q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id); 506 if (!q) 507 blk_cleanup_queue(uninit_q); 508 509 return q; 510 } 511 EXPORT_SYMBOL(blk_init_queue_node); 512 513 struct request_queue * 514 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, 515 spinlock_t *lock) 516 { 517 return blk_init_allocated_queue_node(q, rfn, lock, -1); 518 } 519 EXPORT_SYMBOL(blk_init_allocated_queue); 520 521 struct request_queue * 522 blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, 523 spinlock_t *lock, int node_id) 524 { 525 if (!q) 526 return NULL; 527 528 q->node = node_id; 529 if (blk_init_free_list(q)) 530 return NULL; 531 532 q->request_fn = rfn; 533 q->prep_rq_fn = NULL; 534 q->unprep_rq_fn = NULL; 535 q->queue_flags = QUEUE_FLAG_DEFAULT; 536 537 /* Override internal queue lock with supplied lock pointer */ 538 if (lock) 539 q->queue_lock = lock; 540 541 /* 542 * This also sets hw/phys segments, boundary and size 543 */ 544 blk_queue_make_request(q, __make_request); 545 546 q->sg_reserved_size = INT_MAX; 547 548 /* 549 * all done 550 */ 551 if (!elevator_init(q, NULL)) { 552 blk_queue_congestion_threshold(q); 553 return q; 554 } 555 556 return NULL; 557 } 558 EXPORT_SYMBOL(blk_init_allocated_queue_node); 559 560 int blk_get_queue(struct request_queue *q) 561 { 562 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { 563 
kobject_get(&q->kobj); 564 return 0; 565 } 566 567 return 1; 568 } 569 570 static inline void blk_free_request(struct request_queue *q, struct request *rq) 571 { 572 if (rq->cmd_flags & REQ_ELVPRIV) 573 elv_put_request(q, rq); 574 mempool_free(rq, q->rq.rq_pool); 575 } 576 577 static struct request * 578 blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) 579 { 580 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 581 582 if (!rq) 583 return NULL; 584 585 blk_rq_init(q, rq); 586 587 rq->cmd_flags = flags | REQ_ALLOCED; 588 589 if (priv) { 590 if (unlikely(elv_set_request(q, rq, gfp_mask))) { 591 mempool_free(rq, q->rq.rq_pool); 592 return NULL; 593 } 594 rq->cmd_flags |= REQ_ELVPRIV; 595 } 596 597 return rq; 598 } 599 600 /* 601 * ioc_batching returns true if the ioc is a valid batching request and 602 * should be given priority access to a request. 603 */ 604 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) 605 { 606 if (!ioc) 607 return 0; 608 609 /* 610 * Make sure the process is able to allocate at least 1 request 611 * even if the batch times out, otherwise we could theoretically 612 * lose wakeups. 613 */ 614 return ioc->nr_batch_requests == q->nr_batching || 615 (ioc->nr_batch_requests > 0 616 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); 617 } 618 619 /* 620 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This 621 * will cause the process to be a "batcher" on all queues in the system. This 622 * is the behaviour we want though - once it gets a wakeup it should be given 623 * a nice run. 624 */ 625 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) 626 { 627 if (!ioc || ioc_batching(q, ioc)) 628 return; 629 630 ioc->nr_batch_requests = q->nr_batching; 631 ioc->last_waited = jiffies; 632 } 633 634 static void __freed_request(struct request_queue *q, int sync) 635 { 636 struct request_list *rl = &q->rq; 637 638 if (rl->count[sync] < queue_congestion_off_threshold(q)) 639 blk_clear_queue_congested(q, sync); 640 641 if (rl->count[sync] + 1 <= q->nr_requests) { 642 if (waitqueue_active(&rl->wait[sync])) 643 wake_up(&rl->wait[sync]); 644 645 blk_clear_queue_full(q, sync); 646 } 647 } 648 649 /* 650 * A request has just been released. Account for it, update the full and 651 * congestion status, wake up any waiters. Called under q->queue_lock. 652 */ 653 static void freed_request(struct request_queue *q, int sync, int priv) 654 { 655 struct request_list *rl = &q->rq; 656 657 rl->count[sync]--; 658 if (priv) 659 rl->elvpriv--; 660 661 __freed_request(q, sync); 662 663 if (unlikely(rl->starved[sync ^ 1])) 664 __freed_request(q, sync ^ 1); 665 } 666 667 /* 668 * Determine if elevator data should be initialized when allocating the 669 * request associated with @bio. 670 */ 671 static bool blk_rq_should_init_elevator(struct bio *bio) 672 { 673 if (!bio) 674 return true; 675 676 /* 677 * Flush requests do not use the elevator so skip initialization. 678 * This allows a request to share the flush and elevator data. 679 */ 680 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) 681 return false; 682 683 return true; 684 } 685 686 /* 687 * Get a free request, queue_lock must be held. 688 * Returns NULL on failure, with queue_lock held. 689 * Returns !NULL on success, with queue_lock *not held*. 
690 */ 691 static struct request *get_request(struct request_queue *q, int rw_flags, 692 struct bio *bio, gfp_t gfp_mask) 693 { 694 struct request *rq = NULL; 695 struct request_list *rl = &q->rq; 696 struct io_context *ioc = NULL; 697 const bool is_sync = rw_is_sync(rw_flags) != 0; 698 int may_queue, priv = 0; 699 700 may_queue = elv_may_queue(q, rw_flags); 701 if (may_queue == ELV_MQUEUE_NO) 702 goto rq_starved; 703 704 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { 705 if (rl->count[is_sync]+1 >= q->nr_requests) { 706 ioc = current_io_context(GFP_ATOMIC, q->node); 707 /* 708 * The queue will fill after this allocation, so set 709 * it as full, and mark this process as "batching". 710 * This process will be allowed to complete a batch of 711 * requests, others will be blocked. 712 */ 713 if (!blk_queue_full(q, is_sync)) { 714 ioc_set_batching(q, ioc); 715 blk_set_queue_full(q, is_sync); 716 } else { 717 if (may_queue != ELV_MQUEUE_MUST 718 && !ioc_batching(q, ioc)) { 719 /* 720 * The queue is full and the allocating 721 * process is not a "batcher", and not 722 * exempted by the IO scheduler 723 */ 724 goto out; 725 } 726 } 727 } 728 blk_set_queue_congested(q, is_sync); 729 } 730 731 /* 732 * Only allow batching queuers to allocate up to 50% over the defined 733 * limit of requests, otherwise we could have thousands of requests 734 * allocated with any setting of ->nr_requests 735 */ 736 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 737 goto out; 738 739 rl->count[is_sync]++; 740 rl->starved[is_sync] = 0; 741 742 if (blk_rq_should_init_elevator(bio)) { 743 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 744 if (priv) 745 rl->elvpriv++; 746 } 747 748 if (blk_queue_io_stat(q)) 749 rw_flags |= REQ_IO_STAT; 750 spin_unlock_irq(q->queue_lock); 751 752 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); 753 if (unlikely(!rq)) { 754 /* 755 * Allocation failed presumably due to memory. Undo anything 756 * we might have messed up. 757 * 758 * Allocating task should really be put onto the front of the 759 * wait queue, but this is pretty rare. 760 */ 761 spin_lock_irq(q->queue_lock); 762 freed_request(q, is_sync, priv); 763 764 /* 765 * in the very unlikely event that allocation failed and no 766 * requests for this direction was pending, mark us starved 767 * so that freeing of a request in the other direction will 768 * notice us. another possible fix would be to split the 769 * rq mempool into READ and WRITE 770 */ 771 rq_starved: 772 if (unlikely(rl->count[is_sync] == 0)) 773 rl->starved[is_sync] = 1; 774 775 goto out; 776 } 777 778 /* 779 * ioc may be NULL here, and ioc_batching will be false. That's 780 * OK, if the queue is under the request limit then requests need 781 * not count toward the nr_batch_requests limit. There will always 782 * be some limit enforced by BLK_BATCH_TIME. 783 */ 784 if (ioc_batching(q, ioc)) 785 ioc->nr_batch_requests--; 786 787 trace_block_getrq(q, bio, rw_flags & 1); 788 out: 789 return rq; 790 } 791 792 /* 793 * No available requests for this queue, wait for some requests to become 794 * available. 795 * 796 * Called with q->queue_lock held, and returns with it unlocked. 
797 */ 798 static struct request *get_request_wait(struct request_queue *q, int rw_flags, 799 struct bio *bio) 800 { 801 const bool is_sync = rw_is_sync(rw_flags) != 0; 802 struct request *rq; 803 804 rq = get_request(q, rw_flags, bio, GFP_NOIO); 805 while (!rq) { 806 DEFINE_WAIT(wait); 807 struct io_context *ioc; 808 struct request_list *rl = &q->rq; 809 810 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, 811 TASK_UNINTERRUPTIBLE); 812 813 trace_block_sleeprq(q, bio, rw_flags & 1); 814 815 spin_unlock_irq(q->queue_lock); 816 io_schedule(); 817 818 /* 819 * After sleeping, we become a "batching" process and 820 * will be able to allocate at least one request, and 821 * up to a big batch of them for a small period time. 822 * See ioc_batching, ioc_set_batching 823 */ 824 ioc = current_io_context(GFP_NOIO, q->node); 825 ioc_set_batching(q, ioc); 826 827 spin_lock_irq(q->queue_lock); 828 finish_wait(&rl->wait[is_sync], &wait); 829 830 rq = get_request(q, rw_flags, bio, GFP_NOIO); 831 }; 832 833 return rq; 834 } 835 836 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) 837 { 838 struct request *rq; 839 840 BUG_ON(rw != READ && rw != WRITE); 841 842 spin_lock_irq(q->queue_lock); 843 if (gfp_mask & __GFP_WAIT) { 844 rq = get_request_wait(q, rw, NULL); 845 } else { 846 rq = get_request(q, rw, NULL, gfp_mask); 847 if (!rq) 848 spin_unlock_irq(q->queue_lock); 849 } 850 /* q->queue_lock is unlocked at this point */ 851 852 return rq; 853 } 854 EXPORT_SYMBOL(blk_get_request); 855 856 /** 857 * blk_make_request - given a bio, allocate a corresponding struct request. 858 * @q: target request queue 859 * @bio: The bio describing the memory mappings that will be submitted for IO. 860 * It may be a chained-bio properly constructed by block/bio layer. 861 * @gfp_mask: gfp flags to be used for memory allocation 862 * 863 * blk_make_request is the parallel of generic_make_request for BLOCK_PC 864 * type commands. Where the struct request needs to be farther initialized by 865 * the caller. It is passed a &struct bio, which describes the memory info of 866 * the I/O transfer. 867 * 868 * The caller of blk_make_request must make sure that bi_io_vec 869 * are set to describe the memory buffers. That bio_data_dir() will return 870 * the needed direction of the request. (And all bio's in the passed bio-chain 871 * are properly set accordingly) 872 * 873 * If called under none-sleepable conditions, mapped bio buffers must not 874 * need bouncing, by calling the appropriate masked or flagged allocator, 875 * suitable for the target device. Otherwise the call to blk_queue_bounce will 876 * BUG. 877 * 878 * WARNING: When allocating/cloning a bio-chain, careful consideration should be 879 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for 880 * anything but the first bio in the chain. Otherwise you risk waiting for IO 881 * completion of a bio that hasn't been submitted yet, thus resulting in a 882 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead 883 * of bio_alloc(), as that avoids the mempool deadlock. 884 * If possible a big IO should be split into smaller parts when allocation 885 * fails. Partial allocation should not be an error, or you risk a live-lock. 
886 */ 887 struct request *blk_make_request(struct request_queue *q, struct bio *bio, 888 gfp_t gfp_mask) 889 { 890 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); 891 892 if (unlikely(!rq)) 893 return ERR_PTR(-ENOMEM); 894 895 for_each_bio(bio) { 896 struct bio *bounce_bio = bio; 897 int ret; 898 899 blk_queue_bounce(q, &bounce_bio); 900 ret = blk_rq_append_bio(q, rq, bounce_bio); 901 if (unlikely(ret)) { 902 blk_put_request(rq); 903 return ERR_PTR(ret); 904 } 905 } 906 907 return rq; 908 } 909 EXPORT_SYMBOL(blk_make_request); 910 911 /** 912 * blk_requeue_request - put a request back on queue 913 * @q: request queue where request should be inserted 914 * @rq: request to be inserted 915 * 916 * Description: 917 * Drivers often keep queueing requests until the hardware cannot accept 918 * more, when that condition happens we need to put the request back 919 * on the queue. Must be called with queue lock held. 920 */ 921 void blk_requeue_request(struct request_queue *q, struct request *rq) 922 { 923 blk_delete_timer(rq); 924 blk_clear_rq_complete(rq); 925 trace_block_rq_requeue(q, rq); 926 927 if (blk_rq_tagged(rq)) 928 blk_queue_end_tag(q, rq); 929 930 BUG_ON(blk_queued_rq(rq)); 931 932 elv_requeue_request(q, rq); 933 } 934 EXPORT_SYMBOL(blk_requeue_request); 935 936 static void add_acct_request(struct request_queue *q, struct request *rq, 937 int where) 938 { 939 drive_stat_acct(rq, 1); 940 __elv_add_request(q, rq, where); 941 } 942 943 /** 944 * blk_insert_request - insert a special request into a request queue 945 * @q: request queue where request should be inserted 946 * @rq: request to be inserted 947 * @at_head: insert request at head or tail of queue 948 * @data: private data 949 * 950 * Description: 951 * Many block devices need to execute commands asynchronously, so they don't 952 * block the whole kernel from preemption during request execution. This is 953 * accomplished normally by inserting aritficial requests tagged as 954 * REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them 955 * be scheduled for actual execution by the request queue. 956 * 957 * We have the option of inserting the head or the tail of the queue. 958 * Typically we use the tail for new ioctls and so forth. We use the head 959 * of the queue for things like a QUEUE_FULL message from a device, or a 960 * host that is unable to accept a particular command. 961 */ 962 void blk_insert_request(struct request_queue *q, struct request *rq, 963 int at_head, void *data) 964 { 965 int where = at_head ? 
ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 966 unsigned long flags; 967 968 /* 969 * tell I/O scheduler that this isn't a regular read/write (ie it 970 * must not attempt merges on this) and that it acts as a soft 971 * barrier 972 */ 973 rq->cmd_type = REQ_TYPE_SPECIAL; 974 975 rq->special = data; 976 977 spin_lock_irqsave(q->queue_lock, flags); 978 979 /* 980 * If command is tagged, release the tag 981 */ 982 if (blk_rq_tagged(rq)) 983 blk_queue_end_tag(q, rq); 984 985 add_acct_request(q, rq, where); 986 __blk_run_queue(q); 987 spin_unlock_irqrestore(q->queue_lock, flags); 988 } 989 EXPORT_SYMBOL(blk_insert_request); 990 991 static void part_round_stats_single(int cpu, struct hd_struct *part, 992 unsigned long now) 993 { 994 if (now == part->stamp) 995 return; 996 997 if (part_in_flight(part)) { 998 __part_stat_add(cpu, part, time_in_queue, 999 part_in_flight(part) * (now - part->stamp)); 1000 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1001 } 1002 part->stamp = now; 1003 } 1004 1005 /** 1006 * part_round_stats() - Round off the performance stats on a struct disk_stats. 1007 * @cpu: cpu number for stats access 1008 * @part: target partition 1009 * 1010 * The average IO queue length and utilisation statistics are maintained 1011 * by observing the current state of the queue length and the amount of 1012 * time it has been in this state for. 1013 * 1014 * Normally, that accounting is done on IO completion, but that can result 1015 * in more than a second's worth of IO being accounted for within any one 1016 * second, leading to >100% utilisation. To deal with that, we call this 1017 * function to do a round-off before returning the results when reading 1018 * /proc/diskstats. This accounts immediately for all queue usage up to 1019 * the current jiffies and restarts the counters again. 1020 */ 1021 void part_round_stats(int cpu, struct hd_struct *part) 1022 { 1023 unsigned long now = jiffies; 1024 1025 if (part->partno) 1026 part_round_stats_single(cpu, &part_to_disk(part)->part0, now); 1027 part_round_stats_single(cpu, part, now); 1028 } 1029 EXPORT_SYMBOL_GPL(part_round_stats); 1030 1031 /* 1032 * queue lock must be held 1033 */ 1034 void __blk_put_request(struct request_queue *q, struct request *req) 1035 { 1036 if (unlikely(!q)) 1037 return; 1038 if (unlikely(--req->ref_count)) 1039 return; 1040 1041 elv_completed_request(q, req); 1042 1043 /* this is a bio leak */ 1044 WARN_ON(req->bio != NULL); 1045 1046 /* 1047 * Request may not have originated from ll_rw_blk. if not, 1048 * it didn't come out of our reserved rq pools 1049 */ 1050 if (req->cmd_flags & REQ_ALLOCED) { 1051 int is_sync = rq_is_sync(req) != 0; 1052 int priv = req->cmd_flags & REQ_ELVPRIV; 1053 1054 BUG_ON(!list_empty(&req->queuelist)); 1055 BUG_ON(!hlist_unhashed(&req->hash)); 1056 1057 blk_free_request(q, req); 1058 freed_request(q, is_sync, priv); 1059 } 1060 } 1061 EXPORT_SYMBOL_GPL(__blk_put_request); 1062 1063 void blk_put_request(struct request *req) 1064 { 1065 unsigned long flags; 1066 struct request_queue *q = req->q; 1067 1068 spin_lock_irqsave(q->queue_lock, flags); 1069 __blk_put_request(q, req); 1070 spin_unlock_irqrestore(q->queue_lock, flags); 1071 } 1072 EXPORT_SYMBOL(blk_put_request); 1073 1074 /** 1075 * blk_add_request_payload - add a payload to a request 1076 * @rq: request to update 1077 * @page: page backing the payload 1078 * @len: length of the payload. 1079 * 1080 * This allows to later add a payload to an already submitted request by 1081 * a block driver. 
The driver needs to take care of freeing the payload 1082 * itself. 1083 * 1084 * Note that this is a quite horrible hack and nothing but handling of 1085 * discard requests should ever use it. 1086 */ 1087 void blk_add_request_payload(struct request *rq, struct page *page, 1088 unsigned int len) 1089 { 1090 struct bio *bio = rq->bio; 1091 1092 bio->bi_io_vec->bv_page = page; 1093 bio->bi_io_vec->bv_offset = 0; 1094 bio->bi_io_vec->bv_len = len; 1095 1096 bio->bi_size = len; 1097 bio->bi_vcnt = 1; 1098 bio->bi_phys_segments = 1; 1099 1100 rq->__data_len = rq->resid_len = len; 1101 rq->nr_phys_segments = 1; 1102 rq->buffer = bio_data(bio); 1103 } 1104 EXPORT_SYMBOL_GPL(blk_add_request_payload); 1105 1106 static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, 1107 struct bio *bio) 1108 { 1109 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1110 1111 if (!ll_back_merge_fn(q, req, bio)) 1112 return false; 1113 1114 trace_block_bio_backmerge(q, bio); 1115 1116 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1117 blk_rq_set_mixed_merge(req); 1118 1119 req->biotail->bi_next = bio; 1120 req->biotail = bio; 1121 req->__data_len += bio->bi_size; 1122 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1123 1124 drive_stat_acct(req, 0); 1125 elv_bio_merged(q, req, bio); 1126 return true; 1127 } 1128 1129 static bool bio_attempt_front_merge(struct request_queue *q, 1130 struct request *req, struct bio *bio) 1131 { 1132 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1133 sector_t sector; 1134 1135 if (!ll_front_merge_fn(q, req, bio)) 1136 return false; 1137 1138 trace_block_bio_frontmerge(q, bio); 1139 1140 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1141 blk_rq_set_mixed_merge(req); 1142 1143 sector = bio->bi_sector; 1144 1145 bio->bi_next = req->bio; 1146 req->bio = bio; 1147 1148 /* 1149 * may not be valid. if the low level driver said 1150 * it didn't need a bounce buffer then it better 1151 * not touch req->buffer either... 1152 */ 1153 req->buffer = bio_data(bio); 1154 req->__sector = bio->bi_sector; 1155 req->__data_len += bio->bi_size; 1156 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1157 1158 drive_stat_acct(req, 0); 1159 elv_bio_merged(q, req, bio); 1160 return true; 1161 } 1162 1163 /* 1164 * Attempts to merge with the plugged list in the current process. Returns 1165 * true if merge was successful, otherwise false. 
1166 */ 1167 static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, 1168 struct bio *bio) 1169 { 1170 struct blk_plug *plug; 1171 struct request *rq; 1172 bool ret = false; 1173 1174 plug = tsk->plug; 1175 if (!plug) 1176 goto out; 1177 1178 list_for_each_entry_reverse(rq, &plug->list, queuelist) { 1179 int el_ret; 1180 1181 if (rq->q != q) 1182 continue; 1183 1184 el_ret = elv_try_merge(rq, bio); 1185 if (el_ret == ELEVATOR_BACK_MERGE) { 1186 ret = bio_attempt_back_merge(q, rq, bio); 1187 if (ret) 1188 break; 1189 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1190 ret = bio_attempt_front_merge(q, rq, bio); 1191 if (ret) 1192 break; 1193 } 1194 } 1195 out: 1196 return ret; 1197 } 1198 1199 void init_request_from_bio(struct request *req, struct bio *bio) 1200 { 1201 req->cpu = bio->bi_comp_cpu; 1202 req->cmd_type = REQ_TYPE_FS; 1203 1204 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; 1205 if (bio->bi_rw & REQ_RAHEAD) 1206 req->cmd_flags |= REQ_FAILFAST_MASK; 1207 1208 req->errors = 0; 1209 req->__sector = bio->bi_sector; 1210 req->ioprio = bio_prio(bio); 1211 blk_rq_bio_prep(req->q, req, bio); 1212 } 1213 1214 static int __make_request(struct request_queue *q, struct bio *bio) 1215 { 1216 const bool sync = !!(bio->bi_rw & REQ_SYNC); 1217 struct blk_plug *plug; 1218 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; 1219 struct request *req; 1220 1221 /* 1222 * low level driver can indicate that it wants pages above a 1223 * certain limit bounced to low memory (ie for highmem, or even 1224 * ISA dma in theory) 1225 */ 1226 blk_queue_bounce(q, &bio); 1227 1228 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { 1229 spin_lock_irq(q->queue_lock); 1230 where = ELEVATOR_INSERT_FLUSH; 1231 goto get_rq; 1232 } 1233 1234 /* 1235 * Check if we can merge with the plugged list before grabbing 1236 * any locks. 1237 */ 1238 if (attempt_plug_merge(current, q, bio)) 1239 goto out; 1240 1241 spin_lock_irq(q->queue_lock); 1242 1243 el_ret = elv_merge(q, &req, bio); 1244 if (el_ret == ELEVATOR_BACK_MERGE) { 1245 if (bio_attempt_back_merge(q, req, bio)) { 1246 if (!attempt_back_merge(q, req)) 1247 elv_merged_request(q, req, el_ret); 1248 goto out_unlock; 1249 } 1250 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1251 if (bio_attempt_front_merge(q, req, bio)) { 1252 if (!attempt_front_merge(q, req)) 1253 elv_merged_request(q, req, el_ret); 1254 goto out_unlock; 1255 } 1256 } 1257 1258 get_rq: 1259 /* 1260 * This sync check and mask will be re-done in init_request_from_bio(), 1261 * but we need to set it earlier to expose the sync flag to the 1262 * rq allocator and io schedulers. 1263 */ 1264 rw_flags = bio_data_dir(bio); 1265 if (sync) 1266 rw_flags |= REQ_SYNC; 1267 1268 /* 1269 * Grab a free request. This is might sleep but can not fail. 1270 * Returns with the queue unlocked. 1271 */ 1272 req = get_request_wait(q, rw_flags, bio); 1273 1274 /* 1275 * After dropping the lock and possibly sleeping here, our request 1276 * may now be mergeable after it had proven unmergeable (above). 1277 * We don't worry about that case for efficiency. It won't happen 1278 * often, and the elevators are able to handle it. 1279 */ 1280 init_request_from_bio(req, bio); 1281 1282 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || 1283 bio_flagged(bio, BIO_CPU_AFFINE)) { 1284 req->cpu = blk_cpu_to_group(get_cpu()); 1285 put_cpu(); 1286 } 1287 1288 plug = current->plug; 1289 if (plug) { 1290 /* 1291 * If this is the first request added after a plug, fire 1292 * of a plug trace. 
If others have been added before, check 1293 * if we have multiple devices in this plug. If so, make a 1294 * note to sort the list before dispatch. 1295 */ 1296 if (list_empty(&plug->list)) 1297 trace_block_plug(q); 1298 else if (!plug->should_sort) { 1299 struct request *__rq; 1300 1301 __rq = list_entry_rq(plug->list.prev); 1302 if (__rq->q != q) 1303 plug->should_sort = 1; 1304 } 1305 list_add_tail(&req->queuelist, &plug->list); 1306 drive_stat_acct(req, 1); 1307 } else { 1308 spin_lock_irq(q->queue_lock); 1309 add_acct_request(q, req, where); 1310 __blk_run_queue(q); 1311 out_unlock: 1312 spin_unlock_irq(q->queue_lock); 1313 } 1314 out: 1315 return 0; 1316 } 1317 1318 /* 1319 * If bio->bi_dev is a partition, remap the location 1320 */ 1321 static inline void blk_partition_remap(struct bio *bio) 1322 { 1323 struct block_device *bdev = bio->bi_bdev; 1324 1325 if (bio_sectors(bio) && bdev != bdev->bd_contains) { 1326 struct hd_struct *p = bdev->bd_part; 1327 1328 bio->bi_sector += p->start_sect; 1329 bio->bi_bdev = bdev->bd_contains; 1330 1331 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1332 bdev->bd_dev, 1333 bio->bi_sector - p->start_sect); 1334 } 1335 } 1336 1337 static void handle_bad_sector(struct bio *bio) 1338 { 1339 char b[BDEVNAME_SIZE]; 1340 1341 printk(KERN_INFO "attempt to access beyond end of device\n"); 1342 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", 1343 bdevname(bio->bi_bdev, b), 1344 bio->bi_rw, 1345 (unsigned long long)bio->bi_sector + bio_sectors(bio), 1346 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); 1347 1348 set_bit(BIO_EOF, &bio->bi_flags); 1349 } 1350 1351 #ifdef CONFIG_FAIL_MAKE_REQUEST 1352 1353 static DECLARE_FAULT_ATTR(fail_make_request); 1354 1355 static int __init setup_fail_make_request(char *str) 1356 { 1357 return setup_fault_attr(&fail_make_request, str); 1358 } 1359 __setup("fail_make_request=", setup_fail_make_request); 1360 1361 static int should_fail_request(struct bio *bio) 1362 { 1363 struct hd_struct *part = bio->bi_bdev->bd_part; 1364 1365 if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail) 1366 return should_fail(&fail_make_request, bio->bi_size); 1367 1368 return 0; 1369 } 1370 1371 static int __init fail_make_request_debugfs(void) 1372 { 1373 return init_fault_attr_dentries(&fail_make_request, 1374 "fail_make_request"); 1375 } 1376 1377 late_initcall(fail_make_request_debugfs); 1378 1379 #else /* CONFIG_FAIL_MAKE_REQUEST */ 1380 1381 static inline int should_fail_request(struct bio *bio) 1382 { 1383 return 0; 1384 } 1385 1386 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 1387 1388 /* 1389 * Check whether this bio extends beyond the end of the device. 1390 */ 1391 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) 1392 { 1393 sector_t maxsector; 1394 1395 if (!nr_sectors) 1396 return 0; 1397 1398 /* Test device or partition size, when known. */ 1399 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1400 if (maxsector) { 1401 sector_t sector = bio->bi_sector; 1402 1403 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 1404 /* 1405 * This may well happen - the kernel calls bread() 1406 * without checking the size of the device, e.g., when 1407 * mounting a device. 1408 */ 1409 handle_bad_sector(bio); 1410 return 1; 1411 } 1412 } 1413 1414 return 0; 1415 } 1416 1417 /** 1418 * generic_make_request - hand a buffer to its device driver for I/O 1419 * @bio: The bio describing the location in memory and on the device. 
1420 * 1421 * generic_make_request() is used to make I/O requests of block 1422 * devices. It is passed a &struct bio, which describes the I/O that needs 1423 * to be done. 1424 * 1425 * generic_make_request() does not return any status. The 1426 * success/failure status of the request, along with notification of 1427 * completion, is delivered asynchronously through the bio->bi_end_io 1428 * function described (one day) else where. 1429 * 1430 * The caller of generic_make_request must make sure that bi_io_vec 1431 * are set to describe the memory buffer, and that bi_dev and bi_sector are 1432 * set to describe the device address, and the 1433 * bi_end_io and optionally bi_private are set to describe how 1434 * completion notification should be signaled. 1435 * 1436 * generic_make_request and the drivers it calls may use bi_next if this 1437 * bio happens to be merged with someone else, and may change bi_dev and 1438 * bi_sector for remaps as it sees fit. So the values of these fields 1439 * should NOT be depended on after the call to generic_make_request. 1440 */ 1441 static inline void __generic_make_request(struct bio *bio) 1442 { 1443 struct request_queue *q; 1444 sector_t old_sector; 1445 int ret, nr_sectors = bio_sectors(bio); 1446 dev_t old_dev; 1447 int err = -EIO; 1448 1449 might_sleep(); 1450 1451 if (bio_check_eod(bio, nr_sectors)) 1452 goto end_io; 1453 1454 /* 1455 * Resolve the mapping until finished. (drivers are 1456 * still free to implement/resolve their own stacking 1457 * by explicitly returning 0) 1458 * 1459 * NOTE: we don't repeat the blk_size check for each new device. 1460 * Stacking drivers are expected to know what they are doing. 1461 */ 1462 old_sector = -1; 1463 old_dev = 0; 1464 do { 1465 char b[BDEVNAME_SIZE]; 1466 1467 q = bdev_get_queue(bio->bi_bdev); 1468 if (unlikely(!q)) { 1469 printk(KERN_ERR 1470 "generic_make_request: Trying to access " 1471 "nonexistent block-device %s (%Lu)\n", 1472 bdevname(bio->bi_bdev, b), 1473 (long long) bio->bi_sector); 1474 goto end_io; 1475 } 1476 1477 if (unlikely(!(bio->bi_rw & REQ_DISCARD) && 1478 nr_sectors > queue_max_hw_sectors(q))) { 1479 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1480 bdevname(bio->bi_bdev, b), 1481 bio_sectors(bio), 1482 queue_max_hw_sectors(q)); 1483 goto end_io; 1484 } 1485 1486 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) 1487 goto end_io; 1488 1489 if (should_fail_request(bio)) 1490 goto end_io; 1491 1492 /* 1493 * If this device has partitions, remap block n 1494 * of partition p to block n+start(p) of the disk. 1495 */ 1496 blk_partition_remap(bio); 1497 1498 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) 1499 goto end_io; 1500 1501 if (old_sector != -1) 1502 trace_block_bio_remap(q, bio, old_dev, old_sector); 1503 1504 old_sector = bio->bi_sector; 1505 old_dev = bio->bi_bdev->bd_dev; 1506 1507 if (bio_check_eod(bio, nr_sectors)) 1508 goto end_io; 1509 1510 /* 1511 * Filter flush bio's early so that make_request based 1512 * drivers without flush support don't have to worry 1513 * about them. 
1514 */ 1515 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { 1516 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); 1517 if (!nr_sectors) { 1518 err = 0; 1519 goto end_io; 1520 } 1521 } 1522 1523 if ((bio->bi_rw & REQ_DISCARD) && 1524 (!blk_queue_discard(q) || 1525 ((bio->bi_rw & REQ_SECURE) && 1526 !blk_queue_secdiscard(q)))) { 1527 err = -EOPNOTSUPP; 1528 goto end_io; 1529 } 1530 1531 if (blk_throtl_bio(q, &bio)) 1532 goto end_io; 1533 1534 /* 1535 * If bio = NULL, bio has been throttled and will be submitted 1536 * later. 1537 */ 1538 if (!bio) 1539 break; 1540 1541 trace_block_bio_queue(q, bio); 1542 1543 ret = q->make_request_fn(q, bio); 1544 } while (ret); 1545 1546 return; 1547 1548 end_io: 1549 bio_endio(bio, err); 1550 } 1551 1552 /* 1553 * We only want one ->make_request_fn to be active at a time, 1554 * else stack usage with stacked devices could be a problem. 1555 * So use current->bio_list to keep a list of requests 1556 * submited by a make_request_fn function. 1557 * current->bio_list is also used as a flag to say if 1558 * generic_make_request is currently active in this task or not. 1559 * If it is NULL, then no make_request is active. If it is non-NULL, 1560 * then a make_request is active, and new requests should be added 1561 * at the tail 1562 */ 1563 void generic_make_request(struct bio *bio) 1564 { 1565 struct bio_list bio_list_on_stack; 1566 1567 if (current->bio_list) { 1568 /* make_request is active */ 1569 bio_list_add(current->bio_list, bio); 1570 return; 1571 } 1572 /* following loop may be a bit non-obvious, and so deserves some 1573 * explanation. 1574 * Before entering the loop, bio->bi_next is NULL (as all callers 1575 * ensure that) so we have a list with a single bio. 1576 * We pretend that we have just taken it off a longer list, so 1577 * we assign bio_list to a pointer to the bio_list_on_stack, 1578 * thus initialising the bio_list of new bios to be 1579 * added. __generic_make_request may indeed add some more bios 1580 * through a recursive call to generic_make_request. If it 1581 * did, we find a non-NULL value in bio_list and re-enter the loop 1582 * from the top. In this case we really did just take the bio 1583 * of the top of the list (no pretending) and so remove it from 1584 * bio_list, and call into __generic_make_request again. 1585 * 1586 * The loop was structured like this to make only one call to 1587 * __generic_make_request (which is important as it is large and 1588 * inlined) and to keep the structure simple. 1589 */ 1590 BUG_ON(bio->bi_next); 1591 bio_list_init(&bio_list_on_stack); 1592 current->bio_list = &bio_list_on_stack; 1593 do { 1594 __generic_make_request(bio); 1595 bio = bio_list_pop(current->bio_list); 1596 } while (bio); 1597 current->bio_list = NULL; /* deactivate */ 1598 } 1599 EXPORT_SYMBOL(generic_make_request); 1600 1601 /** 1602 * submit_bio - submit a bio to the block device layer for I/O 1603 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) 1604 * @bio: The &struct bio which describes the I/O 1605 * 1606 * submit_bio() is very similar in purpose to generic_make_request(), and 1607 * uses that function to do most of the work. Both are fairly rough 1608 * interfaces; @bio must be presetup and ready for I/O. 1609 * 1610 */ 1611 void submit_bio(int rw, struct bio *bio) 1612 { 1613 int count = bio_sectors(bio); 1614 1615 bio->bi_rw |= rw; 1616 1617 /* 1618 * If it's a regular read/write or a barrier with data attached, 1619 * go through the normal accounting stuff before submission. 
1620 */ 1621 if (bio_has_data(bio) && !(rw & REQ_DISCARD)) { 1622 if (rw & WRITE) { 1623 count_vm_events(PGPGOUT, count); 1624 } else { 1625 task_io_account_read(bio->bi_size); 1626 count_vm_events(PGPGIN, count); 1627 } 1628 1629 if (unlikely(block_dump)) { 1630 char b[BDEVNAME_SIZE]; 1631 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 1632 current->comm, task_pid_nr(current), 1633 (rw & WRITE) ? "WRITE" : "READ", 1634 (unsigned long long)bio->bi_sector, 1635 bdevname(bio->bi_bdev, b), 1636 count); 1637 } 1638 } 1639 1640 generic_make_request(bio); 1641 } 1642 EXPORT_SYMBOL(submit_bio); 1643 1644 /** 1645 * blk_rq_check_limits - Helper function to check a request for the queue limit 1646 * @q: the queue 1647 * @rq: the request being checked 1648 * 1649 * Description: 1650 * @rq may have been made based on weaker limitations of upper-level queues 1651 * in request stacking drivers, and it may violate the limitation of @q. 1652 * Since the block layer and the underlying device driver trust @rq 1653 * after it is inserted to @q, it should be checked against @q before 1654 * the insertion using this generic function. 1655 * 1656 * This function should also be useful for request stacking drivers 1657 * in some cases below, so export this function. 1658 * Request stacking drivers like request-based dm may change the queue 1659 * limits while requests are in the queue (e.g. dm's table swapping). 1660 * Such request stacking drivers should check those requests agaist 1661 * the new queue limits again when they dispatch those requests, 1662 * although such checkings are also done against the old queue limits 1663 * when submitting requests. 1664 */ 1665 int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1666 { 1667 if (rq->cmd_flags & REQ_DISCARD) 1668 return 0; 1669 1670 if (blk_rq_sectors(rq) > queue_max_sectors(q) || 1671 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { 1672 printk(KERN_ERR "%s: over max size limit.\n", __func__); 1673 return -EIO; 1674 } 1675 1676 /* 1677 * queue's settings related to segment counting like q->bounce_pfn 1678 * may differ from that of other stacking queues. 1679 * Recalculate it to check the request correctly on this queue's 1680 * limitation. 
1681 */ 1682 blk_recalc_rq_segments(rq); 1683 if (rq->nr_phys_segments > queue_max_segments(q)) { 1684 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 1685 return -EIO; 1686 } 1687 1688 return 0; 1689 } 1690 EXPORT_SYMBOL_GPL(blk_rq_check_limits); 1691 1692 /** 1693 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 1694 * @q: the queue to submit the request 1695 * @rq: the request being queued 1696 */ 1697 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 1698 { 1699 unsigned long flags; 1700 1701 if (blk_rq_check_limits(q, rq)) 1702 return -EIO; 1703 1704 #ifdef CONFIG_FAIL_MAKE_REQUEST 1705 if (rq->rq_disk && rq->rq_disk->part0.make_it_fail && 1706 should_fail(&fail_make_request, blk_rq_bytes(rq))) 1707 return -EIO; 1708 #endif 1709 1710 spin_lock_irqsave(q->queue_lock, flags); 1711 1712 /* 1713 * Submitting request must be dequeued before calling this function 1714 * because it will be linked to another request_queue 1715 */ 1716 BUG_ON(blk_queued_rq(rq)); 1717 1718 add_acct_request(q, rq, ELEVATOR_INSERT_BACK); 1719 spin_unlock_irqrestore(q->queue_lock, flags); 1720 1721 return 0; 1722 } 1723 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 1724 1725 /** 1726 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 1727 * @rq: request to examine 1728 * 1729 * Description: 1730 * A request could be merge of IOs which require different failure 1731 * handling. This function determines the number of bytes which 1732 * can be failed from the beginning of the request without 1733 * crossing into area which need to be retried further. 1734 * 1735 * Return: 1736 * The number of bytes to fail. 1737 * 1738 * Context: 1739 * queue_lock must be held. 1740 */ 1741 unsigned int blk_rq_err_bytes(const struct request *rq) 1742 { 1743 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 1744 unsigned int bytes = 0; 1745 struct bio *bio; 1746 1747 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) 1748 return blk_rq_bytes(rq); 1749 1750 /* 1751 * Currently the only 'mixing' which can happen is between 1752 * different fastfail types. We can safely fail portions 1753 * which have all the failfast bits that the first one has - 1754 * the ones which are at least as eager to fail as the first 1755 * one. 1756 */ 1757 for (bio = rq->bio; bio; bio = bio->bi_next) { 1758 if ((bio->bi_rw & ff) != ff) 1759 break; 1760 bytes += bio->bi_size; 1761 } 1762 1763 /* this could lead to infinite loop */ 1764 BUG_ON(blk_rq_bytes(rq) && !bytes); 1765 return bytes; 1766 } 1767 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 1768 1769 static void blk_account_io_completion(struct request *req, unsigned int bytes) 1770 { 1771 if (blk_do_io_stat(req)) { 1772 const int rw = rq_data_dir(req); 1773 struct hd_struct *part; 1774 int cpu; 1775 1776 cpu = part_stat_lock(); 1777 part = req->part; 1778 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 1779 part_stat_unlock(); 1780 } 1781 } 1782 1783 static void blk_account_io_done(struct request *req) 1784 { 1785 /* 1786 * Account IO completion. flush_rq isn't accounted as a 1787 * normal IO on queueing nor completion. Accounting the 1788 * containing request is enough. 
1789 */ 1790 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { 1791 unsigned long duration = jiffies - req->start_time; 1792 const int rw = rq_data_dir(req); 1793 struct hd_struct *part; 1794 int cpu; 1795 1796 cpu = part_stat_lock(); 1797 part = req->part; 1798 1799 part_stat_inc(cpu, part, ios[rw]); 1800 part_stat_add(cpu, part, ticks[rw], duration); 1801 part_round_stats(cpu, part); 1802 part_dec_in_flight(part, rw); 1803 1804 hd_struct_put(part); 1805 part_stat_unlock(); 1806 } 1807 } 1808 1809 /** 1810 * blk_peek_request - peek at the top of a request queue 1811 * @q: request queue to peek at 1812 * 1813 * Description: 1814 * Return the request at the top of @q. The returned request 1815 * should be started using blk_start_request() before LLD starts 1816 * processing it. 1817 * 1818 * Return: 1819 * Pointer to the request at the top of @q if available. Null 1820 * otherwise. 1821 * 1822 * Context: 1823 * queue_lock must be held. 1824 */ 1825 struct request *blk_peek_request(struct request_queue *q) 1826 { 1827 struct request *rq; 1828 int ret; 1829 1830 while ((rq = __elv_next_request(q)) != NULL) { 1831 if (!(rq->cmd_flags & REQ_STARTED)) { 1832 /* 1833 * This is the first time the device driver 1834 * sees this request (possibly after 1835 * requeueing). Notify IO scheduler. 1836 */ 1837 if (rq->cmd_flags & REQ_SORTED) 1838 elv_activate_rq(q, rq); 1839 1840 /* 1841 * just mark as started even if we don't start 1842 * it, a request that has been delayed should 1843 * not be passed by new incoming requests 1844 */ 1845 rq->cmd_flags |= REQ_STARTED; 1846 trace_block_rq_issue(q, rq); 1847 } 1848 1849 if (!q->boundary_rq || q->boundary_rq == rq) { 1850 q->end_sector = rq_end_sector(rq); 1851 q->boundary_rq = NULL; 1852 } 1853 1854 if (rq->cmd_flags & REQ_DONTPREP) 1855 break; 1856 1857 if (q->dma_drain_size && blk_rq_bytes(rq)) { 1858 /* 1859 * make sure space for the drain appears we 1860 * know we can do this because max_hw_segments 1861 * has been adjusted to be one fewer than the 1862 * device can handle 1863 */ 1864 rq->nr_phys_segments++; 1865 } 1866 1867 if (!q->prep_rq_fn) 1868 break; 1869 1870 ret = q->prep_rq_fn(q, rq); 1871 if (ret == BLKPREP_OK) { 1872 break; 1873 } else if (ret == BLKPREP_DEFER) { 1874 /* 1875 * the request may have been (partially) prepped. 1876 * we need to keep this request in the front to 1877 * avoid resource deadlock. REQ_STARTED will 1878 * prevent other fs requests from passing this one. 1879 */ 1880 if (q->dma_drain_size && blk_rq_bytes(rq) && 1881 !(rq->cmd_flags & REQ_DONTPREP)) { 1882 /* 1883 * remove the space for the drain we added 1884 * so that we don't add it again 1885 */ 1886 --rq->nr_phys_segments; 1887 } 1888 1889 rq = NULL; 1890 break; 1891 } else if (ret == BLKPREP_KILL) { 1892 rq->cmd_flags |= REQ_QUIET; 1893 /* 1894 * Mark this request as started so we don't trigger 1895 * any debug logic in the end I/O path. 
1896 */ 1897 blk_start_request(rq); 1898 __blk_end_request_all(rq, -EIO); 1899 } else { 1900 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 1901 break; 1902 } 1903 } 1904 1905 return rq; 1906 } 1907 EXPORT_SYMBOL(blk_peek_request); 1908 1909 void blk_dequeue_request(struct request *rq) 1910 { 1911 struct request_queue *q = rq->q; 1912 1913 BUG_ON(list_empty(&rq->queuelist)); 1914 BUG_ON(ELV_ON_HASH(rq)); 1915 1916 list_del_init(&rq->queuelist); 1917 1918 /* 1919 * the time frame between a request being removed from the lists 1920 * and when it is freed is accounted as I/O that is in progress on 1921 * the driver side. 1922 */ 1923 if (blk_account_rq(rq)) { 1924 q->in_flight[rq_is_sync(rq)]++; 1925 set_io_start_time_ns(rq); 1926 } 1927 } 1928 1929 /** 1930 * blk_start_request - start request processing on the driver 1931 * @req: request to dequeue 1932 * 1933 * Description: 1934 * Dequeue @req and start the timeout timer on it. This hands off the 1935 * request to the driver. 1936 * 1937 * Block internal functions which don't want to start the timer should 1938 * call blk_dequeue_request(). 1939 * 1940 * Context: 1941 * queue_lock must be held. 1942 */ 1943 void blk_start_request(struct request *req) 1944 { 1945 blk_dequeue_request(req); 1946 1947 /* 1948 * We are now handing the request to the hardware, initialize 1949 * resid_len to full count and add the timeout handler. 1950 */ 1951 req->resid_len = blk_rq_bytes(req); 1952 if (unlikely(blk_bidi_rq(req))) 1953 req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 1954 1955 blk_add_timer(req); 1956 } 1957 EXPORT_SYMBOL(blk_start_request); 1958 1959 /** 1960 * blk_fetch_request - fetch a request from a request queue 1961 * @q: request queue to fetch a request from 1962 * 1963 * Description: 1964 * Return the request at the top of @q. The request is started on 1965 * return and the LLD can start processing it immediately. 1966 * 1967 * Return: 1968 * Pointer to the request at the top of @q if available. Null 1969 * otherwise. 1970 * 1971 * Context: 1972 * queue_lock must be held. 1973 */ 1974 struct request *blk_fetch_request(struct request_queue *q) 1975 { 1976 struct request *rq; 1977 1978 rq = blk_peek_request(q); 1979 if (rq) 1980 blk_start_request(rq); 1981 return rq; 1982 } 1983 EXPORT_SYMBOL(blk_fetch_request); 1984 1985 /** 1986 * blk_update_request - Special helper function for request stacking drivers 1987 * @req: the request being processed 1988 * @error: %0 for success, < %0 for error 1989 * @nr_bytes: number of bytes to complete @req 1990 * 1991 * Description: 1992 * Ends I/O on a number of bytes attached to @req, but doesn't complete 1993 * the request structure even if @req has no leftover. 1994 * If @req has leftover, sets it up for the next range of segments. 1995 * 1996 * This special helper function is only for request stacking drivers 1997 * (e.g. request-based dm) so that they can handle partial completion. 1998 * Actual device drivers should use blk_end_request instead. 1999 * 2000 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2001 * %false return from this function.
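 *
 * Illustrative sketch (editor's addition, not taken from an in-tree
 * driver; the mydev_* names are hypothetical). A stacking driver that has
 * dispatched @req to a lower queue can complete the bytes the lower device
 * finished, keeping @req set up while data remains, and finish the request
 * under the queue lock once this returns %false:
 *
 *	static void mydev_lower_done(struct request *req, int error,
 *				     unsigned int bytes_done)
 *	{
 *		if (!blk_update_request(req, error, bytes_done))
 *			mydev_finish_original(req, error);
 *	}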
2002 * 2003 * Return: 2004 * %false - this request doesn't have any more data 2005 * %true - this request has more data 2006 **/ 2007 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 2008 { 2009 int total_bytes, bio_nbytes, next_idx = 0; 2010 struct bio *bio; 2011 2012 if (!req->bio) 2013 return false; 2014 2015 trace_block_rq_complete(req->q, req); 2016 2017 /* 2018 * For fs requests, rq is just carrier of independent bio's 2019 * and each partial completion should be handled separately. 2020 * Reset per-request error on each partial completion. 2021 * 2022 * TODO: tj: This is too subtle. It would be better to let 2023 * low level drivers do what they see fit. 2024 */ 2025 if (req->cmd_type == REQ_TYPE_FS) 2026 req->errors = 0; 2027 2028 if (error && req->cmd_type == REQ_TYPE_FS && 2029 !(req->cmd_flags & REQ_QUIET)) { 2030 char *error_type; 2031 2032 switch (error) { 2033 case -ENOLINK: 2034 error_type = "recoverable transport"; 2035 break; 2036 case -EREMOTEIO: 2037 error_type = "critical target"; 2038 break; 2039 case -EBADE: 2040 error_type = "critical nexus"; 2041 break; 2042 case -EIO: 2043 default: 2044 error_type = "I/O"; 2045 break; 2046 } 2047 printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", 2048 error_type, req->rq_disk ? req->rq_disk->disk_name : "?", 2049 (unsigned long long)blk_rq_pos(req)); 2050 } 2051 2052 blk_account_io_completion(req, nr_bytes); 2053 2054 total_bytes = bio_nbytes = 0; 2055 while ((bio = req->bio) != NULL) { 2056 int nbytes; 2057 2058 if (nr_bytes >= bio->bi_size) { 2059 req->bio = bio->bi_next; 2060 nbytes = bio->bi_size; 2061 req_bio_endio(req, bio, nbytes, error); 2062 next_idx = 0; 2063 bio_nbytes = 0; 2064 } else { 2065 int idx = bio->bi_idx + next_idx; 2066 2067 if (unlikely(idx >= bio->bi_vcnt)) { 2068 blk_dump_rq_flags(req, "__end_that"); 2069 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", 2070 __func__, idx, bio->bi_vcnt); 2071 break; 2072 } 2073 2074 nbytes = bio_iovec_idx(bio, idx)->bv_len; 2075 BIO_BUG_ON(nbytes > bio->bi_size); 2076 2077 /* 2078 * not a complete bvec done 2079 */ 2080 if (unlikely(nbytes > nr_bytes)) { 2081 bio_nbytes += nr_bytes; 2082 total_bytes += nr_bytes; 2083 break; 2084 } 2085 2086 /* 2087 * advance to the next vector 2088 */ 2089 next_idx++; 2090 bio_nbytes += nbytes; 2091 } 2092 2093 total_bytes += nbytes; 2094 nr_bytes -= nbytes; 2095 2096 bio = req->bio; 2097 if (bio) { 2098 /* 2099 * end more in this run, or just return 'not-done' 2100 */ 2101 if (unlikely(nr_bytes <= 0)) 2102 break; 2103 } 2104 } 2105 2106 /* 2107 * completely done 2108 */ 2109 if (!req->bio) { 2110 /* 2111 * Reset counters so that the request stacking driver 2112 * can find how many bytes remain in the request 2113 * later. 
2114 */ 2115 req->__data_len = 0; 2116 return false; 2117 } 2118 2119 /* 2120 * if the request wasn't completed, update state 2121 */ 2122 if (bio_nbytes) { 2123 req_bio_endio(req, bio, bio_nbytes, error); 2124 bio->bi_idx += next_idx; 2125 bio_iovec(bio)->bv_offset += nr_bytes; 2126 bio_iovec(bio)->bv_len -= nr_bytes; 2127 } 2128 2129 req->__data_len -= total_bytes; 2130 req->buffer = bio_data(req->bio); 2131 2132 /* update sector only for requests with clear definition of sector */ 2133 if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) 2134 req->__sector += total_bytes >> 9; 2135 2136 /* mixed attributes always follow the first bio */ 2137 if (req->cmd_flags & REQ_MIXED_MERGE) { 2138 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2139 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; 2140 } 2141 2142 /* 2143 * If total number of sectors is less than the first segment 2144 * size, something has gone terribly wrong. 2145 */ 2146 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2147 blk_dump_rq_flags(req, "request botched"); 2148 req->__data_len = blk_rq_cur_bytes(req); 2149 } 2150 2151 /* recalculate the number of segments */ 2152 blk_recalc_rq_segments(req); 2153 2154 return true; 2155 } 2156 EXPORT_SYMBOL_GPL(blk_update_request); 2157 2158 static bool blk_update_bidi_request(struct request *rq, int error, 2159 unsigned int nr_bytes, 2160 unsigned int bidi_bytes) 2161 { 2162 if (blk_update_request(rq, error, nr_bytes)) 2163 return true; 2164 2165 /* Bidi request must be completed as a whole */ 2166 if (unlikely(blk_bidi_rq(rq)) && 2167 blk_update_request(rq->next_rq, error, bidi_bytes)) 2168 return true; 2169 2170 if (blk_queue_add_random(rq->q)) 2171 add_disk_randomness(rq->rq_disk); 2172 2173 return false; 2174 } 2175 2176 /** 2177 * blk_unprep_request - unprepare a request 2178 * @req: the request 2179 * 2180 * This function makes a request ready for complete resubmission (or 2181 * completion). It happens only after all error handling is complete, 2182 * so represents the appropriate moment to deallocate any resources 2183 * that were allocated to the request in the prep_rq_fn. The queue 2184 * lock is held when calling this. 2185 */ 2186 void blk_unprep_request(struct request *req) 2187 { 2188 struct request_queue *q = req->q; 2189 2190 req->cmd_flags &= ~REQ_DONTPREP; 2191 if (q->unprep_rq_fn) 2192 q->unprep_rq_fn(q, req); 2193 } 2194 EXPORT_SYMBOL_GPL(blk_unprep_request); 2195 2196 /* 2197 * queue lock must be held 2198 */ 2199 static void blk_finish_request(struct request *req, int error) 2200 { 2201 if (blk_rq_tagged(req)) 2202 blk_queue_end_tag(req->q, req); 2203 2204 BUG_ON(blk_queued_rq(req)); 2205 2206 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2207 laptop_io_completion(&req->q->backing_dev_info); 2208 2209 blk_delete_timer(req); 2210 2211 if (req->cmd_flags & REQ_DONTPREP) 2212 blk_unprep_request(req); 2213 2214 2215 blk_account_io_done(req); 2216 2217 if (req->end_io) 2218 req->end_io(req, error); 2219 else { 2220 if (blk_bidi_rq(req)) 2221 __blk_put_request(req->next_rq->q, req->next_rq); 2222 2223 __blk_put_request(req->q, req); 2224 } 2225 } 2226 2227 /** 2228 * blk_end_bidi_request - Complete a bidi request 2229 * @rq: the request to complete 2230 * @error: %0 for success, < %0 for error 2231 * @nr_bytes: number of bytes to complete @rq 2232 * @bidi_bytes: number of bytes to complete @rq->next_rq 2233 * 2234 * Description: 2235 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 
2236 * Drivers that support bidi can safely call this member for any 2237 * type of request, bidi or uni. In the latter case @bidi_bytes is 2238 * just ignored. 2239 * 2240 * Return: 2241 * %false - we are done with this request 2242 * %true - still buffers pending for this request 2243 **/ 2244 static bool blk_end_bidi_request(struct request *rq, int error, 2245 unsigned int nr_bytes, unsigned int bidi_bytes) 2246 { 2247 struct request_queue *q = rq->q; 2248 unsigned long flags; 2249 2250 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2251 return true; 2252 2253 spin_lock_irqsave(q->queue_lock, flags); 2254 blk_finish_request(rq, error); 2255 spin_unlock_irqrestore(q->queue_lock, flags); 2256 2257 return false; 2258 } 2259 2260 /** 2261 * __blk_end_bidi_request - Complete a bidi request with queue lock held 2262 * @rq: the request to complete 2263 * @error: %0 for success, < %0 for error 2264 * @nr_bytes: number of bytes to complete @rq 2265 * @bidi_bytes: number of bytes to complete @rq->next_rq 2266 * 2267 * Description: 2268 * Identical to blk_end_bidi_request() except that the queue lock is 2269 * assumed to be held on entry and remains so on return. 2270 * 2271 * Return: 2272 * %false - we are done with this request 2273 * %true - still buffers pending for this request 2274 **/ 2275 static bool __blk_end_bidi_request(struct request *rq, int error, 2276 unsigned int nr_bytes, unsigned int bidi_bytes) 2277 { 2278 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2279 return true; 2280 2281 blk_finish_request(rq, error); 2282 2283 return false; 2284 } 2285 2286 /** 2287 * blk_end_request - Helper function for drivers to complete the request. 2288 * @rq: the request being processed 2289 * @error: %0 for success, < %0 for error 2290 * @nr_bytes: number of bytes to complete 2291 * 2292 * Description: 2293 * Ends I/O on a number of bytes attached to @rq. 2294 * If @rq has leftover, sets it up for the next range of segments. 2295 * 2296 * Return: 2297 * %false - we are done with this request 2298 * %true - still buffers pending for this request 2299 **/ 2300 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2301 { 2302 return blk_end_bidi_request(rq, error, nr_bytes, 0); 2303 } 2304 EXPORT_SYMBOL(blk_end_request); 2305 2306 /** 2307 * blk_end_request_all - Helper function for drivers to finish the request. 2308 * @rq: the request to finish 2309 * @error: %0 for success, < %0 for error 2310 * 2311 * Description: 2312 * Completely finish @rq. 2313 */ 2314 void blk_end_request_all(struct request *rq, int error) 2315 { 2316 bool pending; 2317 unsigned int bidi_bytes = 0; 2318 2319 if (unlikely(blk_bidi_rq(rq))) 2320 bidi_bytes = blk_rq_bytes(rq->next_rq); 2321 2322 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2323 BUG_ON(pending); 2324 } 2325 EXPORT_SYMBOL(blk_end_request_all); 2326 2327 /** 2328 * blk_end_request_cur - Helper function to finish the current request chunk. 2329 * @rq: the request to finish the current chunk for 2330 * @error: %0 for success, < %0 for error 2331 * 2332 * Description: 2333 * Complete the current consecutively mapped chunk from @rq.
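 *
 * Illustrative sketch (editor's addition; the mydev_* names are
 * hypothetical): a simple driver that transfers one chunk per hardware
 * operation can call this from a completion context where the queue lock
 * is not held, moving on to the next chunk while %true is returned:
 *
 *	void mydev_chunk_done(struct mydev *dev, int error)
 *	{
 *		struct request *rq = dev->current_rq;
 *
 *		if (blk_end_request_cur(rq, error))
 *			mydev_submit_chunk(dev, rq);
 *		else
 *			dev->current_rq = NULL;
 *	}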
2334 * 2335 * Return: 2336 * %false - we are done with this request 2337 * %true - still buffers pending for this request 2338 */ 2339 bool blk_end_request_cur(struct request *rq, int error) 2340 { 2341 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2342 } 2343 EXPORT_SYMBOL(blk_end_request_cur); 2344 2345 /** 2346 * blk_end_request_err - Finish a request till the next failure boundary. 2347 * @rq: the request to finish till the next failure boundary for 2348 * @error: must be negative errno 2349 * 2350 * Description: 2351 * Complete @rq till the next failure boundary. 2352 * 2353 * Return: 2354 * %false - we are done with this request 2355 * %true - still buffers pending for this request 2356 */ 2357 bool blk_end_request_err(struct request *rq, int error) 2358 { 2359 WARN_ON(error >= 0); 2360 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2361 } 2362 EXPORT_SYMBOL_GPL(blk_end_request_err); 2363 2364 /** 2365 * __blk_end_request - Helper function for drivers to complete the request. 2366 * @rq: the request being processed 2367 * @error: %0 for success, < %0 for error 2368 * @nr_bytes: number of bytes to complete 2369 * 2370 * Description: 2371 * Must be called with the queue lock held, unlike blk_end_request(). 2372 * 2373 * Return: 2374 * %false - we are done with this request 2375 * %true - still buffers pending for this request 2376 **/ 2377 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2378 { 2379 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 2380 } 2381 EXPORT_SYMBOL(__blk_end_request); 2382 2383 /** 2384 * __blk_end_request_all - Helper function for drivers to finish the request. 2385 * @rq: the request to finish 2386 * @error: %0 for success, < %0 for error 2387 * 2388 * Description: 2389 * Completely finish @rq. Must be called with queue lock held. 2390 */ 2391 void __blk_end_request_all(struct request *rq, int error) 2392 { 2393 bool pending; 2394 unsigned int bidi_bytes = 0; 2395 2396 if (unlikely(blk_bidi_rq(rq))) 2397 bidi_bytes = blk_rq_bytes(rq->next_rq); 2398 2399 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2400 BUG_ON(pending); 2401 } 2402 EXPORT_SYMBOL(__blk_end_request_all); 2403 2404 /** 2405 * __blk_end_request_cur - Helper function to finish the current request chunk. 2406 * @rq: the request to finish the current chunk for 2407 * @error: %0 for success, < %0 for error 2408 * 2409 * Description: 2410 * Complete the current consecutively mapped chunk from @rq. Must 2411 * be called with queue lock held. 2412 * 2413 * Return: 2414 * %false - we are done with this request 2415 * %true - still buffers pending for this request 2416 */ 2417 bool __blk_end_request_cur(struct request *rq, int error) 2418 { 2419 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2420 } 2421 EXPORT_SYMBOL(__blk_end_request_cur); 2422 2423 /** 2424 * __blk_end_request_err - Finish a request till the next failure boundary. 2425 * @rq: the request to finish till the next failure boundary for 2426 * @error: must be negative errno 2427 * 2428 * Description: 2429 * Complete @rq till the next failure boundary. Must be called 2430 * with queue lock held.
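 *
 * Illustrative sketch (editor's addition, loosely modelled on how a
 * midlayer error path can pair this with a requeue; not copied from an
 * in-tree driver). With the queue lock held, the failfast portion of a
 * mixed merge is failed and whatever is left is put back for a retry; a
 * real driver may also need to unprepare the request first:
 *
 *	if (__blk_end_request_err(rq, -EIO))
 *		blk_requeue_request(rq->q, rq);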
2431 * 2432 * Return: 2433 * %false - we are done with this request 2434 * %true - still buffers pending for this request 2435 */ 2436 bool __blk_end_request_err(struct request *rq, int error) 2437 { 2438 WARN_ON(error >= 0); 2439 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2440 } 2441 EXPORT_SYMBOL_GPL(__blk_end_request_err); 2442 2443 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2444 struct bio *bio) 2445 { 2446 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2447 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; 2448 2449 if (bio_has_data(bio)) { 2450 rq->nr_phys_segments = bio_phys_segments(q, bio); 2451 rq->buffer = bio_data(bio); 2452 } 2453 rq->__data_len = bio->bi_size; 2454 rq->bio = rq->biotail = bio; 2455 2456 if (bio->bi_bdev) 2457 rq->rq_disk = bio->bi_bdev->bd_disk; 2458 } 2459 2460 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 2461 /** 2462 * rq_flush_dcache_pages - Helper function to flush all pages in a request 2463 * @rq: the request to be flushed 2464 * 2465 * Description: 2466 * Flush all pages in @rq. 2467 */ 2468 void rq_flush_dcache_pages(struct request *rq) 2469 { 2470 struct req_iterator iter; 2471 struct bio_vec *bvec; 2472 2473 rq_for_each_segment(bvec, rq, iter) 2474 flush_dcache_page(bvec->bv_page); 2475 } 2476 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2477 #endif 2478 2479 /** 2480 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 2481 * @q: the queue of the device being checked 2482 * 2483 * Description: 2484 * Check if underlying low-level drivers of a device are busy. 2485 * If the drivers want to export their busy state, they must set their own 2486 * exporting function using blk_queue_lld_busy() first. 2487 * 2488 * Basically, this function is used only by request stacking drivers 2489 * to stop dispatching requests to underlying devices when underlying 2490 * devices are busy. This behavior helps with I/O merging on the queue 2491 * of the request stacking driver and prevents I/O throughput regression 2492 * under bursty I/O load. 2493 * 2494 * Return: 2495 * 0 - Not busy (The request stacking driver should dispatch request) 2496 * 1 - Busy (The request stacking driver should stop dispatching request) 2497 */ 2498 int blk_lld_busy(struct request_queue *q) 2499 { 2500 if (q->lld_busy_fn) 2501 return q->lld_busy_fn(q); 2502 2503 return 0; 2504 } 2505 EXPORT_SYMBOL_GPL(blk_lld_busy); 2506 2507 /** 2508 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2509 * @rq: the clone request to be cleaned up 2510 * 2511 * Description: 2512 * Free all bios in @rq for a cloned request. 2513 */ 2514 void blk_rq_unprep_clone(struct request *rq) 2515 { 2516 struct bio *bio; 2517 2518 while ((bio = rq->bio) != NULL) { 2519 rq->bio = bio->bi_next; 2520 2521 bio_put(bio); 2522 } 2523 } 2524 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2525 2526 /* 2527 * Copy attributes of the original request to the clone request. 2528 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
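 *
 * Illustrative sketch (editor's addition; lower_q, clone and the mydev_*
 * name are hypothetical): a request stacking driver typically wraps this
 * attribute copy via the public helpers, roughly as follows (clone
 * allocation and error handling elided):
 *
 *	if (blk_rq_prep_clone(clone, rq, NULL, GFP_ATOMIC, NULL, NULL))
 *		goto fail;
 *	clone->end_io = mydev_clone_end_io;
 *	if (blk_insert_cloned_request(lower_q, clone))
 *		goto fail;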
2529 */ 2530 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2531 { 2532 dst->cpu = src->cpu; 2533 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; 2534 dst->cmd_type = src->cmd_type; 2535 dst->__sector = blk_rq_pos(src); 2536 dst->__data_len = blk_rq_bytes(src); 2537 dst->nr_phys_segments = src->nr_phys_segments; 2538 dst->ioprio = src->ioprio; 2539 dst->extra_len = src->extra_len; 2540 } 2541 2542 /** 2543 * blk_rq_prep_clone - Helper function to set up a clone request 2544 * @rq: the request to be set up 2545 * @rq_src: original request to be cloned 2546 * @bs: bio_set that bios for clone are allocated from 2547 * @gfp_mask: memory allocation mask for bio 2548 * @bio_ctr: setup function to be called for each clone bio. 2549 * Returns %0 for success, non %0 for failure. 2550 * @data: private data to be passed to @bio_ctr 2551 * 2552 * Description: 2553 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2554 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) 2555 * are not copied, and copying such parts is the caller's responsibility. 2556 * Also, pages which the original bios are pointing to are not copied 2557 * and the cloned bios just point to the same pages. 2558 * So cloned bios must be completed before original bios, which means 2559 * the caller must complete @rq before @rq_src. 2560 */ 2561 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2562 struct bio_set *bs, gfp_t gfp_mask, 2563 int (*bio_ctr)(struct bio *, struct bio *, void *), 2564 void *data) 2565 { 2566 struct bio *bio, *bio_src; 2567 2568 if (!bs) 2569 bs = fs_bio_set; 2570 2571 blk_rq_init(NULL, rq); 2572 2573 __rq_for_each_bio(bio_src, rq_src) { 2574 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs); 2575 if (!bio) 2576 goto free_and_out; 2577 2578 __bio_clone(bio, bio_src); 2579 2580 if (bio_integrity(bio_src) && 2581 bio_integrity_clone(bio, bio_src, gfp_mask, bs)) 2582 goto free_and_out; 2583 2584 if (bio_ctr && bio_ctr(bio, bio_src, data)) 2585 goto free_and_out; 2586 2587 if (rq->bio) { 2588 rq->biotail->bi_next = bio; 2589 rq->biotail = bio; 2590 } else 2591 rq->bio = rq->biotail = bio; 2592 } 2593 2594 __blk_rq_prep_clone(rq, rq_src); 2595 2596 return 0; 2597 2598 free_and_out: 2599 if (bio) 2600 bio_free(bio, bs); 2601 blk_rq_unprep_clone(rq); 2602 2603 return -ENOMEM; 2604 } 2605 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 2606 2607 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) 2608 { 2609 return queue_work(kblockd_workqueue, work); 2610 } 2611 EXPORT_SYMBOL(kblockd_schedule_work); 2612 2613 int kblockd_schedule_delayed_work(struct request_queue *q, 2614 struct delayed_work *dwork, unsigned long delay) 2615 { 2616 return queue_delayed_work(kblockd_workqueue, dwork, delay); 2617 } 2618 EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2619 2620 #define PLUG_MAGIC 0x91827364 2621 2622 void blk_start_plug(struct blk_plug *plug) 2623 { 2624 struct task_struct *tsk = current; 2625 2626 plug->magic = PLUG_MAGIC; 2627 INIT_LIST_HEAD(&plug->list); 2628 INIT_LIST_HEAD(&plug->cb_list); 2629 plug->should_sort = 0; 2630 2631 /* 2632 * If this is a nested plug, don't actually assign it. It will be 2633 * flushed on its own.
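 *
 * Typical usage of the plugging API, shown as an editor's illustrative
 * sketch (bio setup elided):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(READ, bio1);
 *	submit_bio(READ, bio2);
 *	blk_finish_plug(&plug);
 *
 * Requests built up between the two calls sit on the per-task plug list
 * and are dispatched in one batch when the plug is finished or the task
 * schedules out.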
2634 */ 2635 if (!tsk->plug) { 2636 /* 2637 * Store ordering should not be needed here, since a potential 2638 * preempt will imply a full memory barrier 2639 */ 2640 tsk->plug = plug; 2641 } 2642 } 2643 EXPORT_SYMBOL(blk_start_plug); 2644 2645 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 2646 { 2647 struct request *rqa = container_of(a, struct request, queuelist); 2648 struct request *rqb = container_of(b, struct request, queuelist); 2649 2650 return !(rqa->q <= rqb->q); 2651 } 2652 2653 /* 2654 * If 'from_schedule' is true, then postpone the dispatch of requests 2655 * until a safe kblockd context. We do this to avoid unexpectedly large 2656 * additional stack usage in driver dispatch, in places where the original 2657 * plugger did not intend it. 2658 */ 2659 static void queue_unplugged(struct request_queue *q, unsigned int depth, 2660 bool from_schedule) 2661 __releases(q->queue_lock) 2662 { 2663 trace_block_unplug(q, depth, !from_schedule); 2664 2665 /* 2666 * If we are punting this to kblockd, then we can safely drop 2667 * the queue_lock before waking kblockd (which needs to take 2668 * this lock). 2669 */ 2670 if (from_schedule) { 2671 spin_unlock(q->queue_lock); 2672 blk_run_queue_async(q); 2673 } else { 2674 __blk_run_queue(q); 2675 spin_unlock(q->queue_lock); 2676 } 2677 2678 } 2679 2680 static void flush_plug_callbacks(struct blk_plug *plug) 2681 { 2682 LIST_HEAD(callbacks); 2683 2684 if (list_empty(&plug->cb_list)) 2685 return; 2686 2687 list_splice_init(&plug->cb_list, &callbacks); 2688 2689 while (!list_empty(&callbacks)) { 2690 struct blk_plug_cb *cb = list_first_entry(&callbacks, 2691 struct blk_plug_cb, 2692 list); 2693 list_del(&cb->list); 2694 cb->callback(cb); 2695 } 2696 } 2697 2698 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2699 { 2700 struct request_queue *q; 2701 unsigned long flags; 2702 struct request *rq; 2703 LIST_HEAD(list); 2704 unsigned int depth; 2705 2706 BUG_ON(plug->magic != PLUG_MAGIC); 2707 2708 flush_plug_callbacks(plug); 2709 if (list_empty(&plug->list)) 2710 return; 2711 2712 list_splice_init(&plug->list, &list); 2713 2714 if (plug->should_sort) { 2715 list_sort(NULL, &list, plug_rq_cmp); 2716 plug->should_sort = 0; 2717 } 2718 2719 q = NULL; 2720 depth = 0; 2721 2722 /* 2723 * Save and disable interrupts here, to avoid doing it for every 2724 * queue lock we have to take.
2725 */ 2726 local_irq_save(flags); 2727 while (!list_empty(&list)) { 2728 rq = list_entry_rq(list.next); 2729 list_del_init(&rq->queuelist); 2730 BUG_ON(!rq->q); 2731 if (rq->q != q) { 2732 /* 2733 * This drops the queue lock 2734 */ 2735 if (q) 2736 queue_unplugged(q, depth, from_schedule); 2737 q = rq->q; 2738 depth = 0; 2739 spin_lock(q->queue_lock); 2740 } 2741 /* 2742 * rq is already accounted, so use raw insert 2743 */ 2744 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) 2745 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 2746 else 2747 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 2748 2749 depth++; 2750 } 2751 2752 /* 2753 * This drops the queue lock 2754 */ 2755 if (q) 2756 queue_unplugged(q, depth, from_schedule); 2757 2758 local_irq_restore(flags); 2759 } 2760 2761 void blk_finish_plug(struct blk_plug *plug) 2762 { 2763 blk_flush_plug_list(plug, false); 2764 2765 if (plug == current->plug) 2766 current->plug = NULL; 2767 } 2768 EXPORT_SYMBOL(blk_finish_plug); 2769 2770 int __init blk_dev_init(void) 2771 { 2772 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 2773 sizeof(((struct request *)0)->cmd_flags)); 2774 2775 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 2776 kblockd_workqueue = alloc_workqueue("kblockd", 2777 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 2778 if (!kblockd_workqueue) 2779 panic("Failed to create kblockd\n"); 2780 2781 request_cachep = kmem_cache_create("blkdev_requests", 2782 sizeof(struct request), 0, SLAB_PANIC, NULL); 2783 2784 blk_requestq_cachep = kmem_cache_create("blkdev_queue", 2785 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 2786 2787 return 0; 2788 } 2789
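
/*
 * Editor's note: an illustrative sketch (not from an in-tree driver) of how
 * a request stacking driver might consult blk_lld_busy() to hold off
 * dispatching while the lower device reports busy; lower_q, clone and
 * my_dispatch are hypothetical:
 *
 *	if (blk_lld_busy(lower_q))
 *		return;
 *	my_dispatch(lower_q, clone);
 */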