1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> 7 * - July2000 8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 9 */ 10 11 /* 12 * This handles all read/write requests to block devices 13 */ 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/backing-dev.h> 17 #include <linux/bio.h> 18 #include <linux/blkdev.h> 19 #include <linux/highmem.h> 20 #include <linux/mm.h> 21 #include <linux/kernel_stat.h> 22 #include <linux/string.h> 23 #include <linux/init.h> 24 #include <linux/completion.h> 25 #include <linux/slab.h> 26 #include <linux/swap.h> 27 #include <linux/writeback.h> 28 #include <linux/task_io_accounting_ops.h> 29 #include <linux/fault-inject.h> 30 #include <linux/list_sort.h> 31 32 #define CREATE_TRACE_POINTS 33 #include <trace/events/block.h> 34 35 #include "blk.h" 36 37 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); 38 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); 39 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 40 41 static int __make_request(struct request_queue *q, struct bio *bio); 42 43 /* 44 * For the allocated request tables 45 */ 46 static struct kmem_cache *request_cachep; 47 48 /* 49 * For queue allocation 50 */ 51 struct kmem_cache *blk_requestq_cachep; 52 53 /* 54 * Controlling structure to kblockd 55 */ 56 static struct workqueue_struct *kblockd_workqueue; 57 58 static void drive_stat_acct(struct request *rq, int new_io) 59 { 60 struct hd_struct *part; 61 int rw = rq_data_dir(rq); 62 int cpu; 63 64 if (!blk_do_io_stat(rq)) 65 return; 66 67 cpu = part_stat_lock(); 68 69 if (!new_io) { 70 part = rq->part; 71 part_stat_inc(cpu, part, merges[rw]); 72 } else { 73 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 74 if (!hd_struct_try_get(part)) { 75 /* 76 * The partition is already being removed, 77 * the request will be accounted on the disk only 78 * 79 * We take a reference on disk->part0 although that 80 * partition will never be deleted, so we can treat 81 * it as any other partition. 82 */ 83 part = &rq->rq_disk->part0; 84 hd_struct_get(part); 85 } 86 part_round_stats(cpu, part); 87 part_inc_in_flight(part, rw); 88 rq->part = part; 89 } 90 91 part_stat_unlock(); 92 } 93 94 void blk_queue_congestion_threshold(struct request_queue *q) 95 { 96 int nr; 97 98 nr = q->nr_requests - (q->nr_requests / 8) + 1; 99 if (nr > q->nr_requests) 100 nr = q->nr_requests; 101 q->nr_congestion_on = nr; 102 103 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; 104 if (nr < 1) 105 nr = 1; 106 q->nr_congestion_off = nr; 107 } 108 109 /** 110 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info 111 * @bdev: device 112 * 113 * Locates the passed device's request queue and returns the address of its 114 * backing_dev_info 115 * 116 * Will return NULL if the request queue cannot be located. 
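 *
 * Illustrative sketch only (the @bdev below is a caller-supplied device, not
 * something defined in this file); the point is that the result must be
 * checked before it is dereferenced:
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *	if (bdi)
 *		pr_info("ra_pages = %lu\n", bdi->ra_pages);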
117 */ 118 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) 119 { 120 struct backing_dev_info *ret = NULL; 121 struct request_queue *q = bdev_get_queue(bdev); 122 123 if (q) 124 ret = &q->backing_dev_info; 125 return ret; 126 } 127 EXPORT_SYMBOL(blk_get_backing_dev_info); 128 129 void blk_rq_init(struct request_queue *q, struct request *rq) 130 { 131 memset(rq, 0, sizeof(*rq)); 132 133 INIT_LIST_HEAD(&rq->queuelist); 134 INIT_LIST_HEAD(&rq->timeout_list); 135 rq->cpu = -1; 136 rq->q = q; 137 rq->__sector = (sector_t) -1; 138 INIT_HLIST_NODE(&rq->hash); 139 RB_CLEAR_NODE(&rq->rb_node); 140 rq->cmd = rq->__cmd; 141 rq->cmd_len = BLK_MAX_CDB; 142 rq->tag = -1; 143 rq->ref_count = 1; 144 rq->start_time = jiffies; 145 set_start_time_ns(rq); 146 rq->part = NULL; 147 } 148 EXPORT_SYMBOL(blk_rq_init); 149 150 static void req_bio_endio(struct request *rq, struct bio *bio, 151 unsigned int nbytes, int error) 152 { 153 if (error) 154 clear_bit(BIO_UPTODATE, &bio->bi_flags); 155 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 156 error = -EIO; 157 158 if (unlikely(nbytes > bio->bi_size)) { 159 printk(KERN_ERR "%s: want %u bytes done, %u left\n", 160 __func__, nbytes, bio->bi_size); 161 nbytes = bio->bi_size; 162 } 163 164 if (unlikely(rq->cmd_flags & REQ_QUIET)) 165 set_bit(BIO_QUIET, &bio->bi_flags); 166 167 bio->bi_size -= nbytes; 168 bio->bi_sector += (nbytes >> 9); 169 170 if (bio_integrity(bio)) 171 bio_integrity_advance(bio, nbytes); 172 173 /* don't actually finish bio if it's part of flush sequence */ 174 if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) 175 bio_endio(bio, error); 176 } 177 178 void blk_dump_rq_flags(struct request *rq, char *msg) 179 { 180 int bit; 181 182 printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, 183 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 184 rq->cmd_flags); 185 186 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 187 (unsigned long long)blk_rq_pos(rq), 188 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 189 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 190 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 191 192 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 193 printk(KERN_INFO " cdb: "); 194 for (bit = 0; bit < BLK_MAX_CDB; bit++) 195 printk("%02x ", rq->cmd[bit]); 196 printk("\n"); 197 } 198 } 199 EXPORT_SYMBOL(blk_dump_rq_flags); 200 201 static void blk_delay_work(struct work_struct *work) 202 { 203 struct request_queue *q; 204 205 q = container_of(work, struct request_queue, delay_work.work); 206 spin_lock_irq(q->queue_lock); 207 __blk_run_queue(q); 208 spin_unlock_irq(q->queue_lock); 209 } 210 211 /** 212 * blk_delay_queue - restart queueing after defined interval 213 * @q: The &struct request_queue in question 214 * @msecs: Delay in msecs 215 * 216 * Description: 217 * Sometimes queueing needs to be postponed for a little while, to allow 218 * resources to come back. This function will make sure that queueing is 219 * restarted around the specified time. 220 */ 221 void blk_delay_queue(struct request_queue *q, unsigned long msecs) 222 { 223 queue_delayed_work(kblockd_workqueue, &q->delay_work, 224 msecs_to_jiffies(msecs)); 225 } 226 EXPORT_SYMBOL(blk_delay_queue); 227 228 /** 229 * blk_start_queue - restart a previously stopped queue 230 * @q: The &struct request_queue in question 231 * 232 * Description: 233 * blk_start_queue() will clear the stop flag on the queue, and call 234 * the request_fn for the queue if it was in a stopped state when 235 * entered. Also see blk_stop_queue(). 
 * Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *    The Linux block layer assumes that a block driver will consume all
 *    entries on the request queue when the request_fn strategy is called.
 *    Often this will not happen, because of hardware limitations (queue
 *    depth settings). If a device driver gets a 'queue full' response,
 *    or if it simply chooses not to queue more I/O at one point, it can
 *    call this function to prevent the request_fn from being called until
 *    the driver has signalled it is ready to go again. This happens by calling
 *    blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	__cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blk_throtl_exit() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q))) {
		__cancel_delayed_work(&q->delay_work);
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
	}
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
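 *
 *    A minimal sketch of that restart pattern, assuming a hypothetical
 *    driver completion path (mydev_complete() is illustrative and not part
 *    of this file); both calls take the queue lock internally, so it must
 *    not already be held here:
 *
 *	static void mydev_complete(struct request_queue *q, struct request *rq)
 *	{
 *		blk_end_request_all(rq, 0);	/* finish the request */
 *		blk_run_queue(q);		/* dispatch any queued work */
 *	}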
333 */ 334 void blk_run_queue(struct request_queue *q) 335 { 336 unsigned long flags; 337 338 spin_lock_irqsave(q->queue_lock, flags); 339 __blk_run_queue(q); 340 spin_unlock_irqrestore(q->queue_lock, flags); 341 } 342 EXPORT_SYMBOL(blk_run_queue); 343 344 void blk_put_queue(struct request_queue *q) 345 { 346 kobject_put(&q->kobj); 347 } 348 EXPORT_SYMBOL(blk_put_queue); 349 350 /* 351 * Note: If a driver supplied the queue lock, it should not zap that lock 352 * unexpectedly as some queue cleanup components like elevator_exit() and 353 * blk_throtl_exit() need queue lock. 354 */ 355 void blk_cleanup_queue(struct request_queue *q) 356 { 357 /* 358 * We know we have process context here, so we can be a little 359 * cautious and ensure that pending block actions on this device 360 * are done before moving on. Going into this function, we should 361 * not have processes doing IO to this device. 362 */ 363 blk_sync_queue(q); 364 365 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); 366 mutex_lock(&q->sysfs_lock); 367 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); 368 mutex_unlock(&q->sysfs_lock); 369 370 if (q->elevator) 371 elevator_exit(q->elevator); 372 373 blk_throtl_exit(q); 374 375 blk_put_queue(q); 376 } 377 EXPORT_SYMBOL(blk_cleanup_queue); 378 379 static int blk_init_free_list(struct request_queue *q) 380 { 381 struct request_list *rl = &q->rq; 382 383 if (unlikely(rl->rq_pool)) 384 return 0; 385 386 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; 387 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; 388 rl->elvpriv = 0; 389 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); 390 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); 391 392 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, 393 mempool_free_slab, request_cachep, q->node); 394 395 if (!rl->rq_pool) 396 return -ENOMEM; 397 398 return 0; 399 } 400 401 struct request_queue *blk_alloc_queue(gfp_t gfp_mask) 402 { 403 return blk_alloc_queue_node(gfp_mask, -1); 404 } 405 EXPORT_SYMBOL(blk_alloc_queue); 406 407 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 408 { 409 struct request_queue *q; 410 int err; 411 412 q = kmem_cache_alloc_node(blk_requestq_cachep, 413 gfp_mask | __GFP_ZERO, node_id); 414 if (!q) 415 return NULL; 416 417 q->backing_dev_info.ra_pages = 418 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 419 q->backing_dev_info.state = 0; 420 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 421 q->backing_dev_info.name = "block"; 422 423 err = bdi_init(&q->backing_dev_info); 424 if (err) { 425 kmem_cache_free(blk_requestq_cachep, q); 426 return NULL; 427 } 428 429 if (blk_throtl_init(q)) { 430 kmem_cache_free(blk_requestq_cachep, q); 431 return NULL; 432 } 433 434 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, 435 laptop_mode_timer_fn, (unsigned long) q); 436 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); 437 INIT_LIST_HEAD(&q->timeout_list); 438 INIT_LIST_HEAD(&q->flush_queue[0]); 439 INIT_LIST_HEAD(&q->flush_queue[1]); 440 INIT_LIST_HEAD(&q->flush_data_in_flight); 441 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); 442 443 kobject_init(&q->kobj, &blk_queue_ktype); 444 445 mutex_init(&q->sysfs_lock); 446 spin_lock_init(&q->__queue_lock); 447 448 /* 449 * By default initialize queue_lock to internal lock and driver can 450 * override it later if need be. 
451 */ 452 q->queue_lock = &q->__queue_lock; 453 454 return q; 455 } 456 EXPORT_SYMBOL(blk_alloc_queue_node); 457 458 /** 459 * blk_init_queue - prepare a request queue for use with a block device 460 * @rfn: The function to be called to process requests that have been 461 * placed on the queue. 462 * @lock: Request queue spin lock 463 * 464 * Description: 465 * If a block device wishes to use the standard request handling procedures, 466 * which sorts requests and coalesces adjacent requests, then it must 467 * call blk_init_queue(). The function @rfn will be called when there 468 * are requests on the queue that need to be processed. If the device 469 * supports plugging, then @rfn may not be called immediately when requests 470 * are available on the queue, but may be called at some time later instead. 471 * Plugged queues are generally unplugged when a buffer belonging to one 472 * of the requests on the queue is needed, or due to memory pressure. 473 * 474 * @rfn is not required, or even expected, to remove all requests off the 475 * queue, but only as many as it can handle at a time. If it does leave 476 * requests on the queue, it is responsible for arranging that the requests 477 * get dealt with eventually. 478 * 479 * The queue spin lock must be held while manipulating the requests on the 480 * request queue; this lock will be taken also from interrupt context, so irq 481 * disabling is needed for it. 482 * 483 * Function returns a pointer to the initialized request queue, or %NULL if 484 * it didn't succeed. 485 * 486 * Note: 487 * blk_init_queue() must be paired with a blk_cleanup_queue() call 488 * when the block device is deactivated (such as at module unload). 489 **/ 490 491 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 492 { 493 return blk_init_queue_node(rfn, lock, -1); 494 } 495 EXPORT_SYMBOL(blk_init_queue); 496 497 struct request_queue * 498 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 499 { 500 struct request_queue *uninit_q, *q; 501 502 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); 503 if (!uninit_q) 504 return NULL; 505 506 q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id); 507 if (!q) 508 blk_cleanup_queue(uninit_q); 509 510 return q; 511 } 512 EXPORT_SYMBOL(blk_init_queue_node); 513 514 struct request_queue * 515 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, 516 spinlock_t *lock) 517 { 518 return blk_init_allocated_queue_node(q, rfn, lock, -1); 519 } 520 EXPORT_SYMBOL(blk_init_allocated_queue); 521 522 struct request_queue * 523 blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, 524 spinlock_t *lock, int node_id) 525 { 526 if (!q) 527 return NULL; 528 529 q->node = node_id; 530 if (blk_init_free_list(q)) 531 return NULL; 532 533 q->request_fn = rfn; 534 q->prep_rq_fn = NULL; 535 q->unprep_rq_fn = NULL; 536 q->queue_flags = QUEUE_FLAG_DEFAULT; 537 538 /* Override internal queue lock with supplied lock pointer */ 539 if (lock) 540 q->queue_lock = lock; 541 542 /* 543 * This also sets hw/phys segments, boundary and size 544 */ 545 blk_queue_make_request(q, __make_request); 546 547 q->sg_reserved_size = INT_MAX; 548 549 /* 550 * all done 551 */ 552 if (!elevator_init(q, NULL)) { 553 blk_queue_congestion_threshold(q); 554 return q; 555 } 556 557 return NULL; 558 } 559 EXPORT_SYMBOL(blk_init_allocated_queue_node); 560 561 int blk_get_queue(struct request_queue *q) 562 { 563 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { 564 
kobject_get(&q->kobj); 565 return 0; 566 } 567 568 return 1; 569 } 570 EXPORT_SYMBOL(blk_get_queue); 571 572 static inline void blk_free_request(struct request_queue *q, struct request *rq) 573 { 574 if (rq->cmd_flags & REQ_ELVPRIV) 575 elv_put_request(q, rq); 576 mempool_free(rq, q->rq.rq_pool); 577 } 578 579 static struct request * 580 blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) 581 { 582 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 583 584 if (!rq) 585 return NULL; 586 587 blk_rq_init(q, rq); 588 589 rq->cmd_flags = flags | REQ_ALLOCED; 590 591 if (priv) { 592 if (unlikely(elv_set_request(q, rq, gfp_mask))) { 593 mempool_free(rq, q->rq.rq_pool); 594 return NULL; 595 } 596 rq->cmd_flags |= REQ_ELVPRIV; 597 } 598 599 return rq; 600 } 601 602 /* 603 * ioc_batching returns true if the ioc is a valid batching request and 604 * should be given priority access to a request. 605 */ 606 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) 607 { 608 if (!ioc) 609 return 0; 610 611 /* 612 * Make sure the process is able to allocate at least 1 request 613 * even if the batch times out, otherwise we could theoretically 614 * lose wakeups. 615 */ 616 return ioc->nr_batch_requests == q->nr_batching || 617 (ioc->nr_batch_requests > 0 618 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); 619 } 620 621 /* 622 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This 623 * will cause the process to be a "batcher" on all queues in the system. This 624 * is the behaviour we want though - once it gets a wakeup it should be given 625 * a nice run. 626 */ 627 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) 628 { 629 if (!ioc || ioc_batching(q, ioc)) 630 return; 631 632 ioc->nr_batch_requests = q->nr_batching; 633 ioc->last_waited = jiffies; 634 } 635 636 static void __freed_request(struct request_queue *q, int sync) 637 { 638 struct request_list *rl = &q->rq; 639 640 if (rl->count[sync] < queue_congestion_off_threshold(q)) 641 blk_clear_queue_congested(q, sync); 642 643 if (rl->count[sync] + 1 <= q->nr_requests) { 644 if (waitqueue_active(&rl->wait[sync])) 645 wake_up(&rl->wait[sync]); 646 647 blk_clear_queue_full(q, sync); 648 } 649 } 650 651 /* 652 * A request has just been released. Account for it, update the full and 653 * congestion status, wake up any waiters. Called under q->queue_lock. 654 */ 655 static void freed_request(struct request_queue *q, int sync, int priv) 656 { 657 struct request_list *rl = &q->rq; 658 659 rl->count[sync]--; 660 if (priv) 661 rl->elvpriv--; 662 663 __freed_request(q, sync); 664 665 if (unlikely(rl->starved[sync ^ 1])) 666 __freed_request(q, sync ^ 1); 667 } 668 669 /* 670 * Determine if elevator data should be initialized when allocating the 671 * request associated with @bio. 672 */ 673 static bool blk_rq_should_init_elevator(struct bio *bio) 674 { 675 if (!bio) 676 return true; 677 678 /* 679 * Flush requests do not use the elevator so skip initialization. 680 * This allows a request to share the flush and elevator data. 681 */ 682 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) 683 return false; 684 685 return true; 686 } 687 688 /* 689 * Get a free request, queue_lock must be held. 690 * Returns NULL on failure, with queue_lock held. 691 * Returns !NULL on success, with queue_lock *not held*. 
692 */ 693 static struct request *get_request(struct request_queue *q, int rw_flags, 694 struct bio *bio, gfp_t gfp_mask) 695 { 696 struct request *rq = NULL; 697 struct request_list *rl = &q->rq; 698 struct io_context *ioc = NULL; 699 const bool is_sync = rw_is_sync(rw_flags) != 0; 700 int may_queue, priv = 0; 701 702 may_queue = elv_may_queue(q, rw_flags); 703 if (may_queue == ELV_MQUEUE_NO) 704 goto rq_starved; 705 706 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { 707 if (rl->count[is_sync]+1 >= q->nr_requests) { 708 ioc = current_io_context(GFP_ATOMIC, q->node); 709 /* 710 * The queue will fill after this allocation, so set 711 * it as full, and mark this process as "batching". 712 * This process will be allowed to complete a batch of 713 * requests, others will be blocked. 714 */ 715 if (!blk_queue_full(q, is_sync)) { 716 ioc_set_batching(q, ioc); 717 blk_set_queue_full(q, is_sync); 718 } else { 719 if (may_queue != ELV_MQUEUE_MUST 720 && !ioc_batching(q, ioc)) { 721 /* 722 * The queue is full and the allocating 723 * process is not a "batcher", and not 724 * exempted by the IO scheduler 725 */ 726 goto out; 727 } 728 } 729 } 730 blk_set_queue_congested(q, is_sync); 731 } 732 733 /* 734 * Only allow batching queuers to allocate up to 50% over the defined 735 * limit of requests, otherwise we could have thousands of requests 736 * allocated with any setting of ->nr_requests 737 */ 738 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 739 goto out; 740 741 rl->count[is_sync]++; 742 rl->starved[is_sync] = 0; 743 744 if (blk_rq_should_init_elevator(bio)) { 745 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 746 if (priv) 747 rl->elvpriv++; 748 } 749 750 if (blk_queue_io_stat(q)) 751 rw_flags |= REQ_IO_STAT; 752 spin_unlock_irq(q->queue_lock); 753 754 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); 755 if (unlikely(!rq)) { 756 /* 757 * Allocation failed presumably due to memory. Undo anything 758 * we might have messed up. 759 * 760 * Allocating task should really be put onto the front of the 761 * wait queue, but this is pretty rare. 762 */ 763 spin_lock_irq(q->queue_lock); 764 freed_request(q, is_sync, priv); 765 766 /* 767 * in the very unlikely event that allocation failed and no 768 * requests for this direction was pending, mark us starved 769 * so that freeing of a request in the other direction will 770 * notice us. another possible fix would be to split the 771 * rq mempool into READ and WRITE 772 */ 773 rq_starved: 774 if (unlikely(rl->count[is_sync] == 0)) 775 rl->starved[is_sync] = 1; 776 777 goto out; 778 } 779 780 /* 781 * ioc may be NULL here, and ioc_batching will be false. That's 782 * OK, if the queue is under the request limit then requests need 783 * not count toward the nr_batch_requests limit. There will always 784 * be some limit enforced by BLK_BATCH_TIME. 785 */ 786 if (ioc_batching(q, ioc)) 787 ioc->nr_batch_requests--; 788 789 trace_block_getrq(q, bio, rw_flags & 1); 790 out: 791 return rq; 792 } 793 794 /* 795 * No available requests for this queue, wait for some requests to become 796 * available. 797 * 798 * Called with q->queue_lock held, and returns with it unlocked. 
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				TASK_UNINTERRUPTIBLE);

		trace_block_sleeprq(q, bio, rw_flags & 1);

		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period time.
		 * See ioc_batching, ioc_set_batching
		 */
		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[is_sync], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
		return NULL;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT) {
		rq = get_request_wait(q, rw, NULL);
	} else {
		rq = get_request(q, rw, NULL, gfp_mask);
		if (!rq)
			spin_unlock_irq(q->queue_lock);
	}
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);

/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by the block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers. That bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
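 *
 * A minimal illustrative sketch of a caller (cmd and cdb_len are hypothetical
 * caller state, not defined here), with error handling trimmed:
 *
 *	rq = blk_make_request(q, bio, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd_len = cdb_len;
 *	memcpy(rq->cmd, cmd, cdb_len);
 *	blk_execute_rq(q, NULL, rq, 0);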
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where);
}

/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution. This is
 *    accomplished normally by inserting artificial requests tagged as
 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
 *    be scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting at the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth. We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(struct request_queue *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ?
ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 971 unsigned long flags; 972 973 /* 974 * tell I/O scheduler that this isn't a regular read/write (ie it 975 * must not attempt merges on this) and that it acts as a soft 976 * barrier 977 */ 978 rq->cmd_type = REQ_TYPE_SPECIAL; 979 980 rq->special = data; 981 982 spin_lock_irqsave(q->queue_lock, flags); 983 984 /* 985 * If command is tagged, release the tag 986 */ 987 if (blk_rq_tagged(rq)) 988 blk_queue_end_tag(q, rq); 989 990 add_acct_request(q, rq, where); 991 __blk_run_queue(q); 992 spin_unlock_irqrestore(q->queue_lock, flags); 993 } 994 EXPORT_SYMBOL(blk_insert_request); 995 996 static void part_round_stats_single(int cpu, struct hd_struct *part, 997 unsigned long now) 998 { 999 if (now == part->stamp) 1000 return; 1001 1002 if (part_in_flight(part)) { 1003 __part_stat_add(cpu, part, time_in_queue, 1004 part_in_flight(part) * (now - part->stamp)); 1005 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1006 } 1007 part->stamp = now; 1008 } 1009 1010 /** 1011 * part_round_stats() - Round off the performance stats on a struct disk_stats. 1012 * @cpu: cpu number for stats access 1013 * @part: target partition 1014 * 1015 * The average IO queue length and utilisation statistics are maintained 1016 * by observing the current state of the queue length and the amount of 1017 * time it has been in this state for. 1018 * 1019 * Normally, that accounting is done on IO completion, but that can result 1020 * in more than a second's worth of IO being accounted for within any one 1021 * second, leading to >100% utilisation. To deal with that, we call this 1022 * function to do a round-off before returning the results when reading 1023 * /proc/diskstats. This accounts immediately for all queue usage up to 1024 * the current jiffies and restarts the counters again. 1025 */ 1026 void part_round_stats(int cpu, struct hd_struct *part) 1027 { 1028 unsigned long now = jiffies; 1029 1030 if (part->partno) 1031 part_round_stats_single(cpu, &part_to_disk(part)->part0, now); 1032 part_round_stats_single(cpu, part, now); 1033 } 1034 EXPORT_SYMBOL_GPL(part_round_stats); 1035 1036 /* 1037 * queue lock must be held 1038 */ 1039 void __blk_put_request(struct request_queue *q, struct request *req) 1040 { 1041 if (unlikely(!q)) 1042 return; 1043 if (unlikely(--req->ref_count)) 1044 return; 1045 1046 elv_completed_request(q, req); 1047 1048 /* this is a bio leak */ 1049 WARN_ON(req->bio != NULL); 1050 1051 /* 1052 * Request may not have originated from ll_rw_blk. if not, 1053 * it didn't come out of our reserved rq pools 1054 */ 1055 if (req->cmd_flags & REQ_ALLOCED) { 1056 int is_sync = rq_is_sync(req) != 0; 1057 int priv = req->cmd_flags & REQ_ELVPRIV; 1058 1059 BUG_ON(!list_empty(&req->queuelist)); 1060 BUG_ON(!hlist_unhashed(&req->hash)); 1061 1062 blk_free_request(q, req); 1063 freed_request(q, is_sync, priv); 1064 } 1065 } 1066 EXPORT_SYMBOL_GPL(__blk_put_request); 1067 1068 void blk_put_request(struct request *req) 1069 { 1070 unsigned long flags; 1071 struct request_queue *q = req->q; 1072 1073 spin_lock_irqsave(q->queue_lock, flags); 1074 __blk_put_request(q, req); 1075 spin_unlock_irqrestore(q->queue_lock, flags); 1076 } 1077 EXPORT_SYMBOL(blk_put_request); 1078 1079 /** 1080 * blk_add_request_payload - add a payload to a request 1081 * @rq: request to update 1082 * @page: page backing the payload 1083 * @len: length of the payload. 1084 * 1085 * This allows to later add a payload to an already submitted request by 1086 * a block driver. 
The driver needs to take care of freeing the payload 1087 * itself. 1088 * 1089 * Note that this is a quite horrible hack and nothing but handling of 1090 * discard requests should ever use it. 1091 */ 1092 void blk_add_request_payload(struct request *rq, struct page *page, 1093 unsigned int len) 1094 { 1095 struct bio *bio = rq->bio; 1096 1097 bio->bi_io_vec->bv_page = page; 1098 bio->bi_io_vec->bv_offset = 0; 1099 bio->bi_io_vec->bv_len = len; 1100 1101 bio->bi_size = len; 1102 bio->bi_vcnt = 1; 1103 bio->bi_phys_segments = 1; 1104 1105 rq->__data_len = rq->resid_len = len; 1106 rq->nr_phys_segments = 1; 1107 rq->buffer = bio_data(bio); 1108 } 1109 EXPORT_SYMBOL_GPL(blk_add_request_payload); 1110 1111 static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, 1112 struct bio *bio) 1113 { 1114 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1115 1116 if (!ll_back_merge_fn(q, req, bio)) 1117 return false; 1118 1119 trace_block_bio_backmerge(q, bio); 1120 1121 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1122 blk_rq_set_mixed_merge(req); 1123 1124 req->biotail->bi_next = bio; 1125 req->biotail = bio; 1126 req->__data_len += bio->bi_size; 1127 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1128 1129 drive_stat_acct(req, 0); 1130 elv_bio_merged(q, req, bio); 1131 return true; 1132 } 1133 1134 static bool bio_attempt_front_merge(struct request_queue *q, 1135 struct request *req, struct bio *bio) 1136 { 1137 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1138 1139 if (!ll_front_merge_fn(q, req, bio)) 1140 return false; 1141 1142 trace_block_bio_frontmerge(q, bio); 1143 1144 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1145 blk_rq_set_mixed_merge(req); 1146 1147 bio->bi_next = req->bio; 1148 req->bio = bio; 1149 1150 /* 1151 * may not be valid. if the low level driver said 1152 * it didn't need a bounce buffer then it better 1153 * not touch req->buffer either... 1154 */ 1155 req->buffer = bio_data(bio); 1156 req->__sector = bio->bi_sector; 1157 req->__data_len += bio->bi_size; 1158 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1159 1160 drive_stat_acct(req, 0); 1161 elv_bio_merged(q, req, bio); 1162 return true; 1163 } 1164 1165 /* 1166 * Attempts to merge with the plugged list in the current process. Returns 1167 * true if merge was successful, otherwise false. 
 */
static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
			       struct bio *bio)
{
	struct blk_plug *plug;
	struct request *rq;
	bool ret = false;

	plug = tsk->plug;
	if (!plug)
		goto out;

	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
		int el_ret;

		if (rq->q != q)
			continue;

		el_ret = elv_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			ret = bio_attempt_back_merge(q, rq, bio);
			if (ret)
				break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			ret = bio_attempt_front_merge(q, rq, bio);
			if (ret)
				break;
		}
	}
out:
	return ret;
}

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cpu = bio->bi_comp_cpu;
	req->cmd_type = REQ_TYPE_FS;

	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
	if (bio->bi_rw & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
	req->__sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	blk_rq_bio_prep(req->q, req, bio);
}

static int __make_request(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
	struct request *req;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (attempt_plug_merge(current, q, bio))
		goto out;

	spin_lock_irq(q->queue_lock);

	el_ret = elv_merge(q, &req, bio);
	if (el_ret == ELEVATOR_BACK_MERGE) {
		if (bio_attempt_back_merge(q, req, bio)) {
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		if (bio_attempt_front_merge(q, req, bio)) {
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;

	/*
	 * Grab a free request. This may sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request_wait(q, rw_flags, bio);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
	    bio_flagged(bio, BIO_CPU_AFFINE))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace.
If others have been added before, check 1293 * if we have multiple devices in this plug. If so, make a 1294 * note to sort the list before dispatch. 1295 */ 1296 if (list_empty(&plug->list)) 1297 trace_block_plug(q); 1298 else if (!plug->should_sort) { 1299 struct request *__rq; 1300 1301 __rq = list_entry_rq(plug->list.prev); 1302 if (__rq->q != q) 1303 plug->should_sort = 1; 1304 } 1305 list_add_tail(&req->queuelist, &plug->list); 1306 plug->count++; 1307 drive_stat_acct(req, 1); 1308 if (plug->count >= BLK_MAX_REQUEST_COUNT) 1309 blk_flush_plug_list(plug, false); 1310 } else { 1311 spin_lock_irq(q->queue_lock); 1312 add_acct_request(q, req, where); 1313 __blk_run_queue(q); 1314 out_unlock: 1315 spin_unlock_irq(q->queue_lock); 1316 } 1317 out: 1318 return 0; 1319 } 1320 1321 /* 1322 * If bio->bi_dev is a partition, remap the location 1323 */ 1324 static inline void blk_partition_remap(struct bio *bio) 1325 { 1326 struct block_device *bdev = bio->bi_bdev; 1327 1328 if (bio_sectors(bio) && bdev != bdev->bd_contains) { 1329 struct hd_struct *p = bdev->bd_part; 1330 1331 bio->bi_sector += p->start_sect; 1332 bio->bi_bdev = bdev->bd_contains; 1333 1334 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1335 bdev->bd_dev, 1336 bio->bi_sector - p->start_sect); 1337 } 1338 } 1339 1340 static void handle_bad_sector(struct bio *bio) 1341 { 1342 char b[BDEVNAME_SIZE]; 1343 1344 printk(KERN_INFO "attempt to access beyond end of device\n"); 1345 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", 1346 bdevname(bio->bi_bdev, b), 1347 bio->bi_rw, 1348 (unsigned long long)bio->bi_sector + bio_sectors(bio), 1349 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); 1350 1351 set_bit(BIO_EOF, &bio->bi_flags); 1352 } 1353 1354 #ifdef CONFIG_FAIL_MAKE_REQUEST 1355 1356 static DECLARE_FAULT_ATTR(fail_make_request); 1357 1358 static int __init setup_fail_make_request(char *str) 1359 { 1360 return setup_fault_attr(&fail_make_request, str); 1361 } 1362 __setup("fail_make_request=", setup_fail_make_request); 1363 1364 static bool should_fail_request(struct hd_struct *part, unsigned int bytes) 1365 { 1366 return part->make_it_fail && should_fail(&fail_make_request, bytes); 1367 } 1368 1369 static int __init fail_make_request_debugfs(void) 1370 { 1371 return init_fault_attr_dentries(&fail_make_request, 1372 "fail_make_request"); 1373 } 1374 1375 late_initcall(fail_make_request_debugfs); 1376 1377 #else /* CONFIG_FAIL_MAKE_REQUEST */ 1378 1379 static inline bool should_fail_request(struct hd_struct *part, 1380 unsigned int bytes) 1381 { 1382 return false; 1383 } 1384 1385 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 1386 1387 /* 1388 * Check whether this bio extends beyond the end of the device. 1389 */ 1390 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) 1391 { 1392 sector_t maxsector; 1393 1394 if (!nr_sectors) 1395 return 0; 1396 1397 /* Test device or partition size, when known. */ 1398 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1399 if (maxsector) { 1400 sector_t sector = bio->bi_sector; 1401 1402 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 1403 /* 1404 * This may well happen - the kernel calls bread() 1405 * without checking the size of the device, e.g., when 1406 * mounting a device. 1407 */ 1408 handle_bad_sector(bio); 1409 return 1; 1410 } 1411 } 1412 1413 return 0; 1414 } 1415 1416 /** 1417 * generic_make_request - hand a buffer to its device driver for I/O 1418 * @bio: The bio describing the location in memory and on the device. 
1419 * 1420 * generic_make_request() is used to make I/O requests of block 1421 * devices. It is passed a &struct bio, which describes the I/O that needs 1422 * to be done. 1423 * 1424 * generic_make_request() does not return any status. The 1425 * success/failure status of the request, along with notification of 1426 * completion, is delivered asynchronously through the bio->bi_end_io 1427 * function described (one day) else where. 1428 * 1429 * The caller of generic_make_request must make sure that bi_io_vec 1430 * are set to describe the memory buffer, and that bi_dev and bi_sector are 1431 * set to describe the device address, and the 1432 * bi_end_io and optionally bi_private are set to describe how 1433 * completion notification should be signaled. 1434 * 1435 * generic_make_request and the drivers it calls may use bi_next if this 1436 * bio happens to be merged with someone else, and may change bi_dev and 1437 * bi_sector for remaps as it sees fit. So the values of these fields 1438 * should NOT be depended on after the call to generic_make_request. 1439 */ 1440 static inline void __generic_make_request(struct bio *bio) 1441 { 1442 struct request_queue *q; 1443 sector_t old_sector; 1444 int ret, nr_sectors = bio_sectors(bio); 1445 dev_t old_dev; 1446 int err = -EIO; 1447 1448 might_sleep(); 1449 1450 if (bio_check_eod(bio, nr_sectors)) 1451 goto end_io; 1452 1453 /* 1454 * Resolve the mapping until finished. (drivers are 1455 * still free to implement/resolve their own stacking 1456 * by explicitly returning 0) 1457 * 1458 * NOTE: we don't repeat the blk_size check for each new device. 1459 * Stacking drivers are expected to know what they are doing. 1460 */ 1461 old_sector = -1; 1462 old_dev = 0; 1463 do { 1464 char b[BDEVNAME_SIZE]; 1465 struct hd_struct *part; 1466 1467 q = bdev_get_queue(bio->bi_bdev); 1468 if (unlikely(!q)) { 1469 printk(KERN_ERR 1470 "generic_make_request: Trying to access " 1471 "nonexistent block-device %s (%Lu)\n", 1472 bdevname(bio->bi_bdev, b), 1473 (long long) bio->bi_sector); 1474 goto end_io; 1475 } 1476 1477 if (unlikely(!(bio->bi_rw & REQ_DISCARD) && 1478 nr_sectors > queue_max_hw_sectors(q))) { 1479 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1480 bdevname(bio->bi_bdev, b), 1481 bio_sectors(bio), 1482 queue_max_hw_sectors(q)); 1483 goto end_io; 1484 } 1485 1486 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) 1487 goto end_io; 1488 1489 part = bio->bi_bdev->bd_part; 1490 if (should_fail_request(part, bio->bi_size) || 1491 should_fail_request(&part_to_disk(part)->part0, 1492 bio->bi_size)) 1493 goto end_io; 1494 1495 /* 1496 * If this device has partitions, remap block n 1497 * of partition p to block n+start(p) of the disk. 1498 */ 1499 blk_partition_remap(bio); 1500 1501 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) 1502 goto end_io; 1503 1504 if (old_sector != -1) 1505 trace_block_bio_remap(q, bio, old_dev, old_sector); 1506 1507 old_sector = bio->bi_sector; 1508 old_dev = bio->bi_bdev->bd_dev; 1509 1510 if (bio_check_eod(bio, nr_sectors)) 1511 goto end_io; 1512 1513 /* 1514 * Filter flush bio's early so that make_request based 1515 * drivers without flush support don't have to worry 1516 * about them. 
	 */
	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
		if (!nr_sectors) {
			err = 0;
			goto end_io;
		}
	}

	if ((bio->bi_rw & REQ_DISCARD) &&
	    (!blk_queue_discard(q) ||
	     ((bio->bi_rw & REQ_SECURE) &&
	      !blk_queue_secdiscard(q)))) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	if (blk_throtl_bio(q, &bio))
		goto end_io;

	/*
	 * If bio is NULL, it has been throttled and will be submitted
	 * later.
	 */
	if (!bio)
		break;

	trace_block_bio_queue(q, bio);

	ret = q->make_request_fn(q, bio);
	} while (ret);

	return;

end_io:
	bio_endio(bio, err);
}

/*
 * We only want one ->make_request_fn to be active at a time,
 * else stack usage with stacked devices could be a problem.
 * So use current->bio_list to keep a list of requests
 * submitted by a make_request_fn function.
 * current->bio_list is also used as a flag to say if
 * generic_make_request is currently active in this task or not.
 * If it is NULL, then no make_request is active.  If it is non-NULL,
 * then a make_request is active, and new requests should be added
 * at the tail
 */
void generic_make_request(struct bio *bio)
{
	struct bio_list bio_list_on_stack;

	if (current->bio_list) {
		/* make_request is active */
		bio_list_add(current->bio_list, bio);
		return;
	}
	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added.  __generic_make_request may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * off the top of the list (no pretending) and so remove it from
	 * bio_list, and call into __generic_make_request again.
	 *
	 * The loop was structured like this to make only one call to
	 * __generic_make_request (which is important as it is large and
	 * inlined) and to keep the structure simple.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack);
	current->bio_list = &bio_list_on_stack;
	do {
		__generic_make_request(bio);
		bio = bio_list_pop(current->bio_list);
	} while (bio);
	current->bio_list = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be presetup and ready for I/O.
 *
 */
void submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	bio->bi_rw |= rw;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
		if (rw & WRITE) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
				current->comm, task_pid_nr(current),
				(rw & WRITE) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_sector,
				bdevname(bio->bi_bdev, b),
				count);
		}
	}

	generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);

/**
 * blk_rq_check_limits - Helper function to check a request for the queue limit
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    This function should also be useful for request stacking drivers
 *    in some cases below, so export this function.
 *    Request stacking drivers like request-based dm may change the queue
 *    limits while requests are in the queue (e.g. dm's table swapping).
 *    Such request stacking drivers should check those requests against
 *    the new queue limits again when they dispatch those requests,
 *    although such checks are also done against the old queue limits
 *    when submitting requests.
 */
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD)
		return 0;

	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_check_limits);

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	if (blk_rq_check_limits(q, rq))
		return -EIO;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return -EIO;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * Submitting request must be dequeued before calling this function
	 * because it will be linked to another request_queue
	 */
	BUG_ON(blk_queued_rq(rq));

	add_acct_request(q, rq, ELEVATOR_INSERT_BACK);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 *
 * Context:
 *     queue_lock must be held.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_rw & ff) != ff)
			break;
		bytes += bio->bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (blk_do_io_stat(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_account_io_done(struct request *req)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
1790 */ 1791 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { 1792 unsigned long duration = jiffies - req->start_time; 1793 const int rw = rq_data_dir(req); 1794 struct hd_struct *part; 1795 int cpu; 1796 1797 cpu = part_stat_lock(); 1798 part = req->part; 1799 1800 part_stat_inc(cpu, part, ios[rw]); 1801 part_stat_add(cpu, part, ticks[rw], duration); 1802 part_round_stats(cpu, part); 1803 part_dec_in_flight(part, rw); 1804 1805 hd_struct_put(part); 1806 part_stat_unlock(); 1807 } 1808 } 1809 1810 /** 1811 * blk_peek_request - peek at the top of a request queue 1812 * @q: request queue to peek at 1813 * 1814 * Description: 1815 * Return the request at the top of @q. The returned request 1816 * should be started using blk_start_request() before LLD starts 1817 * processing it. 1818 * 1819 * Return: 1820 * Pointer to the request at the top of @q if available. Null 1821 * otherwise. 1822 * 1823 * Context: 1824 * queue_lock must be held. 1825 */ 1826 struct request *blk_peek_request(struct request_queue *q) 1827 { 1828 struct request *rq; 1829 int ret; 1830 1831 while ((rq = __elv_next_request(q)) != NULL) { 1832 if (!(rq->cmd_flags & REQ_STARTED)) { 1833 /* 1834 * This is the first time the device driver 1835 * sees this request (possibly after 1836 * requeueing). Notify IO scheduler. 1837 */ 1838 if (rq->cmd_flags & REQ_SORTED) 1839 elv_activate_rq(q, rq); 1840 1841 /* 1842 * just mark as started even if we don't start 1843 * it, a request that has been delayed should 1844 * not be passed by new incoming requests 1845 */ 1846 rq->cmd_flags |= REQ_STARTED; 1847 trace_block_rq_issue(q, rq); 1848 } 1849 1850 if (!q->boundary_rq || q->boundary_rq == rq) { 1851 q->end_sector = rq_end_sector(rq); 1852 q->boundary_rq = NULL; 1853 } 1854 1855 if (rq->cmd_flags & REQ_DONTPREP) 1856 break; 1857 1858 if (q->dma_drain_size && blk_rq_bytes(rq)) { 1859 /* 1860 * make sure space for the drain appears we 1861 * know we can do this because max_hw_segments 1862 * has been adjusted to be one fewer than the 1863 * device can handle 1864 */ 1865 rq->nr_phys_segments++; 1866 } 1867 1868 if (!q->prep_rq_fn) 1869 break; 1870 1871 ret = q->prep_rq_fn(q, rq); 1872 if (ret == BLKPREP_OK) { 1873 break; 1874 } else if (ret == BLKPREP_DEFER) { 1875 /* 1876 * the request may have been (partially) prepped. 1877 * we need to keep this request in the front to 1878 * avoid resource deadlock. REQ_STARTED will 1879 * prevent other fs requests from passing this one. 1880 */ 1881 if (q->dma_drain_size && blk_rq_bytes(rq) && 1882 !(rq->cmd_flags & REQ_DONTPREP)) { 1883 /* 1884 * remove the space for the drain we added 1885 * so that we don't add it again 1886 */ 1887 --rq->nr_phys_segments; 1888 } 1889 1890 rq = NULL; 1891 break; 1892 } else if (ret == BLKPREP_KILL) { 1893 rq->cmd_flags |= REQ_QUIET; 1894 /* 1895 * Mark this request as started so we don't trigger 1896 * any debug logic in the end I/O path. 
1897 */ 1898 blk_start_request(rq); 1899 __blk_end_request_all(rq, -EIO); 1900 } else { 1901 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 1902 break; 1903 } 1904 } 1905 1906 return rq; 1907 } 1908 EXPORT_SYMBOL(blk_peek_request); 1909 1910 void blk_dequeue_request(struct request *rq) 1911 { 1912 struct request_queue *q = rq->q; 1913 1914 BUG_ON(list_empty(&rq->queuelist)); 1915 BUG_ON(ELV_ON_HASH(rq)); 1916 1917 list_del_init(&rq->queuelist); 1918 1919 /* 1920 * the time frame between a request being removed from the lists 1921 * and to it is freed is accounted as io that is in progress at 1922 * the driver side. 1923 */ 1924 if (blk_account_rq(rq)) { 1925 q->in_flight[rq_is_sync(rq)]++; 1926 set_io_start_time_ns(rq); 1927 } 1928 } 1929 1930 /** 1931 * blk_start_request - start request processing on the driver 1932 * @req: request to dequeue 1933 * 1934 * Description: 1935 * Dequeue @req and start timeout timer on it. This hands off the 1936 * request to the driver. 1937 * 1938 * Block internal functions which don't want to start timer should 1939 * call blk_dequeue_request(). 1940 * 1941 * Context: 1942 * queue_lock must be held. 1943 */ 1944 void blk_start_request(struct request *req) 1945 { 1946 blk_dequeue_request(req); 1947 1948 /* 1949 * We are now handing the request to the hardware, initialize 1950 * resid_len to full count and add the timeout handler. 1951 */ 1952 req->resid_len = blk_rq_bytes(req); 1953 if (unlikely(blk_bidi_rq(req))) 1954 req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 1955 1956 blk_add_timer(req); 1957 } 1958 EXPORT_SYMBOL(blk_start_request); 1959 1960 /** 1961 * blk_fetch_request - fetch a request from a request queue 1962 * @q: request queue to fetch a request from 1963 * 1964 * Description: 1965 * Return the request at the top of @q. The request is started on 1966 * return and LLD can start processing it immediately. 1967 * 1968 * Return: 1969 * Pointer to the request at the top of @q if available. Null 1970 * otherwise. 1971 * 1972 * Context: 1973 * queue_lock must be held. 1974 */ 1975 struct request *blk_fetch_request(struct request_queue *q) 1976 { 1977 struct request *rq; 1978 1979 rq = blk_peek_request(q); 1980 if (rq) 1981 blk_start_request(rq); 1982 return rq; 1983 } 1984 EXPORT_SYMBOL(blk_fetch_request); 1985 1986 /** 1987 * blk_update_request - Special helper function for request stacking drivers 1988 * @req: the request being processed 1989 * @error: %0 for success, < %0 for error 1990 * @nr_bytes: number of bytes to complete @req 1991 * 1992 * Description: 1993 * Ends I/O on a number of bytes attached to @req, but doesn't complete 1994 * the request structure even if @req doesn't have leftover. 1995 * If @req has leftover, sets it up for the next range of segments. 1996 * 1997 * This special helper function is only for request stacking drivers 1998 * (e.g. request-based dm) so that they can handle partial completion. 1999 * Actual device drivers should use blk_end_request instead. 2000 * 2001 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2002 * %false return from this function. 
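*
* For example, a request stacking driver that has seen some bytes of
* the original request complete could record the partial progress and
* only finish the request once everything is done (illustrative sketch
* only; "orig" and "done" are hypothetical names):
*
* blk_update_request(orig, 0, done);
* ...
* blk_end_request_all(orig, error);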
2003 * 2004 * Return: 2005 * %false - this request doesn't have any more data 2006 * %true - this request has more data 2007 **/ 2008 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 2009 { 2010 int total_bytes, bio_nbytes, next_idx = 0; 2011 struct bio *bio; 2012 2013 if (!req->bio) 2014 return false; 2015 2016 trace_block_rq_complete(req->q, req); 2017 2018 /* 2019 * For fs requests, rq is just carrier of independent bio's 2020 * and each partial completion should be handled separately. 2021 * Reset per-request error on each partial completion. 2022 * 2023 * TODO: tj: This is too subtle. It would be better to let 2024 * low level drivers do what they see fit. 2025 */ 2026 if (req->cmd_type == REQ_TYPE_FS) 2027 req->errors = 0; 2028 2029 if (error && req->cmd_type == REQ_TYPE_FS && 2030 !(req->cmd_flags & REQ_QUIET)) { 2031 char *error_type; 2032 2033 switch (error) { 2034 case -ENOLINK: 2035 error_type = "recoverable transport"; 2036 break; 2037 case -EREMOTEIO: 2038 error_type = "critical target"; 2039 break; 2040 case -EBADE: 2041 error_type = "critical nexus"; 2042 break; 2043 case -EIO: 2044 default: 2045 error_type = "I/O"; 2046 break; 2047 } 2048 printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", 2049 error_type, req->rq_disk ? req->rq_disk->disk_name : "?", 2050 (unsigned long long)blk_rq_pos(req)); 2051 } 2052 2053 blk_account_io_completion(req, nr_bytes); 2054 2055 total_bytes = bio_nbytes = 0; 2056 while ((bio = req->bio) != NULL) { 2057 int nbytes; 2058 2059 if (nr_bytes >= bio->bi_size) { 2060 req->bio = bio->bi_next; 2061 nbytes = bio->bi_size; 2062 req_bio_endio(req, bio, nbytes, error); 2063 next_idx = 0; 2064 bio_nbytes = 0; 2065 } else { 2066 int idx = bio->bi_idx + next_idx; 2067 2068 if (unlikely(idx >= bio->bi_vcnt)) { 2069 blk_dump_rq_flags(req, "__end_that"); 2070 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", 2071 __func__, idx, bio->bi_vcnt); 2072 break; 2073 } 2074 2075 nbytes = bio_iovec_idx(bio, idx)->bv_len; 2076 BIO_BUG_ON(nbytes > bio->bi_size); 2077 2078 /* 2079 * not a complete bvec done 2080 */ 2081 if (unlikely(nbytes > nr_bytes)) { 2082 bio_nbytes += nr_bytes; 2083 total_bytes += nr_bytes; 2084 break; 2085 } 2086 2087 /* 2088 * advance to the next vector 2089 */ 2090 next_idx++; 2091 bio_nbytes += nbytes; 2092 } 2093 2094 total_bytes += nbytes; 2095 nr_bytes -= nbytes; 2096 2097 bio = req->bio; 2098 if (bio) { 2099 /* 2100 * end more in this run, or just return 'not-done' 2101 */ 2102 if (unlikely(nr_bytes <= 0)) 2103 break; 2104 } 2105 } 2106 2107 /* 2108 * completely done 2109 */ 2110 if (!req->bio) { 2111 /* 2112 * Reset counters so that the request stacking driver 2113 * can find how many bytes remain in the request 2114 * later. 
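* (blk_rq_bytes() will report 0 for this request from now on.)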
2115 */ 2116 req->__data_len = 0; 2117 return false; 2118 } 2119 2120 /* 2121 * if the request wasn't completed, update state 2122 */ 2123 if (bio_nbytes) { 2124 req_bio_endio(req, bio, bio_nbytes, error); 2125 bio->bi_idx += next_idx; 2126 bio_iovec(bio)->bv_offset += nr_bytes; 2127 bio_iovec(bio)->bv_len -= nr_bytes; 2128 } 2129 2130 req->__data_len -= total_bytes; 2131 req->buffer = bio_data(req->bio); 2132 2133 /* update sector only for requests with clear definition of sector */ 2134 if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) 2135 req->__sector += total_bytes >> 9; 2136 2137 /* mixed attributes always follow the first bio */ 2138 if (req->cmd_flags & REQ_MIXED_MERGE) { 2139 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2140 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; 2141 } 2142 2143 /* 2144 * If total number of sectors is less than the first segment 2145 * size, something has gone terribly wrong. 2146 */ 2147 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2148 blk_dump_rq_flags(req, "request botched"); 2149 req->__data_len = blk_rq_cur_bytes(req); 2150 } 2151 2152 /* recalculate the number of segments */ 2153 blk_recalc_rq_segments(req); 2154 2155 return true; 2156 } 2157 EXPORT_SYMBOL_GPL(blk_update_request); 2158 2159 static bool blk_update_bidi_request(struct request *rq, int error, 2160 unsigned int nr_bytes, 2161 unsigned int bidi_bytes) 2162 { 2163 if (blk_update_request(rq, error, nr_bytes)) 2164 return true; 2165 2166 /* Bidi request must be completed as a whole */ 2167 if (unlikely(blk_bidi_rq(rq)) && 2168 blk_update_request(rq->next_rq, error, bidi_bytes)) 2169 return true; 2170 2171 if (blk_queue_add_random(rq->q)) 2172 add_disk_randomness(rq->rq_disk); 2173 2174 return false; 2175 } 2176 2177 /** 2178 * blk_unprep_request - unprepare a request 2179 * @req: the request 2180 * 2181 * This function makes a request ready for complete resubmission (or 2182 * completion). It happens only after all error handling is complete, 2183 * so represents the appropriate moment to deallocate any resources 2184 * that were allocated to the request in the prep_rq_fn. The queue 2185 * lock is held when calling this. 2186 */ 2187 void blk_unprep_request(struct request *req) 2188 { 2189 struct request_queue *q = req->q; 2190 2191 req->cmd_flags &= ~REQ_DONTPREP; 2192 if (q->unprep_rq_fn) 2193 q->unprep_rq_fn(q, req); 2194 } 2195 EXPORT_SYMBOL_GPL(blk_unprep_request); 2196 2197 /* 2198 * queue lock must be held 2199 */ 2200 static void blk_finish_request(struct request *req, int error) 2201 { 2202 if (blk_rq_tagged(req)) 2203 blk_queue_end_tag(req->q, req); 2204 2205 BUG_ON(blk_queued_rq(req)); 2206 2207 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2208 laptop_io_completion(&req->q->backing_dev_info); 2209 2210 blk_delete_timer(req); 2211 2212 if (req->cmd_flags & REQ_DONTPREP) 2213 blk_unprep_request(req); 2214 2215 2216 blk_account_io_done(req); 2217 2218 if (req->end_io) 2219 req->end_io(req, error); 2220 else { 2221 if (blk_bidi_rq(req)) 2222 __blk_put_request(req->next_rq->q, req->next_rq); 2223 2224 __blk_put_request(req->q, req); 2225 } 2226 } 2227 2228 /** 2229 * blk_end_bidi_request - Complete a bidi request 2230 * @rq: the request to complete 2231 * @error: %0 for success, < %0 for error 2232 * @nr_bytes: number of bytes to complete @rq 2233 * @bidi_bytes: number of bytes to complete @rq->next_rq 2234 * 2235 * Description: 2236 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 
2237 * Drivers that support bidi can safely call this member for any
2238 * type of request, bidi or uni. In the latter case @bidi_bytes is
2239 * just ignored.
2240 *
2241 * Return:
2242 * %false - we are done with this request
2243 * %true - still buffers pending for this request
2244 **/
2245 static bool blk_end_bidi_request(struct request *rq, int error,
2246 unsigned int nr_bytes, unsigned int bidi_bytes)
2247 {
2248 struct request_queue *q = rq->q;
2249 unsigned long flags;
2250
2251 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2252 return true;
2253
2254 spin_lock_irqsave(q->queue_lock, flags);
2255 blk_finish_request(rq, error);
2256 spin_unlock_irqrestore(q->queue_lock, flags);
2257
2258 return false;
2259 }
2260
2261 /**
2262 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2263 * @rq: the request to complete
2264 * @error: %0 for success, < %0 for error
2265 * @nr_bytes: number of bytes to complete @rq
2266 * @bidi_bytes: number of bytes to complete @rq->next_rq
2267 *
2268 * Description:
2269 * Identical to blk_end_bidi_request() except that queue lock is
2270 * assumed to be locked on entry and remains so on return.
2271 *
2272 * Return:
2273 * %false - we are done with this request
2274 * %true - still buffers pending for this request
2275 **/
2276 static bool __blk_end_bidi_request(struct request *rq, int error,
2277 unsigned int nr_bytes, unsigned int bidi_bytes)
2278 {
2279 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2280 return true;
2281
2282 blk_finish_request(rq, error);
2283
2284 return false;
2285 }
2286
2287 /**
2288 * blk_end_request - Helper function for drivers to complete the request.
2289 * @rq: the request being processed
2290 * @error: %0 for success, < %0 for error
2291 * @nr_bytes: number of bytes to complete
2292 *
2293 * Description:
2294 * Ends I/O on a number of bytes attached to @rq.
2295 * If @rq has leftover, sets it up for the next range of segments.
2296 *
2297 * Return:
2298 * %false - we are done with this request
2299 * %true - still buffers pending for this request
2300 **/
2301 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2302 {
2303 return blk_end_bidi_request(rq, error, nr_bytes, 0);
2304 }
2305 EXPORT_SYMBOL(blk_end_request);
2306
2307 /**
2308 * blk_end_request_all - Helper function for drivers to finish the request.
2309 * @rq: the request to finish
2310 * @error: %0 for success, < %0 for error
2311 *
2312 * Description:
2313 * Completely finish @rq.
2314 */
2315 void blk_end_request_all(struct request *rq, int error)
2316 {
2317 bool pending;
2318 unsigned int bidi_bytes = 0;
2319
2320 if (unlikely(blk_bidi_rq(rq)))
2321 bidi_bytes = blk_rq_bytes(rq->next_rq);
2322
2323 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2324 BUG_ON(pending);
2325 }
2326 EXPORT_SYMBOL(blk_end_request_all);
2327
2328 /**
2329 * blk_end_request_cur - Helper function to finish the current request chunk.
2330 * @rq: the request to finish the current chunk for
2331 * @error: %0 for success, < %0 for error
2332 *
2333 * Description:
2334 * Complete the current consecutively mapped chunk from @rq.
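*
* A driver that transfers one chunk at a time can keep calling this
* helper until it returns %false, e.g. (illustrative sketch only;
* transfer_chunk() is a hypothetical per-chunk transfer routine):
*
* do {
* transfer_chunk(rq);
* } while (blk_end_request_cur(rq, error));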
2335 * 2336 * Return: 2337 * %false - we are done with this request 2338 * %true - still buffers pending for this request 2339 */ 2340 bool blk_end_request_cur(struct request *rq, int error) 2341 { 2342 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2343 } 2344 EXPORT_SYMBOL(blk_end_request_cur); 2345 2346 /** 2347 * blk_end_request_err - Finish a request till the next failure boundary. 2348 * @rq: the request to finish till the next failure boundary for 2349 * @error: must be negative errno 2350 * 2351 * Description: 2352 * Complete @rq till the next failure boundary. 2353 * 2354 * Return: 2355 * %false - we are done with this request 2356 * %true - still buffers pending for this request 2357 */ 2358 bool blk_end_request_err(struct request *rq, int error) 2359 { 2360 WARN_ON(error >= 0); 2361 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2362 } 2363 EXPORT_SYMBOL_GPL(blk_end_request_err); 2364 2365 /** 2366 * __blk_end_request - Helper function for drivers to complete the request. 2367 * @rq: the request being processed 2368 * @error: %0 for success, < %0 for error 2369 * @nr_bytes: number of bytes to complete 2370 * 2371 * Description: 2372 * Must be called with queue lock held unlike blk_end_request(). 2373 * 2374 * Return: 2375 * %false - we are done with this request 2376 * %true - still buffers pending for this request 2377 **/ 2378 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2379 { 2380 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 2381 } 2382 EXPORT_SYMBOL(__blk_end_request); 2383 2384 /** 2385 * __blk_end_request_all - Helper function for drives to finish the request. 2386 * @rq: the request to finish 2387 * @error: %0 for success, < %0 for error 2388 * 2389 * Description: 2390 * Completely finish @rq. Must be called with queue lock held. 2391 */ 2392 void __blk_end_request_all(struct request *rq, int error) 2393 { 2394 bool pending; 2395 unsigned int bidi_bytes = 0; 2396 2397 if (unlikely(blk_bidi_rq(rq))) 2398 bidi_bytes = blk_rq_bytes(rq->next_rq); 2399 2400 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2401 BUG_ON(pending); 2402 } 2403 EXPORT_SYMBOL(__blk_end_request_all); 2404 2405 /** 2406 * __blk_end_request_cur - Helper function to finish the current request chunk. 2407 * @rq: the request to finish the current chunk for 2408 * @error: %0 for success, < %0 for error 2409 * 2410 * Description: 2411 * Complete the current consecutively mapped chunk from @rq. Must 2412 * be called with queue lock held. 2413 * 2414 * Return: 2415 * %false - we are done with this request 2416 * %true - still buffers pending for this request 2417 */ 2418 bool __blk_end_request_cur(struct request *rq, int error) 2419 { 2420 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2421 } 2422 EXPORT_SYMBOL(__blk_end_request_cur); 2423 2424 /** 2425 * __blk_end_request_err - Finish a request till the next failure boundary. 2426 * @rq: the request to finish till the next failure boundary for 2427 * @error: must be negative errno 2428 * 2429 * Description: 2430 * Complete @rq till the next failure boundary. Must be called 2431 * with queue lock held. 
2432 * 2433 * Return: 2434 * %false - we are done with this request 2435 * %true - still buffers pending for this request 2436 */ 2437 bool __blk_end_request_err(struct request *rq, int error) 2438 { 2439 WARN_ON(error >= 0); 2440 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2441 } 2442 EXPORT_SYMBOL_GPL(__blk_end_request_err); 2443 2444 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2445 struct bio *bio) 2446 { 2447 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2448 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; 2449 2450 if (bio_has_data(bio)) { 2451 rq->nr_phys_segments = bio_phys_segments(q, bio); 2452 rq->buffer = bio_data(bio); 2453 } 2454 rq->__data_len = bio->bi_size; 2455 rq->bio = rq->biotail = bio; 2456 2457 if (bio->bi_bdev) 2458 rq->rq_disk = bio->bi_bdev->bd_disk; 2459 } 2460 2461 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 2462 /** 2463 * rq_flush_dcache_pages - Helper function to flush all pages in a request 2464 * @rq: the request to be flushed 2465 * 2466 * Description: 2467 * Flush all pages in @rq. 2468 */ 2469 void rq_flush_dcache_pages(struct request *rq) 2470 { 2471 struct req_iterator iter; 2472 struct bio_vec *bvec; 2473 2474 rq_for_each_segment(bvec, rq, iter) 2475 flush_dcache_page(bvec->bv_page); 2476 } 2477 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2478 #endif 2479 2480 /** 2481 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 2482 * @q : the queue of the device being checked 2483 * 2484 * Description: 2485 * Check if underlying low-level drivers of a device are busy. 2486 * If the drivers want to export their busy state, they must set own 2487 * exporting function using blk_queue_lld_busy() first. 2488 * 2489 * Basically, this function is used only by request stacking drivers 2490 * to stop dispatching requests to underlying devices when underlying 2491 * devices are busy. This behavior helps more I/O merging on the queue 2492 * of the request stacking driver and prevents I/O throughput regression 2493 * on burst I/O load. 2494 * 2495 * Return: 2496 * 0 - Not busy (The request stacking driver should dispatch request) 2497 * 1 - Busy (The request stacking driver should stop dispatching request) 2498 */ 2499 int blk_lld_busy(struct request_queue *q) 2500 { 2501 if (q->lld_busy_fn) 2502 return q->lld_busy_fn(q); 2503 2504 return 0; 2505 } 2506 EXPORT_SYMBOL_GPL(blk_lld_busy); 2507 2508 /** 2509 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2510 * @rq: the clone request to be cleaned up 2511 * 2512 * Description: 2513 * Free all bios in @rq for a cloned request. 2514 */ 2515 void blk_rq_unprep_clone(struct request *rq) 2516 { 2517 struct bio *bio; 2518 2519 while ((bio = rq->bio) != NULL) { 2520 rq->bio = bio->bi_next; 2521 2522 bio_put(bio); 2523 } 2524 } 2525 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2526 2527 /* 2528 * Copy attributes of the original request to the clone request. 2529 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. 
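* What is copied: ->cpu, ->cmd_type, ->ioprio, ->extra_len, the starting
* sector, the data length, the number of physical segments, and the
* cmd_flags covered by REQ_CLONE_MASK (with REQ_NOMERGE set on the clone).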
2530 */ 2531 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2532 { 2533 dst->cpu = src->cpu; 2534 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; 2535 dst->cmd_type = src->cmd_type; 2536 dst->__sector = blk_rq_pos(src); 2537 dst->__data_len = blk_rq_bytes(src); 2538 dst->nr_phys_segments = src->nr_phys_segments; 2539 dst->ioprio = src->ioprio; 2540 dst->extra_len = src->extra_len; 2541 } 2542 2543 /** 2544 * blk_rq_prep_clone - Helper function to setup clone request 2545 * @rq: the request to be setup 2546 * @rq_src: original request to be cloned 2547 * @bs: bio_set that bios for clone are allocated from 2548 * @gfp_mask: memory allocation mask for bio 2549 * @bio_ctr: setup function to be called for each clone bio. 2550 * Returns %0 for success, non %0 for failure. 2551 * @data: private data to be passed to @bio_ctr 2552 * 2553 * Description: 2554 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2555 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) 2556 * are not copied, and copying such parts is the caller's responsibility. 2557 * Also, pages which the original bios are pointing to are not copied 2558 * and the cloned bios just point same pages. 2559 * So cloned bios must be completed before original bios, which means 2560 * the caller must complete @rq before @rq_src. 2561 */ 2562 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2563 struct bio_set *bs, gfp_t gfp_mask, 2564 int (*bio_ctr)(struct bio *, struct bio *, void *), 2565 void *data) 2566 { 2567 struct bio *bio, *bio_src; 2568 2569 if (!bs) 2570 bs = fs_bio_set; 2571 2572 blk_rq_init(NULL, rq); 2573 2574 __rq_for_each_bio(bio_src, rq_src) { 2575 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs); 2576 if (!bio) 2577 goto free_and_out; 2578 2579 __bio_clone(bio, bio_src); 2580 2581 if (bio_integrity(bio_src) && 2582 bio_integrity_clone(bio, bio_src, gfp_mask, bs)) 2583 goto free_and_out; 2584 2585 if (bio_ctr && bio_ctr(bio, bio_src, data)) 2586 goto free_and_out; 2587 2588 if (rq->bio) { 2589 rq->biotail->bi_next = bio; 2590 rq->biotail = bio; 2591 } else 2592 rq->bio = rq->biotail = bio; 2593 } 2594 2595 __blk_rq_prep_clone(rq, rq_src); 2596 2597 return 0; 2598 2599 free_and_out: 2600 if (bio) 2601 bio_free(bio, bs); 2602 blk_rq_unprep_clone(rq); 2603 2604 return -ENOMEM; 2605 } 2606 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 2607 2608 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) 2609 { 2610 return queue_work(kblockd_workqueue, work); 2611 } 2612 EXPORT_SYMBOL(kblockd_schedule_work); 2613 2614 int kblockd_schedule_delayed_work(struct request_queue *q, 2615 struct delayed_work *dwork, unsigned long delay) 2616 { 2617 return queue_delayed_work(kblockd_workqueue, dwork, delay); 2618 } 2619 EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2620 2621 #define PLUG_MAGIC 0x91827364 2622 2623 void blk_start_plug(struct blk_plug *plug) 2624 { 2625 struct task_struct *tsk = current; 2626 2627 plug->magic = PLUG_MAGIC; 2628 INIT_LIST_HEAD(&plug->list); 2629 INIT_LIST_HEAD(&plug->cb_list); 2630 plug->should_sort = 0; 2631 plug->count = 0; 2632 2633 /* 2634 * If this is a nested plug, don't actually assign it. It will be 2635 * flushed on its own. 
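* Only the outermost plug is stored in current->plug, and
* blk_finish_plug() clears current->plug only when it is called on
* that outermost plug.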
2636 */
2637 if (!tsk->plug) {
2638 /*
2639 * Store ordering should not be needed here, since a potential
2640 * preempt will imply a full memory barrier
2641 */
2642 tsk->plug = plug;
2643 }
2644 }
2645 EXPORT_SYMBOL(blk_start_plug);
2646
2647 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2648 {
2649 struct request *rqa = container_of(a, struct request, queuelist);
2650 struct request *rqb = container_of(b, struct request, queuelist);
2651
2652 return !(rqa->q <= rqb->q);
2653 }
2654
2655 /*
2656 * If 'from_schedule' is true, then postpone the dispatch of requests
2657 * until a safe kblockd context. We do this to avoid accidentally incurring
2658 * large additional stack usage in driver dispatch, in places where the
2659 * original plugger did not intend it.
2660 */
2661 static void queue_unplugged(struct request_queue *q, unsigned int depth,
2662 bool from_schedule)
2663 __releases(q->queue_lock)
2664 {
2665 trace_block_unplug(q, depth, !from_schedule);
2666
2667 /*
2668 * If we are punting this to kblockd, then we can safely drop
2669 * the queue_lock before waking kblockd (which needs to take
2670 * this lock).
2671 */
2672 if (from_schedule) {
2673 spin_unlock(q->queue_lock);
2674 blk_run_queue_async(q);
2675 } else {
2676 __blk_run_queue(q);
2677 spin_unlock(q->queue_lock);
2678 }
2679
2680 }
2681
2682 static void flush_plug_callbacks(struct blk_plug *plug)
2683 {
2684 LIST_HEAD(callbacks);
2685
2686 if (list_empty(&plug->cb_list))
2687 return;
2688
2689 list_splice_init(&plug->cb_list, &callbacks);
2690
2691 while (!list_empty(&callbacks)) {
2692 struct blk_plug_cb *cb = list_first_entry(&callbacks,
2693 struct blk_plug_cb,
2694 list);
2695 list_del(&cb->list);
2696 cb->callback(cb);
2697 }
2698 }
2699
2700 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2701 {
2702 struct request_queue *q;
2703 unsigned long flags;
2704 struct request *rq;
2705 LIST_HEAD(list);
2706 unsigned int depth;
2707
2708 BUG_ON(plug->magic != PLUG_MAGIC);
2709
2710 flush_plug_callbacks(plug);
2711 if (list_empty(&plug->list))
2712 return;
2713
2714 list_splice_init(&plug->list, &list);
2715 plug->count = 0;
2716
2717 if (plug->should_sort) {
2718 list_sort(NULL, &list, plug_rq_cmp);
2719 plug->should_sort = 0;
2720 }
2721
2722 q = NULL;
2723 depth = 0;
2724
2725 /*
2726 * Save and disable interrupts here, to avoid doing it for every
2727 * queue lock we have to take.
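* Interrupts remain disabled across the whole dispatch loop and are
* restored once, after the last queue has been unplugged.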
2728 */ 2729 local_irq_save(flags); 2730 while (!list_empty(&list)) { 2731 rq = list_entry_rq(list.next); 2732 list_del_init(&rq->queuelist); 2733 BUG_ON(!rq->q); 2734 if (rq->q != q) { 2735 /* 2736 * This drops the queue lock 2737 */ 2738 if (q) 2739 queue_unplugged(q, depth, from_schedule); 2740 q = rq->q; 2741 depth = 0; 2742 spin_lock(q->queue_lock); 2743 } 2744 /* 2745 * rq is already accounted, so use raw insert 2746 */ 2747 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) 2748 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 2749 else 2750 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 2751 2752 depth++; 2753 } 2754 2755 /* 2756 * This drops the queue lock 2757 */ 2758 if (q) 2759 queue_unplugged(q, depth, from_schedule); 2760 2761 local_irq_restore(flags); 2762 } 2763 2764 void blk_finish_plug(struct blk_plug *plug) 2765 { 2766 blk_flush_plug_list(plug, false); 2767 2768 if (plug == current->plug) 2769 current->plug = NULL; 2770 } 2771 EXPORT_SYMBOL(blk_finish_plug); 2772 2773 int __init blk_dev_init(void) 2774 { 2775 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 2776 sizeof(((struct request *)0)->cmd_flags)); 2777 2778 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 2779 kblockd_workqueue = alloc_workqueue("kblockd", 2780 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 2781 if (!kblockd_workqueue) 2782 panic("Failed to create kblockd\n"); 2783 2784 request_cachep = kmem_cache_create("blkdev_requests", 2785 sizeof(struct request), 0, SLAB_PANIC, NULL); 2786 2787 blk_requestq_cachep = kmem_cache_create("blkdev_queue", 2788 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 2789 2790 return 0; 2791 } 2792
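/*
 * Usage note (added for illustration, not part of the original file):
 * the on-stack plugging interface defined above is typically used by
 * I/O submitters as in the sketch below. submit_my_io() is a
 * hypothetical stand-in for any sequence of bio/request submissions.
 *
 * struct blk_plug plug;
 *
 * blk_start_plug(&plug);
 * submit_my_io();
 * blk_finish_plug(&plug);
 *
 * Requests submitted between the two calls are collected on the plug
 * list and dispatched in one batch when the plug is finished (or when
 * the task schedules). Nested start/finish pairs are allowed; only the
 * outermost pair installs and clears current->plug.
 */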