/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(cpu, part);
		part_inc_in_flight(part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
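 *
 * Example (illustrative sketch only, not taken from an in-tree caller): a
 * caller can use this to peek at the readahead setting of whatever device
 * is backing @bdev:
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *	unsigned long ra_pages = bdi ? bdi->ra_pages : 0;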
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(nbytes > bio->bi_size)) {
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __func__, nbytes, bio->bi_size);
		nbytes = bio->bi_size;
	}

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio->bi_size -= nbytes;
	bio->bi_sector += (nbytes >> 9);

	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
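 *
 *   Example (illustrative sketch only; my_request_fn and my_controller_busy
 *   are made-up driver names): a driver's request_fn that finds its
 *   controller temporarily busy can back off for a few milliseconds, and the
 *   queue lock is already held in that context:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		if (my_controller_busy(q->queuedata)) {
 *			blk_delay_queue(q, 5);
 *			return;
 *		}
 *		... dispatch requests as usual ...
 *	}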
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us. The caller must hold the queue lock.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active. Some drivers (e.g. fd and loop) get unhappy
		 * in such cases. Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained. Check all the queues and counters.
		 */
		if (drain_all) {
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				drain |= !list_empty(&q->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() is %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	bool drain;

	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag
	 * to prevent q->request_fn() from being invoked after draining has
	 * finished.
	 */
	spin_lock_irq(lock);
	__blk_drain_queue(q, true);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					  mempool_free_slab, request_cachep,
					  gfp_mask, q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

void blk_exit_rl(struct request_list *rl)
{
	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				  gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_id;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	if (blkcg_init_queue(q))
		goto fail_id;

	return q;

fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
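 *
 *    Example (illustrative sketch only; my_request_fn and my_lock are
 *    made-up driver names): at probe time a simple driver typically does
 *
 *	spin_lock_init(&my_lock);
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *
 *    and calls blk_cleanup_queue(q) when the device is torn down.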
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	if (!q)
		return NULL;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		return NULL;

	q->request_fn = rfn;
	q->prep_rq_fn = NULL;
	q->unprep_rq_fn = NULL;
	q->queue_flags |= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock = lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* init elevator */
	if (elevator_init(q, NULL))
		return NULL;
	return q;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	/*
	 * bdi isn't aware of blkcg yet.  As all async IOs end up root
	 * blkcg anyway, just use root blkcg state.
	 */
	if (rl == &q->root_rl &&
	    rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, unsigned int flags)
{
	struct request_queue *q = rl->q;
	int sync = rw_is_sync(flags);

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
	if (!bio)
		return true;

	/*
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
		return false;

	return true;
}

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue;

	if (unlikely(blk_queue_dying(q)))
		return NULL;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return NULL;
				}
			}
		}
		/*
		 * bdi isn't aware of blkcg yet.  As all async IOs end up
		 * root blkcg anyway, just use root blkcg state.
		 */
		if (rl == &q->root_rl)
			blk_set_queue_congested(q, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return NULL;

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
		rw_flags |= REQ_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = rw_flags | REQ_ALLOCED;

	/* init elvpriv */
	if (rw_flags & REQ_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
			   dev_name(q->backing_dev_info.dev));

	rq->cmd_flags &= ~REQ_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, rw_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction was pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return NULL;
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
 * function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, rw_flags, bio, gfp_mask);
	if (rq)
		return rq;

	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return NULL;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, rw_flags & 1);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period time.  See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);

/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized by
 * the caller.  It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers, so that bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	if (now == part->stamp)
		return;

	if (part_in_flight(part)) {
		__part_stat_add(cpu, part, time_in_queue,
				part_in_flight(part) * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		unsigned int flags = req->cmd_flags;
		struct request_list *rl = blk_rq_rl(req);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(rl, req);
		freed_request(rl, flags);
		blk_put_rl(rl);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

/**
 * blk_add_request_payload - add a payload to a request
 * @rq: request to update
 * @page: page backing the payload
 * @len: length of the payload.
 *
 * This allows a block driver to later add a payload to an already
 * submitted request.  The driver needs to take care of freeing the
 * payload itself.
 *
 * Note that this is a quite horrible hack and nothing but handling of
 * discard requests should ever use it.
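 *
 * Example (illustrative sketch only; payload_page stands for a
 * driver-owned, pre-allocated page used to build the protocol payload of a
 * discard command):
 *
 *	blk_add_request_payload(rq, payload_page, sector_size);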
1323 */ 1324 void blk_add_request_payload(struct request *rq, struct page *page, 1325 unsigned int len) 1326 { 1327 struct bio *bio = rq->bio; 1328 1329 bio->bi_io_vec->bv_page = page; 1330 bio->bi_io_vec->bv_offset = 0; 1331 bio->bi_io_vec->bv_len = len; 1332 1333 bio->bi_size = len; 1334 bio->bi_vcnt = 1; 1335 bio->bi_phys_segments = 1; 1336 1337 rq->__data_len = rq->resid_len = len; 1338 rq->nr_phys_segments = 1; 1339 rq->buffer = bio_data(bio); 1340 } 1341 EXPORT_SYMBOL_GPL(blk_add_request_payload); 1342 1343 static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, 1344 struct bio *bio) 1345 { 1346 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1347 1348 if (!ll_back_merge_fn(q, req, bio)) 1349 return false; 1350 1351 trace_block_bio_backmerge(q, req, bio); 1352 1353 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1354 blk_rq_set_mixed_merge(req); 1355 1356 req->biotail->bi_next = bio; 1357 req->biotail = bio; 1358 req->__data_len += bio->bi_size; 1359 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1360 1361 drive_stat_acct(req, 0); 1362 return true; 1363 } 1364 1365 static bool bio_attempt_front_merge(struct request_queue *q, 1366 struct request *req, struct bio *bio) 1367 { 1368 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1369 1370 if (!ll_front_merge_fn(q, req, bio)) 1371 return false; 1372 1373 trace_block_bio_frontmerge(q, req, bio); 1374 1375 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1376 blk_rq_set_mixed_merge(req); 1377 1378 bio->bi_next = req->bio; 1379 req->bio = bio; 1380 1381 /* 1382 * may not be valid. if the low level driver said 1383 * it didn't need a bounce buffer then it better 1384 * not touch req->buffer either... 1385 */ 1386 req->buffer = bio_data(bio); 1387 req->__sector = bio->bi_sector; 1388 req->__data_len += bio->bi_size; 1389 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1390 1391 drive_stat_acct(req, 0); 1392 return true; 1393 } 1394 1395 /** 1396 * attempt_plug_merge - try to merge with %current's plugged list 1397 * @q: request_queue new bio is being queued at 1398 * @bio: new bio being queued 1399 * @request_count: out parameter for number of traversed plugged requests 1400 * 1401 * Determine whether @bio being queued on @q can be merged with a request 1402 * on %current's plugged list. Returns %true if merge was successful, 1403 * otherwise %false. 1404 * 1405 * Plugging coalesces IOs from the same issuer for the same purpose without 1406 * going through @q->queue_lock. As such it's more of an issuing mechanism 1407 * than scheduling, and the request, while may have elvpriv data, is not 1408 * added on the elevator at this point. In addition, we don't have 1409 * reliable access to the elevator outside queue lock. Only check basic 1410 * merging parameters without querying the elevator. 
1411 */ 1412 static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, 1413 unsigned int *request_count) 1414 { 1415 struct blk_plug *plug; 1416 struct request *rq; 1417 bool ret = false; 1418 1419 plug = current->plug; 1420 if (!plug) 1421 goto out; 1422 *request_count = 0; 1423 1424 list_for_each_entry_reverse(rq, &plug->list, queuelist) { 1425 int el_ret; 1426 1427 if (rq->q == q) 1428 (*request_count)++; 1429 1430 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) 1431 continue; 1432 1433 el_ret = blk_try_merge(rq, bio); 1434 if (el_ret == ELEVATOR_BACK_MERGE) { 1435 ret = bio_attempt_back_merge(q, rq, bio); 1436 if (ret) 1437 break; 1438 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1439 ret = bio_attempt_front_merge(q, rq, bio); 1440 if (ret) 1441 break; 1442 } 1443 } 1444 out: 1445 return ret; 1446 } 1447 1448 void init_request_from_bio(struct request *req, struct bio *bio) 1449 { 1450 req->cmd_type = REQ_TYPE_FS; 1451 1452 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; 1453 if (bio->bi_rw & REQ_RAHEAD) 1454 req->cmd_flags |= REQ_FAILFAST_MASK; 1455 1456 req->errors = 0; 1457 req->__sector = bio->bi_sector; 1458 req->ioprio = bio_prio(bio); 1459 blk_rq_bio_prep(req->q, req, bio); 1460 } 1461 1462 void blk_queue_bio(struct request_queue *q, struct bio *bio) 1463 { 1464 const bool sync = !!(bio->bi_rw & REQ_SYNC); 1465 struct blk_plug *plug; 1466 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; 1467 struct request *req; 1468 unsigned int request_count = 0; 1469 1470 /* 1471 * low level driver can indicate that it wants pages above a 1472 * certain limit bounced to low memory (ie for highmem, or even 1473 * ISA dma in theory) 1474 */ 1475 blk_queue_bounce(q, &bio); 1476 1477 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1478 bio_endio(bio, -EIO); 1479 return; 1480 } 1481 1482 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { 1483 spin_lock_irq(q->queue_lock); 1484 where = ELEVATOR_INSERT_FLUSH; 1485 goto get_rq; 1486 } 1487 1488 /* 1489 * Check if we can merge with the plugged list before grabbing 1490 * any locks. 1491 */ 1492 if (attempt_plug_merge(q, bio, &request_count)) 1493 return; 1494 1495 spin_lock_irq(q->queue_lock); 1496 1497 el_ret = elv_merge(q, &req, bio); 1498 if (el_ret == ELEVATOR_BACK_MERGE) { 1499 if (bio_attempt_back_merge(q, req, bio)) { 1500 elv_bio_merged(q, req, bio); 1501 if (!attempt_back_merge(q, req)) 1502 elv_merged_request(q, req, el_ret); 1503 goto out_unlock; 1504 } 1505 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1506 if (bio_attempt_front_merge(q, req, bio)) { 1507 elv_bio_merged(q, req, bio); 1508 if (!attempt_front_merge(q, req)) 1509 elv_merged_request(q, req, el_ret); 1510 goto out_unlock; 1511 } 1512 } 1513 1514 get_rq: 1515 /* 1516 * This sync check and mask will be re-done in init_request_from_bio(), 1517 * but we need to set it earlier to expose the sync flag to the 1518 * rq allocator and io schedulers. 1519 */ 1520 rw_flags = bio_data_dir(bio); 1521 if (sync) 1522 rw_flags |= REQ_SYNC; 1523 1524 /* 1525 * Grab a free request. This is might sleep but can not fail. 1526 * Returns with the queue unlocked. 1527 */ 1528 req = get_request(q, rw_flags, bio, GFP_NOIO); 1529 if (unlikely(!req)) { 1530 bio_endio(bio, -ENODEV); /* @q is dead */ 1531 goto out_unlock; 1532 } 1533 1534 /* 1535 * After dropping the lock and possibly sleeping here, our request 1536 * may now be mergeable after it had proven unmergeable (above). 1537 * We don't worry about that case for efficiency. 
It won't happen 1538 * often, and the elevators are able to handle it. 1539 */ 1540 init_request_from_bio(req, bio); 1541 1542 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) 1543 req->cpu = raw_smp_processor_id(); 1544 1545 plug = current->plug; 1546 if (plug) { 1547 /* 1548 * If this is the first request added after a plug, fire 1549 * of a plug trace. If others have been added before, check 1550 * if we have multiple devices in this plug. If so, make a 1551 * note to sort the list before dispatch. 1552 */ 1553 if (list_empty(&plug->list)) 1554 trace_block_plug(q); 1555 else { 1556 if (request_count >= BLK_MAX_REQUEST_COUNT) { 1557 blk_flush_plug_list(plug, false); 1558 trace_block_plug(q); 1559 } 1560 } 1561 list_add_tail(&req->queuelist, &plug->list); 1562 drive_stat_acct(req, 1); 1563 } else { 1564 spin_lock_irq(q->queue_lock); 1565 add_acct_request(q, req, where); 1566 __blk_run_queue(q); 1567 out_unlock: 1568 spin_unlock_irq(q->queue_lock); 1569 } 1570 } 1571 EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */ 1572 1573 /* 1574 * If bio->bi_dev is a partition, remap the location 1575 */ 1576 static inline void blk_partition_remap(struct bio *bio) 1577 { 1578 struct block_device *bdev = bio->bi_bdev; 1579 1580 if (bio_sectors(bio) && bdev != bdev->bd_contains) { 1581 struct hd_struct *p = bdev->bd_part; 1582 1583 bio->bi_sector += p->start_sect; 1584 bio->bi_bdev = bdev->bd_contains; 1585 1586 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1587 bdev->bd_dev, 1588 bio->bi_sector - p->start_sect); 1589 } 1590 } 1591 1592 static void handle_bad_sector(struct bio *bio) 1593 { 1594 char b[BDEVNAME_SIZE]; 1595 1596 printk(KERN_INFO "attempt to access beyond end of device\n"); 1597 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", 1598 bdevname(bio->bi_bdev, b), 1599 bio->bi_rw, 1600 (unsigned long long)bio->bi_sector + bio_sectors(bio), 1601 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); 1602 1603 set_bit(BIO_EOF, &bio->bi_flags); 1604 } 1605 1606 #ifdef CONFIG_FAIL_MAKE_REQUEST 1607 1608 static DECLARE_FAULT_ATTR(fail_make_request); 1609 1610 static int __init setup_fail_make_request(char *str) 1611 { 1612 return setup_fault_attr(&fail_make_request, str); 1613 } 1614 __setup("fail_make_request=", setup_fail_make_request); 1615 1616 static bool should_fail_request(struct hd_struct *part, unsigned int bytes) 1617 { 1618 return part->make_it_fail && should_fail(&fail_make_request, bytes); 1619 } 1620 1621 static int __init fail_make_request_debugfs(void) 1622 { 1623 struct dentry *dir = fault_create_debugfs_attr("fail_make_request", 1624 NULL, &fail_make_request); 1625 1626 return IS_ERR(dir) ? PTR_ERR(dir) : 0; 1627 } 1628 1629 late_initcall(fail_make_request_debugfs); 1630 1631 #else /* CONFIG_FAIL_MAKE_REQUEST */ 1632 1633 static inline bool should_fail_request(struct hd_struct *part, 1634 unsigned int bytes) 1635 { 1636 return false; 1637 } 1638 1639 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 1640 1641 /* 1642 * Check whether this bio extends beyond the end of the device. 1643 */ 1644 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) 1645 { 1646 sector_t maxsector; 1647 1648 if (!nr_sectors) 1649 return 0; 1650 1651 /* Test device or partition size, when known. 
*/ 1652 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1653 if (maxsector) { 1654 sector_t sector = bio->bi_sector; 1655 1656 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 1657 /* 1658 * This may well happen - the kernel calls bread() 1659 * without checking the size of the device, e.g., when 1660 * mounting a device. 1661 */ 1662 handle_bad_sector(bio); 1663 return 1; 1664 } 1665 } 1666 1667 return 0; 1668 } 1669 1670 static noinline_for_stack bool 1671 generic_make_request_checks(struct bio *bio) 1672 { 1673 struct request_queue *q; 1674 int nr_sectors = bio_sectors(bio); 1675 int err = -EIO; 1676 char b[BDEVNAME_SIZE]; 1677 struct hd_struct *part; 1678 1679 might_sleep(); 1680 1681 if (bio_check_eod(bio, nr_sectors)) 1682 goto end_io; 1683 1684 q = bdev_get_queue(bio->bi_bdev); 1685 if (unlikely(!q)) { 1686 printk(KERN_ERR 1687 "generic_make_request: Trying to access " 1688 "nonexistent block-device %s (%Lu)\n", 1689 bdevname(bio->bi_bdev, b), 1690 (long long) bio->bi_sector); 1691 goto end_io; 1692 } 1693 1694 if (likely(bio_is_rw(bio) && 1695 nr_sectors > queue_max_hw_sectors(q))) { 1696 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1697 bdevname(bio->bi_bdev, b), 1698 bio_sectors(bio), 1699 queue_max_hw_sectors(q)); 1700 goto end_io; 1701 } 1702 1703 part = bio->bi_bdev->bd_part; 1704 if (should_fail_request(part, bio->bi_size) || 1705 should_fail_request(&part_to_disk(part)->part0, 1706 bio->bi_size)) 1707 goto end_io; 1708 1709 /* 1710 * If this device has partitions, remap block n 1711 * of partition p to block n+start(p) of the disk. 1712 */ 1713 blk_partition_remap(bio); 1714 1715 if (bio_check_eod(bio, nr_sectors)) 1716 goto end_io; 1717 1718 /* 1719 * Filter flush bio's early so that make_request based 1720 * drivers without flush support don't have to worry 1721 * about them. 1722 */ 1723 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { 1724 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); 1725 if (!nr_sectors) { 1726 err = 0; 1727 goto end_io; 1728 } 1729 } 1730 1731 if ((bio->bi_rw & REQ_DISCARD) && 1732 (!blk_queue_discard(q) || 1733 ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { 1734 err = -EOPNOTSUPP; 1735 goto end_io; 1736 } 1737 1738 if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) { 1739 err = -EOPNOTSUPP; 1740 goto end_io; 1741 } 1742 1743 /* 1744 * Various block parts want %current->io_context and lazy ioc 1745 * allocation ends up trading a lot of pain for a small amount of 1746 * memory. Just allocate it upfront. This may fail and block 1747 * layer knows how to live with it. 1748 */ 1749 create_io_context(GFP_ATOMIC, q->node); 1750 1751 if (blk_throtl_bio(q, bio)) 1752 return false; /* throttled, will be resubmitted later */ 1753 1754 trace_block_bio_queue(q, bio); 1755 return true; 1756 1757 end_io: 1758 bio_endio(bio, err); 1759 return false; 1760 } 1761 1762 /** 1763 * generic_make_request - hand a buffer to its device driver for I/O 1764 * @bio: The bio describing the location in memory and on the device. 1765 * 1766 * generic_make_request() is used to make I/O requests of block 1767 * devices. It is passed a &struct bio, which describes the I/O that needs 1768 * to be done. 1769 * 1770 * generic_make_request() does not return any status. The 1771 * success/failure status of the request, along with notification of 1772 * completion, is delivered asynchronously through the bio->bi_end_io 1773 * function described (one day) else where. 
1774 * 1775 * The caller of generic_make_request must make sure that bi_io_vec 1776 * are set to describe the memory buffer, and that bi_dev and bi_sector are 1777 * set to describe the device address, and the 1778 * bi_end_io and optionally bi_private are set to describe how 1779 * completion notification should be signaled. 1780 * 1781 * generic_make_request and the drivers it calls may use bi_next if this 1782 * bio happens to be merged with someone else, and may resubmit the bio to 1783 * a lower device by calling into generic_make_request recursively, which 1784 * means the bio should NOT be touched after the call to ->make_request_fn. 1785 */ 1786 void generic_make_request(struct bio *bio) 1787 { 1788 struct bio_list bio_list_on_stack; 1789 1790 if (!generic_make_request_checks(bio)) 1791 return; 1792 1793 /* 1794 * We only want one ->make_request_fn to be active at a time, else 1795 * stack usage with stacked devices could be a problem. So use 1796 * current->bio_list to keep a list of requests submited by a 1797 * make_request_fn function. current->bio_list is also used as a 1798 * flag to say if generic_make_request is currently active in this 1799 * task or not. If it is NULL, then no make_request is active. If 1800 * it is non-NULL, then a make_request is active, and new requests 1801 * should be added at the tail 1802 */ 1803 if (current->bio_list) { 1804 bio_list_add(current->bio_list, bio); 1805 return; 1806 } 1807 1808 /* following loop may be a bit non-obvious, and so deserves some 1809 * explanation. 1810 * Before entering the loop, bio->bi_next is NULL (as all callers 1811 * ensure that) so we have a list with a single bio. 1812 * We pretend that we have just taken it off a longer list, so 1813 * we assign bio_list to a pointer to the bio_list_on_stack, 1814 * thus initialising the bio_list of new bios to be 1815 * added. ->make_request() may indeed add some more bios 1816 * through a recursive call to generic_make_request. If it 1817 * did, we find a non-NULL value in bio_list and re-enter the loop 1818 * from the top. In this case we really did just take the bio 1819 * of the top of the list (no pretending) and so remove it from 1820 * bio_list, and call into ->make_request() again. 1821 */ 1822 BUG_ON(bio->bi_next); 1823 bio_list_init(&bio_list_on_stack); 1824 current->bio_list = &bio_list_on_stack; 1825 do { 1826 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 1827 1828 q->make_request_fn(q, bio); 1829 1830 bio = bio_list_pop(current->bio_list); 1831 } while (bio); 1832 current->bio_list = NULL; /* deactivate */ 1833 } 1834 EXPORT_SYMBOL(generic_make_request); 1835 1836 /** 1837 * submit_bio - submit a bio to the block device layer for I/O 1838 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) 1839 * @bio: The &struct bio which describes the I/O 1840 * 1841 * submit_bio() is very similar in purpose to generic_make_request(), and 1842 * uses that function to do most of the work. Both are fairly rough 1843 * interfaces; @bio must be presetup and ready for I/O. 1844 * 1845 */ 1846 void submit_bio(int rw, struct bio *bio) 1847 { 1848 bio->bi_rw |= rw; 1849 1850 /* 1851 * If it's a regular read/write or a barrier with data attached, 1852 * go through the normal accounting stuff before submission. 
1853 	 */
1854 	if (bio_has_data(bio)) {
1855 		unsigned int count;
1856 
1857 		if (unlikely(rw & REQ_WRITE_SAME))
1858 			count = bdev_logical_block_size(bio->bi_bdev) >> 9;
1859 		else
1860 			count = bio_sectors(bio);
1861 
1862 		if (rw & WRITE) {
1863 			count_vm_events(PGPGOUT, count);
1864 		} else {
1865 			task_io_account_read(bio->bi_size);
1866 			count_vm_events(PGPGIN, count);
1867 		}
1868 
1869 		if (unlikely(block_dump)) {
1870 			char b[BDEVNAME_SIZE];
1871 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1872 				current->comm, task_pid_nr(current),
1873 				(rw & WRITE) ? "WRITE" : "READ",
1874 				(unsigned long long)bio->bi_sector,
1875 				bdevname(bio->bi_bdev, b),
1876 				count);
1877 		}
1878 	}
1879 
1880 	generic_make_request(bio);
1881 }
1882 EXPORT_SYMBOL(submit_bio);
1883 
1884 /**
1885  * blk_rq_check_limits - Helper function to check a request for the queue limit
1886  * @q: the queue
1887  * @rq: the request being checked
1888  *
1889  * Description:
1890  *    @rq may have been made based on weaker limitations of upper-level queues
1891  *    in request stacking drivers, and it may violate the limitation of @q.
1892  *    Since the block layer and the underlying device driver trust @rq
1893  *    after it is inserted into @q, it should be checked against @q before
1894  *    the insertion using this generic function.
1895  *
1896  *    This function is also useful for request stacking drivers in the
1897  *    cases below, so it is exported.
1898  *    Request stacking drivers like request-based dm may change the queue
1899  *    limits while requests are in the queue (e.g. dm's table swapping).
1900  *    Such request stacking drivers should check those requests against
1901  *    the new queue limits again when they dispatch those requests,
1902  *    although such checks are also done against the old queue limits
1903  *    when submitting requests.
1904  */
1905 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1906 {
1907 	if (!rq_mergeable(rq))
1908 		return 0;
1909 
1910 	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
1911 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
1912 		return -EIO;
1913 	}
1914 
1915 	/*
1916 	 * The queue's settings related to segment counting, like q->bounce_pfn,
1917 	 * may differ from those of other stacking queues.
1918 	 * Recalculate it to check the request correctly on this queue's
1919 	 * limitation.
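	 *
	 * As an illustrative example: a request built against an upper
	 * queue that allows, say, 256 segments may still exceed a lower
	 * queue whose queue_max_segments(q) is 128; recounting the
	 * segments below lets us return -EIO here instead of dispatching
	 * an oversized request to the driver.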
1920 */ 1921 blk_recalc_rq_segments(rq); 1922 if (rq->nr_phys_segments > queue_max_segments(q)) { 1923 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 1924 return -EIO; 1925 } 1926 1927 return 0; 1928 } 1929 EXPORT_SYMBOL_GPL(blk_rq_check_limits); 1930 1931 /** 1932 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 1933 * @q: the queue to submit the request 1934 * @rq: the request being queued 1935 */ 1936 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 1937 { 1938 unsigned long flags; 1939 int where = ELEVATOR_INSERT_BACK; 1940 1941 if (blk_rq_check_limits(q, rq)) 1942 return -EIO; 1943 1944 if (rq->rq_disk && 1945 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 1946 return -EIO; 1947 1948 spin_lock_irqsave(q->queue_lock, flags); 1949 if (unlikely(blk_queue_dying(q))) { 1950 spin_unlock_irqrestore(q->queue_lock, flags); 1951 return -ENODEV; 1952 } 1953 1954 /* 1955 * Submitting request must be dequeued before calling this function 1956 * because it will be linked to another request_queue 1957 */ 1958 BUG_ON(blk_queued_rq(rq)); 1959 1960 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) 1961 where = ELEVATOR_INSERT_FLUSH; 1962 1963 add_acct_request(q, rq, where); 1964 if (where == ELEVATOR_INSERT_FLUSH) 1965 __blk_run_queue(q); 1966 spin_unlock_irqrestore(q->queue_lock, flags); 1967 1968 return 0; 1969 } 1970 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 1971 1972 /** 1973 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 1974 * @rq: request to examine 1975 * 1976 * Description: 1977 * A request could be merge of IOs which require different failure 1978 * handling. This function determines the number of bytes which 1979 * can be failed from the beginning of the request without 1980 * crossing into area which need to be retried further. 1981 * 1982 * Return: 1983 * The number of bytes to fail. 1984 * 1985 * Context: 1986 * queue_lock must be held. 1987 */ 1988 unsigned int blk_rq_err_bytes(const struct request *rq) 1989 { 1990 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 1991 unsigned int bytes = 0; 1992 struct bio *bio; 1993 1994 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) 1995 return blk_rq_bytes(rq); 1996 1997 /* 1998 * Currently the only 'mixing' which can happen is between 1999 * different fastfail types. We can safely fail portions 2000 * which have all the failfast bits that the first one has - 2001 * the ones which are at least as eager to fail as the first 2002 * one. 2003 */ 2004 for (bio = rq->bio; bio; bio = bio->bi_next) { 2005 if ((bio->bi_rw & ff) != ff) 2006 break; 2007 bytes += bio->bi_size; 2008 } 2009 2010 /* this could lead to infinite loop */ 2011 BUG_ON(blk_rq_bytes(rq) && !bytes); 2012 return bytes; 2013 } 2014 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 2015 2016 static void blk_account_io_completion(struct request *req, unsigned int bytes) 2017 { 2018 if (blk_do_io_stat(req)) { 2019 const int rw = rq_data_dir(req); 2020 struct hd_struct *part; 2021 int cpu; 2022 2023 cpu = part_stat_lock(); 2024 part = req->part; 2025 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 2026 part_stat_unlock(); 2027 } 2028 } 2029 2030 static void blk_account_io_done(struct request *req) 2031 { 2032 /* 2033 * Account IO completion. flush_rq isn't accounted as a 2034 * normal IO on queueing nor completion. Accounting the 2035 * containing request is enough. 
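	 *
	 * As a worked example (illustrative numbers): a 512 KiB READ that
	 * completes 12 jiffies after req->start_time adds 1024 to the
	 * partition's sectors[READ] (in blk_account_io_completion()) and,
	 * below, 1 to ios[READ] and 12 to ticks[READ] before the in-flight
	 * count is dropped.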
2036 */ 2037 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { 2038 unsigned long duration = jiffies - req->start_time; 2039 const int rw = rq_data_dir(req); 2040 struct hd_struct *part; 2041 int cpu; 2042 2043 cpu = part_stat_lock(); 2044 part = req->part; 2045 2046 part_stat_inc(cpu, part, ios[rw]); 2047 part_stat_add(cpu, part, ticks[rw], duration); 2048 part_round_stats(cpu, part); 2049 part_dec_in_flight(part, rw); 2050 2051 hd_struct_put(part); 2052 part_stat_unlock(); 2053 } 2054 } 2055 2056 /** 2057 * blk_peek_request - peek at the top of a request queue 2058 * @q: request queue to peek at 2059 * 2060 * Description: 2061 * Return the request at the top of @q. The returned request 2062 * should be started using blk_start_request() before LLD starts 2063 * processing it. 2064 * 2065 * Return: 2066 * Pointer to the request at the top of @q if available. Null 2067 * otherwise. 2068 * 2069 * Context: 2070 * queue_lock must be held. 2071 */ 2072 struct request *blk_peek_request(struct request_queue *q) 2073 { 2074 struct request *rq; 2075 int ret; 2076 2077 while ((rq = __elv_next_request(q)) != NULL) { 2078 if (!(rq->cmd_flags & REQ_STARTED)) { 2079 /* 2080 * This is the first time the device driver 2081 * sees this request (possibly after 2082 * requeueing). Notify IO scheduler. 2083 */ 2084 if (rq->cmd_flags & REQ_SORTED) 2085 elv_activate_rq(q, rq); 2086 2087 /* 2088 * just mark as started even if we don't start 2089 * it, a request that has been delayed should 2090 * not be passed by new incoming requests 2091 */ 2092 rq->cmd_flags |= REQ_STARTED; 2093 trace_block_rq_issue(q, rq); 2094 } 2095 2096 if (!q->boundary_rq || q->boundary_rq == rq) { 2097 q->end_sector = rq_end_sector(rq); 2098 q->boundary_rq = NULL; 2099 } 2100 2101 if (rq->cmd_flags & REQ_DONTPREP) 2102 break; 2103 2104 if (q->dma_drain_size && blk_rq_bytes(rq)) { 2105 /* 2106 * make sure space for the drain appears we 2107 * know we can do this because max_hw_segments 2108 * has been adjusted to be one fewer than the 2109 * device can handle 2110 */ 2111 rq->nr_phys_segments++; 2112 } 2113 2114 if (!q->prep_rq_fn) 2115 break; 2116 2117 ret = q->prep_rq_fn(q, rq); 2118 if (ret == BLKPREP_OK) { 2119 break; 2120 } else if (ret == BLKPREP_DEFER) { 2121 /* 2122 * the request may have been (partially) prepped. 2123 * we need to keep this request in the front to 2124 * avoid resource deadlock. REQ_STARTED will 2125 * prevent other fs requests from passing this one. 2126 */ 2127 if (q->dma_drain_size && blk_rq_bytes(rq) && 2128 !(rq->cmd_flags & REQ_DONTPREP)) { 2129 /* 2130 * remove the space for the drain we added 2131 * so that we don't add it again 2132 */ 2133 --rq->nr_phys_segments; 2134 } 2135 2136 rq = NULL; 2137 break; 2138 } else if (ret == BLKPREP_KILL) { 2139 rq->cmd_flags |= REQ_QUIET; 2140 /* 2141 * Mark this request as started so we don't trigger 2142 * any debug logic in the end I/O path. 
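			 *
			 * (The start/fail pair below mirrors the usual driver
			 * dispatch pattern, sketched here with hypothetical
			 * helpers:
			 *
			 *	while ((rq = blk_peek_request(q)) != NULL) {
			 *		if (!my_can_dispatch(q, rq))
			 *			break;
			 *		blk_start_request(rq);
			 *		my_dispatch(q, rq);
			 *	}
			 *
			 * except that here the request is failed immediately.)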
2143 */ 2144 blk_start_request(rq); 2145 __blk_end_request_all(rq, -EIO); 2146 } else { 2147 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 2148 break; 2149 } 2150 } 2151 2152 return rq; 2153 } 2154 EXPORT_SYMBOL(blk_peek_request); 2155 2156 void blk_dequeue_request(struct request *rq) 2157 { 2158 struct request_queue *q = rq->q; 2159 2160 BUG_ON(list_empty(&rq->queuelist)); 2161 BUG_ON(ELV_ON_HASH(rq)); 2162 2163 list_del_init(&rq->queuelist); 2164 2165 /* 2166 * the time frame between a request being removed from the lists 2167 * and to it is freed is accounted as io that is in progress at 2168 * the driver side. 2169 */ 2170 if (blk_account_rq(rq)) { 2171 q->in_flight[rq_is_sync(rq)]++; 2172 set_io_start_time_ns(rq); 2173 } 2174 } 2175 2176 /** 2177 * blk_start_request - start request processing on the driver 2178 * @req: request to dequeue 2179 * 2180 * Description: 2181 * Dequeue @req and start timeout timer on it. This hands off the 2182 * request to the driver. 2183 * 2184 * Block internal functions which don't want to start timer should 2185 * call blk_dequeue_request(). 2186 * 2187 * Context: 2188 * queue_lock must be held. 2189 */ 2190 void blk_start_request(struct request *req) 2191 { 2192 blk_dequeue_request(req); 2193 2194 /* 2195 * We are now handing the request to the hardware, initialize 2196 * resid_len to full count and add the timeout handler. 2197 */ 2198 req->resid_len = blk_rq_bytes(req); 2199 if (unlikely(blk_bidi_rq(req))) 2200 req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 2201 2202 blk_add_timer(req); 2203 } 2204 EXPORT_SYMBOL(blk_start_request); 2205 2206 /** 2207 * blk_fetch_request - fetch a request from a request queue 2208 * @q: request queue to fetch a request from 2209 * 2210 * Description: 2211 * Return the request at the top of @q. The request is started on 2212 * return and LLD can start processing it immediately. 2213 * 2214 * Return: 2215 * Pointer to the request at the top of @q if available. Null 2216 * otherwise. 2217 * 2218 * Context: 2219 * queue_lock must be held. 2220 */ 2221 struct request *blk_fetch_request(struct request_queue *q) 2222 { 2223 struct request *rq; 2224 2225 rq = blk_peek_request(q); 2226 if (rq) 2227 blk_start_request(rq); 2228 return rq; 2229 } 2230 EXPORT_SYMBOL(blk_fetch_request); 2231 2232 /** 2233 * blk_update_request - Special helper function for request stacking drivers 2234 * @req: the request being processed 2235 * @error: %0 for success, < %0 for error 2236 * @nr_bytes: number of bytes to complete @req 2237 * 2238 * Description: 2239 * Ends I/O on a number of bytes attached to @req, but doesn't complete 2240 * the request structure even if @req doesn't have leftover. 2241 * If @req has leftover, sets it up for the next range of segments. 2242 * 2243 * This special helper function is only for request stacking drivers 2244 * (e.g. request-based dm) so that they can handle partial completion. 2245 * Actual device drivers should use blk_end_request instead. 2246 * 2247 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2248 * %false return from this function. 
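 *
 *     Illustrative use by a request stacking driver (sketch only; "clone"
 *     and "done_bytes" are hypothetical):
 *
 *	if (blk_update_request(clone, error, done_bytes))
 *		return;		/* partial completion, more segments remain */
 *	/* nothing left in clone; finish the request under the queue lock */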
2249 * 2250 * Return: 2251 * %false - this request doesn't have any more data 2252 * %true - this request has more data 2253 **/ 2254 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 2255 { 2256 int total_bytes, bio_nbytes, next_idx = 0; 2257 struct bio *bio; 2258 2259 if (!req->bio) 2260 return false; 2261 2262 trace_block_rq_complete(req->q, req); 2263 2264 /* 2265 * For fs requests, rq is just carrier of independent bio's 2266 * and each partial completion should be handled separately. 2267 * Reset per-request error on each partial completion. 2268 * 2269 * TODO: tj: This is too subtle. It would be better to let 2270 * low level drivers do what they see fit. 2271 */ 2272 if (req->cmd_type == REQ_TYPE_FS) 2273 req->errors = 0; 2274 2275 if (error && req->cmd_type == REQ_TYPE_FS && 2276 !(req->cmd_flags & REQ_QUIET)) { 2277 char *error_type; 2278 2279 switch (error) { 2280 case -ENOLINK: 2281 error_type = "recoverable transport"; 2282 break; 2283 case -EREMOTEIO: 2284 error_type = "critical target"; 2285 break; 2286 case -EBADE: 2287 error_type = "critical nexus"; 2288 break; 2289 case -EIO: 2290 default: 2291 error_type = "I/O"; 2292 break; 2293 } 2294 printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", 2295 error_type, req->rq_disk ? 2296 req->rq_disk->disk_name : "?", 2297 (unsigned long long)blk_rq_pos(req)); 2298 2299 } 2300 2301 blk_account_io_completion(req, nr_bytes); 2302 2303 total_bytes = bio_nbytes = 0; 2304 while ((bio = req->bio) != NULL) { 2305 int nbytes; 2306 2307 if (nr_bytes >= bio->bi_size) { 2308 req->bio = bio->bi_next; 2309 nbytes = bio->bi_size; 2310 req_bio_endio(req, bio, nbytes, error); 2311 next_idx = 0; 2312 bio_nbytes = 0; 2313 } else { 2314 int idx = bio->bi_idx + next_idx; 2315 2316 if (unlikely(idx >= bio->bi_vcnt)) { 2317 blk_dump_rq_flags(req, "__end_that"); 2318 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", 2319 __func__, idx, bio->bi_vcnt); 2320 break; 2321 } 2322 2323 nbytes = bio_iovec_idx(bio, idx)->bv_len; 2324 BIO_BUG_ON(nbytes > bio->bi_size); 2325 2326 /* 2327 * not a complete bvec done 2328 */ 2329 if (unlikely(nbytes > nr_bytes)) { 2330 bio_nbytes += nr_bytes; 2331 total_bytes += nr_bytes; 2332 break; 2333 } 2334 2335 /* 2336 * advance to the next vector 2337 */ 2338 next_idx++; 2339 bio_nbytes += nbytes; 2340 } 2341 2342 total_bytes += nbytes; 2343 nr_bytes -= nbytes; 2344 2345 bio = req->bio; 2346 if (bio) { 2347 /* 2348 * end more in this run, or just return 'not-done' 2349 */ 2350 if (unlikely(nr_bytes <= 0)) 2351 break; 2352 } 2353 } 2354 2355 /* 2356 * completely done 2357 */ 2358 if (!req->bio) { 2359 /* 2360 * Reset counters so that the request stacking driver 2361 * can find how many bytes remain in the request 2362 * later. 
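		 * (e.g. after blk_update_request(rq, 0, blk_rq_bytes(rq)) the
		 * caller will see blk_rq_bytes(rq) == 0 here, since __data_len
		 * is cleared below.)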
2363 */ 2364 req->__data_len = 0; 2365 return false; 2366 } 2367 2368 /* 2369 * if the request wasn't completed, update state 2370 */ 2371 if (bio_nbytes) { 2372 req_bio_endio(req, bio, bio_nbytes, error); 2373 bio->bi_idx += next_idx; 2374 bio_iovec(bio)->bv_offset += nr_bytes; 2375 bio_iovec(bio)->bv_len -= nr_bytes; 2376 } 2377 2378 req->__data_len -= total_bytes; 2379 req->buffer = bio_data(req->bio); 2380 2381 /* update sector only for requests with clear definition of sector */ 2382 if (req->cmd_type == REQ_TYPE_FS) 2383 req->__sector += total_bytes >> 9; 2384 2385 /* mixed attributes always follow the first bio */ 2386 if (req->cmd_flags & REQ_MIXED_MERGE) { 2387 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2388 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; 2389 } 2390 2391 /* 2392 * If total number of sectors is less than the first segment 2393 * size, something has gone terribly wrong. 2394 */ 2395 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2396 blk_dump_rq_flags(req, "request botched"); 2397 req->__data_len = blk_rq_cur_bytes(req); 2398 } 2399 2400 /* recalculate the number of segments */ 2401 blk_recalc_rq_segments(req); 2402 2403 return true; 2404 } 2405 EXPORT_SYMBOL_GPL(blk_update_request); 2406 2407 static bool blk_update_bidi_request(struct request *rq, int error, 2408 unsigned int nr_bytes, 2409 unsigned int bidi_bytes) 2410 { 2411 if (blk_update_request(rq, error, nr_bytes)) 2412 return true; 2413 2414 /* Bidi request must be completed as a whole */ 2415 if (unlikely(blk_bidi_rq(rq)) && 2416 blk_update_request(rq->next_rq, error, bidi_bytes)) 2417 return true; 2418 2419 if (blk_queue_add_random(rq->q)) 2420 add_disk_randomness(rq->rq_disk); 2421 2422 return false; 2423 } 2424 2425 /** 2426 * blk_unprep_request - unprepare a request 2427 * @req: the request 2428 * 2429 * This function makes a request ready for complete resubmission (or 2430 * completion). It happens only after all error handling is complete, 2431 * so represents the appropriate moment to deallocate any resources 2432 * that were allocated to the request in the prep_rq_fn. The queue 2433 * lock is held when calling this. 2434 */ 2435 void blk_unprep_request(struct request *req) 2436 { 2437 struct request_queue *q = req->q; 2438 2439 req->cmd_flags &= ~REQ_DONTPREP; 2440 if (q->unprep_rq_fn) 2441 q->unprep_rq_fn(q, req); 2442 } 2443 EXPORT_SYMBOL_GPL(blk_unprep_request); 2444 2445 /* 2446 * queue lock must be held 2447 */ 2448 static void blk_finish_request(struct request *req, int error) 2449 { 2450 if (blk_rq_tagged(req)) 2451 blk_queue_end_tag(req->q, req); 2452 2453 BUG_ON(blk_queued_rq(req)); 2454 2455 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2456 laptop_io_completion(&req->q->backing_dev_info); 2457 2458 blk_delete_timer(req); 2459 2460 if (req->cmd_flags & REQ_DONTPREP) 2461 blk_unprep_request(req); 2462 2463 2464 blk_account_io_done(req); 2465 2466 if (req->end_io) 2467 req->end_io(req, error); 2468 else { 2469 if (blk_bidi_rq(req)) 2470 __blk_put_request(req->next_rq->q, req->next_rq); 2471 2472 __blk_put_request(req->q, req); 2473 } 2474 } 2475 2476 /** 2477 * blk_end_bidi_request - Complete a bidi request 2478 * @rq: the request to complete 2479 * @error: %0 for success, < %0 for error 2480 * @nr_bytes: number of bytes to complete @rq 2481 * @bidi_bytes: number of bytes to complete @rq->next_rq 2482 * 2483 * Description: 2484 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 
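 *     (For a uni-directional request this reduces to the plain helpers
 *     exported below: e.g. blk_end_request(rq, error, nr_bytes) is simply
 *     blk_end_bidi_request(rq, error, nr_bytes, 0).)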
2485  *     Drivers that support bidi can safely call this function for any
2486  *     type of request, bidi or uni. In the latter case @bidi_bytes is
2487  *     just ignored.
2488  *
2489  * Return:
2490  *     %false - we are done with this request
2491  *     %true  - still buffers pending for this request
2492  **/
2493 static bool blk_end_bidi_request(struct request *rq, int error,
2494 				 unsigned int nr_bytes, unsigned int bidi_bytes)
2495 {
2496 	struct request_queue *q = rq->q;
2497 	unsigned long flags;
2498 
2499 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2500 		return true;
2501 
2502 	spin_lock_irqsave(q->queue_lock, flags);
2503 	blk_finish_request(rq, error);
2504 	spin_unlock_irqrestore(q->queue_lock, flags);
2505 
2506 	return false;
2507 }
2508 
2509 /**
2510  * __blk_end_bidi_request - Complete a bidi request with queue lock held
2511  * @rq: the request to complete
2512  * @error: %0 for success, < %0 for error
2513  * @nr_bytes: number of bytes to complete @rq
2514  * @bidi_bytes: number of bytes to complete @rq->next_rq
2515  *
2516  * Description:
2517  *     Identical to blk_end_bidi_request() except that queue lock is
2518  *     assumed to be locked on entry and remains so on return.
2519  *
2520  * Return:
2521  *     %false - we are done with this request
2522  *     %true  - still buffers pending for this request
2523  **/
2524 bool __blk_end_bidi_request(struct request *rq, int error,
2525 			    unsigned int nr_bytes, unsigned int bidi_bytes)
2526 {
2527 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2528 		return true;
2529 
2530 	blk_finish_request(rq, error);
2531 
2532 	return false;
2533 }
2534 
2535 /**
2536  * blk_end_request - Helper function for drivers to complete the request.
2537  * @rq: the request being processed
2538  * @error: %0 for success, < %0 for error
2539  * @nr_bytes: number of bytes to complete
2540  *
2541  * Description:
2542  *     Ends I/O on a number of bytes attached to @rq.
2543  *     If @rq has leftover, sets it up for the next range of segments.
2544  *
2545  * Return:
2546  *     %false - we are done with this request
2547  *     %true  - still buffers pending for this request
2548  **/
2549 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2550 {
2551 	return blk_end_bidi_request(rq, error, nr_bytes, 0);
2552 }
2553 EXPORT_SYMBOL(blk_end_request);
2554 
2555 /**
2556  * blk_end_request_all - Helper function for drivers to finish the request.
2557  * @rq: the request to finish
2558  * @error: %0 for success, < %0 for error
2559  *
2560  * Description:
2561  *     Completely finish @rq.
2562  */
2563 void blk_end_request_all(struct request *rq, int error)
2564 {
2565 	bool pending;
2566 	unsigned int bidi_bytes = 0;
2567 
2568 	if (unlikely(blk_bidi_rq(rq)))
2569 		bidi_bytes = blk_rq_bytes(rq->next_rq);
2570 
2571 	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2572 	BUG_ON(pending);
2573 }
2574 EXPORT_SYMBOL(blk_end_request_all);
2575 
2576 /**
2577  * blk_end_request_cur - Helper function to finish the current request chunk.
2578  * @rq: the request to finish the current chunk for
2579  * @error: %0 for success, < %0 for error
2580  *
2581  * Description:
2582  *     Complete the current consecutively mapped chunk from @rq.
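 *
 *     A chunk-at-a-time driver (e.g. a PIO interrupt handler) often uses
 *     this as in the sketch below, where the my_* helpers are hypothetical:
 *
 *	/* current chunk transferred; complete it and see if more remain */
 *	if (blk_end_request_cur(rq, 0))
 *		my_start_next_chunk(rq);
 *	else
 *		my_start_next_request(q);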
2583 * 2584 * Return: 2585 * %false - we are done with this request 2586 * %true - still buffers pending for this request 2587 */ 2588 bool blk_end_request_cur(struct request *rq, int error) 2589 { 2590 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2591 } 2592 EXPORT_SYMBOL(blk_end_request_cur); 2593 2594 /** 2595 * blk_end_request_err - Finish a request till the next failure boundary. 2596 * @rq: the request to finish till the next failure boundary for 2597 * @error: must be negative errno 2598 * 2599 * Description: 2600 * Complete @rq till the next failure boundary. 2601 * 2602 * Return: 2603 * %false - we are done with this request 2604 * %true - still buffers pending for this request 2605 */ 2606 bool blk_end_request_err(struct request *rq, int error) 2607 { 2608 WARN_ON(error >= 0); 2609 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2610 } 2611 EXPORT_SYMBOL_GPL(blk_end_request_err); 2612 2613 /** 2614 * __blk_end_request - Helper function for drivers to complete the request. 2615 * @rq: the request being processed 2616 * @error: %0 for success, < %0 for error 2617 * @nr_bytes: number of bytes to complete 2618 * 2619 * Description: 2620 * Must be called with queue lock held unlike blk_end_request(). 2621 * 2622 * Return: 2623 * %false - we are done with this request 2624 * %true - still buffers pending for this request 2625 **/ 2626 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2627 { 2628 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 2629 } 2630 EXPORT_SYMBOL(__blk_end_request); 2631 2632 /** 2633 * __blk_end_request_all - Helper function for drives to finish the request. 2634 * @rq: the request to finish 2635 * @error: %0 for success, < %0 for error 2636 * 2637 * Description: 2638 * Completely finish @rq. Must be called with queue lock held. 2639 */ 2640 void __blk_end_request_all(struct request *rq, int error) 2641 { 2642 bool pending; 2643 unsigned int bidi_bytes = 0; 2644 2645 if (unlikely(blk_bidi_rq(rq))) 2646 bidi_bytes = blk_rq_bytes(rq->next_rq); 2647 2648 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2649 BUG_ON(pending); 2650 } 2651 EXPORT_SYMBOL(__blk_end_request_all); 2652 2653 /** 2654 * __blk_end_request_cur - Helper function to finish the current request chunk. 2655 * @rq: the request to finish the current chunk for 2656 * @error: %0 for success, < %0 for error 2657 * 2658 * Description: 2659 * Complete the current consecutively mapped chunk from @rq. Must 2660 * be called with queue lock held. 2661 * 2662 * Return: 2663 * %false - we are done with this request 2664 * %true - still buffers pending for this request 2665 */ 2666 bool __blk_end_request_cur(struct request *rq, int error) 2667 { 2668 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2669 } 2670 EXPORT_SYMBOL(__blk_end_request_cur); 2671 2672 /** 2673 * __blk_end_request_err - Finish a request till the next failure boundary. 2674 * @rq: the request to finish till the next failure boundary for 2675 * @error: must be negative errno 2676 * 2677 * Description: 2678 * Complete @rq till the next failure boundary. Must be called 2679 * with queue lock held. 
2680 * 2681 * Return: 2682 * %false - we are done with this request 2683 * %true - still buffers pending for this request 2684 */ 2685 bool __blk_end_request_err(struct request *rq, int error) 2686 { 2687 WARN_ON(error >= 0); 2688 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2689 } 2690 EXPORT_SYMBOL_GPL(__blk_end_request_err); 2691 2692 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2693 struct bio *bio) 2694 { 2695 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2696 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; 2697 2698 if (bio_has_data(bio)) { 2699 rq->nr_phys_segments = bio_phys_segments(q, bio); 2700 rq->buffer = bio_data(bio); 2701 } 2702 rq->__data_len = bio->bi_size; 2703 rq->bio = rq->biotail = bio; 2704 2705 if (bio->bi_bdev) 2706 rq->rq_disk = bio->bi_bdev->bd_disk; 2707 } 2708 2709 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 2710 /** 2711 * rq_flush_dcache_pages - Helper function to flush all pages in a request 2712 * @rq: the request to be flushed 2713 * 2714 * Description: 2715 * Flush all pages in @rq. 2716 */ 2717 void rq_flush_dcache_pages(struct request *rq) 2718 { 2719 struct req_iterator iter; 2720 struct bio_vec *bvec; 2721 2722 rq_for_each_segment(bvec, rq, iter) 2723 flush_dcache_page(bvec->bv_page); 2724 } 2725 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2726 #endif 2727 2728 /** 2729 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 2730 * @q : the queue of the device being checked 2731 * 2732 * Description: 2733 * Check if underlying low-level drivers of a device are busy. 2734 * If the drivers want to export their busy state, they must set own 2735 * exporting function using blk_queue_lld_busy() first. 2736 * 2737 * Basically, this function is used only by request stacking drivers 2738 * to stop dispatching requests to underlying devices when underlying 2739 * devices are busy. This behavior helps more I/O merging on the queue 2740 * of the request stacking driver and prevents I/O throughput regression 2741 * on burst I/O load. 2742 * 2743 * Return: 2744 * 0 - Not busy (The request stacking driver should dispatch request) 2745 * 1 - Busy (The request stacking driver should stop dispatching request) 2746 */ 2747 int blk_lld_busy(struct request_queue *q) 2748 { 2749 if (q->lld_busy_fn) 2750 return q->lld_busy_fn(q); 2751 2752 return 0; 2753 } 2754 EXPORT_SYMBOL_GPL(blk_lld_busy); 2755 2756 /** 2757 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2758 * @rq: the clone request to be cleaned up 2759 * 2760 * Description: 2761 * Free all bios in @rq for a cloned request. 2762 */ 2763 void blk_rq_unprep_clone(struct request *rq) 2764 { 2765 struct bio *bio; 2766 2767 while ((bio = rq->bio) != NULL) { 2768 rq->bio = bio->bi_next; 2769 2770 bio_put(bio); 2771 } 2772 } 2773 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2774 2775 /* 2776 * Copy attributes of the original request to the clone request. 2777 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. 
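 *
 * (Used by blk_rq_prep_clone() below. A request stacking driver's setup
 * path looks roughly like this sketch, where "clone", "rq", "my_bio_ctr",
 * "my_clone_end_io" and "data" are hypothetical:
 *
 *	if (blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, my_bio_ctr, data))
 *		goto fail;
 *	clone->end_io = my_clone_end_io;
 *
 * with blk_rq_unprep_clone(clone) undoing the bio cloning on teardown.)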
2778 */ 2779 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2780 { 2781 dst->cpu = src->cpu; 2782 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; 2783 dst->cmd_type = src->cmd_type; 2784 dst->__sector = blk_rq_pos(src); 2785 dst->__data_len = blk_rq_bytes(src); 2786 dst->nr_phys_segments = src->nr_phys_segments; 2787 dst->ioprio = src->ioprio; 2788 dst->extra_len = src->extra_len; 2789 } 2790 2791 /** 2792 * blk_rq_prep_clone - Helper function to setup clone request 2793 * @rq: the request to be setup 2794 * @rq_src: original request to be cloned 2795 * @bs: bio_set that bios for clone are allocated from 2796 * @gfp_mask: memory allocation mask for bio 2797 * @bio_ctr: setup function to be called for each clone bio. 2798 * Returns %0 for success, non %0 for failure. 2799 * @data: private data to be passed to @bio_ctr 2800 * 2801 * Description: 2802 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2803 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) 2804 * are not copied, and copying such parts is the caller's responsibility. 2805 * Also, pages which the original bios are pointing to are not copied 2806 * and the cloned bios just point same pages. 2807 * So cloned bios must be completed before original bios, which means 2808 * the caller must complete @rq before @rq_src. 2809 */ 2810 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2811 struct bio_set *bs, gfp_t gfp_mask, 2812 int (*bio_ctr)(struct bio *, struct bio *, void *), 2813 void *data) 2814 { 2815 struct bio *bio, *bio_src; 2816 2817 if (!bs) 2818 bs = fs_bio_set; 2819 2820 blk_rq_init(NULL, rq); 2821 2822 __rq_for_each_bio(bio_src, rq_src) { 2823 bio = bio_clone_bioset(bio_src, gfp_mask, bs); 2824 if (!bio) 2825 goto free_and_out; 2826 2827 if (bio_ctr && bio_ctr(bio, bio_src, data)) 2828 goto free_and_out; 2829 2830 if (rq->bio) { 2831 rq->biotail->bi_next = bio; 2832 rq->biotail = bio; 2833 } else 2834 rq->bio = rq->biotail = bio; 2835 } 2836 2837 __blk_rq_prep_clone(rq, rq_src); 2838 2839 return 0; 2840 2841 free_and_out: 2842 if (bio) 2843 bio_put(bio); 2844 blk_rq_unprep_clone(rq); 2845 2846 return -ENOMEM; 2847 } 2848 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 2849 2850 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) 2851 { 2852 return queue_work(kblockd_workqueue, work); 2853 } 2854 EXPORT_SYMBOL(kblockd_schedule_work); 2855 2856 int kblockd_schedule_delayed_work(struct request_queue *q, 2857 struct delayed_work *dwork, unsigned long delay) 2858 { 2859 return queue_delayed_work(kblockd_workqueue, dwork, delay); 2860 } 2861 EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2862 2863 #define PLUG_MAGIC 0x91827364 2864 2865 /** 2866 * blk_start_plug - initialize blk_plug and track it inside the task_struct 2867 * @plug: The &struct blk_plug that needs to be initialized 2868 * 2869 * Description: 2870 * Tracking blk_plug inside the task_struct will help with auto-flushing the 2871 * pending I/O should the task end up blocking between blk_start_plug() and 2872 * blk_finish_plug(). This is important from a performance perspective, but 2873 * also ensures that we don't deadlock. For instance, if the task is blocking 2874 * for a memory allocation, memory reclaim could end up wanting to free a 2875 * page belonging to that request that is currently residing in our private 2876 * plug. By flushing the pending I/O when the process goes to sleep, we avoid 2877 * this kind of deadlock. 
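 *
 * The usual calling pattern is simply (illustrative sketch):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(rw, bio);		/* any number of submissions */
 *	blk_finish_plug(&plug);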
2878 */ 2879 void blk_start_plug(struct blk_plug *plug) 2880 { 2881 struct task_struct *tsk = current; 2882 2883 plug->magic = PLUG_MAGIC; 2884 INIT_LIST_HEAD(&plug->list); 2885 INIT_LIST_HEAD(&plug->cb_list); 2886 2887 /* 2888 * If this is a nested plug, don't actually assign it. It will be 2889 * flushed on its own. 2890 */ 2891 if (!tsk->plug) { 2892 /* 2893 * Store ordering should not be needed here, since a potential 2894 * preempt will imply a full memory barrier 2895 */ 2896 tsk->plug = plug; 2897 } 2898 } 2899 EXPORT_SYMBOL(blk_start_plug); 2900 2901 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 2902 { 2903 struct request *rqa = container_of(a, struct request, queuelist); 2904 struct request *rqb = container_of(b, struct request, queuelist); 2905 2906 return !(rqa->q < rqb->q || 2907 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); 2908 } 2909 2910 /* 2911 * If 'from_schedule' is true, then postpone the dispatch of requests 2912 * until a safe kblockd context. We due this to avoid accidental big 2913 * additional stack usage in driver dispatch, in places where the originally 2914 * plugger did not intend it. 2915 */ 2916 static void queue_unplugged(struct request_queue *q, unsigned int depth, 2917 bool from_schedule) 2918 __releases(q->queue_lock) 2919 { 2920 trace_block_unplug(q, depth, !from_schedule); 2921 2922 if (from_schedule) 2923 blk_run_queue_async(q); 2924 else 2925 __blk_run_queue(q); 2926 spin_unlock(q->queue_lock); 2927 } 2928 2929 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) 2930 { 2931 LIST_HEAD(callbacks); 2932 2933 while (!list_empty(&plug->cb_list)) { 2934 list_splice_init(&plug->cb_list, &callbacks); 2935 2936 while (!list_empty(&callbacks)) { 2937 struct blk_plug_cb *cb = list_first_entry(&callbacks, 2938 struct blk_plug_cb, 2939 list); 2940 list_del(&cb->list); 2941 cb->callback(cb, from_schedule); 2942 } 2943 } 2944 } 2945 2946 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, 2947 int size) 2948 { 2949 struct blk_plug *plug = current->plug; 2950 struct blk_plug_cb *cb; 2951 2952 if (!plug) 2953 return NULL; 2954 2955 list_for_each_entry(cb, &plug->cb_list, list) 2956 if (cb->callback == unplug && cb->data == data) 2957 return cb; 2958 2959 /* Not currently on the callback list */ 2960 BUG_ON(size < sizeof(*cb)); 2961 cb = kzalloc(size, GFP_ATOMIC); 2962 if (cb) { 2963 cb->data = data; 2964 cb->callback = unplug; 2965 list_add(&cb->list, &plug->cb_list); 2966 } 2967 return cb; 2968 } 2969 EXPORT_SYMBOL(blk_check_plugged); 2970 2971 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2972 { 2973 struct request_queue *q; 2974 unsigned long flags; 2975 struct request *rq; 2976 LIST_HEAD(list); 2977 unsigned int depth; 2978 2979 BUG_ON(plug->magic != PLUG_MAGIC); 2980 2981 flush_plug_callbacks(plug, from_schedule); 2982 if (list_empty(&plug->list)) 2983 return; 2984 2985 list_splice_init(&plug->list, &list); 2986 2987 list_sort(NULL, &list, plug_rq_cmp); 2988 2989 q = NULL; 2990 depth = 0; 2991 2992 /* 2993 * Save and disable interrupts here, to avoid doing it for every 2994 * queue lock we have to take. 
2995 */ 2996 local_irq_save(flags); 2997 while (!list_empty(&list)) { 2998 rq = list_entry_rq(list.next); 2999 list_del_init(&rq->queuelist); 3000 BUG_ON(!rq->q); 3001 if (rq->q != q) { 3002 /* 3003 * This drops the queue lock 3004 */ 3005 if (q) 3006 queue_unplugged(q, depth, from_schedule); 3007 q = rq->q; 3008 depth = 0; 3009 spin_lock(q->queue_lock); 3010 } 3011 3012 /* 3013 * Short-circuit if @q is dead 3014 */ 3015 if (unlikely(blk_queue_dying(q))) { 3016 __blk_end_request_all(rq, -ENODEV); 3017 continue; 3018 } 3019 3020 /* 3021 * rq is already accounted, so use raw insert 3022 */ 3023 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) 3024 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 3025 else 3026 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 3027 3028 depth++; 3029 } 3030 3031 /* 3032 * This drops the queue lock 3033 */ 3034 if (q) 3035 queue_unplugged(q, depth, from_schedule); 3036 3037 local_irq_restore(flags); 3038 } 3039 3040 void blk_finish_plug(struct blk_plug *plug) 3041 { 3042 blk_flush_plug_list(plug, false); 3043 3044 if (plug == current->plug) 3045 current->plug = NULL; 3046 } 3047 EXPORT_SYMBOL(blk_finish_plug); 3048 3049 int __init blk_dev_init(void) 3050 { 3051 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 3052 sizeof(((struct request *)0)->cmd_flags)); 3053 3054 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3055 kblockd_workqueue = alloc_workqueue("kblockd", 3056 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 3057 if (!kblockd_workqueue) 3058 panic("Failed to create kblockd\n"); 3059 3060 request_cachep = kmem_cache_create("blkdev_requests", 3061 sizeof(struct request), 0, SLAB_PANIC, NULL); 3062 3063 blk_requestq_cachep = kmem_cache_create("blkdev_queue", 3064 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 3065 3066 return 0; 3067 } 3068
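/*
 * Illustrative sketch only (not part of blk-core.c): one way a driver can
 * use blk_check_plugged() to batch bios while the submitting task holds a
 * plug, in the style of the md/raid drivers. All "my_*" names below are
 * hypothetical.
 */
struct my_plug_cb {
	struct blk_plug_cb cb;		/* embedded; registered with the plug */
	struct bio_list pending;	/* zeroed by blk_check_plugged()'s kzalloc */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);
	struct bio *bio;

	/* Called from blk_flush_plug_list(); submit everything we batched. */
	while ((bio = bio_list_pop(&mcb->pending)) != NULL)
		generic_make_request(bio);
	kfree(mcb);
}

static bool my_try_plug(void *my_device, struct bio *bio)
{
	struct blk_plug_cb *cb;
	struct my_plug_cb *mcb;

	cb = blk_check_plugged(my_unplug, my_device, sizeof(*mcb));
	if (!cb)
		return false;	/* no plug active; caller submits directly */

	mcb = container_of(cb, struct my_plug_cb, cb);
	bio_list_add(&mcb->pending, bio);
	return true;
}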