/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(cpu, part);
		part_inc_in_flight(part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
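/*
 * Worked example for the thresholds above (illustrative, not part of the
 * original source): with the default q->nr_requests = 128, congestion
 * switches on at 128 - 16 + 1 = 113 allocated requests and back off at
 * 128 - 16 - 8 - 1 = 103.  The gap between the two values gives the
 * on/off transitions some hysteresis.
 */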
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
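/*
 * Example (illustrative sketch, not part of this file): a request_fn that
 * runs out of a scarce resource can back off and let kblockd re-run the
 * queue a little later.  "my_resource_available" is a hypothetical driver
 * helper; the queue lock is already held inside a request_fn.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (!my_resource_available(q)) {
 *				blk_delay_queue(q, 3);	(retry in ~3 ms)
 *				return;
 *			}
 *			...
 *		}
 *	}
 */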
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
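/*
 * Example (illustrative sketch, not part of this file): the classic
 * pairing of blk_stop_queue() and blk_start_queue() around a 'queue full'
 * condition.  "my_hw_queue_full" and "my_irq_handler" are hypothetical
 * driver code; both sides hold the queue lock as required.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		if (my_hw_queue_full(q)) {
 *			blk_stop_queue(q);
 *			return;
 *		}
 *		...
 *	}
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		...
 *		spin_lock(q->queue_lock);
 *		blk_start_queue(q);	(completion freed hardware slots)
 *		spin_unlock(q->queue_lock);
 *		return IRQ_HANDLED;
 *	}
 */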
/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us. The caller must hold the queue lock.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
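/*
 * Example (illustrative sketch, not part of this file): picking a
 * run-queue variant.  A completion path that already holds the queue lock
 * can use blk_run_queue_async() to punt the restart to kblockd instead of
 * recursing into request_fn; plain process context with no lock held can
 * use blk_run_queue(), which does its own locking.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	... finish a request ...
 *	blk_run_queue_async(q);		(kblockd will invoke request_fn)
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */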
/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active.  Some drivers (e.g. fd and loop) get unhappy in
		 * such cases.  Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				drain |= !list_empty(&q->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() is %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	bool drain;

	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
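/*
 * Example (illustrative sketch, not part of this file): bypass mode is
 * used to quiesce elevator/blkcg involvement while queue-wide state is
 * changed, e.g. the pattern used around an elevator switch:
 *
 *	blk_queue_bypass_start(q);
 *	... swap in the new elevator, update blkcg policies, etc. ...
 *	blk_queue_bypass_end(q);
 *
 * The depth counter makes the pair nestable; only the outermost
 * blk_queue_bypass_start() pays for the drain and the RCU grace period.
 */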
/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	spin_lock_irq(lock);
	__blk_drain_queue(q, true);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					  mempool_free_slab, request_cachep,
					  gfp_mask, q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

void blk_exit_rl(struct request_list *rl)
{
	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);
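/*
 * Example (illustrative sketch, not part of this file): typical driver
 * teardown order, so no new I/O can reach the queue while it is being
 * drained.  "my_disk" is a hypothetical gendisk created by the driver.
 *
 *	del_gendisk(my_disk);			(no new openers or I/O)
 *	blk_cleanup_queue(my_disk->queue);
 *	put_disk(my_disk);
 */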
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_id;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	if (blkcg_init_queue(q))
		goto fail_id;

	return q;

fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
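/*
 * Example (illustrative sketch, not part of this file): minimal setup of
 * a request_fn based driver.  "my_request_fn" and "my_lock" are
 * hypothetical; error handling is elided.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			... hand rq to the hardware ...
 *		}
 *	}
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	... later, on unload: blk_cleanup_queue(q); ...
 */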
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	if (!q)
		return NULL;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		return NULL;

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unprep_rq_fn		= NULL;
	q->queue_flags		|= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock	= lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* init elevator */
	if (elevator_init(q, NULL))
		return NULL;
	return q;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}
static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	/*
	 * bdi isn't aware of blkcg yet.  As all async IOs end up root
	 * blkcg anyway, just use root blkcg state.
	 */
	if (rl == &q->root_rl &&
	    rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, unsigned int flags)
{
	struct request_queue *q = rl->q;
	int sync = rw_is_sync(flags);

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
	if (!bio)
		return true;

	/*
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
		return false;

	return true;
}

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue;

	if (unlikely(blk_queue_dying(q)))
		return NULL;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return NULL;
				}
			}
		}
		/*
		 * bdi isn't aware of blkcg yet.  As all async IOs end up
		 * root blkcg anyway, just use root blkcg state.
		 */
		if (rl == &q->root_rl)
			blk_set_queue_congested(q, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return NULL;

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
		rw_flags |= REQ_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = rw_flags | REQ_ALLOCED;

	/* init elvpriv */
	if (rw_flags & REQ_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
			   dev_name(q->backing_dev_info.dev));

	rq->cmd_flags &= ~REQ_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, rw_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return NULL;
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
 * function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, rw_flags, bio, gfp_mask);
	if (rq)
		return rq;

	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return NULL;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, rw_flags & 1);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period time.  See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
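/*
 * Example (illustrative sketch, not part of this file): allocating and
 * issuing a driver-private command with blk_get_request().  The CDB
 * contents and the "my_done" completion callback are hypothetical.
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (!rq)
 *		return -ENODEV;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd[0] = ...;			(fill the CDB)
 *	rq->timeout = 60 * HZ;
 *	blk_execute_rq_nowait(q, NULL, rq, 0, my_done);
 */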
/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized
 * by the caller.  It is passed a &struct bio, which describes the memory
 * info of the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers, and that bio_data_dir() will
 * return the needed direction of the request (and that all bio's in the
 * passed bio-chain are properly set accordingly).
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device.  Otherwise the call to blk_queue_bounce
 * will BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
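/*
 * Example (illustrative sketch, not part of this file): turning a
 * pre-built bio chain into a BLOCK_PC request and executing it
 * synchronously.  "my_disk" and the prepared "bio" are hypothetical.
 *
 *	rq = blk_make_request(q, bio, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	... fill rq->cmd / rq->cmd_len ...
 *	blk_execute_rq(q, my_disk, rq, 0);
 *	blk_put_request(rq);
 */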
static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	if (now == part->stamp)
		return;

	if (part_in_flight(part)) {
		__part_stat_add(cpu, part, time_in_queue,
				part_in_flight(part) * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);
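/*
 * Example (illustrative sketch, not part of this file): how a stats
 * reader applies the round-off before sampling the counters, in the
 * style of the /proc/diskstats code:
 *
 *	cpu = part_stat_lock();
 *	part_round_stats(cpu, part);
 *	part_stat_unlock();
 *	ios = part_stat_read(part, ios[READ]);
 */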
#ifdef CONFIG_PM_RUNTIME
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;

	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		unsigned int flags = req->cmd_flags;
		struct request_list *rl = blk_rq_rl(req);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(rl, req);
		freed_request(rl, flags);
		blk_put_rl(rl);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

/**
 * blk_add_request_payload - add a payload to a request
 * @rq: request to update
 * @page: page backing the payload
 * @len: length of the payload.
 *
 * This allows a block driver to add a payload to an already submitted
 * request.  The driver needs to take care of freeing the payload
 * itself.
 *
 * Note that this is a quite horrible hack and nothing but handling of
 * discard requests should ever use it.
 */
void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len)
{
	struct bio *bio = rq->bio;

	bio->bi_io_vec->bv_page = page;
	bio->bi_io_vec->bv_offset = 0;
	bio->bi_io_vec->bv_len = len;

	bio->bi_size = len;
	bio->bi_vcnt = 1;
	bio->bi_phys_segments = 1;

	rq->__data_len = rq->resid_len = len;
	rq->nr_phys_segments = 1;
	rq->buffer = bio_data(bio);
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
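/*
 * Example (illustrative sketch, not part of this file): a driver's
 * prep_rq_fn attaching a zeroed page as the payload of a discard
 * request, roughly the pattern the hack above exists for.  "my_page" is
 * a hypothetical page owned and later freed by the driver.
 *
 *	if (rq->cmd_flags & REQ_DISCARD) {
 *		memset(page_address(my_page), 0, PAGE_SIZE);
 *		blk_add_request_payload(rq, my_page, 512);
 *	}
 */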
static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
				   struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);
	return true;
}

static bool bio_attempt_front_merge(struct request_queue *q,
				    struct request *req, struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	/*
	 * may not be valid. if the low level driver said
	 * it didn't need a bounce buffer then it better
	 * not touch req->buffer either...
	 */
	req->buffer = bio_data(bio);
	req->__sector = bio->bi_sector;
	req->__data_len += bio->bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);
	return true;
}

/**
 * attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 */
static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
			       unsigned int *request_count)
{
	struct blk_plug *plug;
	struct request *rq;
	bool ret = false;

	plug = current->plug;
	if (!plug)
		goto out;
	*request_count = 0;

	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
		int el_ret;

		if (rq->q == q)
			(*request_count)++;

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			ret = bio_attempt_back_merge(q, rq, bio);
			if (ret)
				break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			ret = bio_attempt_front_merge(q, rq, bio);
			if (ret)
				break;
		}
	}
out:
	return ret;
}
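/*
 * Example (illustrative sketch, not part of this file): the submitter
 * side of plugging.  Bios issued inside the plug/unplug window are
 * collected on current->plug, where the merge attempt above can see
 * them, and are flushed to the queue(s) in one batch at
 * blk_finish_plug().
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(WRITE, bios[i]);
 *	blk_finish_plug(&plug);
 */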
void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cmd_type = REQ_TYPE_FS;

	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
	if (bio->bi_rw & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
	req->__sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	blk_rq_bio_prep(req->q, req, bio);
}

void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
	struct request *req;
	unsigned int request_count = 0;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);
		return;
	}

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (attempt_plug_merge(q, bio, &request_count))
		return;

	spin_lock_irq(q->queue_lock);

	el_ret = elv_merge(q, &req, bio);
	if (el_ret == ELEVATOR_BACK_MERGE) {
		if (bio_attempt_back_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		if (bio_attempt_front_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;

	/*
	 * Grab a free request. This may sleep but can not fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request(q, rw_flags, bio, GFP_NOIO);
	if (unlikely(!req)) {
		bio_endio(bio, -ENODEV);	/* @q is dead */
		goto out_unlock;
	}

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace.
		 */
		if (!request_count)
			trace_block_plug(q);
		else {
			if (request_count >= BLK_MAX_REQUEST_COUNT) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
		}
		list_add_tail(&req->queuelist, &plug->list);
		drive_stat_acct(req, 1);
	} else {
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		__blk_run_queue(q);
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */

/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
				      bdev->bd_dev,
				      bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio_end_sector(bio),
			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */
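/*
 * Example (illustrative sketch, not part of this file): driving the
 * fault injection hooks above from userspace.  The boot parameter takes
 * the standard fault_attr tuple, and each block device opts in through
 * its make-it-fail sysfs knob ("sdX" is a placeholder):
 *
 *	fail_make_request=<interval>,<probability>,<space>,<times>
 *	echo 1 > /sys/block/sdX/make-it-fail
 *
 * See Documentation/fault-injection/fault-injection.txt for the debugfs
 * knobs under fail_make_request/.
 */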
/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	int err = -EIO;
	char b[BDEVNAME_SIZE];
	struct hd_struct *part;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	q = bdev_get_queue(bio->bi_bdev);
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bdevname(bio->bi_bdev, b),
			(long long) bio->bi_sector);
		goto end_io;
	}

	if (unlikely(bio_is_rw(bio) &&
		     nr_sectors > queue_max_hw_sectors(q))) {
		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
		       bdevname(bio->bi_bdev, b),
		       bio_sectors(bio),
		       queue_max_hw_sectors(q));
		goto end_io;
	}

	part = bio->bi_bdev->bd_part;
	if (should_fail_request(part, bio->bi_size) ||
	    should_fail_request(&part_to_disk(part)->part0,
				bio->bi_size))
		goto end_io;

	/*
	 * If this device has partitions, remap block n
	 * of partition p to block n+start(p) of the disk.
	 */
	blk_partition_remap(bio);

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
		if (!nr_sectors) {
			err = 0;
			goto end_io;
		}
	}

	if ((bio->bi_rw & REQ_DISCARD) &&
	    (!blk_queue_discard(q) ||
	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	/*
	 * Various block parts want %current->io_context and lazy ioc
	 * allocation ends up trading a lot of pain for a small amount of
	 * memory.  Just allocate it upfront.  This may fail and block
	 * layer knows how to live with it.
	 */
	create_io_context(GFP_ATOMIC, q->node);

	if (blk_throtl_bio(q, bio))
		return false;	/* throttled, will be resubmitted later */

	trace_block_bio_queue(q, bio);
	return true;

end_io:
	bio_endio(bio, err);
	return false;
}
/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may resubmit the bio to
 * a lower device by calling into generic_make_request recursively, which
 * means the bio should NOT be touched after the call to ->make_request_fn.
 */
void generic_make_request(struct bio *bio)
{
	struct bio_list bio_list_on_stack;

	if (!generic_make_request_checks(bio))
		return;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submitted by a
	 * make_request_fn function.  current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not.  If it is NULL, then no make_request is active.  If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail
	 */
	if (current->bio_list) {
		bio_list_add(current->bio_list, bio);
		return;
	}

	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added.  ->make_request() may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * off the top of the list (no pretending) and so remove it from
	 * bio_list, and call into ->make_request() again.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack);
	current->bio_list = &bio_list_on_stack;
	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		q->make_request_fn(q, bio);

		bio = bio_list_pop(current->bio_list);
	} while (bio);
	current->bio_list = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);
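/*
 * Example (illustrative sketch, not part of this file): a stacking
 * driver's make_request_fn remapping a bio to the device below it and
 * resubmitting through the recursion-safe path above.  "my_lower_bdev"
 * and "my_start" are hypothetical per-device state.
 *
 *	static void my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		bio->bi_bdev = my_lower_bdev;
 *		bio->bi_sector += my_start;
 *		generic_make_request(bio);
 *	}
 */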
"WRITE" : "READ", 1872 (unsigned long long)bio->bi_sector, 1873 bdevname(bio->bi_bdev, b), 1874 count); 1875 } 1876 } 1877 1878 generic_make_request(bio); 1879 } 1880 EXPORT_SYMBOL(submit_bio); 1881 1882 /** 1883 * blk_rq_check_limits - Helper function to check a request for the queue limit 1884 * @q: the queue 1885 * @rq: the request being checked 1886 * 1887 * Description: 1888 * @rq may have been made based on weaker limitations of upper-level queues 1889 * in request stacking drivers, and it may violate the limitation of @q. 1890 * Since the block layer and the underlying device driver trust @rq 1891 * after it is inserted to @q, it should be checked against @q before 1892 * the insertion using this generic function. 1893 * 1894 * This function should also be useful for request stacking drivers 1895 * in some cases below, so export this function. 1896 * Request stacking drivers like request-based dm may change the queue 1897 * limits while requests are in the queue (e.g. dm's table swapping). 1898 * Such request stacking drivers should check those requests agaist 1899 * the new queue limits again when they dispatch those requests, 1900 * although such checkings are also done against the old queue limits 1901 * when submitting requests. 1902 */ 1903 int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1904 { 1905 if (!rq_mergeable(rq)) 1906 return 0; 1907 1908 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { 1909 printk(KERN_ERR "%s: over max size limit.\n", __func__); 1910 return -EIO; 1911 } 1912 1913 /* 1914 * queue's settings related to segment counting like q->bounce_pfn 1915 * may differ from that of other stacking queues. 1916 * Recalculate it to check the request correctly on this queue's 1917 * limitation. 1918 */ 1919 blk_recalc_rq_segments(rq); 1920 if (rq->nr_phys_segments > queue_max_segments(q)) { 1921 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 1922 return -EIO; 1923 } 1924 1925 return 0; 1926 } 1927 EXPORT_SYMBOL_GPL(blk_rq_check_limits); 1928 1929 /** 1930 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 1931 * @q: the queue to submit the request 1932 * @rq: the request being queued 1933 */ 1934 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 1935 { 1936 unsigned long flags; 1937 int where = ELEVATOR_INSERT_BACK; 1938 1939 if (blk_rq_check_limits(q, rq)) 1940 return -EIO; 1941 1942 if (rq->rq_disk && 1943 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 1944 return -EIO; 1945 1946 spin_lock_irqsave(q->queue_lock, flags); 1947 if (unlikely(blk_queue_dying(q))) { 1948 spin_unlock_irqrestore(q->queue_lock, flags); 1949 return -ENODEV; 1950 } 1951 1952 /* 1953 * Submitting request must be dequeued before calling this function 1954 * because it will be linked to another request_queue 1955 */ 1956 BUG_ON(blk_queued_rq(rq)); 1957 1958 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) 1959 where = ELEVATOR_INSERT_FLUSH; 1960 1961 add_acct_request(q, rq, where); 1962 if (where == ELEVATOR_INSERT_FLUSH) 1963 __blk_run_queue(q); 1964 spin_unlock_irqrestore(q->queue_lock, flags); 1965 1966 return 0; 1967 } 1968 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 1969 1970 /** 1971 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 1972 * @rq: request to examine 1973 * 1974 * Description: 1975 * A request could be merge of IOs which require different failure 1976 * handling. 
 *    This function determines the number of bytes which
 *    can be failed from the beginning of the request without
 *    crossing into areas which need to be retried further.
 *
 * Return:
 *    The number of bytes to fail.
 *
 * Context:
 *    queue_lock must be held.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_rw & ff) != ff)
			break;
		bytes += bio->bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (blk_do_io_stat(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_account_io_done(struct request *req)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(cpu, part);
		part_dec_in_flight(part, rw);

		hd_struct_put(part);
		part_stat_unlock();
	}
}

#ifdef CONFIG_PM_RUNTIME
/*
 * Don't process normal requests when the queue is suspended
 * or in the process of suspending/resuming.
 */
static struct request *blk_pm_peek_request(struct request_queue *q,
					   struct request *rq)
{
	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
	    (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
		return NULL;
	else
		return rq;
}
#else
static inline struct request *blk_pm_peek_request(struct request_queue *q,
						  struct request *rq)
{
	return rq;
}
#endif

/**
 * blk_peek_request - peek at the top of a request queue
 * @q: request queue to peek at
 *
 * Description:
 *    Return the request at the top of @q.  The returned request
 *    should be started using blk_start_request() before the LLD starts
 *    processing it.
 *
 * Return:
 *    Pointer to the request at the top of @q if available.  %NULL
 *    otherwise.
 *
 * Context:
 *    queue_lock must be held.
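 *
 *    As an illustrative sketch (not a driver this file defines), a
 *    ->request_fn() might use the peek/start pair like this, where
 *    my_hw_has_room() and my_hw_queue() are assumed driver helpers and
 *    the queue lock is already held inside ->request_fn():
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (!my_hw_has_room())
 *				break;
 *			blk_start_request(rq);
 *			my_hw_queue(rq);
 *		}
 *	}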
 */
struct request *blk_peek_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {

		rq = blk_pm_peek_request(q, rq);
		if (!rq)
			break;

		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (rq->cmd_flags & REQ_SORTED)
				elv_activate_rq(q, rq);

			/*
			 * Just mark as started even if we don't start
			 * it; a request that has been delayed should
			 * not be passed by new incoming requests.
			 */
			rq->cmd_flags |= REQ_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->cmd_flags & REQ_DONTPREP)
			break;

		if (q->dma_drain_size && blk_rq_bytes(rq)) {
			/*
			 * Make sure space for the drain appears.  We
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle.
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * The request may have been (partially) prepped.
			 * We need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && blk_rq_bytes(rq) &&
			    !(rq->cmd_flags & REQ_DONTPREP)) {
				/*
				 * Remove the space for the drain we added
				 * so that we don't add it again.
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			/*
			 * Mark this request as started so we don't trigger
			 * any debug logic in the end I/O path.
			 */
			blk_start_request(rq);
			__blk_end_request_all(rq, -EIO);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_peek_request);

void blk_dequeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * The time frame between a request being removed from the lists
	 * and when it is freed is accounted as I/O in progress on the
	 * driver side.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]++;
		set_io_start_time_ns(rq);
	}
}

/**
 * blk_start_request - start request processing on the driver
 * @req: request to dequeue
 *
 * Description:
 *    Dequeue @req and start the timeout timer on it.  This hands off the
 *    request to the driver.
 *
 *    Block internal functions which don't want to start the timer should
 *    call blk_dequeue_request().
 *
 * Context:
 *    queue_lock must be held.
 */
void blk_start_request(struct request *req)
{
	blk_dequeue_request(req);

	/*
	 * We are now handing the request to the hardware; initialize
	 * resid_len to the full count and add the timeout handler.
	 */
	req->resid_len = blk_rq_bytes(req);
	if (unlikely(blk_bidi_rq(req)))
		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);

	blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);

/**
 * blk_fetch_request - fetch a request from a request queue
 * @q: request queue to fetch a request from
 *
 * Description:
 *    Return the request at the top of @q.  The request is started on
 *    return and the LLD can start processing it immediately.
 *
 * Return:
 *    Pointer to the request at the top of @q if available.  %NULL
 *    otherwise.
 *
 * Context:
 *    queue_lock must be held.
 */
struct request *blk_fetch_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_peek_request(q);
	if (rq)
		blk_start_request(rq);
	return rq;
}
EXPORT_SYMBOL(blk_fetch_request);

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *    Ends I/O on a number of bytes attached to @req, but doesn't complete
 *    the request structure even if @req doesn't have leftover.
 *    If @req has leftover, sets it up for the next range of segments.
 *
 *    This special helper function is only for request stacking drivers
 *    (e.g. request-based dm) so that they can handle partial completion.
 *    Actual device drivers should use blk_end_request instead.
 *
 *    Passing the result of blk_rq_bytes() as @nr_bytes guarantees a
 *    %false return from this function.
 *
 * Return:
 *    %false - this request doesn't have any more data
 *    %true  - this request has more data
 **/
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
	int total_bytes;

	if (!req->bio)
		return false;

	trace_block_rq_complete(req->q, req);

	/*
	 * For fs requests, rq is just a carrier of independent bios
	 * and each partial completion should be handled separately.
	 * Reset per-request error on each partial completion.
	 *
	 * TODO: tj: This is too subtle.  It would be better to let
	 * low level drivers do what they see fit.
	 */
	if (req->cmd_type == REQ_TYPE_FS)
		req->errors = 0;

	if (error && req->cmd_type == REQ_TYPE_FS &&
	    !(req->cmd_flags & REQ_QUIET)) {
		char *error_type;

		switch (error) {
		case -ENOLINK:
			error_type = "recoverable transport";
			break;
		case -EREMOTEIO:
			error_type = "critical target";
			break;
		case -EBADE:
			error_type = "critical nexus";
			break;
		case -ETIMEDOUT:
			error_type = "timeout";
			break;
		case -ENOSPC:
			error_type = "critical space allocation";
			break;
		case -ENODATA:
			error_type = "critical medium";
			break;
		case -EIO:
		default:
			error_type = "I/O";
			break;
		}
		printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
				   error_type, req->rq_disk ?
				   req->rq_disk->disk_name : "?",
				   (unsigned long long)blk_rq_pos(req));

	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_size, nr_bytes);

		if (bio_bytes == bio->bi_size)
			req->bio = bio->bi_next;

		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;
	req->buffer = bio_data(req->bio);

	/* update sector only for requests with clear definition of sector */
	if (req->cmd_type == REQ_TYPE_FS)
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->cmd_flags & REQ_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
	}

	/*
	 * If the total number of sectors is less than the first segment
	 * size, something has gone terribly wrong.
	 */
	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
		blk_dump_rq_flags(req, "request botched");
		req->__data_len = blk_rq_cur_bytes(req);
	}

	/* recalculate the number of segments */
	blk_recalc_rq_segments(req);

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static bool blk_update_bidi_request(struct request *rq, int error,
				    unsigned int nr_bytes,
				    unsigned int bidi_bytes)
{
	if (blk_update_request(rq, error, nr_bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(blk_bidi_rq(rq)) &&
	    blk_update_request(rq->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(rq->q))
		add_disk_randomness(rq->rq_disk);

	return false;
}

/**
 * blk_unprep_request - unprepare a request
 * @req: the request
 *
 * This function makes a request ready for complete resubmission (or
 * completion).  It happens only after all error handling is complete,
 * so it represents the appropriate moment to deallocate any resources
 * that were allocated to the request in the prep_rq_fn.  The queue
 * lock is held when calling this.
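 *
 * For instance, an ->unprep_rq_fn() paired with a prep function that
 * attached a driver-private buffer to rq->special might look like this
 * (an illustrative sketch, not a callback this file defines):
 *
 *	static void my_unprep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		kfree(rq->special);
 *		rq->special = NULL;
 *	}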
 */
void blk_unprep_request(struct request *req)
{
	struct request_queue *q = req->q;

	req->cmd_flags &= ~REQ_DONTPREP;
	if (q->unprep_rq_fn)
		q->unprep_rq_fn(q, req);
}
EXPORT_SYMBOL_GPL(blk_unprep_request);

/*
 * queue lock must be held
 */
static void blk_finish_request(struct request *req, int error)
{
	if (blk_rq_tagged(req))
		blk_queue_end_tag(req->q, req);

	BUG_ON(blk_queued_rq(req));

	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
		laptop_io_completion(&req->q->backing_dev_info);

	blk_delete_timer(req);

	if (req->cmd_flags & REQ_DONTPREP)
		blk_unprep_request(req);

	blk_account_io_done(req);

	if (req->end_io)
		req->end_io(req, error);
	else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(req->q, req);
	}
}

/**
 * blk_end_bidi_request - Complete a bidi request
 * @rq:         the request to complete
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *    Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *    Drivers that support bidi can safely call this function for any
 *    type of request, bidi or uni.  In the latter case @bidi_bytes is
 *    just ignored.
 *
 * Return:
 *    %false - we are done with this request
 *    %true  - still buffers pending for this request
 **/
static bool blk_end_bidi_request(struct request *rq, int error,
				 unsigned int nr_bytes, unsigned int bidi_bytes)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return false;
}

/**
 * __blk_end_bidi_request - Complete a bidi request with queue lock held
 * @rq:         the request to complete
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *    Identical to blk_end_bidi_request() except that the queue lock is
 *    assumed to be held on entry and remains so on return.
 *
 * Return:
 *    %false - we are done with this request
 *    %true  - still buffers pending for this request
 **/
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes)
{
	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	blk_finish_request(rq, error);

	return false;
}

/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *    Ends I/O on a number of bytes attached to @rq.
 *    If @rq has leftover, sets it up for the next range of segments.
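 *
 *    A driver's completion handler might use it like this hedged sketch
 *    (my_dev, err and bytes_done are assumed to come from the driver's
 *    completion context; the queue lock must not be held here):
 *
 *	if (!blk_end_request(rq, err, bytes_done))
 *		my_dev->current_rq = NULL;
 *
 *    A %false return in the sketch means the request has been fully
 *    completed and freed, so the driver drops its reference to it.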
 *
 * Return:
 *    %false - we are done with this request
 *    %true  - still buffers pending for this request
 **/
bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(blk_end_request);

/**
 * blk_end_request_all - Helper function for drivers to finish the request.
 * @rq:    the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *    Completely finish @rq.
 */
void blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(blk_end_request_all);

/**
 * blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq:    the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *    Complete the current consecutively mapped chunk from @rq.
 *
 * Return:
 *    %false - we are done with this request
 *    %true  - still buffers pending for this request
 */
bool blk_end_request_cur(struct request *rq, int error)
{
	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL(blk_end_request_cur);

/**
 * blk_end_request_err - Finish a request till the next failure boundary.
 * @rq:    the request to finish till the next failure boundary for
 * @error: must be negative errno
 *
 * Description:
 *    Complete @rq till the next failure boundary.
 *
 * Return:
 *    %false - we are done with this request
 *    %true  - still buffers pending for this request
 */
bool blk_end_request_err(struct request *rq, int error)
{
	WARN_ON(error >= 0);
	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
}
EXPORT_SYMBOL_GPL(blk_end_request_err);

/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *    Must be called with queue lock held, unlike blk_end_request().
 *
 * Return:
 *    %false - we are done with this request
 *    %true  - still buffers pending for this request
 **/
bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(__blk_end_request);

/**
 * __blk_end_request_all - Helper function for drivers to finish the request.
 * @rq:    the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *    Completely finish @rq.  Must be called with queue lock held.
 */
void __blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(__blk_end_request_all);

/**
 * __blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq:    the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *    Complete the current consecutively mapped chunk from @rq.  Must
 *    be called with queue lock held.
 *
 * Return:
 *    %false - we are done with this request
 *    %true  - still buffers pending for this request
 */
bool __blk_end_request_cur(struct request *rq, int error)
{
	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL(__blk_end_request_cur);

/**
 * __blk_end_request_err - Finish a request till the next failure boundary.
 * @rq:    the request to finish till the next failure boundary for
 * @error: must be negative errno
 *
 * Description:
 *    Complete @rq till the next failure boundary.  Must be called
 *    with queue lock held.
 *
 * Return:
 *    %false - we are done with this request
 *    %true  - still buffers pending for this request
 */
bool __blk_end_request_err(struct request *rq, int error)
{
	WARN_ON(error >= 0);
	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
}
EXPORT_SYMBOL_GPL(__blk_end_request_err);

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;

	if (bio_has_data(bio)) {
		rq->nr_phys_segments = bio_phys_segments(q, bio);
		rq->buffer = bio_data(bio);
	}
	rq->__data_len = bio->bi_size;
	rq->bio = rq->biotail = bio;

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *    Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec->bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their
 *    own exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when the underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regressions
 *    under bursty I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch requests)
 *    1 - Busy (The request stacking driver should stop dispatching requests)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (q->lld_busy_fn)
		return q->lld_busy_fn(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *    Free all bios in @rq for a cloned request.
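 *
 *    A request stacking driver would typically pair this with
 *    blk_rq_prep_clone() and blk_insert_cloned_request(), roughly like
 *    the following hedged sketch (clone, lower_q and the requeue label
 *    are assumed to exist in the driver; a NULL @bs falls back to
 *    fs_bio_set):
 *
 *	if (blk_rq_prep_clone(clone, rq, NULL, GFP_ATOMIC, NULL, NULL))
 *		goto requeue;
 *	if (blk_insert_cloned_request(lower_q, clone)) {
 *		blk_rq_unprep_clone(clone);
 *		goto requeue;
 *	}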
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/*
 * Copy attributes of the original request to the clone request.
 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
 */
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
	dst->cpu = src->cpu;
	dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
	dst->cmd_type = src->cmd_type;
	dst->__sector = blk_rq_pos(src);
	dst->__data_len = blk_rq_bytes(src);
	dst->nr_phys_segments = src->nr_phys_segments;
	dst->ioprio = src->ioprio;
	dst->extra_len = src->extra_len;
}

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non-%0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *    Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *    The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
 *    are not copied, and copying such parts is the caller's responsibility.
 *    Also, pages which the original bios are pointing to are not copied;
 *    the cloned bios just point to the same pages.
 *    So cloned bios must be completed before original bios, which means
 *    the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = fs_bio_set;

	blk_rq_init(NULL, rq);

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_bioset(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else
			rq->bio = rq->biotail = bio;
	}

	__blk_rq_prep_clone(rq, rq_src);

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_schedule_delayed_work(struct request_queue *q,
				  struct delayed_work *dwork,
				  unsigned long delay)
{
	return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);

#define PLUG_MAGIC	0x91827364

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *    Tracking blk_plug inside the task_struct will help with auto-flushing
 *    the pending I/O should the task end up blocking between
 *    blk_start_plug() and blk_finish_plug().
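 *
 *    A minimal usage sketch (submit_lots_of_io() is an assumed stand-in
 *    for any stretch of code that submits I/O):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_lots_of_io();
 *	blk_finish_plug(&plug);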
 *
 *    Auto-flushing is important from a performance perspective, but it
 *    also ensures that we don't deadlock.  For instance, if the task is
 *    blocking for a memory allocation, memory reclaim could end up wanting
 *    to free a page belonging to a request that is currently residing in
 *    our private plug.  By flushing the pending I/O when the process goes
 *    to sleep, we avoid this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	plug->magic = PLUG_MAGIC;
	INIT_LIST_HEAD(&plug->list);
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * If this is a nested plug, don't actually assign it.  It will be
	 * flushed on its own.
	 */
	if (!tsk->plug) {
		/*
		 * Store ordering should not be needed here, since a potential
		 * preempt will imply a full memory barrier.
		 */
		tsk->plug = plug;
	}
}
EXPORT_SYMBOL(blk_start_plug);

static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->q < rqb->q ||
		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

/*
 * If 'from_schedule' is true, then postpone the dispatch of requests
 * until a safe kblockd context.  We do this to avoid accidental big
 * additional stack usage in driver dispatch, in places where the original
 * plugger did not intend it.
 */
static void queue_unplugged(struct request_queue *q, unsigned int depth,
			    bool from_schedule)
	__releases(q->queue_lock)
{
	trace_block_unplug(q, depth, !from_schedule);

	if (from_schedule)
		blk_run_queue_async(q);
	else
		__blk_run_queue(q);
	spin_unlock(q->queue_lock);
}

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct request_queue *q;
	unsigned long flags;
	struct request *rq;
	LIST_HEAD(list);
	unsigned int depth;

	BUG_ON(plug->magic != PLUG_MAGIC);

	flush_plug_callbacks(plug, from_schedule);
	if (list_empty(&plug->list))
		return;

	list_splice_init(&plug->list, &list);

	list_sort(NULL, &list, plug_rq_cmp);

	q = NULL;
	depth = 0;

	/*
	 * Save and disable interrupts here, to avoid doing it for every
	 * queue lock we have to take.
	 */
	local_irq_save(flags);
	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->q != q) {
			/*
			 * This drops the queue lock
			 */
			if (q)
				queue_unplugged(q, depth, from_schedule);
			q = rq->q;
			depth = 0;
			spin_lock(q->queue_lock);
		}

		/*
		 * Short-circuit if @q is dead
		 */
		if (unlikely(blk_queue_dying(q))) {
			__blk_end_request_all(rq, -ENODEV);
			continue;
		}

		/*
		 * rq is already accounted, so use raw insert
		 */
		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
		else
			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);

		depth++;
	}

	/*
	 * This drops the queue lock
	 */
	if (q)
		queue_unplugged(q, depth, from_schedule);

	local_irq_restore(flags);
}

void blk_finish_plug(struct blk_plug *plug)
{
	blk_flush_plug_list(plug, false);

	if (plug == current->plug)
		current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);

#ifdef CONFIG_PM_RUNTIME
/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and start auto suspend for
 *    @dev.  Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized and its
 *    request queue @q has been allocated, and while runtime PM cannot happen
 *    yet (either because it is disabled/forbidden or because its usage_count
 *    is > 0).  In most cases, the driver should call this function before
 *    any I/O has taken place.
 *
 *    This function takes care of setting up autosuspend for the device;
 *    the autosuspend delay is set to -1 to make runtime suspend impossible
 *    until an updated value is set either by the user or by the driver.
 *    Drivers do not need to touch other autosuspend settings.
 *
 *    The block layer runtime PM is request based, so it only works for
 *    drivers that use requests as their I/O unit instead of those that
 *    use bios directly.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
	q->dev = dev;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_set_autosuspend_delay(q->dev, -1);
	pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);

/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function will check if runtime suspend is allowed for the device
 *    by examining if there are any requests pending in the queue.  If there
 *    are requests pending, the device cannot be runtime suspended; otherwise,
 *    the queue's status will be updated to SUSPENDING and the driver can
 *    proceed to suspend the device.
 *
 *    In the not-allowed case, we mark the device last busy so that the
 *    runtime PM core will try to autosuspend it some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
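 *
 *    An illustrative runtime_suspend callback following the sequence this
 *    file documents (dev_to_queue() and my_device_quiesce() are assumed,
 *    driver-specific helpers, not block layer APIs):
 *
 *	static int my_runtime_suspend(struct device *dev)
 *	{
 *		struct request_queue *q = dev_to_queue(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(q);
 *		if (err)
 *			return err;
 *		err = my_device_quiesce(dev);
 *		blk_post_runtime_suspend(q, err);
 *		return err;
 *	}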
 *
 * Return:
 *    0      - OK to runtime suspend the device
 *    -EBUSY - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	spin_lock_irq(q->queue_lock);
	if (q->nr_pending) {
		ret = -EBUSY;
		pm_runtime_mark_last_busy(q->dev);
	} else {
		q->rpm_status = RPM_SUSPENDING;
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_suspend function and mark the device last busy so that
 *    the PM core will try to auto suspend the device at a later time.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_SUSPENDED;
	} else {
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
	}
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);

/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	q->rpm_status = RPM_RESUMING;
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_resume function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_resume function.  If it was successfully resumed,
 *    process the requests that were queued into the device's queue while it
 *    was resuming, and then mark the device last busy and initiate
 *    autosuspend for it.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
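 *
 *    The resume side mirrors the suspend side; a hedged sketch using the
 *    same assumed helpers as the example in blk_pre_runtime_suspend()
 *    (my_device_wake() is likewise an assumed driver helper):
 *
 *	static int my_runtime_resume(struct device *dev)
 *	{
 *		struct request_queue *q = dev_to_queue(dev);
 *		int err;
 *
 *		blk_pre_runtime_resume(q);
 *		err = my_device_wake(dev);
 *		blk_post_runtime_resume(q, err);
 *		return err;
 *	}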
 */
void blk_post_runtime_resume(struct request_queue *q, int err)
{
	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_ACTIVE;
		__blk_run_queue(q);
		pm_runtime_mark_last_busy(q->dev);
		pm_request_autosuspend(q->dev);
	} else {
		q->rpm_status = RPM_SUSPENDED;
	}
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
#endif

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
			sizeof(((struct request *)0)->cmd_flags));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI |
					    WQ_POWER_EFFICIENT, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	return 0;
}