/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed, so
			 * the request will be accounted on the disk only.
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(cpu, part);
		part_inc_in_flight(part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
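/*
 * Worked example (editorial note, not from the original source): with the
 * default nr_requests of 128, the thresholds above come out as
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * so a queue is flagged congested once it climbs to 113 allocated requests
 * and the flag is only cleared when it drains back below 103, giving the
 * on/off points some hysteresis.
 */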
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(nbytes > bio->bi_size)) {
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __func__, nbytes, bio->bi_size);
		nbytes = bio->bi_size;
	}

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio->bi_size -= nbytes;
	bio->bi_sector += (nbytes >> 9);

	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}
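/*
 * Worked example (editorial note): for partial completions req_bio_endio()
 * simply advances the bio. An 8 KiB bio at bi_sector = 1000 that has 4096
 * bytes completed ends up with
 *
 *	bi_size   = 8192 - 4096        = 4096
 *	bi_sector = 1000 + (4096 >> 9) = 1008
 *
 * and bio_endio() is only called once bi_size reaches zero (and the bio is
 * not part of a flush sequence).
 */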
void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	queue_delayed_work(kblockd_workqueue, &q->delay_work,
				msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	__cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
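/*
 * Usage sketch (editorial illustration, not part of the original file): a
 * minimal request_fn showing the back-off pattern the comments above
 * describe. example_hw_submit() is a hypothetical driver helper; everything
 * else is block-layer API. Guarded out so it is never built.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* called with q->queue_lock held, as request_fn always is */
	while ((rq = blk_fetch_request(q)) != NULL) {
		if (example_hw_submit(rq) == -EBUSY) {
			/* device full: put the request back and back off */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 3);	/* retry in ~3ms */
			break;
		}
	}
}
#endif
/*
 * A completion-driven variant would instead call blk_stop_queue() here and
 * have the driver's IRQ handler call blk_start_queue() (under the queue
 * lock) once the hardware has room again.
 */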
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q))) {
		__cancel_delayed_work(&q->delay_work);
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
	}
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q. If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained. The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
void blk_drain_queue(struct request_queue *q, bool drain_all)
{
	while (true) {
		bool drain = false;
		int i;

		spin_lock_irq(q->queue_lock);

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active. Some drivers (e.g. fd and loop) get unhappy in
		 * such cases. Kick the queue iff the dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->rq.elvpriv;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained. Check all the queues and counters.
		 */
		if (drain_all) {
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->rq.count[i];
				drain |= q->in_flight[i];
				drain |= !list_empty(&q->flush_queue[i]);
			}
		}

		spin_unlock_irq(q->queue_lock);

		if (!drain)
			break;
		msleep(10);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used. This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before. On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() is %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	bool drain;

	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain) {
		blk_drain_queue(q, false);
		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
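/*
 * Usage sketch (editorial illustration): bypass mode is a counted state, so
 * callers always pair blk_queue_bypass_start() with blk_queue_bypass_end(),
 * typically around an operation that must not race with elevator/cgroup
 * request handling. example_do_switch() is a hypothetical critical update.
 * Guarded out so it is never built.
 */
#if 0
static int example_switch_policy(struct request_queue *q)
{
	int ret;

	blk_queue_bypass_start(q);	/* drain + enter bypass */
	ret = example_do_switch(q);	/* hypothetical */
	blk_queue_bypass_end(q);	/* restore normal queueing */

	return ret;
}
#endif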
/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DEAD, drain all pending requests, destroy and put it. All
 * future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DEAD, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

	spin_lock_irq(lock);

	/*
	 * Dead queue is permanently in bypass mode till released. Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing. This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DEAD, q);

	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;

	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/* drain all requests queued before DEAD marking */
	blk_drain_queue(q, true);

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

static int blk_init_free_list(struct request_queue *q)
{
	struct request_list *rl = &q->rq;

	if (unlikely(rl->rq_pool))
		return 0;

	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_id;
	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init. The initial bypass will be finished at the end of
	 * blk_init_allocated_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	if (blkcg_init_queue(q))
		goto fail_id;

	return q;

fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sort requests and coalesce adjacent requests, then it must
 *    call blk_init_queue(). The function @rfn will be called when there
 *    are requests on the queue that need to be processed. If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time. If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
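/*
 * Usage sketch (editorial illustration): the blk_init_queue() /
 * blk_cleanup_queue() pairing the note above requires, as a hypothetical
 * driver would do it. example_request_fn and example_lock are assumed to
 * be defined by that driver. Guarded out so it is never built.
 */
#if 0
static struct request_queue *example_q;

static int __init example_init(void)
{
	example_q = blk_init_queue(example_request_fn, &example_lock);
	if (!example_q)
		return -ENOMEM;
	/* attach example_q to a gendisk, set queue limits, etc. */
	return 0;
}

static void __exit example_exit(void)
{
	blk_cleanup_queue(example_q);	/* mandatory counterpart */
}
#endif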
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	if (!q)
		return NULL;

	if (blk_init_free_list(q))
		return NULL;

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unprep_rq_fn		= NULL;
	q->queue_flags		= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock	= lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* init elevator */
	if (elevator_init(q, NULL))
		return NULL;

	blk_queue_congestion_threshold(q);

	/* all done, end the initial bypass */
	blk_queue_bypass_end(q);
	return q;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dead(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV) {
		elv_put_request(q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, q->rq.rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_queue *q, int sync)
{
	struct request_list *rl = &q->rq;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_queue_full(q, sync);
	}
}
/*
 * A request has just been released. Account for it, update the full and
 * congestion status, wake up any waiters. Called under q->queue_lock.
 */
static void freed_request(struct request_queue *q, unsigned int flags)
{
	struct request_list *rl = &q->rq;
	int sync = rw_is_sync(flags);

	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		rl->elvpriv--;

	__freed_request(q, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(q, sync ^ 1);
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
	if (!bio)
		return true;

	/*
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
		return false;

	return true;
}

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio. May return
 * %NULL if %current->io_context doesn't exist.
 */
static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q. This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns %NULL on failure, with @q->queue_lock still held.
 * Returns !%NULL on success, with @q->queue_lock *not* held.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	struct request *rq;
	struct request_list *rl = &q->rq;
	struct elevator_type *et;
	struct io_context *ioc;
	struct io_cq *icq = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	bool retried = false;
	int may_queue;
retry:
	et = q->elevator->type;
	ioc = rq_ioc(bio);

	if (unlikely(blk_queue_dead(q)))
		return NULL;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * We want ioc to record batching state. If it's
			 * not already there, creating a new one requires
			 * dropping queue_lock, which in turn requires
			 * retesting conditions to avoid queue hang.
			 */
			if (!ioc && !retried) {
				spin_unlock_irq(q->queue_lock);
				create_io_context(gfp_mask, q->node);
				spin_lock_irq(q->queue_lock);
				retried = true;
				goto retry;
			}

			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
883 */ 884 if (!blk_queue_full(q, is_sync)) { 885 ioc_set_batching(q, ioc); 886 blk_set_queue_full(q, is_sync); 887 } else { 888 if (may_queue != ELV_MQUEUE_MUST 889 && !ioc_batching(q, ioc)) { 890 /* 891 * The queue is full and the allocating 892 * process is not a "batcher", and not 893 * exempted by the IO scheduler 894 */ 895 return NULL; 896 } 897 } 898 } 899 blk_set_queue_congested(q, is_sync); 900 } 901 902 /* 903 * Only allow batching queuers to allocate up to 50% over the defined 904 * limit of requests, otherwise we could have thousands of requests 905 * allocated with any setting of ->nr_requests 906 */ 907 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 908 return NULL; 909 910 rl->count[is_sync]++; 911 rl->starved[is_sync] = 0; 912 913 /* 914 * Decide whether the new request will be managed by elevator. If 915 * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will 916 * prevent the current elevator from being destroyed until the new 917 * request is freed. This guarantees icq's won't be destroyed and 918 * makes creating new ones safe. 919 * 920 * Also, lookup icq while holding queue_lock. If it doesn't exist, 921 * it will be created after releasing queue_lock. 922 */ 923 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { 924 rw_flags |= REQ_ELVPRIV; 925 rl->elvpriv++; 926 if (et->icq_cache && ioc) 927 icq = ioc_lookup_icq(ioc, q); 928 } 929 930 if (blk_queue_io_stat(q)) 931 rw_flags |= REQ_IO_STAT; 932 spin_unlock_irq(q->queue_lock); 933 934 /* allocate and init request */ 935 rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 936 if (!rq) 937 goto fail_alloc; 938 939 blk_rq_init(q, rq); 940 rq->cmd_flags = rw_flags | REQ_ALLOCED; 941 942 /* init elvpriv */ 943 if (rw_flags & REQ_ELVPRIV) { 944 if (unlikely(et->icq_cache && !icq)) { 945 create_io_context(gfp_mask, q->node); 946 ioc = rq_ioc(bio); 947 if (!ioc) 948 goto fail_elvpriv; 949 950 icq = ioc_create_icq(ioc, q, gfp_mask); 951 if (!icq) 952 goto fail_elvpriv; 953 } 954 955 rq->elv.icq = icq; 956 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) 957 goto fail_elvpriv; 958 959 /* @rq->elv.icq holds io_context until @rq is freed */ 960 if (icq) 961 get_io_context(icq->ioc); 962 } 963 out: 964 /* 965 * ioc may be NULL here, and ioc_batching will be false. That's 966 * OK, if the queue is under the request limit then requests need 967 * not count toward the nr_batch_requests limit. There will always 968 * be some limit enforced by BLK_BATCH_TIME. 969 */ 970 if (ioc_batching(q, ioc)) 971 ioc->nr_batch_requests--; 972 973 trace_block_getrq(q, bio, rw_flags & 1); 974 return rq; 975 976 fail_elvpriv: 977 /* 978 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed 979 * and may fail indefinitely under memory pressure and thus 980 * shouldn't stall IO. Treat this request as !elvpriv. This will 981 * disturb iosched and blkcg but weird is bettern than dead. 982 */ 983 printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n", 984 dev_name(q->backing_dev_info.dev)); 985 986 rq->cmd_flags &= ~REQ_ELVPRIV; 987 rq->elv.icq = NULL; 988 989 spin_lock_irq(q->queue_lock); 990 rl->elvpriv--; 991 spin_unlock_irq(q->queue_lock); 992 goto out; 993 994 fail_alloc: 995 /* 996 * Allocation failed presumably due to memory. Undo anything we 997 * might have messed up. 998 * 999 * Allocating task should really be put onto the front of the wait 1000 * queue, but this is pretty rare. 
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(q, rw_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return NULL;
}

/**
 * get_request_wait - get a free request with retry
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 *
 * Get a free request from @q. This function keeps retrying under memory
 * pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns %NULL on failure, with @q->queue_lock still held.
 * Returns !%NULL on success, with @q->queue_lock *not* held.
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct request_list *rl = &q->rq;

		if (unlikely(blk_queue_dead(q)))
			return NULL;

		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				TASK_UNINTERRUPTIBLE);

		trace_block_sleeprq(q, bio, rw_flags & 1);

		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period of time.
		 * See ioc_batching, ioc_set_batching
		 */
		create_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, current->io_context);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[is_sync], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT)
		rq = get_request_wait(q, rw, NULL);
	else
		rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
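/*
 * Usage sketch (editorial illustration): allocating a request directly with
 * blk_get_request(), as a driver issuing a private command might. With
 * __GFP_WAIT the call sleeps until a request is available and only returns
 * NULL if the queue is dead. Guarded out so it is never built.
 */
#if 0
static int example_issue_internal_cmd(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENODEV;	/* queue died */

	rq->cmd_type = REQ_TYPE_SPECIAL;	/* driver-private command */
	/* fill in rq->cmd / rq->special as the driver requires ... */

	blk_execute_rq(q, NULL, rq, 0);		/* issue and wait */
	blk_put_request(rq);
	return 0;
}
#endif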
/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers. That bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);
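/*
 * Usage sketch (editorial illustration): a caller that already holds a bio
 * (or bio chain) turning it into a BLOCK_PC request. Note the ERR_PTR()
 * convention, which differs from blk_get_request()'s NULL-on-failure.
 * example_build_bio() is hypothetical. Guarded out so it is never built.
 */
#if 0
static int example_send_pc_cmd(struct request_queue *q)
{
	struct bio *bio = example_build_bio();	/* hypothetical */
	struct request *rq;

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* fill in rq->cmd, rq->cmd_len, rq->timeout here ... */

	blk_execute_rq(q, NULL, rq, 0);
	blk_put_request(rq);
	return 0;
}
#endif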
/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more; when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	if (now == part->stamp)
		return;

	if (part_in_flight(part)) {
		__part_stat_add(cpu, part, time_in_queue,
				part_in_flight(part) * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation. To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats. This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		unsigned int flags = req->cmd_flags;

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(q, req);
		freed_request(q, flags);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

/**
 * blk_add_request_payload - add a payload to a request
 * @rq: request to update
 * @page: page backing the payload
 * @len: length of the payload.
 *
 * This allows a block driver to later add a payload to an already
 * submitted request. The driver needs to take care of freeing the
 * payload itself.
 *
 * Note that this is a quite horrible hack and nothing but handling of
 * discard requests should ever use it.
 */
void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len)
{
	struct bio *bio = rq->bio;

	bio->bi_io_vec->bv_page = page;
	bio->bi_io_vec->bv_offset = 0;
	bio->bi_io_vec->bv_len = len;

	bio->bi_size = len;
	bio->bi_vcnt = 1;
	bio->bi_phys_segments = 1;

	rq->__data_len = rq->resid_len = len;
	rq->nr_phys_segments = 1;
	rq->buffer = bio_data(bio);
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
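/*
 * Usage sketch (editorial illustration): the discard-only use case the note
 * above allows, roughly as a driver's prep_rq_fn might do it when the
 * hardware wants the discard range described in a data buffer. The payload
 * layout and example_fill_trim_page() are hypothetical. Guarded out so it
 * is never built.
 */
#if 0
static int example_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			return BLKPREP_DEFER;
		example_fill_trim_page(page, rq);	/* hypothetical */
		blk_add_request_payload(rq, page, 512);
		/* the driver must free 'page' on completion */
	}
	return BLKPREP_OK;
}
#endif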
static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
				   struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);
	return true;
}

static bool bio_attempt_front_merge(struct request_queue *q,
				    struct request *req, struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	/*
	 * may not be valid. If the low level driver said
	 * it didn't need a bounce buffer then it better
	 * not touch req->buffer either...
	 */
	req->buffer = bio_data(bio);
	req->__sector = bio->bi_sector;
	req->__data_len += bio->bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);
	return true;
}

/**
 * attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list. Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock. As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point. In addition, we don't have
 * reliable access to the elevator outside the queue lock. Only check basic
 * merging parameters without querying the elevator.
 */
static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
			       unsigned int *request_count)
{
	struct blk_plug *plug;
	struct request *rq;
	bool ret = false;

	plug = current->plug;
	if (!plug)
		goto out;
	*request_count = 0;

	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
		int el_ret;

		if (rq->q == q)
			(*request_count)++;

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			ret = bio_attempt_back_merge(q, rq, bio);
			if (ret)
				break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			ret = bio_attempt_front_merge(q, rq, bio);
			if (ret)
				break;
		}
	}
out:
	return ret;
}

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cmd_type = REQ_TYPE_FS;

	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
	if (bio->bi_rw & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
	req->__sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	blk_rq_bio_prep(req->q, req, bio);
}

void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
	struct request *req;
	unsigned int request_count = 0;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (attempt_plug_merge(q, bio, &request_count))
		return;

	spin_lock_irq(q->queue_lock);

	el_ret = elv_merge(q, &req, bio);
	if (el_ret == ELEVATOR_BACK_MERGE) {
		if (bio_attempt_back_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		if (bio_attempt_front_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;

	/*
	 * Grab a free request. This might sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request_wait(q, rw_flags, bio);
	if (unlikely(!req)) {
		bio_endio(bio, -ENODEV);	/* @q is dead */
		goto out_unlock;
	}

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace. If others have been added before, check
		 * if we have multiple devices in this plug. If so, make a
		 * note to sort the list before dispatch.
		 */
		if (list_empty(&plug->list))
			trace_block_plug(q);
		else {
			if (!plug->should_sort) {
				struct request *__rq;

				__rq = list_entry_rq(plug->list.prev);
				if (__rq->q != q)
					plug->should_sort = 1;
			}
			if (request_count >= BLK_MAX_REQUEST_COUNT) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
		}
		list_add_tail(&req->queuelist, &plug->list);
		drive_stat_acct(req, 1);
	} else {
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		__blk_run_queue(q);
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */
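/*
 * Usage sketch (editorial illustration): the on-stack plugging that
 * attempt_plug_merge() and blk_queue_bio() cooperate with. A submitter
 * brackets a batch of submissions with blk_start_plug()/blk_finish_plug();
 * bios queued in between sit on current->plug and are merged and dispatched
 * in one go. Guarded out so it is never built.
 */
#if 0
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]->bi_rw, bios[i]);
	blk_finish_plug(&plug);	/* flushes the plugged list */
}
#endif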
/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
				      bdev->bd_dev,
				      bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio->bi_sector + bio_sectors(bio),
			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */
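/*
 * Usage note (editorial illustration, hedged): with CONFIG_FAIL_MAKE_REQUEST
 * enabled, fault injection is typically driven from userspace roughly like
 * this (see Documentation/fault-injection for the authoritative syntax):
 *
 *	# boot with fail_make_request=<interval>,<probability>,<space>,<times>
 *	fail_make_request=1,100,0,-1
 *
 *	# then opt a device (or partition) in via its make-it-fail attribute
 *	echo 1 > /sys/block/sdX/make-it-fail
 *
 * should_fail_request() above only fails I/O for parts that were opted in.
 */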
/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	int err = -EIO;
	char b[BDEVNAME_SIZE];
	struct hd_struct *part;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	q = bdev_get_queue(bio->bi_bdev);
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
		       "nonexistent block-device %s (%Lu)\n",
		       bdevname(bio->bi_bdev, b),
		       (long long) bio->bi_sector);
		goto end_io;
	}

	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
		     nr_sectors > queue_max_hw_sectors(q))) {
		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
		       bdevname(bio->bi_bdev, b),
		       bio_sectors(bio),
		       queue_max_hw_sectors(q));
		goto end_io;
	}

	part = bio->bi_bdev->bd_part;
	if (should_fail_request(part, bio->bi_size) ||
	    should_fail_request(&part_to_disk(part)->part0,
				bio->bi_size))
		goto end_io;

	/*
	 * If this device has partitions, remap block n
	 * of partition p to block n+start(p) of the disk.
	 */
	blk_partition_remap(bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
		goto end_io;

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
		if (!nr_sectors) {
			err = 0;
			goto end_io;
		}
	}

	if ((bio->bi_rw & REQ_DISCARD) &&
	    (!blk_queue_discard(q) ||
	     ((bio->bi_rw & REQ_SECURE) &&
	      !blk_queue_secdiscard(q)))) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	if (blk_throtl_bio(q, bio))
		return false;	/* throttled, will be resubmitted later */

	trace_block_bio_queue(q, bio);
	return true;

end_io:
	bio_endio(bio, err);
	return false;
}

/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status. The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may resubmit the bio to
 * a lower device by calling into generic_make_request recursively, which
 * means the bio should NOT be touched after the call to ->make_request_fn.
 */
void generic_make_request(struct bio *bio)
{
	struct bio_list bio_list_on_stack;

	if (!generic_make_request_checks(bio))
		return;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem. So use
	 * current->bio_list to keep a list of requests submitted by a
	 * make_request_fn function. current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not. If it is NULL, then no make_request is active. If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail.
	 */
	if (current->bio_list) {
		bio_list_add(current->bio_list, bio);
		return;
	}

	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added. ->make_request() may indeed add some more bios
	 * through a recursive call to generic_make_request. If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top. In this case we really did just take the bio
	 * off the top of the list (no pretending) and so remove it from
	 * bio_list, and call into ->make_request() again.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack);
	current->bio_list = &bio_list_on_stack;
	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		q->make_request_fn(q, bio);

		bio = bio_list_pop(current->bio_list);
	} while (bio);
	current->bio_list = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);
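/*
 * Usage sketch (editorial illustration): the minimal bio setup the
 * generic_make_request() documentation above asks for: bi_bdev, bi_sector,
 * the io_vec, and a completion callback. Reads one sector from 'bdev' into
 * 'page', submitted through submit_bio() (the thin wrapper defined just
 * below). Guarded out so it is never built.
 */
#if 0
static void example_end_io(struct bio *bio, int error)
{
	complete(bio->bi_private);	/* wake the submitter */
	bio_put(bio);
}

static int example_read_sector(struct block_device *bdev, struct page *page,
			       sector_t sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, 512, 0);
	bio->bi_end_io = example_end_io;
	bio->bi_private = &done;

	submit_bio(READ, bio);
	wait_for_completion(&done);
	return 0;
}
#endif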
/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be set up and ready for I/O.
 *
 */
void submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	bio->bi_rw |= rw;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
		if (rw & WRITE) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
				current->comm, task_pid_nr(current),
				(rw & WRITE) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_sector,
				bdevname(bio->bi_bdev, b),
				count);
		}
	}

	generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);

/**
 * blk_rq_check_limits - Helper function to check a request for the queue limit
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    This function should also be useful for request stacking drivers
 *    in some cases below, so export this function.
 *    Request stacking drivers like request-based dm may change the queue
 *    limits while requests are in the queue (e.g. dm's table swapping).
 *    Such request stacking drivers should check those requests against
 *    the new queue limits again when they dispatch those requests,
 *    although such checks are also done against the old queue limits
 *    when submitting requests.
 */
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD)
		return 0;

	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1909 /**
1910  * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1911  * @rq: request to examine
1912  *
1913  * Description:
1914  * A request could be a merge of IOs which require different failure
1915  * handling. This function determines the number of bytes which
1916  * can be failed from the beginning of the request without
1917  * crossing into areas which need to be retried further.
1918  *
1919  * Return:
1920  * The number of bytes to fail.
1921  *
1922  * Context:
1923  * queue_lock must be held.
1924  */
1925 unsigned int blk_rq_err_bytes(const struct request *rq)
1926 {
1927         unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1928         unsigned int bytes = 0;
1929         struct bio *bio;
1930 
1931         if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1932                 return blk_rq_bytes(rq);
1933 
1934         /*
1935          * Currently the only 'mixing' which can happen is between
1936          * different failfast types. We can safely fail portions
1937          * which have all the failfast bits that the first one has -
1938          * the ones which are at least as eager to fail as the first
1939          * one.
1940          */
1941         for (bio = rq->bio; bio; bio = bio->bi_next) {
1942                 if ((bio->bi_rw & ff) != ff)
1943                         break;
1944                 bytes += bio->bi_size;
1945         }
1946 
1947         /* this could lead to infinite loop */
1948         BUG_ON(blk_rq_bytes(rq) && !bytes);
1949         return bytes;
1950 }
1951 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1952 
1953 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1954 {
1955         if (blk_do_io_stat(req)) {
1956                 const int rw = rq_data_dir(req);
1957                 struct hd_struct *part;
1958                 int cpu;
1959 
1960                 cpu = part_stat_lock();
1961                 part = req->part;
1962                 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1963                 part_stat_unlock();
1964         }
1965 }
1966 
1967 static void blk_account_io_done(struct request *req)
1968 {
1969         /*
1970          * Account IO completion. flush_rq isn't accounted as a
1971          * normal IO on queueing nor completion. Accounting the
1972          * containing request is enough.
1973          */
1974         if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
1975                 unsigned long duration = jiffies - req->start_time;
1976                 const int rw = rq_data_dir(req);
1977                 struct hd_struct *part;
1978                 int cpu;
1979 
1980                 cpu = part_stat_lock();
1981                 part = req->part;
1982 
1983                 part_stat_inc(cpu, part, ios[rw]);
1984                 part_stat_add(cpu, part, ticks[rw], duration);
1985                 part_round_stats(cpu, part);
1986                 part_dec_in_flight(part, rw);
1987 
1988                 hd_struct_put(part);
1989                 part_stat_unlock();
1990         }
1991 }
1992 
1993 /**
1994  * blk_peek_request - peek at the top of a request queue
1995  * @q: request queue to peek at
1996  *
1997  * Description:
1998  * Return the request at the top of @q. The returned request
1999  * should be started using blk_start_request() before the LLD starts
2000  * processing it.
2001  *
2002  * Return:
2003  * Pointer to the request at the top of @q if available. NULL
2004  * otherwise.
2005  *
2006  * Context:
2007  * queue_lock must be held.
2008  */
2009 struct request *blk_peek_request(struct request_queue *q)
2010 {
2011         struct request *rq;
2012         int ret;
2013 
2014         while ((rq = __elv_next_request(q)) != NULL) {
2015                 if (!(rq->cmd_flags & REQ_STARTED)) {
2016                         /*
2017                          * This is the first time the device driver
2018                          * sees this request (possibly after
2019                          * requeueing). Notify the IO scheduler.
2020                          */
2021                         if (rq->cmd_flags & REQ_SORTED)
2022                                 elv_activate_rq(q, rq);
2023 
2024                         /*
2025                          * Just mark it as started even if we don't start
2026                          * it: a request that has been delayed should
2027                          * not be passed by new incoming requests.
2028                          */
2029                         rq->cmd_flags |= REQ_STARTED;
2030                         trace_block_rq_issue(q, rq);
2031                 }
2032 
2033                 if (!q->boundary_rq || q->boundary_rq == rq) {
2034                         q->end_sector = rq_end_sector(rq);
2035                         q->boundary_rq = NULL;
2036                 }
2037 
2038                 if (rq->cmd_flags & REQ_DONTPREP)
2039                         break;
2040 
2041                 if (q->dma_drain_size && blk_rq_bytes(rq)) {
2042                         /*
2043                          * Make sure space for the drain appears; we
2044                          * know we can do this because max_hw_segments
2045                          * has been adjusted to be one fewer than the
2046                          * device can handle.
2047                          */
2048                         rq->nr_phys_segments++;
2049                 }
2050 
2051                 if (!q->prep_rq_fn)
2052                         break;
2053 
2054                 ret = q->prep_rq_fn(q, rq);
2055                 if (ret == BLKPREP_OK) {
2056                         break;
2057                 } else if (ret == BLKPREP_DEFER) {
2058                         /*
2059                          * The request may have been (partially) prepped.
2060                          * We need to keep this request at the front to
2061                          * avoid resource deadlock. REQ_STARTED will
2062                          * prevent other fs requests from passing this one.
2063                          */
2064                         if (q->dma_drain_size && blk_rq_bytes(rq) &&
2065                             !(rq->cmd_flags & REQ_DONTPREP)) {
2066                                 /*
2067                                  * Remove the space for the drain we added
2068                                  * so that we don't add it again.
2069                                  */
2070                                 --rq->nr_phys_segments;
2071                         }
2072 
2073                         rq = NULL;
2074                         break;
2075                 } else if (ret == BLKPREP_KILL) {
2076                         rq->cmd_flags |= REQ_QUIET;
2077                         /*
2078                          * Mark this request as started so we don't trigger
2079                          * any debug logic in the end I/O path.
2080                          */
2081                         blk_start_request(rq);
2082                         __blk_end_request_all(rq, -EIO);
2083                 } else {
2084                         printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2085                         break;
2086                 }
2087         }
2088 
2089         return rq;
2090 }
2091 EXPORT_SYMBOL(blk_peek_request);
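
/*
 * Illustrative sketch, not part of the original file: a prep_rq_fn as
 * consumed by blk_peek_request() above, registered once at init time with
 * blk_queue_prep_rq(q, example_prep_rq). "struct example_cmd" is an
 * assumed per-command structure. BLKPREP_DEFER keeps the request at the
 * head of the queue for a later retry; BLKPREP_KILL makes
 * blk_peek_request() complete it with -EIO.
 */
struct example_cmd {
        sector_t sector;
        unsigned int nr_sectors;
};

static int example_prep_rq(struct request_queue *q, struct request *rq)
{
        struct example_cmd *cmd;

        /* atomic allocation: prep is called with the queue lock held */
        cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
        if (!cmd)
                return BLKPREP_DEFER;   /* out of resources, retry later */

        cmd->sector = blk_rq_pos(rq);
        cmd->nr_sectors = blk_rq_sectors(rq);
        rq->special = cmd;
        rq->cmd_flags |= REQ_DONTPREP;  /* don't prepare again on requeue */

        return BLKPREP_OK;
}
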
2092 
2093 void blk_dequeue_request(struct request *rq)
2094 {
2095         struct request_queue *q = rq->q;
2096 
2097         BUG_ON(list_empty(&rq->queuelist));
2098         BUG_ON(ELV_ON_HASH(rq));
2099 
2100         list_del_init(&rq->queuelist);
2101 
2102         /*
2103          * The time frame between a request being removed from the lists
2104          * and when it is freed is accounted as IO that is in progress on
2105          * the driver side.
2106          */
2107         if (blk_account_rq(rq)) {
2108                 q->in_flight[rq_is_sync(rq)]++;
2109                 set_io_start_time_ns(rq);
2110         }
2111 }
2112 
2113 /**
2114  * blk_start_request - start request processing on the driver
2115  * @req: request to dequeue
2116  *
2117  * Description:
2118  * Dequeue @req and start the timeout timer on it. This hands off the
2119  * request to the driver.
2120  *
2121  * Block internal functions which don't want to start the timer should
2122  * call blk_dequeue_request().
2123  *
2124  * Context:
2125  * queue_lock must be held.
2126  */
2127 void blk_start_request(struct request *req)
2128 {
2129         blk_dequeue_request(req);
2130 
2131         /*
2132          * We are now handing the request to the hardware, initialize
2133          * resid_len to the full count and add the timeout handler.
2134          */
2135         req->resid_len = blk_rq_bytes(req);
2136         if (unlikely(blk_bidi_rq(req)))
2137                 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2138 
2139         blk_add_timer(req);
2140 }
2141 EXPORT_SYMBOL(blk_start_request);
2142 
2143 /**
2144  * blk_fetch_request - fetch a request from a request queue
2145  * @q: request queue to fetch a request from
2146  *
2147  * Description:
2148  * Return the request at the top of @q. The request is started on
2149  * return and the LLD can start processing it immediately.
2150  *
2151  * Return:
2152  * Pointer to the request at the top of @q if available. NULL
2153  * otherwise.
2154  *
2155  * Context:
2156  * queue_lock must be held.
2157  */
2158 struct request *blk_fetch_request(struct request_queue *q)
2159 {
2160         struct request *rq;
2161 
2162         rq = blk_peek_request(q);
2163         if (rq)
2164                 blk_start_request(rq);
2165         return rq;
2166 }
2167 EXPORT_SYMBOL(blk_fetch_request);
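
/*
 * Illustrative sketch, not part of the original file: the canonical
 * request_fn shape for a simple driver built on blk_fetch_request(). The
 * queue lock is held on entry to a request_fn, so the locked
 * __blk_end_request_all() variant is used. example_xfer() stands in for a
 * driver-specific, synchronous transfer routine.
 */
static int example_xfer(struct request *rq);

static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                int error = 0;

                if (rq->cmd_type != REQ_TYPE_FS)
                        error = -EIO;           /* only fs requests here */
                else if (example_xfer(rq) < 0)
                        error = -EIO;

                __blk_end_request_all(rq, error);
        }
}
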
2168 
2169 /**
2170  * blk_update_request - Special helper function for request stacking drivers
2171  * @req:      the request being processed
2172  * @error:    %0 for success, < %0 for error
2173  * @nr_bytes: number of bytes to complete @req
2174  *
2175  * Description:
2176  * Ends I/O on a number of bytes attached to @req, but doesn't complete
2177  * the request structure even if @req has no data left over.
2178  * If @req has leftover data, sets it up for the next range of segments.
2179  *
2180  * This special helper function is only for request stacking drivers
2181  * (e.g. request-based dm) so that they can handle partial completion.
2182  * Actual device drivers should use blk_end_request instead.
2183  *
2184  * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2185  * %false return from this function.
2186  *
2187  * Return:
2188  * %false - this request doesn't have any more data
2189  * %true  - this request has more data
2190  **/
2191 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2192 {
2193         int total_bytes, bio_nbytes, next_idx = 0;
2194         struct bio *bio;
2195 
2196         if (!req->bio)
2197                 return false;
2198 
2199         trace_block_rq_complete(req->q, req);
2200 
2201         /*
2202          * For fs requests, rq is just a carrier of independent bios
2203          * and each partial completion should be handled separately.
2204          * Reset per-request error on each partial completion.
2205          *
2206          * TODO: tj: This is too subtle. It would be better to let
2207          * low level drivers do what they see fit.
2208          */
2209         if (req->cmd_type == REQ_TYPE_FS)
2210                 req->errors = 0;
2211 
2212         if (error && req->cmd_type == REQ_TYPE_FS &&
2213             !(req->cmd_flags & REQ_QUIET)) {
2214                 char *error_type;
2215 
2216                 switch (error) {
2217                 case -ENOLINK:
2218                         error_type = "recoverable transport";
2219                         break;
2220                 case -EREMOTEIO:
2221                         error_type = "critical target";
2222                         break;
2223                 case -EBADE:
2224                         error_type = "critical nexus";
2225                         break;
2226                 case -EIO:
2227                 default:
2228                         error_type = "I/O";
2229                         break;
2230                 }
2231                 printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
2232                        error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
2233                        (unsigned long long)blk_rq_pos(req));
2234         }
2235 
2236         blk_account_io_completion(req, nr_bytes);
2237 
2238         total_bytes = bio_nbytes = 0;
2239         while ((bio = req->bio) != NULL) {
2240                 int nbytes;
2241 
2242                 if (nr_bytes >= bio->bi_size) {
2243                         req->bio = bio->bi_next;
2244                         nbytes = bio->bi_size;
2245                         req_bio_endio(req, bio, nbytes, error);
2246                         next_idx = 0;
2247                         bio_nbytes = 0;
2248                 } else {
2249                         int idx = bio->bi_idx + next_idx;
2250 
2251                         if (unlikely(idx >= bio->bi_vcnt)) {
2252                                 blk_dump_rq_flags(req, "__end_that");
2253                                 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
2254                                        __func__, idx, bio->bi_vcnt);
2255                                 break;
2256                         }
2257 
2258                         nbytes = bio_iovec_idx(bio, idx)->bv_len;
2259                         BIO_BUG_ON(nbytes > bio->bi_size);
2260 
2261                         /*
2262                          * not a complete bvec done
2263                          */
2264                         if (unlikely(nbytes > nr_bytes)) {
2265                                 bio_nbytes += nr_bytes;
2266                                 total_bytes += nr_bytes;
2267                                 break;
2268                         }
2269 
2270                         /*
2271                          * advance to the next vector
2272                          */
2273                         next_idx++;
2274                         bio_nbytes += nbytes;
2275                 }
2276 
2277                 total_bytes += nbytes;
2278                 nr_bytes -= nbytes;
2279 
2280                 bio = req->bio;
2281                 if (bio) {
2282                         /*
2283                          * end more in this run, or just return 'not-done'
2284                          */
2285                         if (unlikely(nr_bytes <= 0))
2286                                 break;
2287                 }
2288         }
2289 
2290         /*
2291          * completely done
2292          */
2293         if (!req->bio) {
2294                 /*
2295                  * Reset counters so that the request stacking driver
2296                  * can find out how many bytes remain in the request
2297                  * later.
2298                  */
2299                 req->__data_len = 0;
2300                 return false;
2301         }
2302 
2303         /*
2304          * if the request wasn't completed, update state
2305          */
2306         if (bio_nbytes) {
2307                 req_bio_endio(req, bio, bio_nbytes, error);
2308                 bio->bi_idx += next_idx;
2309                 bio_iovec(bio)->bv_offset += nr_bytes;
2310                 bio_iovec(bio)->bv_len -= nr_bytes;
2311         }
2312 
2313         req->__data_len -= total_bytes;
2314         req->buffer = bio_data(req->bio);
2315 
2316         /* update sector only for requests with clear definition of sector */
2317         if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
2318                 req->__sector += total_bytes >> 9;
2319 
2320         /* mixed attributes always follow the first bio */
2321         if (req->cmd_flags & REQ_MIXED_MERGE) {
2322                 req->cmd_flags &= ~REQ_FAILFAST_MASK;
2323                 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2324         }
2325 
2326         /*
2327          * If the total number of sectors is less than the first segment
2328          * size, something has gone terribly wrong.
2329          */
2330         if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2331                 blk_dump_rq_flags(req, "request botched");
2332                 req->__data_len = blk_rq_cur_bytes(req);
2333         }
2334 
2335         /* recalculate the number of segments */
2336         blk_recalc_rq_segments(req);
2337 
2338         return true;
2339 }
2340 EXPORT_SYMBOL_GPL(blk_update_request);
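
/*
 * Illustrative sketch, not part of the original file: roughly how a
 * request-based stacking driver propagates a partially completed clone bio
 * back to the original request (request-based dm does this in its clone
 * bio end_io handler). example_complete_original() is an assumed
 * driver-side completion path.
 */
static void example_complete_original(struct request *orig);

static void example_clone_bio_done(struct request *orig, int error,
                                   unsigned int nr_bytes)
{
        /*
         * Advance @orig by the amount the lower device finished; %true
         * means @orig still has outstanding data and must not be
         * completed yet.
         */
        if (blk_update_request(orig, error, nr_bytes))
                return;

        /* all bytes accounted for; finish through the usual path */
        example_complete_original(orig);
}
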
2341 
2342 static bool blk_update_bidi_request(struct request *rq, int error,
2343                                     unsigned int nr_bytes,
2344                                     unsigned int bidi_bytes)
2345 {
2346         if (blk_update_request(rq, error, nr_bytes))
2347                 return true;
2348 
2349         /* Bidi request must be completed as a whole */
2350         if (unlikely(blk_bidi_rq(rq)) &&
2351             blk_update_request(rq->next_rq, error, bidi_bytes))
2352                 return true;
2353 
2354         if (blk_queue_add_random(rq->q))
2355                 add_disk_randomness(rq->rq_disk);
2356 
2357         return false;
2358 }
2359 
2360 /**
2361  * blk_unprep_request - unprepare a request
2362  * @req: the request
2363  *
2364  * This function makes a request ready for complete resubmission (or
2365  * completion). It happens only after all error handling is complete,
2366  * so it represents the appropriate moment to deallocate any resources
2367  * that were allocated to the request in the prep_rq_fn. The queue
2368  * lock is held when calling this.
2369  */
2370 void blk_unprep_request(struct request *req)
2371 {
2372         struct request_queue *q = req->q;
2373 
2374         req->cmd_flags &= ~REQ_DONTPREP;
2375         if (q->unprep_rq_fn)
2376                 q->unprep_rq_fn(q, req);
2377 }
2378 EXPORT_SYMBOL_GPL(blk_unprep_request);
2379 
2380 /*
2381  * queue lock must be held
2382  */
2383 static void blk_finish_request(struct request *req, int error)
2384 {
2385         if (blk_rq_tagged(req))
2386                 blk_queue_end_tag(req->q, req);
2387 
2388         BUG_ON(blk_queued_rq(req));
2389 
2390         if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2391                 laptop_io_completion(&req->q->backing_dev_info);
2392 
2393         blk_delete_timer(req);
2394 
2395         if (req->cmd_flags & REQ_DONTPREP)
2396                 blk_unprep_request(req);
2397 
2398 
2399         blk_account_io_done(req);
2400 
2401         if (req->end_io)
2402                 req->end_io(req, error);
2403         else {
2404                 if (blk_bidi_rq(req))
2405                         __blk_put_request(req->next_rq->q, req->next_rq);
2406 
2407                 __blk_put_request(req->q, req);
2408         }
2409 }
2410 
2411 /**
2412  * blk_end_bidi_request - Complete a bidi request
2413  * @rq:         the request to complete
2414  * @error:      %0 for success, < %0 for error
2415  * @nr_bytes:   number of bytes to complete @rq
2416  * @bidi_bytes: number of bytes to complete @rq->next_rq
2417  *
2418  * Description:
2419  * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2420  * Drivers that support bidi can safely call this function for any
2421  * type of request, bidi or uni. In the latter case @bidi_bytes is
2422  * just ignored.
2423  *
2424  * Return:
2425  * %false - we are done with this request
2426  * %true  - still buffers pending for this request
2427  **/
2428 static bool blk_end_bidi_request(struct request *rq, int error,
2429                                  unsigned int nr_bytes, unsigned int bidi_bytes)
2430 {
2431         struct request_queue *q = rq->q;
2432         unsigned long flags;
2433 
2434         if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2435                 return true;
2436 
2437         spin_lock_irqsave(q->queue_lock, flags);
2438         blk_finish_request(rq, error);
2439         spin_unlock_irqrestore(q->queue_lock, flags);
2440 
2441         return false;
2442 }
2443 
2444 /**
2445  * __blk_end_bidi_request - Complete a bidi request with queue lock held
2446  * @rq:         the request to complete
2447  * @error:      %0 for success, < %0 for error
2448  * @nr_bytes:   number of bytes to complete @rq
2449  * @bidi_bytes: number of bytes to complete @rq->next_rq
2450  *
2451  * Description:
2452  * Identical to blk_end_bidi_request() except that the queue lock is
2453  * assumed to be held on entry and remains so on return.
2454  *
2455  * Return:
2456  * %false - we are done with this request
2457  * %true  - still buffers pending for this request
2458  **/
2459 bool __blk_end_bidi_request(struct request *rq, int error,
2460                             unsigned int nr_bytes, unsigned int bidi_bytes)
2461 {
2462         if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2463                 return true;
2464 
2465         blk_finish_request(rq, error);
2466 
2467         return false;
2468 }
2469 
2470 /**
2471  * blk_end_request - Helper function for drivers to complete the request.
2472  * @rq:       the request being processed
2473  * @error:    %0 for success, < %0 for error
2474  * @nr_bytes: number of bytes to complete
2475  *
2476  * Description:
2477  * Ends I/O on a number of bytes attached to @rq.
2478  * If @rq has leftover data, sets it up for the next range of segments.
2479  *
2480  * Return:
2481  * %false - we are done with this request
2482  * %true  - still buffers pending for this request
2483  **/
2484 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2485 {
2486         return blk_end_bidi_request(rq, error, nr_bytes, 0);
2487 }
2488 EXPORT_SYMBOL(blk_end_request);
2489 
2490 /**
2491  * blk_end_request_all - Helper function for drivers to finish the request.
2492  * @rq:    the request to finish
2493  * @error: %0 for success, < %0 for error
2494  *
2495  * Description:
2496  * Completely finish @rq.
2497  */
2498 void blk_end_request_all(struct request *rq, int error)
2499 {
2500         bool pending;
2501         unsigned int bidi_bytes = 0;
2502 
2503         if (unlikely(blk_bidi_rq(rq)))
2504                 bidi_bytes = blk_rq_bytes(rq->next_rq);
2505 
2506         pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2507         BUG_ON(pending);
2508 }
2509 EXPORT_SYMBOL(blk_end_request_all);
2510 
2511 /**
2512  * blk_end_request_cur - Helper function to finish the current request chunk.
2513  * @rq:    the request to finish the current chunk for
2514  * @error: %0 for success, < %0 for error
2515  *
2516  * Description:
2517  * Complete the current consecutively mapped chunk from @rq.
2518  *
2519  * Return:
2520  * %false - we are done with this request
2521  * %true  - still buffers pending for this request
2522  */
2523 bool blk_end_request_cur(struct request *rq, int error)
2524 {
2525         return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2526 }
2527 EXPORT_SYMBOL(blk_end_request_cur);
2528 
2529 /**
2530  * blk_end_request_err - Finish a request till the next failure boundary.
2531  * @rq:    the request to finish till the next failure boundary for
2532  * @error: must be a negative errno
2533  *
2534  * Description:
2535  * Complete @rq till the next failure boundary.
2536  *
2537  * Return:
2538  * %false - we are done with this request
2539  * %true  - still buffers pending for this request
2540  */
2541 bool blk_end_request_err(struct request *rq, int error)
2542 {
2543         WARN_ON(error >= 0);
2544         return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2545 }
2546 EXPORT_SYMBOL_GPL(blk_end_request_err);
2547 
2548 /**
2549  * __blk_end_request - Helper function for drivers to complete the request.
2550  * @rq:       the request being processed
2551  * @error:    %0 for success, < %0 for error
2552  * @nr_bytes: number of bytes to complete
2553  *
2554  * Description:
2555  * Must be called with the queue lock held, unlike blk_end_request().
2556  *
2557  * Return:
2558  * %false - we are done with this request
2559  * %true  - still buffers pending for this request
2560  **/
2561 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2562 {
2563         return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2564 }
2565 EXPORT_SYMBOL(__blk_end_request);
2566 
2567 /**
2568  * __blk_end_request_all - Helper function for drivers to finish the request.
2569  * @rq:    the request to finish
2570  * @error: %0 for success, < %0 for error
2571  *
2572  * Description:
2573  * Completely finish @rq. Must be called with the queue lock held.
2574  */
2575 void __blk_end_request_all(struct request *rq, int error)
2576 {
2577         bool pending;
2578         unsigned int bidi_bytes = 0;
2579 
2580         if (unlikely(blk_bidi_rq(rq)))
2581                 bidi_bytes = blk_rq_bytes(rq->next_rq);
2582 
2583         pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2584         BUG_ON(pending);
2585 }
2586 EXPORT_SYMBOL(__blk_end_request_all);
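
/*
 * Illustrative sketch, not part of the original file: a typical completion
 * path in a low-level driver's interrupt handler, using the unlocked
 * blk_end_request() (a handler already holding the queue lock would use
 * __blk_end_request() instead). All "example_*" names and the
 * "struct example_hw" state are assumptions.
 */
struct example_hw {
        struct request *current_rq;
};
static unsigned int example_bytes_done(struct example_hw *hw);
static int example_had_error(struct example_hw *hw);

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct example_hw *hw = dev_id;
        struct request *rq = hw->current_rq;
        unsigned int bytes = example_bytes_done(hw);
        int error = example_had_error(hw) ? -EIO : 0;

        /* %true means more segments remain; keep the request current */
        if (!blk_end_request(rq, error, bytes))
                hw->current_rq = NULL;

        return IRQ_HANDLED;
}
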
2587 
2588 /**
2589  * __blk_end_request_cur - Helper function to finish the current request chunk.
2590  * @rq:    the request to finish the current chunk for
2591  * @error: %0 for success, < %0 for error
2592  *
2593  * Description:
2594  * Complete the current consecutively mapped chunk from @rq. Must
2595  * be called with the queue lock held.
2596  *
2597  * Return:
2598  * %false - we are done with this request
2599  * %true  - still buffers pending for this request
2600  */
2601 bool __blk_end_request_cur(struct request *rq, int error)
2602 {
2603         return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2604 }
2605 EXPORT_SYMBOL(__blk_end_request_cur);
2606 
2607 /**
2608  * __blk_end_request_err - Finish a request till the next failure boundary.
2609  * @rq:    the request to finish till the next failure boundary for
2610  * @error: must be a negative errno
2611  *
2612  * Description:
2613  * Complete @rq till the next failure boundary. Must be called
2614  * with the queue lock held.
2615  *
2616  * Return:
2617  * %false - we are done with this request
2618  * %true  - still buffers pending for this request
2619  */
2620 bool __blk_end_request_err(struct request *rq, int error)
2621 {
2622         WARN_ON(error >= 0);
2623         return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2624 }
2625 EXPORT_SYMBOL_GPL(__blk_end_request_err);
2626 
2627 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2628                      struct bio *bio)
2629 {
2630         /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2631         rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
2632 
2633         if (bio_has_data(bio)) {
2634                 rq->nr_phys_segments = bio_phys_segments(q, bio);
2635                 rq->buffer = bio_data(bio);
2636         }
2637         rq->__data_len = bio->bi_size;
2638         rq->bio = rq->biotail = bio;
2639 
2640         if (bio->bi_bdev)
2641                 rq->rq_disk = bio->bi_bdev->bd_disk;
2642 }
2643 
2644 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2645 /**
2646  * rq_flush_dcache_pages - Helper function to flush all pages in a request
2647  * @rq: the request to be flushed
2648  *
2649  * Description:
2650  * Flush all pages in @rq.
2651  */
2652 void rq_flush_dcache_pages(struct request *rq)
2653 {
2654         struct req_iterator iter;
2655         struct bio_vec *bvec;
2656 
2657         rq_for_each_segment(bvec, rq, iter)
2658                 flush_dcache_page(bvec->bv_page);
2659 }
2660 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2661 #endif
2662 
2663 /**
2664  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2665  * @q: the queue of the device being checked
2666  *
2667  * Description:
2668  * Check if the underlying low-level drivers of a device are busy.
2669  * If the drivers want to export their busy state, they must set their
2670  * own exporting function using blk_queue_lld_busy() first.
2671  *
2672  * Basically, this function is used only by request stacking drivers
2673  * to stop dispatching requests to underlying devices when the underlying
2674  * devices are busy. This behavior helps more I/O merging on the queue
2675  * of the request stacking driver and prevents I/O throughput regressions
2676  * on burst I/O load.
2677  *
2678  * Return:
2679  * 0 - Not busy (The request stacking driver should dispatch the request)
2680  * 1 - Busy     (The request stacking driver should stop dispatching requests)
2681  */
2682 int blk_lld_busy(struct request_queue *q)
2683 {
2684         if (q->lld_busy_fn)
2685                 return q->lld_busy_fn(q);
2686 
2687         return 0;
2688 }
2689 EXPORT_SYMBOL_GPL(blk_lld_busy);
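
/*
 * Illustrative sketch, not part of the original file: exporting a busy
 * state for blk_lld_busy(). The callback is registered once with
 * blk_queue_lld_busy(q, example_lld_busy); the threshold fields are
 * assumed driver state.
 */
struct example_busy_dev {
        unsigned int inflight;
        unsigned int max_inflight;
};

static int example_lld_busy(struct request_queue *q)
{
        struct example_busy_dev *edev = q->queuedata;

        /* report busy while the device-internal queue is saturated */
        return edev->inflight >= edev->max_inflight;
}
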
2690 
2691 /**
2692  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2693  * @rq: the clone request to be cleaned up
2694  *
2695  * Description:
2696  * Free all bios in @rq for a cloned request.
2697  */
2698 void blk_rq_unprep_clone(struct request *rq)
2699 {
2700         struct bio *bio;
2701 
2702         while ((bio = rq->bio) != NULL) {
2703                 rq->bio = bio->bi_next;
2704 
2705                 bio_put(bio);
2706         }
2707 }
2708 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2709 
2710 /*
2711  * Copy attributes of the original request to the clone request.
2712  * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2713  */
2714 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2715 {
2716         dst->cpu = src->cpu;
2717         dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
2718         dst->cmd_type = src->cmd_type;
2719         dst->__sector = blk_rq_pos(src);
2720         dst->__data_len = blk_rq_bytes(src);
2721         dst->nr_phys_segments = src->nr_phys_segments;
2722         dst->ioprio = src->ioprio;
2723         dst->extra_len = src->extra_len;
2724 }
2725 
2726 /**
2727  * blk_rq_prep_clone - Helper function to set up a clone request
2728  * @rq: the request to be set up
2729  * @rq_src: original request to be cloned
2730  * @bs: bio_set that bios for the clone are allocated from
2731  * @gfp_mask: memory allocation mask for bio
2732  * @bio_ctr: setup function to be called for each clone bio.
2733  *           Returns %0 for success, non-%0 for failure.
2734  * @data: private data to be passed to @bio_ctr
2735  *
2736  * Description:
2737  * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2738  * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2739  * are not copied, and copying such parts is the caller's responsibility.
2740  * Also, pages which the original bios are pointing to are not copied;
2741  * the cloned bios just point to the same pages.
2742  * So the cloned bios must be completed before the original bios, which
2743  * means the caller must complete @rq before @rq_src.
2744  */
2745 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2746                       struct bio_set *bs, gfp_t gfp_mask,
2747                       int (*bio_ctr)(struct bio *, struct bio *, void *),
2748                       void *data)
2749 {
2750         struct bio *bio, *bio_src;
2751 
2752         if (!bs)
2753                 bs = fs_bio_set;
2754 
2755         blk_rq_init(NULL, rq);
2756 
2757         __rq_for_each_bio(bio_src, rq_src) {
2758                 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2759                 if (!bio)
2760                         goto free_and_out;
2761 
2762                 __bio_clone(bio, bio_src);
2763 
2764                 if (bio_integrity(bio_src) &&
2765                     bio_integrity_clone(bio, bio_src, gfp_mask, bs))
2766                         goto free_and_out;
2767 
2768                 if (bio_ctr && bio_ctr(bio, bio_src, data))
2769                         goto free_and_out;
2770 
2771                 if (rq->bio) {
2772                         rq->biotail->bi_next = bio;
2773                         rq->biotail = bio;
2774                 } else
2775                         rq->bio = rq->biotail = bio;
2776         }
2777 
2778         __blk_rq_prep_clone(rq, rq_src);
2779 
2780         return 0;
2781 
2782 free_and_out:
2783         if (bio)
2784                 bio_free(bio, bs);
2785         blk_rq_unprep_clone(rq);
2786 
2787         return -ENOMEM;
2788 }
2789 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
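
/*
 * Illustrative sketch, not part of the original file: the intended calling
 * pattern for blk_rq_prep_clone(), loosely following request-based dm.
 * The @bio_ctr callback lets the caller attach per-bio context (a real
 * driver would also set each clone's bi_end_io); all "example_*" names
 * are assumptions.
 */
static int example_bio_ctr(struct bio *clone, struct bio *orig, void *data)
{
        clone->bi_private = data;       /* remember per-request info */
        return 0;
}

static int example_setup_clone(struct request *clone, struct request *orig,
                               void *info)
{
        /* a NULL bio_set means the clone bios come from fs_bio_set */
        if (blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC,
                              example_bio_ctr, info))
                return -ENOMEM;

        /* the clone is now ready for blk_insert_cloned_request() */
        return 0;
}
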
2790 
2791 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2792 {
2793         return queue_work(kblockd_workqueue, work);
2794 }
2795 EXPORT_SYMBOL(kblockd_schedule_work);
2796 
2797 int kblockd_schedule_delayed_work(struct request_queue *q,
2798                                   struct delayed_work *dwork, unsigned long delay)
2799 {
2800         return queue_delayed_work(kblockd_workqueue, dwork, delay);
2801 }
2802 EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2803 
2804 #define PLUG_MAGIC      0x91827364
2805 
2806 /**
2807  * blk_start_plug - initialize blk_plug and track it inside the task_struct
2808  * @plug: The &struct blk_plug that needs to be initialized
2809  *
2810  * Description:
2811  * Tracking blk_plug inside the task_struct will help with auto-flushing the
2812  * pending I/O should the task end up blocking between blk_start_plug() and
2813  * blk_finish_plug(). This is important from a performance perspective, but
2814  * also ensures that we don't deadlock. For instance, if the task is blocking
2815  * for a memory allocation, memory reclaim could end up wanting to free a
2816  * page belonging to that request that is currently residing in our private
2817  * plug. By flushing the pending I/O when the process goes to sleep, we avoid
2818  * this kind of deadlock.
2819  */
2820 void blk_start_plug(struct blk_plug *plug)
2821 {
2822         struct task_struct *tsk = current;
2823 
2824         plug->magic = PLUG_MAGIC;
2825         INIT_LIST_HEAD(&plug->list);
2826         INIT_LIST_HEAD(&plug->cb_list);
2827         plug->should_sort = 0;
2828 
2829         /*
2830          * If this is a nested plug, don't actually assign it. It will be
2831          * flushed on its own.
2832          */
2833         if (!tsk->plug) {
2834                 /*
2835                  * Store ordering should not be needed here, since a potential
2836                  * preempt will imply a full memory barrier.
2837                  */
2838                 tsk->plug = plug;
2839         }
2840 }
2841 EXPORT_SYMBOL(blk_start_plug);
2842 
2843 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2844 {
2845         struct request *rqa = container_of(a, struct request, queuelist);
2846         struct request *rqb = container_of(b, struct request, queuelist);
2847 
2848         return !(rqa->q <= rqb->q);
2849 }
2850 
2851 /*
2852  * If 'from_schedule' is true, then postpone the dispatch of requests
2853  * until a safe kblockd context. We do this to avoid accidentally large
2854  * additional stack usage in driver dispatch, in places where the
2855  * original plugger did not intend it.
2856  */
2857 static void queue_unplugged(struct request_queue *q, unsigned int depth,
2858                             bool from_schedule)
2859         __releases(q->queue_lock)
2860 {
2861         trace_block_unplug(q, depth, !from_schedule);
2862 
2863         /*
2864          * Don't mess with a dead queue.
2865          */
2866         if (unlikely(blk_queue_dead(q))) {
2867                 spin_unlock(q->queue_lock);
2868                 return;
2869         }
2870 
2871         /*
2872          * If we are punting this to kblockd, then we can safely drop
2873          * the queue_lock before waking kblockd (which needs to take
2874          * this lock).
2875          */
2876         if (from_schedule) {
2877                 spin_unlock(q->queue_lock);
2878                 blk_run_queue_async(q);
2879         } else {
2880                 __blk_run_queue(q);
2881                 spin_unlock(q->queue_lock);
2882         }
2883 
2884 }
2885 
2886 static void flush_plug_callbacks(struct blk_plug *plug)
2887 {
2888         LIST_HEAD(callbacks);
2889 
2890         if (list_empty(&plug->cb_list))
2891                 return;
2892 
2893         list_splice_init(&plug->cb_list, &callbacks);
2894 
2895         while (!list_empty(&callbacks)) {
2896                 struct blk_plug_cb *cb = list_first_entry(&callbacks,
2897                                                           struct blk_plug_cb,
2898                                                           list);
2899                 list_del(&cb->list);
2900                 cb->callback(cb);
2901         }
2902 }
2903 
2904 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2905 {
2906         struct request_queue *q;
2907         unsigned long flags;
2908         struct request *rq;
2909         LIST_HEAD(list);
2910         unsigned int depth;
2911 
2912         BUG_ON(plug->magic != PLUG_MAGIC);
2913 
2914         flush_plug_callbacks(plug);
2915         if (list_empty(&plug->list))
2916                 return;
2917 
2918         list_splice_init(&plug->list, &list);
2919 
2920         if (plug->should_sort) {
2921                 list_sort(NULL, &list, plug_rq_cmp);
2922                 plug->should_sort = 0;
2923         }
2924 
2925         q = NULL;
2926         depth = 0;
2927 
2928         /*
2929          * Save and disable interrupts here, to avoid doing it for every
2930          * queue lock we have to take.
2931          */
2932         local_irq_save(flags);
2933         while (!list_empty(&list)) {
2934                 rq = list_entry_rq(list.next);
2935                 list_del_init(&rq->queuelist);
2936                 BUG_ON(!rq->q);
2937                 if (rq->q != q) {
2938                         /*
2939                          * This drops the queue lock
2940                          */
2941                         if (q)
2942                                 queue_unplugged(q, depth, from_schedule);
2943                         q = rq->q;
2944                         depth = 0;
2945                         spin_lock(q->queue_lock);
2946                 }
2947 
2948                 /*
2949                  * Short-circuit if @q is dead
2950                  */
2951                 if (unlikely(blk_queue_dead(q))) {
2952                         __blk_end_request_all(rq, -ENODEV);
2953                         continue;
2954                 }
2955 
2956                 /*
2957                  * rq is already accounted, so use raw insert
2958                  */
2959                 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
2960                         __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
2961                 else
2962                         __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
2963 
2964                 depth++;
2965         }
2966 
2967         /*
2968          * This drops the queue lock
2969          */
2970         if (q)
2971                 queue_unplugged(q, depth, from_schedule);
2972 
2973         local_irq_restore(flags);
2974 }
2975 
2976 void blk_finish_plug(struct blk_plug *plug)
2977 {
2978         blk_flush_plug_list(plug, false);
2979 
2980         if (plug == current->plug)
2981                 current->plug = NULL;
2982 }
2983 EXPORT_SYMBOL(blk_finish_plug);
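
/*
 * Illustrative sketch, not part of the original file: the standard
 * plugging pattern for a submitter that batches several bios. Requests
 * accumulate in the on-stack plug and are dispatched in one go (sorted
 * and merged where possible) when blk_finish_plug() runs, or earlier if
 * the task blocks. The array-based interface is an assumption.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_bio(bios[i]->bi_rw, bios[i]);
        blk_finish_plug(&plug);         /* flushes the plugged requests */
}
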
2984 
2985 int __init blk_dev_init(void)
2986 {
2987         BUILD_BUG_ON(__REQ_NR_BITS > 8 *
2988                      sizeof(((struct request *)0)->cmd_flags));
2989 
2990         /* used for unplugging and affects IO latency/throughput - HIGHPRI */
2991         kblockd_workqueue = alloc_workqueue("kblockd",
2992                                             WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2993         if (!kblockd_workqueue)
2994                 panic("Failed to create kblockd\n");
2995 
2996         request_cachep = kmem_cache_create("blkdev_requests",
2997                         sizeof(struct request), 0, SLAB_PANIC, NULL);
2998 
2999         blk_requestq_cachep = kmem_cache_create("blkdev_queue",
3000                         sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3001 
3002         return 0;
3003 }
3004 