/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only.
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(cpu, part);
		part_inc_in_flight(part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
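/*
 * Worked example (editorial addition, not part of the original file):
 * with the default q->nr_requests of 128 the thresholds above come out as
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * so the queue is flagged congested once 113 requests are allocated, and
 * the flag is only cleared again when the count drops below 103.  The gap
 * between the two values provides hysteresis so the congestion state does
 * not flap around a single threshold.
 */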
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info.
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(nbytes > bio->bi_size)) {
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __func__, nbytes, bio->bi_size);
		nbytes = bio->bi_size;
	}

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio->bi_size -= nbytes;
	bio->bi_sector += (nbytes >> 9);

	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after a defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	queue_delayed_work(kblockd_workqueue, &q->delay_work,
				msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	__cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
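/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a driver's request_fn typically pairs blk_stop_queue() with a later
 * blk_start_queue() around a device-full condition.  The names
 * my_request_fn(), my_dev, my_hw_queue_full() and my_dev_issue() are
 * hypothetical.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (my_hw_queue_full(my_dev)) {
 *				blk_requeue_request(q, rq);
 *				blk_stop_queue(q);
 *				return;
 *			}
 *			my_dev_issue(my_dev, rq);
 *		}
 *	}
 *
 * The completion interrupt handler would then call blk_start_queue()
 * (queue lock held) once the hardware has room again, or blk_delay_queue()
 * when the shortage is expected to clear after a known interval.
 */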
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blk_throtl_exit() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q))) {
		__cancel_delayed_work(&q->delay_work);
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
	}
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
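/*
 * Usage note (editorial addition): blk_run_queue() takes the queue lock
 * itself, so it suits process context, e.g. after a driver has freed a
 * resource.  Where the lock is already held, such as in a completion
 * interrupt, a driver would instead call __blk_run_queue() directly or
 * defer to kblockd, roughly:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_run_queue_async(q);		(queue runs from workqueue context)
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * This is a sketch; the binding rules are those documented on each helper
 * above.
 */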
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
void blk_drain_queue(struct request_queue *q, bool drain_all)
{
	while (true) {
		int nr_rqs;

		spin_lock_irq(q->queue_lock);

		elv_drain_elevator(q);
		if (drain_all)
			blk_throtl_drain(q);

		__blk_run_queue(q);

		if (drain_all)
			nr_rqs = q->rq.count[0] + q->rq.count[1];
		else
			nr_rqs = q->rq.elvpriv;

		spin_unlock_irq(q->queue_lock);

		if (!nr_rqs)
			break;
		msleep(10);
	}
}

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DEAD, drain all pending requests, destroy and put it.  All
 * future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DEAD, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

	spin_lock_irq(lock);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DEAD, q);

	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;

	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DEAD marking.  The caller might
	 * be trying to tear down @q before its elevator is initialized, in
	 * which case we don't want to call into draining.
	 */
	if (q->elevator)
		blk_drain_queue(q, true);

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

static int blk_init_free_list(struct request_queue *q)
{
	struct request_list *rl = &q->rq;

	if (unlikely(rl->rq_pool))
		return 0;

	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	q->backing_dev_info.name = "block";

	err = bdi_init(&q->backing_dev_info);
	if (err) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	if (blk_throtl_init(q)) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will also be taken from interrupt context, so
 *    irq disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
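/*
 * Illustrative pairing (editorial addition, not part of the original
 * file): a minimal driver init/exit sequence built on the helpers above.
 * my_request_fn, my_lock and my_queue are hypothetical names.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static struct request_queue *my_queue;
 *
 *	my_queue = blk_init_queue(my_request_fn, &my_lock);
 *	if (!my_queue)
 *		return -ENOMEM;
 *	...
 *	blk_cleanup_queue(my_queue);	(on module unload)
 */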
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	return blk_init_allocated_queue_node(q, rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_allocated_queue);

struct request_queue *
blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
			      spinlock_t *lock, int node_id)
{
	if (!q)
		return NULL;

	q->node = node_id;
	if (blk_init_free_list(q))
		return NULL;

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unprep_rq_fn		= NULL;
	q->queue_flags		= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock	= lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/*
	 * all done
	 */
	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue_node);

int blk_get_queue(struct request_queue *q)
{
	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kobject_get(&q->kobj);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV)
		elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
}

static struct request *
blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	blk_rq_init(q, rq);

	rq->cmd_flags = flags | REQ_ALLOCED;

	if ((flags & REQ_ELVPRIV) &&
	    unlikely(elv_set_request(q, rq, gfp_mask))) {
		mempool_free(rq, q->rq.rq_pool);
		return NULL;
	}

	return rq;
}
/*
 * ioc_batching returns true if the ioc is a valid batching context and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_queue *q, int sync)
{
	struct request_list *rl = &q->rq;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_queue_full(q, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_queue *q, unsigned int flags)
{
	struct request_list *rl = &q->rq;
	int sync = rw_is_sync(flags);

	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		rl->elvpriv--;

	__freed_request(q, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(q, sync ^ 1);
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
	if (!bio)
		return true;

	/*
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
		return false;

	return true;
}
/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	struct request *rq = NULL;
	struct request_list *rl = &q->rq;
	struct io_context *ioc = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue;

	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
		return NULL;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			ioc = current_io_context(GFP_ATOMIC, q->node);
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_queue_full(q, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_queue_full(q, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					goto out;
				}
			}
		}
		blk_set_queue_congested(q, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		goto out;

	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	if (blk_rq_should_init_elevator(bio) &&
	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
		rw_flags |= REQ_ELVPRIV;
		rl->elvpriv++;
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	rq = blk_alloc_request(q, rw_flags, gfp_mask);
	if (unlikely(!rq)) {
		/*
		 * Allocation failed presumably due to memory. Undo anything
		 * we might have messed up.
		 *
		 * Allocating task should really be put onto the front of the
		 * wait queue, but this is pretty rare.
		 */
		spin_lock_irq(q->queue_lock);
		freed_request(q, rw_flags);

		/*
		 * in the very unlikely event that allocation failed and no
		 * requests in this direction were pending, mark us starved
		 * so that freeing of a request in the other direction will
		 * notice us. another possible fix would be to split the
		 * rq mempool into READ and WRITE
		 */
rq_starved:
		if (unlikely(rl->count[is_sync] == 0))
			rl->starved[is_sync] = 1;

		goto out;
	}

	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
out:
	return rq;
}
/**
 * get_request_wait - get a free request with retry
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 *
 * Get a free request from @q.  This function keeps retrying under memory
 * pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
			return NULL;

		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				TASK_UNINTERRUPTIBLE);

		trace_block_sleeprq(q, bio, rw_flags & 1);

		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period time.
		 * See ioc_batching, ioc_set_batching
		 */
		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[is_sync], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT)
		rq = get_request_wait(q, rw, NULL);
	else
		rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
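/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a caller that needs a request outside the normal bio path, e.g. to issue
 * a driver-private command.  Error handling is abbreviated and "..." marks
 * driver-specific setup.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (!rq)
 *		return -ENODEV;	(queue was dead or allocation failed)
 *	rq->cmd_type = REQ_TYPE_SPECIAL;
 *	...
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 *
 * blk_execute_rq() lives elsewhere in the block layer; it inserts the
 * request and waits for its completion.
 */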
/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands. Where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers. That bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);
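/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * typical use of blk_make_request() by a pass-through caller.  The bio (or
 * bio chain) is assumed to have been built already, e.g. with
 * bio_kmalloc() and bio_add_page(); on success the caller finishes the
 * request setup and executes it.
 *
 *	struct request *rq;
 *
 *	rq = blk_make_request(q, bio, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	...
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */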
/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where);
}

/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution.  This is
 *    accomplished normally by inserting artificial requests tagged as
 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
 *    be scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth.  We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(struct request_queue *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	unsigned long flags;

	/*
	 * tell I/O scheduler that this isn't a regular read/write (ie it
	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
	 */
	rq->cmd_type = REQ_TYPE_SPECIAL;

	rq->special = data;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * If command is tagged, release the tag
	 */
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	add_acct_request(q, rq, where);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	if (now == part->stamp)
		return;

	if (part_in_flight(part)) {
		__part_stat_add(cpu, part, time_in_queue,
				part_in_flight(part) * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		unsigned int flags = req->cmd_flags;

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(q, req);
		freed_request(q, flags);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

/**
 * blk_add_request_payload - add a payload to a request
 * @rq: request to update
 * @page: page backing the payload
 * @len: length of the payload.
 *
 * This allows a block driver to add a payload to an already submitted
 * request.  The driver needs to take care of freeing the payload itself.
 *
 * Note that this is a quite horrible hack and nothing but handling of
 * discard requests should ever use it.
 */
void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len)
{
	struct bio *bio = rq->bio;

	bio->bi_io_vec->bv_page = page;
	bio->bi_io_vec->bv_offset = 0;
	bio->bi_io_vec->bv_len = len;

	bio->bi_size = len;
	bio->bi_vcnt = 1;
	bio->bi_phys_segments = 1;

	rq->__data_len = rq->resid_len = len;
	rq->nr_phys_segments = 1;
	rq->buffer = bio_data(bio);
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
				   struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);
	elv_bio_merged(q, req, bio);
	return true;
}

static bool bio_attempt_front_merge(struct request_queue *q,
				    struct request *req, struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	/*
	 * may not be valid. if the low level driver said
	 * it didn't need a bounce buffer then it better
	 * not touch req->buffer either...
	 */
	req->buffer = bio_data(bio);
	req->__sector = bio->bi_sector;
	req->__data_len += bio->bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);
	elv_bio_merged(q, req, bio);
	return true;
}

/**
 * attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * This function is called without @q->queue_lock; however, elevator is
 * accessed iff there already are requests on the plugged list which in
 * turn guarantees validity of the elevator.
 *
 * Note that, on successful merge, elevator operation
 * elevator_bio_merged_fn() will be called without queue lock.  Elevator
 * must be ready for this.
 */
static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
			       unsigned int *request_count)
{
	struct blk_plug *plug;
	struct request *rq;
	bool ret = false;

	plug = current->plug;
	if (!plug)
		goto out;
	*request_count = 0;

	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
		int el_ret;

		(*request_count)++;

		if (rq->q != q)
			continue;

		el_ret = elv_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			ret = bio_attempt_back_merge(q, rq, bio);
			if (ret)
				break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			ret = bio_attempt_front_merge(q, rq, bio);
			if (ret)
				break;
		}
	}
out:
	return ret;
}

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cmd_type = REQ_TYPE_FS;

	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
	if (bio->bi_rw & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
	req->__sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	blk_rq_bio_prep(req->q, req, bio);
}
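/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a submitter cooperates with the plugging machinery that
 * attempt_plug_merge() above and blk_queue_bio() below rely on.  While the
 * plug is active, bios queued by this task are gathered on current->plug
 * and only dispatched (sorted and merged) when the plug is finished or
 * overflows.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(READ, bio1);
 *	submit_bio(READ, bio2);
 *	blk_finish_plug(&plug);
 */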
void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
	struct request *req;
	unsigned int request_count = 0;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (attempt_plug_merge(q, bio, &request_count))
		return;

	spin_lock_irq(q->queue_lock);

	el_ret = elv_merge(q, &req, bio);
	if (el_ret == ELEVATOR_BACK_MERGE) {
		if (bio_attempt_back_merge(q, req, bio)) {
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		if (bio_attempt_front_merge(q, req, bio)) {
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;

	/*
	 * Grab a free request. This might sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request_wait(q, rw_flags, bio);
	if (unlikely(!req)) {
		bio_endio(bio, -ENODEV);	/* @q is dead */
		goto out_unlock;
	}

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace. If others have been added before, check
		 * if we have multiple devices in this plug. If so, make a
		 * note to sort the list before dispatch.
		 */
		if (list_empty(&plug->list))
			trace_block_plug(q);
		else {
			if (!plug->should_sort) {
				struct request *__rq;

				__rq = list_entry_rq(plug->list.prev);
				if (__rq->q != q)
					plug->should_sort = 1;
			}
			if (request_count >= BLK_MAX_REQUEST_COUNT) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
		}
		list_add_tail(&req->queuelist, &plug->list);
		drive_stat_acct(req, 1);
	} else {
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		__blk_run_queue(q);
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */

/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
				      bdev->bd_dev,
				      bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio->bi_sector + bio_sectors(bio),
			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */
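/*
 * Usage note (editorial addition): the fault attribute above is configured
 * with the standard fault-injection syntax, either on the kernel command
 * line,
 *
 *	fail_make_request=<interval>,<probability>,<space>,<times>
 *
 * or at runtime through debugfs under /sys/kernel/debug/fail_make_request/,
 * with per-partition opt-in via the partition's make_it_fail sysfs
 * attribute.  See Documentation/fault-injection/fault-injection.txt.
 */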
/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	int err = -EIO;
	char b[BDEVNAME_SIZE];
	struct hd_struct *part;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	q = bdev_get_queue(bio->bi_bdev);
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
		       "nonexistent block-device %s (%Lu)\n",
		       bdevname(bio->bi_bdev, b),
		       (long long) bio->bi_sector);
		goto end_io;
	}

	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
		     nr_sectors > queue_max_hw_sectors(q))) {
		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
		       bdevname(bio->bi_bdev, b),
		       bio_sectors(bio),
		       queue_max_hw_sectors(q));
		goto end_io;
	}

	part = bio->bi_bdev->bd_part;
	if (should_fail_request(part, bio->bi_size) ||
	    should_fail_request(&part_to_disk(part)->part0,
				bio->bi_size))
		goto end_io;

	/*
	 * If this device has partitions, remap block n
	 * of partition p to block n+start(p) of the disk.
	 */
	blk_partition_remap(bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
		goto end_io;

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
		if (!nr_sectors) {
			err = 0;
			goto end_io;
		}
	}

	if ((bio->bi_rw & REQ_DISCARD) &&
	    (!blk_queue_discard(q) ||
	     ((bio->bi_rw & REQ_SECURE) &&
	      !blk_queue_secdiscard(q)))) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	if (blk_throtl_bio(q, bio))
		return false;	/* throttled, will be resubmitted later */

	trace_block_bio_queue(q, bio);
	return true;

end_io:
	bio_endio(bio, err);
	return false;
}
/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_dev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may resubmit the bio to
 * a lower device by calling into generic_make_request recursively, which
 * means the bio should NOT be touched after the call to ->make_request_fn.
 */
void generic_make_request(struct bio *bio)
{
	struct bio_list bio_list_on_stack;

	if (!generic_make_request_checks(bio))
		return;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submitted by a
	 * make_request_fn function.  current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not.  If it is NULL, then no make_request is active.  If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail
	 */
	if (current->bio_list) {
		bio_list_add(current->bio_list, bio);
		return;
	}

	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added.  ->make_request() may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * off the top of the list (no pretending) and so remove it from
	 * bio_list, and call into ->make_request() again.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack);
	current->bio_list = &bio_list_on_stack;
	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		q->make_request_fn(q, bio);

		bio = bio_list_pop(current->bio_list);
	} while (bio);
	current->bio_list = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be presetup and ready for I/O.
 *
 */
void submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	bio->bi_rw |= rw;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
		if (rw & WRITE) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
				current->comm, task_pid_nr(current),
				(rw & WRITE) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_sector,
				bdevname(bio->bi_bdev, b),
				count);
		}
	}

	generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
/**
 * blk_rq_check_limits - Helper function to check a request for the queue limit
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    This function should also be useful for request stacking drivers
 *    in some cases below, so export this function.
 *    Request stacking drivers like request-based dm may change the queue
 *    limits while requests are in the queue (e.g. dm's table swapping).
 *    Such request stacking drivers should check those requests against
 *    the new queue limits again when they dispatch those requests,
 *    although such checks are also done against the old queue limits
 *    when submitting requests.
 */
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD)
		return 0;

	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_check_limits);

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;
	int where = ELEVATOR_INSERT_BACK;

	if (blk_rq_check_limits(q, rq))
		return -EIO;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return -EIO;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * Submitting request must be dequeued before calling this function
	 * because it will be linked to another request_queue
	 */
	BUG_ON(blk_queued_rq(rq));

	if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
		where = ELEVATOR_INSERT_FLUSH;

	add_acct_request(q, rq, where);
	if (where == ELEVATOR_INSERT_FLUSH)
		__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 *
 * Context:
 *     queue_lock must be held.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_rw & ff) != ff)
			break;
		bytes += bio->bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (blk_do_io_stat(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_account_io_done(struct request *req)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(cpu, part);
		part_dec_in_flight(part, rw);

		hd_struct_put(part);
		part_stat_unlock();
	}
}
1833 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1834 {
1835 if (blk_do_io_stat(req)) {
1836 const int rw = rq_data_dir(req);
1837 struct hd_struct *part;
1838 int cpu;
1839
1840 cpu = part_stat_lock();
1841 part = req->part;
1842 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1843 part_stat_unlock();
1844 }
1845 }
1846
1847 static void blk_account_io_done(struct request *req)
1848 {
1849 /*
1850 * Account IO completion. flush_rq isn't accounted as a
1851 * normal IO on queueing nor completion. Accounting the
1852 * containing request is enough.
1853 */
1854 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
1855 unsigned long duration = jiffies - req->start_time;
1856 const int rw = rq_data_dir(req);
1857 struct hd_struct *part;
1858 int cpu;
1859
1860 cpu = part_stat_lock();
1861 part = req->part;
1862
1863 part_stat_inc(cpu, part, ios[rw]);
1864 part_stat_add(cpu, part, ticks[rw], duration);
1865 part_round_stats(cpu, part);
1866 part_dec_in_flight(part, rw);
1867
1868 hd_struct_put(part);
1869 part_stat_unlock();
1870 }
1871 }
1872
1873 /**
1874 * blk_peek_request - peek at the top of a request queue
1875 * @q: request queue to peek at
1876 *
1877 * Description:
1878 * Return the request at the top of @q. The returned request
1879 * should be started using blk_start_request() before the LLD starts
1880 * processing it.
1881 *
1882 * Return:
1883 * Pointer to the request at the top of @q if available. Null
1884 * otherwise.
1885 *
1886 * Context:
1887 * queue_lock must be held.
1888 */
1889 struct request *blk_peek_request(struct request_queue *q)
1890 {
1891 struct request *rq;
1892 int ret;
1893
1894 while ((rq = __elv_next_request(q)) != NULL) {
1895 if (!(rq->cmd_flags & REQ_STARTED)) {
1896 /*
1897 * This is the first time the device driver
1898 * sees this request (possibly after
1899 * requeueing). Notify the IO scheduler.
1900 */
1901 if (rq->cmd_flags & REQ_SORTED)
1902 elv_activate_rq(q, rq);
1903
1904 /*
1905 * just mark it as started even if we don't start
1906 * it; a request that has been delayed should
1907 * not be passed by new incoming requests
1908 */
1909 rq->cmd_flags |= REQ_STARTED;
1910 trace_block_rq_issue(q, rq);
1911 }
1912
1913 if (!q->boundary_rq || q->boundary_rq == rq) {
1914 q->end_sector = rq_end_sector(rq);
1915 q->boundary_rq = NULL;
1916 }
1917
1918 if (rq->cmd_flags & REQ_DONTPREP)
1919 break;
1920
1921 if (q->dma_drain_size && blk_rq_bytes(rq)) {
1922 /*
1923 * make sure that space for the drain appears.
1924 * we know we can do this because max_hw_segments
1925 * has been adjusted to be one fewer than the
1926 * device can handle
1927 */
1928 rq->nr_phys_segments++;
1929 }
1930
1931 if (!q->prep_rq_fn)
1932 break;
1933
1934 ret = q->prep_rq_fn(q, rq);
1935 if (ret == BLKPREP_OK) {
1936 break;
1937 } else if (ret == BLKPREP_DEFER) {
1938 /*
1939 * the request may have been (partially) prepped.
1940 * we need to keep this request at the front to
1941 * avoid resource deadlock. REQ_STARTED will
1942 * prevent other fs requests from passing this one.
1943 */
1944 if (q->dma_drain_size && blk_rq_bytes(rq) &&
1945 !(rq->cmd_flags & REQ_DONTPREP)) {
1946 /*
1947 * remove the space for the drain we added
1948 * so that we don't add it again
1949 */
1950 --rq->nr_phys_segments;
1951 }
1952
1953 rq = NULL;
1954 break;
1955 } else if (ret == BLKPREP_KILL) {
1956 rq->cmd_flags |= REQ_QUIET;
1957 /*
1958 * Mark this request as started so we don't trigger
1959 * any debug logic in the end I/O path.
1960 */
1961 blk_start_request(rq);
1962 __blk_end_request_all(rq, -EIO);
1963 } else {
1964 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
1965 break;
1966 }
1967 }
1968
1969 return rq;
1970 }
1971 EXPORT_SYMBOL(blk_peek_request);
1972
1973 void blk_dequeue_request(struct request *rq)
1974 {
1975 struct request_queue *q = rq->q;
1976
1977 BUG_ON(list_empty(&rq->queuelist));
1978 BUG_ON(ELV_ON_HASH(rq));
1979
1980 list_del_init(&rq->queuelist);
1981
1982 /*
1983 * the time frame between a request being removed from the lists
1984 * and when it is freed is accounted as IO that is in progress at
1985 * the driver side.
1986 */
1987 if (blk_account_rq(rq)) {
1988 q->in_flight[rq_is_sync(rq)]++;
1989 set_io_start_time_ns(rq);
1990 }
1991 }
1992
1993 /**
1994 * blk_start_request - start request processing on the driver
1995 * @req: request to dequeue
1996 *
1997 * Description:
1998 * Dequeue @req and start the timeout timer on it. This hands off the
1999 * request to the driver.
2000 *
2001 * Block internal functions which don't want to start the timer should
2002 * call blk_dequeue_request().
2003 *
2004 * Context:
2005 * queue_lock must be held.
2006 */
2007 void blk_start_request(struct request *req)
2008 {
2009 blk_dequeue_request(req);
2010
2011 /*
2012 * We are now handing the request to the hardware; initialize
2013 * resid_len to the full count and add the timeout handler.
2014 */
2015 req->resid_len = blk_rq_bytes(req);
2016 if (unlikely(blk_bidi_rq(req)))
2017 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2018
2019 blk_add_timer(req);
2020 }
2021 EXPORT_SYMBOL(blk_start_request);
2022
2023 /**
2024 * blk_fetch_request - fetch a request from a request queue
2025 * @q: request queue to fetch a request from
2026 *
2027 * Description:
2028 * Return the request at the top of @q. The request is started on
2029 * return and the LLD can start processing it immediately.
2030 * 2031 * Return: 2032 * Pointer to the request at the top of @q if available. Null 2033 * otherwise. 2034 * 2035 * Context: 2036 * queue_lock must be held. 2037 */ 2038 struct request *blk_fetch_request(struct request_queue *q) 2039 { 2040 struct request *rq; 2041 2042 rq = blk_peek_request(q); 2043 if (rq) 2044 blk_start_request(rq); 2045 return rq; 2046 } 2047 EXPORT_SYMBOL(blk_fetch_request); 2048 2049 /** 2050 * blk_update_request - Special helper function for request stacking drivers 2051 * @req: the request being processed 2052 * @error: %0 for success, < %0 for error 2053 * @nr_bytes: number of bytes to complete @req 2054 * 2055 * Description: 2056 * Ends I/O on a number of bytes attached to @req, but doesn't complete 2057 * the request structure even if @req doesn't have leftover. 2058 * If @req has leftover, sets it up for the next range of segments. 2059 * 2060 * This special helper function is only for request stacking drivers 2061 * (e.g. request-based dm) so that they can handle partial completion. 2062 * Actual device drivers should use blk_end_request instead. 2063 * 2064 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2065 * %false return from this function. 2066 * 2067 * Return: 2068 * %false - this request doesn't have any more data 2069 * %true - this request has more data 2070 **/ 2071 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 2072 { 2073 int total_bytes, bio_nbytes, next_idx = 0; 2074 struct bio *bio; 2075 2076 if (!req->bio) 2077 return false; 2078 2079 trace_block_rq_complete(req->q, req); 2080 2081 /* 2082 * For fs requests, rq is just carrier of independent bio's 2083 * and each partial completion should be handled separately. 2084 * Reset per-request error on each partial completion. 2085 * 2086 * TODO: tj: This is too subtle. It would be better to let 2087 * low level drivers do what they see fit. 2088 */ 2089 if (req->cmd_type == REQ_TYPE_FS) 2090 req->errors = 0; 2091 2092 if (error && req->cmd_type == REQ_TYPE_FS && 2093 !(req->cmd_flags & REQ_QUIET)) { 2094 char *error_type; 2095 2096 switch (error) { 2097 case -ENOLINK: 2098 error_type = "recoverable transport"; 2099 break; 2100 case -EREMOTEIO: 2101 error_type = "critical target"; 2102 break; 2103 case -EBADE: 2104 error_type = "critical nexus"; 2105 break; 2106 case -EIO: 2107 default: 2108 error_type = "I/O"; 2109 break; 2110 } 2111 printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", 2112 error_type, req->rq_disk ? 
req->rq_disk->disk_name : "?", 2113 (unsigned long long)blk_rq_pos(req)); 2114 } 2115 2116 blk_account_io_completion(req, nr_bytes); 2117 2118 total_bytes = bio_nbytes = 0; 2119 while ((bio = req->bio) != NULL) { 2120 int nbytes; 2121 2122 if (nr_bytes >= bio->bi_size) { 2123 req->bio = bio->bi_next; 2124 nbytes = bio->bi_size; 2125 req_bio_endio(req, bio, nbytes, error); 2126 next_idx = 0; 2127 bio_nbytes = 0; 2128 } else { 2129 int idx = bio->bi_idx + next_idx; 2130 2131 if (unlikely(idx >= bio->bi_vcnt)) { 2132 blk_dump_rq_flags(req, "__end_that"); 2133 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", 2134 __func__, idx, bio->bi_vcnt); 2135 break; 2136 } 2137 2138 nbytes = bio_iovec_idx(bio, idx)->bv_len; 2139 BIO_BUG_ON(nbytes > bio->bi_size); 2140 2141 /* 2142 * not a complete bvec done 2143 */ 2144 if (unlikely(nbytes > nr_bytes)) { 2145 bio_nbytes += nr_bytes; 2146 total_bytes += nr_bytes; 2147 break; 2148 } 2149 2150 /* 2151 * advance to the next vector 2152 */ 2153 next_idx++; 2154 bio_nbytes += nbytes; 2155 } 2156 2157 total_bytes += nbytes; 2158 nr_bytes -= nbytes; 2159 2160 bio = req->bio; 2161 if (bio) { 2162 /* 2163 * end more in this run, or just return 'not-done' 2164 */ 2165 if (unlikely(nr_bytes <= 0)) 2166 break; 2167 } 2168 } 2169 2170 /* 2171 * completely done 2172 */ 2173 if (!req->bio) { 2174 /* 2175 * Reset counters so that the request stacking driver 2176 * can find how many bytes remain in the request 2177 * later. 2178 */ 2179 req->__data_len = 0; 2180 return false; 2181 } 2182 2183 /* 2184 * if the request wasn't completed, update state 2185 */ 2186 if (bio_nbytes) { 2187 req_bio_endio(req, bio, bio_nbytes, error); 2188 bio->bi_idx += next_idx; 2189 bio_iovec(bio)->bv_offset += nr_bytes; 2190 bio_iovec(bio)->bv_len -= nr_bytes; 2191 } 2192 2193 req->__data_len -= total_bytes; 2194 req->buffer = bio_data(req->bio); 2195 2196 /* update sector only for requests with clear definition of sector */ 2197 if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) 2198 req->__sector += total_bytes >> 9; 2199 2200 /* mixed attributes always follow the first bio */ 2201 if (req->cmd_flags & REQ_MIXED_MERGE) { 2202 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2203 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; 2204 } 2205 2206 /* 2207 * If total number of sectors is less than the first segment 2208 * size, something has gone terribly wrong. 2209 */ 2210 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2211 blk_dump_rq_flags(req, "request botched"); 2212 req->__data_len = blk_rq_cur_bytes(req); 2213 } 2214 2215 /* recalculate the number of segments */ 2216 blk_recalc_rq_segments(req); 2217 2218 return true; 2219 } 2220 EXPORT_SYMBOL_GPL(blk_update_request); 2221 2222 static bool blk_update_bidi_request(struct request *rq, int error, 2223 unsigned int nr_bytes, 2224 unsigned int bidi_bytes) 2225 { 2226 if (blk_update_request(rq, error, nr_bytes)) 2227 return true; 2228 2229 /* Bidi request must be completed as a whole */ 2230 if (unlikely(blk_bidi_rq(rq)) && 2231 blk_update_request(rq->next_rq, error, bidi_bytes)) 2232 return true; 2233 2234 if (blk_queue_add_random(rq->q)) 2235 add_disk_randomness(rq->rq_disk); 2236 2237 return false; 2238 } 2239 2240 /** 2241 * blk_unprep_request - unprepare a request 2242 * @req: the request 2243 * 2244 * This function makes a request ready for complete resubmission (or 2245 * completion). 
It happens only after all error handling is complete,
2246 * so it represents the appropriate moment to deallocate any resources
2247 * that were allocated to the request in the prep_rq_fn. The queue
2248 * lock is held when calling this.
2249 */
2250 void blk_unprep_request(struct request *req)
2251 {
2252 struct request_queue *q = req->q;
2253
2254 req->cmd_flags &= ~REQ_DONTPREP;
2255 if (q->unprep_rq_fn)
2256 q->unprep_rq_fn(q, req);
2257 }
2258 EXPORT_SYMBOL_GPL(blk_unprep_request);
2259
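/*
 * Editor's note -- usage sketch (not built): a driver that allocates a
 * per-request resource in its prep_rq_fn pairs it with an unprep_rq_fn,
 * which blk_unprep_request() above calls for it. Everything prefixed
 * my_ is hypothetical.
 *
 *	static int my_prep(struct request_queue *q, struct request *rq)
 *	{
 *		rq->special = my_alloc_ctx(rq);
 *		if (!rq->special)
 *			return BLKPREP_DEFER;	// try again later
 *		rq->cmd_flags |= REQ_DONTPREP;	// don't prep twice
 *		return BLKPREP_OK;
 *	}
 *
 *	static void my_unprep(struct request_queue *q, struct request *rq)
 *	{
 *		my_free_ctx(rq->special);
 *	}
 *
 *	blk_queue_prep_rq(q, my_prep);
 *	blk_queue_unprep_rq(q, my_unprep);
 */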
2260 /*
2261 * queue lock must be held
2262 */
2263 static void blk_finish_request(struct request *req, int error)
2264 {
2265 if (blk_rq_tagged(req))
2266 blk_queue_end_tag(req->q, req);
2267
2268 BUG_ON(blk_queued_rq(req));
2269
2270 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2271 laptop_io_completion(&req->q->backing_dev_info);
2272
2273 blk_delete_timer(req);
2274
2275 if (req->cmd_flags & REQ_DONTPREP)
2276 blk_unprep_request(req);
2277
2278
2279 blk_account_io_done(req);
2280
2281 if (req->end_io)
2282 req->end_io(req, error);
2283 else {
2284 if (blk_bidi_rq(req))
2285 __blk_put_request(req->next_rq->q, req->next_rq);
2286
2287 __blk_put_request(req->q, req);
2288 }
2289 }
2290
2291 /**
2292 * blk_end_bidi_request - Complete a bidi request
2293 * @rq: the request to complete
2294 * @error: %0 for success, < %0 for error
2295 * @nr_bytes: number of bytes to complete @rq
2296 * @bidi_bytes: number of bytes to complete @rq->next_rq
2297 *
2298 * Description:
2299 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2300 * Drivers that support bidi can safely call this function for any
2301 * type of request, bidi or uni. In the latter case @bidi_bytes is
2302 * just ignored.
2303 *
2304 * Return:
2305 * %false - we are done with this request
2306 * %true - still buffers pending for this request
2307 **/
2308 static bool blk_end_bidi_request(struct request *rq, int error,
2309 unsigned int nr_bytes, unsigned int bidi_bytes)
2310 {
2311 struct request_queue *q = rq->q;
2312 unsigned long flags;
2313
2314 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2315 return true;
2316
2317 spin_lock_irqsave(q->queue_lock, flags);
2318 blk_finish_request(rq, error);
2319 spin_unlock_irqrestore(q->queue_lock, flags);
2320
2321 return false;
2322 }
2323
2324 /**
2325 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2326 * @rq: the request to complete
2327 * @error: %0 for success, < %0 for error
2328 * @nr_bytes: number of bytes to complete @rq
2329 * @bidi_bytes: number of bytes to complete @rq->next_rq
2330 *
2331 * Description:
2332 * Identical to blk_end_bidi_request() except that the queue lock is
2333 * assumed to be held on entry and remains so on return.
2334 *
2335 * Return:
2336 * %false - we are done with this request
2337 * %true - still buffers pending for this request
2338 **/
2339 bool __blk_end_bidi_request(struct request *rq, int error,
2340 unsigned int nr_bytes, unsigned int bidi_bytes)
2341 {
2342 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2343 return true;
2344
2345 blk_finish_request(rq, error);
2346
2347 return false;
2348 }
2349
2350 /**
2351 * blk_end_request - Helper function for drivers to complete the request.
2352 * @rq: the request being processed
2353 * @error: %0 for success, < %0 for error
2354 * @nr_bytes: number of bytes to complete
2355 *
2356 * Description:
2357 * Ends I/O on a number of bytes attached to @rq.
2358 * If @rq has leftover, sets it up for the next range of segments.
2359 *
2360 * Return:
2361 * %false - we are done with this request
2362 * %true - still buffers pending for this request
2363 **/
2364 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2365 {
2366 return blk_end_bidi_request(rq, error, nr_bytes, 0);
2367 }
2368 EXPORT_SYMBOL(blk_end_request);
2369
2370 /**
2371 * blk_end_request_all - Helper function for drivers to finish the request.
2372 * @rq: the request to finish
2373 * @error: %0 for success, < %0 for error
2374 *
2375 * Description:
2376 * Completely finish @rq.
2377 */
2378 void blk_end_request_all(struct request *rq, int error)
2379 {
2380 bool pending;
2381 unsigned int bidi_bytes = 0;
2382
2383 if (unlikely(blk_bidi_rq(rq)))
2384 bidi_bytes = blk_rq_bytes(rq->next_rq);
2385
2386 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2387 BUG_ON(pending);
2388 }
2389 EXPORT_SYMBOL(blk_end_request_all);
2390
2391 /**
2392 * blk_end_request_cur - Helper function to finish the current request chunk.
2393 * @rq: the request to finish the current chunk for
2394 * @error: %0 for success, < %0 for error
2395 *
2396 * Description:
2397 * Complete the current consecutively mapped chunk from @rq.
2398 *
2399 * Return:
2400 * %false - we are done with this request
2401 * %true - still buffers pending for this request
2402 */
2403 bool blk_end_request_cur(struct request *rq, int error)
2404 {
2405 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2406 }
2407 EXPORT_SYMBOL(blk_end_request_cur);
2408
2409 /**
2410 * blk_end_request_err - Finish a request till the next failure boundary.
2411 * @rq: the request to finish till the next failure boundary for
2412 * @error: must be a negative errno
2413 *
2414 * Description:
2415 * Complete @rq till the next failure boundary.
2416 *
2417 * Return:
2418 * %false - we are done with this request
2419 * %true - still buffers pending for this request
2420 */
2421 bool blk_end_request_err(struct request *rq, int error)
2422 {
2423 WARN_ON(error >= 0);
2424 return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2425 }
2426 EXPORT_SYMBOL_GPL(blk_end_request_err);
2427
2428 /**
2429 * __blk_end_request - Helper function for drivers to complete the request.
2430 * @rq: the request being processed
2431 * @error: %0 for success, < %0 for error
2432 * @nr_bytes: number of bytes to complete
2433 *
2434 * Description:
2435 * Must be called with queue lock held, unlike blk_end_request().
2436 *
2437 * Return:
2438 * %false - we are done with this request
2439 * %true - still buffers pending for this request
2440 **/
2441 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2442 {
2443 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2444 }
2445 EXPORT_SYMBOL(__blk_end_request);
2446
2447 /**
2448 * __blk_end_request_all - Helper function for drivers to finish the request.
2449 * @rq: the request to finish
2450 * @error: %0 for success, < %0 for error
2451 *
2452 * Description:
2453 * Completely finish @rq. Must be called with queue lock held.
2454 */
2455 void __blk_end_request_all(struct request *rq, int error)
2456 {
2457 bool pending;
2458 unsigned int bidi_bytes = 0;
2459
2460 if (unlikely(blk_bidi_rq(rq)))
2461 bidi_bytes = blk_rq_bytes(rq->next_rq);
2462
2463 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2464 BUG_ON(pending);
2465 }
2466 EXPORT_SYMBOL(__blk_end_request_all);
2467
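/*
 * Editor's note -- usage sketch (not built): a simple single-queue LLD
 * typically pairs blk_fetch_request() with the __blk_end_request*()
 * helpers from its request_fn, which runs with queue_lock held. The
 * names my_request_fn() and my_do_transfer() are hypothetical.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			int err = my_do_transfer(rq);
 *
 *			// queue_lock is held here, so the __-prefixed
 *			// completion variant must be used
 *			__blk_end_request_all(rq, err);
 *		}
 *	}
 */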
2468 /**
2469 * __blk_end_request_cur - Helper function to finish the current request chunk.
2470 * @rq: the request to finish the current chunk for
2471 * @error: %0 for success, < %0 for error
2472 *
2473 * Description:
2474 * Complete the current consecutively mapped chunk from @rq. Must
2475 * be called with queue lock held.
2476 *
2477 * Return:
2478 * %false - we are done with this request
2479 * %true - still buffers pending for this request
2480 */
2481 bool __blk_end_request_cur(struct request *rq, int error)
2482 {
2483 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2484 }
2485 EXPORT_SYMBOL(__blk_end_request_cur);
2486
2487 /**
2488 * __blk_end_request_err - Finish a request till the next failure boundary.
2489 * @rq: the request to finish till the next failure boundary for
2490 * @error: must be a negative errno
2491 *
2492 * Description:
2493 * Complete @rq till the next failure boundary. Must be called
2494 * with queue lock held.
2495 *
2496 * Return:
2497 * %false - we are done with this request
2498 * %true - still buffers pending for this request
2499 */
2500 bool __blk_end_request_err(struct request *rq, int error)
2501 {
2502 WARN_ON(error >= 0);
2503 return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2504 }
2505 EXPORT_SYMBOL_GPL(__blk_end_request_err);
2506
2507 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2508 struct bio *bio)
2509 {
2510 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2511 rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
2512
2513 if (bio_has_data(bio)) {
2514 rq->nr_phys_segments = bio_phys_segments(q, bio);
2515 rq->buffer = bio_data(bio);
2516 }
2517 rq->__data_len = bio->bi_size;
2518 rq->bio = rq->biotail = bio;
2519
2520 if (bio->bi_bdev)
2521 rq->rq_disk = bio->bi_bdev->bd_disk;
2522 }
2523
2524 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2525 /**
2526 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2527 * @rq: the request to be flushed
2528 *
2529 * Description:
2530 * Flush all pages in @rq.
2531 */
2532 void rq_flush_dcache_pages(struct request *rq)
2533 {
2534 struct req_iterator iter;
2535 struct bio_vec *bvec;
2536
2537 rq_for_each_segment(bvec, rq, iter)
2538 flush_dcache_page(bvec->bv_page);
2539 }
2540 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2541 #endif
2542
2543 /**
2544 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2545 * @q: the queue of the device being checked
2546 *
2547 * Description:
2548 * Check if the underlying low-level drivers of a device are busy.
2549 * If the drivers want to export their busy state, they must set their
2550 * own exporting function using blk_queue_lld_busy() first.
2551 *
2552 * Basically, this function is used only by request stacking drivers
2553 * to stop dispatching requests to underlying devices when the underlying
2554 * devices are busy. This behavior allows more I/O merging on the queue
2555 * of the request stacking driver and prevents I/O throughput regressions
2556 * under burst I/O load.
2557 *
2558 * Return:
2559 * 0 - Not busy (The request stacking driver should dispatch the request)
2560 * 1 - Busy (The request stacking driver should stop dispatching requests)
2561 */
2562 int blk_lld_busy(struct request_queue *q)
2563 {
2564 if (q->lld_busy_fn)
2565 return q->lld_busy_fn(q);
2566
2567 return 0;
2568 }
2569 EXPORT_SYMBOL_GPL(blk_lld_busy);
2570
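/*
 * Editor's note -- usage sketch (not built): a request stacking driver
 * can poll blk_lld_busy() on the underlying queue before dispatching, so
 * that bursts stay queued (and keep merging) on its own queue. The
 * underlying driver opts in with blk_queue_lld_busy(). Names other than
 * those two helpers are hypothetical.
 *
 *	if (blk_lld_busy(under_q)) {
 *		// underlying device busy: keep the request on our own
 *		// queue and retry dispatch a little later
 *		blk_delay_queue(q, 10);
 *		return;
 *	}
 *	my_dispatch_clone(under_q, clone);
 */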
2571 /**
2572 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2573 * @rq: the clone request to be cleaned up
2574 *
2575 * Description:
2576 * Free all bios in @rq for a cloned request.
2577 */
2578 void blk_rq_unprep_clone(struct request *rq)
2579 {
2580 struct bio *bio;
2581
2582 while ((bio = rq->bio) != NULL) {
2583 rq->bio = bio->bi_next;
2584
2585 bio_put(bio);
2586 }
2587 }
2588 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2589
2590 /*
2591 * Copy attributes of the original request to the clone request.
2592 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2593 */
2594 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2595 {
2596 dst->cpu = src->cpu;
2597 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
2598 dst->cmd_type = src->cmd_type;
2599 dst->__sector = blk_rq_pos(src);
2600 dst->__data_len = blk_rq_bytes(src);
2601 dst->nr_phys_segments = src->nr_phys_segments;
2602 dst->ioprio = src->ioprio;
2603 dst->extra_len = src->extra_len;
2604 }
2605
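/*
 * Editor's note -- usage sketch (not built): blk_rq_unprep_clone() above
 * and the exported blk_rq_prep_clone() below are meant to be used as a
 * pair by request-based stacking drivers. Everything prefixed my_ here
 * is hypothetical.
 *
 *	if (blk_rq_prep_clone(clone, rq, my_bio_set, GFP_ATOMIC,
 *			      my_bio_ctr, my_ctx)) {
 *		// bio allocation failed; partially cloned bios have
 *		// already been freed by blk_rq_prep_clone() itself
 *		return -ENOMEM;
 *	}
 *	clone->end_io = my_clone_end_io;	// clone completes before @rq
 */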
2606 /**
2607 * blk_rq_prep_clone - Helper function to set up a clone request
2608 * @rq: the request to be set up
2609 * @rq_src: original request to be cloned
2610 * @bs: bio_set that bios for the clone are allocated from
2611 * @gfp_mask: memory allocation mask for bio
2612 * @bio_ctr: setup function to be called for each clone bio.
2613 * Returns %0 for success, non-zero for failure.
2614 * @data: private data to be passed to @bio_ctr
2615 *
2616 * Description:
2617 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2618 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2619 * are not copied, and copying such parts is the caller's responsibility.
2620 * Also, the pages which the original bios are pointing to are not copied;
2621 * the cloned bios just point to the same pages.
2622 * So the cloned bios must be completed before the original bios, which means
2623 * the caller must complete @rq before @rq_src.
2624 */
2625 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2626 struct bio_set *bs, gfp_t gfp_mask,
2627 int (*bio_ctr)(struct bio *, struct bio *, void *),
2628 void *data)
2629 {
2630 struct bio *bio, *bio_src;
2631
2632 if (!bs)
2633 bs = fs_bio_set;
2634
2635 blk_rq_init(NULL, rq);
2636
2637 __rq_for_each_bio(bio_src, rq_src) {
2638 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2639 if (!bio)
2640 goto free_and_out;
2641
2642 __bio_clone(bio, bio_src);
2643
2644 if (bio_integrity(bio_src) &&
2645 bio_integrity_clone(bio, bio_src, gfp_mask, bs))
2646 goto free_and_out;
2647
2648 if (bio_ctr && bio_ctr(bio, bio_src, data))
2649 goto free_and_out;
2650
2651 if (rq->bio) {
2652 rq->biotail->bi_next = bio;
2653 rq->biotail = bio;
2654 } else
2655 rq->bio = rq->biotail = bio;
2656 }
2657
2658 __blk_rq_prep_clone(rq, rq_src);
2659
2660 return 0;
2661
2662 free_and_out:
2663 if (bio)
2664 bio_free(bio, bs);
2665 blk_rq_unprep_clone(rq);
2666
2667 return -ENOMEM;
2668 }
2669 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
2670
2671 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2672 {
2673 return queue_work(kblockd_workqueue, work);
2674 }
2675 EXPORT_SYMBOL(kblockd_schedule_work);
2676
2677 int kblockd_schedule_delayed_work(struct request_queue *q,
2678 struct delayed_work *dwork, unsigned long delay)
2679 {
2680 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2681 }
2682 EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2683
2684 #define PLUG_MAGIC 0x91827364
2685
2686 /**
2687 * blk_start_plug - initialize blk_plug and track it inside the task_struct
2688 * @plug: The &struct blk_plug that needs to be initialized
2689 *
2690 * Description:
2691 * Tracking blk_plug inside the task_struct will help with auto-flushing the
2692 * pending I/O should the task end up blocking between blk_start_plug() and
2693 * blk_finish_plug(). This is important from a performance perspective, but
2694 * also ensures that we don't deadlock. For instance, if the task is blocking
2695 * for a memory allocation, memory reclaim could end up wanting to free a
2696 * page belonging to that request that is currently residing in our private
2697 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
2698 * this kind of deadlock.
2699 */
2700 void blk_start_plug(struct blk_plug *plug)
2701 {
2702 struct task_struct *tsk = current;
2703
2704 plug->magic = PLUG_MAGIC;
2705 INIT_LIST_HEAD(&plug->list);
2706 INIT_LIST_HEAD(&plug->cb_list);
2707 plug->should_sort = 0;
2708
2709 /*
2710 * If this is a nested plug, don't actually assign it. It will be
2711 * flushed on its own.
2712 */
2713 if (!tsk->plug) {
2714 /*
2715 * Store ordering should not be needed here, since a potential
2716 * preempt will imply a full memory barrier
2717 */
2718 tsk->plug = plug;
2719 }
2720 }
2721 EXPORT_SYMBOL(blk_start_plug);
2722
2723 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2724 {
2725 struct request *rqa = container_of(a, struct request, queuelist);
2726 struct request *rqb = container_of(b, struct request, queuelist);
2727
2728 return !(rqa->q <= rqb->q);
2729 }
2730
2731 /*
2732 * If 'from_schedule' is true, then postpone the dispatch of requests
2733 * until a safe kblockd context. We do this to avoid accidental big
2734 * additional stack usage in driver dispatch, in places where the original
2735 * plugger did not intend it.
2736 */ 2737 static void queue_unplugged(struct request_queue *q, unsigned int depth, 2738 bool from_schedule) 2739 __releases(q->queue_lock) 2740 { 2741 trace_block_unplug(q, depth, !from_schedule); 2742 2743 /* 2744 * If we are punting this to kblockd, then we can safely drop 2745 * the queue_lock before waking kblockd (which needs to take 2746 * this lock). 2747 */ 2748 if (from_schedule) { 2749 spin_unlock(q->queue_lock); 2750 blk_run_queue_async(q); 2751 } else { 2752 __blk_run_queue(q); 2753 spin_unlock(q->queue_lock); 2754 } 2755 2756 } 2757 2758 static void flush_plug_callbacks(struct blk_plug *plug) 2759 { 2760 LIST_HEAD(callbacks); 2761 2762 if (list_empty(&plug->cb_list)) 2763 return; 2764 2765 list_splice_init(&plug->cb_list, &callbacks); 2766 2767 while (!list_empty(&callbacks)) { 2768 struct blk_plug_cb *cb = list_first_entry(&callbacks, 2769 struct blk_plug_cb, 2770 list); 2771 list_del(&cb->list); 2772 cb->callback(cb); 2773 } 2774 } 2775 2776 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2777 { 2778 struct request_queue *q; 2779 unsigned long flags; 2780 struct request *rq; 2781 LIST_HEAD(list); 2782 unsigned int depth; 2783 2784 BUG_ON(plug->magic != PLUG_MAGIC); 2785 2786 flush_plug_callbacks(plug); 2787 if (list_empty(&plug->list)) 2788 return; 2789 2790 list_splice_init(&plug->list, &list); 2791 2792 if (plug->should_sort) { 2793 list_sort(NULL, &list, plug_rq_cmp); 2794 plug->should_sort = 0; 2795 } 2796 2797 q = NULL; 2798 depth = 0; 2799 2800 /* 2801 * Save and disable interrupts here, to avoid doing it for every 2802 * queue lock we have to take. 2803 */ 2804 local_irq_save(flags); 2805 while (!list_empty(&list)) { 2806 rq = list_entry_rq(list.next); 2807 list_del_init(&rq->queuelist); 2808 BUG_ON(!rq->q); 2809 if (rq->q != q) { 2810 /* 2811 * This drops the queue lock 2812 */ 2813 if (q) 2814 queue_unplugged(q, depth, from_schedule); 2815 q = rq->q; 2816 depth = 0; 2817 spin_lock(q->queue_lock); 2818 } 2819 /* 2820 * rq is already accounted, so use raw insert 2821 */ 2822 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) 2823 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 2824 else 2825 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 2826 2827 depth++; 2828 } 2829 2830 /* 2831 * This drops the queue lock 2832 */ 2833 if (q) 2834 queue_unplugged(q, depth, from_schedule); 2835 2836 local_irq_restore(flags); 2837 } 2838 2839 void blk_finish_plug(struct blk_plug *plug) 2840 { 2841 blk_flush_plug_list(plug, false); 2842 2843 if (plug == current->plug) 2844 current->plug = NULL; 2845 } 2846 EXPORT_SYMBOL(blk_finish_plug); 2847 2848 int __init blk_dev_init(void) 2849 { 2850 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 2851 sizeof(((struct request *)0)->cmd_flags)); 2852 2853 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 2854 kblockd_workqueue = alloc_workqueue("kblockd", 2855 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 2856 if (!kblockd_workqueue) 2857 panic("Failed to create kblockd\n"); 2858 2859 request_cachep = kmem_cache_create("blkdev_requests", 2860 sizeof(struct request), 0, SLAB_PANIC, NULL); 2861 2862 blk_requestq_cachep = kmem_cache_create("blkdev_queue", 2863 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 2864 2865 return 0; 2866 } 2867
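/*
 * Editor's note -- usage sketch (not built): a submitter batches I/O by
 * bracketing submission with an on-stack plug; requests gather on
 * plug->list, are sorted and dispatched in one go by blk_finish_plug(),
 * and the plug is flushed automatically should the task block in
 * between (see blk_start_plug() above).
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(READ, bios[i]);
 *	blk_finish_plug(&plug);
 */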