/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep = NULL;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
#endif
}

static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
#endif
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
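
/*
 * Worked example of the thresholds above (illustrative, using the common
 * default of q->nr_requests == 128): the queue is marked congested once
 * 128 - 128/8 + 1 = 113 requests are allocated, and the congested state
 * is cleared again only when the count drops below
 * 128 - 128/8 - 128/16 - 1 = 103.  The gap between the two thresholds
 * provides hysteresis, so a queue hovering around the limit does not flip
 * its congestion state on every allocation and free.
 */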
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev: device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info. This function can only be called if @bdev is opened
 * and the return value is never NULL.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return &q->backing_dev_info;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		bio->bi_error = error;

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
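
/*
 * For example, a request_fn that has temporarily run out of device
 * resources can back off and let kblockd retry dispatch a little later.
 * This is an illustrative sketch only; the mydev_* names are hypothetical:
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		if (mydev_busy(q->queuedata)) {
 *			blk_delay_queue(q, 3);
 *			return;
 *		}
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			mydev_issue(rq);
 *	}
 */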
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		queue_for_each_hw_ctx(q, hctx, i) {
			cancel_delayed_work_sync(&hctx->run_work);
			cancel_delayed_work_sync(&hctx->delay_work);
		}
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);
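
/*
 * A typical single-queue driver pattern (illustrative sketch only; the
 * mydev_* names are hypothetical): the request_fn stops the queue when
 * the hardware is full, and the completion interrupt restarts it once a
 * slot frees up.  Both paths hold q->queue_lock, as required above.
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct mydev *dev = q->queuedata;
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (mydev_hw_queue_full(dev)) {
 *				blk_stop_queue(q);
 *				return;
 *			}
 *			blk_start_request(rq);
 *			mydev_issue(dev, rq);
 *		}
 *	}
 *
 * and in the completion interrupt, with q->queue_lock held:
 *
 *	blk_start_queue(q);
 */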
/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us. The caller must hold the queue lock.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
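
/*
 * Which variant to use (illustrative summary): __blk_run_queue() for
 * callers that already hold q->queue_lock with interrupts disabled, e.g.
 * a completion path; blk_run_queue() for lockless callers; and
 * blk_run_queue_async() when dispatch should be deferred to kblockd, for
 * instance to avoid recursing into request_fn from inside request_fn:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	__blk_end_request_all(rq, 0);
 *	blk_run_queue_async(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */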
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active.  Some drivers (e.g. fd and loop) get unhappy
		 * in such cases.  Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters.  We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

void blk_set_queue_dying(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up(&rl->wait[BLK_RW_SYNC]);
				wake_up(&rl->wait[BLK_RW_ASYNC]);
			}
		}
	}
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking.  Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	if (!q->mq_ops)
		__blk_drain_queue(q, true);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	bdi_unregister(&q->backing_dev_info);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/* Allocate memory local to the request queue */
static void *alloc_request_struct(gfp_t gfp_mask, void *data)
{
	int nid = (int)(long)data;
	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
}

static void free_request_struct(void *element, void *unused)
{
	kmem_cache_free(request_cachep, element);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
					  free_request_struct,
					  (void *)(long)q->node, gfp_mask,
					  q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

void blk_exit_rl(struct request_list *rl)
{
	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

int blk_queue_enter(struct request_queue *q, gfp_t gfp)
{
	while (true) {
		int ret;

		if (percpu_ref_tryget_live(&q->q_usage_counter))
			return 0;

		if (!(gfp & __GFP_WAIT))
			return -EBUSY;

		ret = wait_event_interruptible(q->mq_freeze_wq,
				!atomic_read(&q->mq_freeze_depth) ||
				blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
		if (ret)
			return ret;
	}
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
	if (!q->bio_split)
		goto fail_id;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_split;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	bdi_destroy(&q->backing_dev_info);
fail_split:
	bioset_free(q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
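
/*
 * Sketch of the pairing described in the note above (the mydev_* names
 * and the probe/remove context are hypothetical):
 *
 *	static DEFINE_SPINLOCK(mydev_lock);
 *
 *	q = blk_init_queue(mydev_request_fn, &mydev_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	q->queuedata = dev;
 *
 * and on teardown (module unload or device removal):
 *
 *	blk_cleanup_queue(q);
 */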
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

static void blk_queue_bio(struct request_queue *q, struct bio *bio);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	if (!q)
		return NULL;

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
	if (!q->fq)
		return NULL;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto fail;

	q->request_fn = rfn;
	q->prep_rq_fn = NULL;
	q->unprep_rq_fn = NULL;
	q->queue_flags |= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock = lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* Protect q->elevator from elevator_change */
	mutex_lock(&q->sysfs_lock);

	/* init elevator */
	if (elevator_init(q, NULL)) {
		mutex_unlock(&q->sysfs_lock);
		goto fail;
	}

	mutex_unlock(&q->sysfs_lock);

	return q;

fail:
	blk_free_flush_queue(q->fq);
	return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_congested(rl, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, unsigned int flags)
{
	struct request_queue *q = rl->q;
	int sync = rw_is_sync(flags);

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;
	int on_thresh, off_thresh;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);
	on_thresh = queue_congestion_on_threshold(q);
	off_thresh = queue_congestion_off_threshold(q);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_SYNC);
		else if (rl->count[BLK_RW_SYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_SYNC);

		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_ASYNC);
		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_ASYNC);

		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return 0;
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
	if (!bio)
		return true;

	/*
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
		return false;

	return true;
}

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue;

	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		blk_set_congested(rl, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
		rw_flags |= REQ_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = rw_flags | REQ_ALLOCED;

	/* init elvpriv */
	if (rw_flags & REQ_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info.dev));

	rq->cmd_flags &= ~REQ_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, rw_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return ERR_PTR(-ENOMEM);
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
 * function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, rw_flags, bio, gfp_mask);
	if (!IS_ERR(rq))
		return rq;

	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, rw_flags & 1);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period of time.  See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

static struct request *blk_old_get_request(struct request_queue *q, int rw,
		gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	if (IS_ERR(rq))
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	if (q->mq_ops)
		return blk_mq_alloc_request(q, rw, gfp_mask, false);
	else
		return blk_old_get_request(q, rw, gfp_mask);
}
EXPORT_SYMBOL(blk_get_request);
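
/*
 * Sketch of the usual calling pattern (error handling abbreviated; the
 * command bytes are whatever the target device expects):
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_rq_set_block_pc(rq);
 *	rq->cmd_len = ...;	(fill rq->cmd with the command bytes)
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */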
/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands. Where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers. That bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (IS_ERR(rq))
		return rq;

	blk_rq_set_block_pc(rq);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);

/**
 * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
 * @rq:		request to be initialized
 *
 */
void blk_rq_set_block_pc(struct request *rq)
{
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	memset(rq->__cmd, 0, sizeof(rq->__cmd));
}
EXPORT_SYMBOL(blk_rq_set_block_pc);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (rq->cmd_flags & REQ_QUEUED)
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
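
/*
 * For example (sketch; locking context and error handling elided), a
 * driver whose hardware rejects a command after it has already been
 * dequeued can put the request back and stop the queue until the device
 * drains:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_requeue_request(q, rq);
 *	blk_stop_queue(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * A later completion then calls blk_start_queue() to resume dispatch.
 */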
static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	int inflight;

	if (now == part->stamp)
		return;

	inflight = part_in_flight(part);
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);
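
/*
 * Worked example (illustrative numbers): if part->stamp was last updated
 * 8 jiffies ago and two requests are still in flight, the call above adds
 * 2 * 8 = 16 jiffies to time_in_queue and 8 jiffies to io_ticks, then
 * resets part->stamp, so a read of /proc/diskstats reflects the queue
 * time accrued so far instead of waiting for those IOs to complete.
 */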
#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;

	if (q->mq_ops) {
		blk_mq_free_request(req);
		return;
	}

	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		unsigned int flags = req->cmd_flags;
		struct request_list *rl = blk_rq_rl(req);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));

		blk_free_request(rl, req);
		freed_request(rl, flags);
		blk_put_rl(rl);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	struct request_queue *q = req->q;

	if (q->mq_ops)
		blk_mq_free_request(req);
	else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);

/**
 * blk_add_request_payload - add a payload to a request
 * @rq: request to update
 * @page: page backing the payload
 * @len: length of the payload.
 *
 * This allows a block driver to add a payload to an already submitted
 * request later.  The driver needs to take care of freeing the payload
 * itself.
 *
 * Note that this is a quite horrible hack and nothing but handling of
 * discard requests should ever use it.
 */
void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len)
{
	struct bio *bio = rq->bio;

	bio->bi_io_vec->bv_page = page;
	bio->bi_io_vec->bv_offset = 0;
	bio->bi_io_vec->bv_len = len;

	bio->bi_iter.bi_size = len;
	bio->bi_vcnt = 1;
	bio->bi_phys_segments = 1;

	rq->__data_len = rq->resid_len = len;
	rq->nr_phys_segments = 1;
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	bool ret = false;
	struct list_head *plug_list;

	plug = current->plug;
	if (!plug)
		goto out;
	*request_count = 0;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		int el_ret;

		if (rq->q == q) {
			(*request_count)++;
			/*
			 * Only the blk-mq multiple-hardware-queues case
			 * checks the rq in the same queue; there should
			 * be only one such rq in a queue.
			 */
			if (same_queue_rq)
				*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			ret = bio_attempt_back_merge(q, rq, bio);
			if (ret)
				break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			ret = bio_attempt_front_merge(q, rq, bio);
			if (ret)
				break;
		}
	}
out:
	return ret;
}

unsigned int blk_plug_queued_count(struct request_queue *q)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;
	unsigned int ret = 0;

	plug = current->plug;
	if (!plug)
		goto out;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry(rq, plug_list, queuelist) {
		if (rq->q == q)
			ret++;
	}
out:
	return ret;
}

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cmd_type = REQ_TYPE_FS;

	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
	if (bio->bi_rw & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
	req->__sector = bio->bi_iter.bi_sector;
	req->ioprio = bio_prio(bio);
	blk_rq_bio_prep(req->q, req, bio);
}

static void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
	struct request *req;
	unsigned int request_count = 0;

	blk_queue_split(q, &bio, q->bio_split);

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio->bi_error = -EIO;
		bio_endio(bio);
		return;
	}

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (!blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return;
	} else
		request_count = blk_plug_queued_count(q);

	spin_lock_irq(q->queue_lock);

	el_ret = elv_merge(q, &req, bio);
	if (el_ret == ELEVATOR_BACK_MERGE) {
		if (bio_attempt_back_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		if (bio_attempt_front_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;

	/*
	 * Grab a free request.  This might sleep but can not fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request(q, rw_flags, bio, GFP_NOIO);
	if (IS_ERR(req)) {
		bio->bi_error = PTR_ERR(req);
		bio_endio(bio);
		goto out_unlock;
	}

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace.
		 */
		if (!request_count)
			trace_block_plug(q);
		else {
			if (request_count >= BLK_MAX_REQUEST_COUNT) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
		}
		list_add_tail(&req->queuelist, &plug->list);
		blk_account_io_start(req, true);
	} else {
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		__blk_run_queue(q);
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}
}
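
/*
 * Submitters batch requests onto the plug list with the blk_start_plug()
 * and blk_finish_plug() pair (sketch of the usual pattern; submit_pages()
 * is hypothetical).  Everything queued between the two calls is a merge
 * candidate for blk_attempt_plug_merge() above and is dispatched in one
 * go when the plug is finished:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_pages();
 *	blk_finish_plug(&plug);
 */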
/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_iter.bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
				      bdev->bd_dev,
				      bio->bi_iter.bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio_end_sector(bio),
			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_iter.bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	int err = -EIO;
	char b[BDEVNAME_SIZE];
	struct hd_struct *part;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	q = bdev_get_queue(bio->bi_bdev);
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bdevname(bio->bi_bdev, b),
			(long long) bio->bi_iter.bi_sector);
		goto end_io;
	}

	part = bio->bi_bdev->bd_part;
	if (should_fail_request(part, bio->bi_iter.bi_size) ||
	    should_fail_request(&part_to_disk(part)->part0,
				bio->bi_iter.bi_size))
		goto end_io;

	/*
	 * If this device has partitions, remap block n
	 * of partition p to block n+start(p) of the disk.
	 */
	blk_partition_remap(bio);

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Filter flush bios early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
		if (!nr_sectors) {
			err = 0;
			goto end_io;
		}
	}

	if ((bio->bi_rw & REQ_DISCARD) &&
	    (!blk_queue_discard(q) ||
	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	/*
	 * Various block parts want %current->io_context and lazy ioc
	 * allocation ends up trading a lot of pain for a small amount of
	 * memory.  Just allocate it upfront.  This may fail and block
	 * layer knows how to live with it.
	 */
	create_io_context(GFP_ATOMIC, q->node);

	if (!blkcg_bio_issue_check(q, bio))
		return false;

	trace_block_bio_queue(q, bio);
	return true;

end_io:
	bio->bi_error = err;
	bio_endio(bio);
	return false;
}

/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may resubmit the bio to
 * a lower device by calling into generic_make_request recursively, which
 * means the bio should NOT be touched after the call to ->make_request_fn.
1998 */ 1999 void generic_make_request(struct bio *bio) 2000 { 2001 struct bio_list bio_list_on_stack; 2002 2003 if (!generic_make_request_checks(bio)) 2004 return; 2005 2006 /* 2007 * We only want one ->make_request_fn to be active at a time, else 2008 * stack usage with stacked devices could be a problem. So use 2009 * current->bio_list to keep a list of requests submitted by a 2010 * make_request_fn function. current->bio_list is also used as a 2011 * flag to say if generic_make_request is currently active in this 2012 * task or not. If it is NULL, then no make_request is active. If 2013 * it is non-NULL, then a make_request is active, and new requests 2014 * should be added at the tail. 2015 */ 2016 if (current->bio_list) { 2017 bio_list_add(current->bio_list, bio); 2018 return; 2019 } 2020 2021 /* The following loop may be a bit non-obvious, and so deserves some 2022 * explanation. 2023 * Before entering the loop, bio->bi_next is NULL (as all callers 2024 * ensure that) so we have a list with a single bio. 2025 * We pretend that we have just taken it off a longer list, so 2026 * we assign bio_list to a pointer to the bio_list_on_stack, 2027 * thus initialising the bio_list of new bios to be 2028 * added. ->make_request() may indeed add some more bios 2029 * through a recursive call to generic_make_request. If it 2030 * did, we find a non-NULL value in bio_list and re-enter the loop 2031 * from the top. In this case we really did just take the bio 2032 * off the top of the list (no pretending) and so remove it from 2033 * bio_list, and call into ->make_request() again. 2034 */ 2035 BUG_ON(bio->bi_next); 2036 bio_list_init(&bio_list_on_stack); 2037 current->bio_list = &bio_list_on_stack; 2038 do { 2039 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2040 2041 if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) { 2042 2043 q->make_request_fn(q, bio); 2044 2045 blk_queue_exit(q); 2046 2047 bio = bio_list_pop(current->bio_list); 2048 } else { 2049 struct bio *bio_next = bio_list_pop(current->bio_list); 2050 2051 bio_io_error(bio); 2052 bio = bio_next; 2053 } 2054 } while (bio); 2055 current->bio_list = NULL; /* deactivate */ 2056 } 2057 EXPORT_SYMBOL(generic_make_request); 2058 2059 /** 2060 * submit_bio - submit a bio to the block device layer for I/O 2061 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) 2062 * @bio: The &struct bio which describes the I/O 2063 * 2064 * submit_bio() is very similar in purpose to generic_make_request(), and 2065 * uses that function to do most of the work. Both are fairly rough 2066 * interfaces; @bio must be set up and ready for I/O. 2067 * 2068 */ 2069 void submit_bio(int rw, struct bio *bio) 2070 { 2071 bio->bi_rw |= rw; 2072 2073 /* 2074 * If it's a regular read/write or a barrier with data attached, 2075 * go through the normal accounting stuff before submission. 2076 */ 2077 if (bio_has_data(bio)) { 2078 unsigned int count; 2079 2080 if (unlikely(rw & REQ_WRITE_SAME)) 2081 count = bdev_logical_block_size(bio->bi_bdev) >> 9; 2082 else 2083 count = bio_sectors(bio); 2084 2085 if (rw & WRITE) { 2086 count_vm_events(PGPGOUT, count); 2087 } else { 2088 task_io_account_read(bio->bi_iter.bi_size); 2089 count_vm_events(PGPGIN, count); 2090 } 2091 2092 if (unlikely(block_dump)) { 2093 char b[BDEVNAME_SIZE]; 2094 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 2095 current->comm, task_pid_nr(current), 2096 (rw & WRITE) ?
"WRITE" : "READ", 2097 (unsigned long long)bio->bi_iter.bi_sector, 2098 bdevname(bio->bi_bdev, b), 2099 count); 2100 } 2101 } 2102 2103 generic_make_request(bio); 2104 } 2105 EXPORT_SYMBOL(submit_bio); 2106 2107 /** 2108 * blk_rq_check_limits - Helper function to check a request for the queue limit 2109 * @q: the queue 2110 * @rq: the request being checked 2111 * 2112 * Description: 2113 * @rq may have been made based on weaker limitations of upper-level queues 2114 * in request stacking drivers, and it may violate the limitation of @q. 2115 * Since the block layer and the underlying device driver trust @rq 2116 * after it is inserted to @q, it should be checked against @q before 2117 * the insertion using this generic function. 2118 * 2119 * This function should also be useful for request stacking drivers 2120 * in some cases below, so export this function. 2121 * Request stacking drivers like request-based dm may change the queue 2122 * limits while requests are in the queue (e.g. dm's table swapping). 2123 * Such request stacking drivers should check those requests against 2124 * the new queue limits again when they dispatch those requests, 2125 * although such checkings are also done against the old queue limits 2126 * when submitting requests. 2127 */ 2128 int blk_rq_check_limits(struct request_queue *q, struct request *rq) 2129 { 2130 if (!rq_mergeable(rq)) 2131 return 0; 2132 2133 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { 2134 printk(KERN_ERR "%s: over max size limit.\n", __func__); 2135 return -EIO; 2136 } 2137 2138 /* 2139 * queue's settings related to segment counting like q->bounce_pfn 2140 * may differ from that of other stacking queues. 2141 * Recalculate it to check the request correctly on this queue's 2142 * limitation. 
2143 */ 2144 blk_recalc_rq_segments(rq); 2145 if (rq->nr_phys_segments > queue_max_segments(q)) { 2146 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 2147 return -EIO; 2148 } 2149 2150 return 0; 2151 } 2152 EXPORT_SYMBOL_GPL(blk_rq_check_limits); 2153 2154 /** 2155 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2156 * @q: the queue to submit the request 2157 * @rq: the request being queued 2158 */ 2159 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 2160 { 2161 unsigned long flags; 2162 int where = ELEVATOR_INSERT_BACK; 2163 2164 if (blk_rq_check_limits(q, rq)) 2165 return -EIO; 2166 2167 if (rq->rq_disk && 2168 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 2169 return -EIO; 2170 2171 if (q->mq_ops) { 2172 if (blk_queue_io_stat(q)) 2173 blk_account_io_start(rq, true); 2174 blk_mq_insert_request(rq, false, true, true); 2175 return 0; 2176 } 2177 2178 spin_lock_irqsave(q->queue_lock, flags); 2179 if (unlikely(blk_queue_dying(q))) { 2180 spin_unlock_irqrestore(q->queue_lock, flags); 2181 return -ENODEV; 2182 } 2183 2184 /* 2185 * The submitted request must be dequeued before calling this function 2186 * because it will be linked to another request_queue 2187 */ 2188 BUG_ON(blk_queued_rq(rq)); 2189 2190 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) 2191 where = ELEVATOR_INSERT_FLUSH; 2192 2193 add_acct_request(q, rq, where); 2194 if (where == ELEVATOR_INSERT_FLUSH) 2195 __blk_run_queue(q); 2196 spin_unlock_irqrestore(q->queue_lock, flags); 2197 2198 return 0; 2199 } 2200 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2201 2202 /** 2203 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 2204 * @rq: request to examine 2205 * 2206 * Description: 2207 * A request could be a merge of IOs which require different failure 2208 * handling. This function determines the number of bytes which 2209 * can be failed from the beginning of the request without 2210 * crossing into an area which needs to be retried further. 2211 * 2212 * Return: 2213 * The number of bytes to fail. 2214 * 2215 * Context: 2216 * queue_lock must be held. 2217 */ 2218 unsigned int blk_rq_err_bytes(const struct request *rq) 2219 { 2220 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 2221 unsigned int bytes = 0; 2222 struct bio *bio; 2223 2224 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) 2225 return blk_rq_bytes(rq); 2226 2227 /* 2228 * Currently the only 'mixing' which can happen is between 2229 * different failfast types. We can safely fail portions 2230 * which have all the failfast bits that the first one has - 2231 * the ones which are at least as eager to fail as the first 2232 * one. 2233 */ 2234 for (bio = rq->bio; bio; bio = bio->bi_next) { 2235 if ((bio->bi_rw & ff) != ff) 2236 break; 2237 bytes += bio->bi_iter.bi_size; 2238 } 2239 2240 /* this could lead to infinite loop */ 2241 BUG_ON(blk_rq_bytes(rq) && !bytes); 2242 return bytes; 2243 } 2244 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 2245 2246 void blk_account_io_completion(struct request *req, unsigned int bytes) 2247 { 2248 if (blk_do_io_stat(req)) { 2249 const int rw = rq_data_dir(req); 2250 struct hd_struct *part; 2251 int cpu; 2252 2253 cpu = part_stat_lock(); 2254 part = req->part; 2255 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 2256 part_stat_unlock(); 2257 } 2258 } 2259
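/*
 * Editorial sketch (not part of the original file): a request-based
 * stacking driver might dispatch an already-prepared clone through
 * blk_insert_cloned_request() above roughly like this; "clone",
 * "lower_q" and my_clone_end_io are hypothetical names.
 *
 *	clone->end_io = my_clone_end_io;
 *	ret = blk_insert_cloned_request(lower_q, clone);
 *	if (ret)
 *		... -EIO/-ENODEV: fail or requeue the original request ...
 */
2260 void blk_account_io_done(struct request *req) 2261 { 2262 /* 2263 * Account IO completion. flush_rq isn't accounted as a 2264 * normal IO on queueing nor completion.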
Accounting the 2265 * containing request is enough. 2266 */ 2267 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { 2268 unsigned long duration = jiffies - req->start_time; 2269 const int rw = rq_data_dir(req); 2270 struct hd_struct *part; 2271 int cpu; 2272 2273 cpu = part_stat_lock(); 2274 part = req->part; 2275 2276 part_stat_inc(cpu, part, ios[rw]); 2277 part_stat_add(cpu, part, ticks[rw], duration); 2278 part_round_stats(cpu, part); 2279 part_dec_in_flight(part, rw); 2280 2281 hd_struct_put(part); 2282 part_stat_unlock(); 2283 } 2284 } 2285 2286 #ifdef CONFIG_PM 2287 /* 2288 * Don't process normal requests when queue is suspended 2289 * or in the process of suspending/resuming 2290 */ 2291 static struct request *blk_pm_peek_request(struct request_queue *q, 2292 struct request *rq) 2293 { 2294 if (q->dev && (q->rpm_status == RPM_SUSPENDED || 2295 (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) 2296 return NULL; 2297 else 2298 return rq; 2299 } 2300 #else 2301 static inline struct request *blk_pm_peek_request(struct request_queue *q, 2302 struct request *rq) 2303 { 2304 return rq; 2305 } 2306 #endif 2307 2308 void blk_account_io_start(struct request *rq, bool new_io) 2309 { 2310 struct hd_struct *part; 2311 int rw = rq_data_dir(rq); 2312 int cpu; 2313 2314 if (!blk_do_io_stat(rq)) 2315 return; 2316 2317 cpu = part_stat_lock(); 2318 2319 if (!new_io) { 2320 part = rq->part; 2321 part_stat_inc(cpu, part, merges[rw]); 2322 } else { 2323 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 2324 if (!hd_struct_try_get(part)) { 2325 /* 2326 * The partition is already being removed, 2327 * the request will be accounted on the disk only 2328 * 2329 * We take a reference on disk->part0 although that 2330 * partition will never be deleted, so we can treat 2331 * it as any other partition. 2332 */ 2333 part = &rq->rq_disk->part0; 2334 hd_struct_get(part); 2335 } 2336 part_round_stats(cpu, part); 2337 part_inc_in_flight(part, rw); 2338 rq->part = part; 2339 } 2340 2341 part_stat_unlock(); 2342 } 2343 2344 /** 2345 * blk_peek_request - peek at the top of a request queue 2346 * @q: request queue to peek at 2347 * 2348 * Description: 2349 * Return the request at the top of @q. The returned request 2350 * should be started using blk_start_request() before LLD starts 2351 * processing it. 2352 * 2353 * Return: 2354 * Pointer to the request at the top of @q if available. Null 2355 * otherwise. 2356 * 2357 * Context: 2358 * queue_lock must be held. 2359 */ 2360 struct request *blk_peek_request(struct request_queue *q) 2361 { 2362 struct request *rq; 2363 int ret; 2364 2365 while ((rq = __elv_next_request(q)) != NULL) { 2366 2367 rq = blk_pm_peek_request(q, rq); 2368 if (!rq) 2369 break; 2370 2371 if (!(rq->cmd_flags & REQ_STARTED)) { 2372 /* 2373 * This is the first time the device driver 2374 * sees this request (possibly after 2375 * requeueing). Notify IO scheduler. 
2376 */ 2377 if (rq->cmd_flags & REQ_SORTED) 2378 elv_activate_rq(q, rq); 2379 2380 /* 2381 * Just mark it as started even if we don't start 2382 * it: a request that has been delayed should 2383 * not be passed by new incoming requests 2384 */ 2385 rq->cmd_flags |= REQ_STARTED; 2386 trace_block_rq_issue(q, rq); 2387 } 2388 2389 if (!q->boundary_rq || q->boundary_rq == rq) { 2390 q->end_sector = rq_end_sector(rq); 2391 q->boundary_rq = NULL; 2392 } 2393 2394 if (rq->cmd_flags & REQ_DONTPREP) 2395 break; 2396 2397 if (q->dma_drain_size && blk_rq_bytes(rq)) { 2398 /* 2399 * Make sure space for the drain appears. We 2400 * know we can do this because max_hw_segments 2401 * has been adjusted to be one fewer than the 2402 * device can handle 2403 */ 2404 rq->nr_phys_segments++; 2405 } 2406 2407 if (!q->prep_rq_fn) 2408 break; 2409 2410 ret = q->prep_rq_fn(q, rq); 2411 if (ret == BLKPREP_OK) { 2412 break; 2413 } else if (ret == BLKPREP_DEFER) { 2414 /* 2415 * the request may have been (partially) prepped. 2416 * we need to keep this request in the front to 2417 * avoid resource deadlock. REQ_STARTED will 2418 * prevent other fs requests from passing this one. 2419 */ 2420 if (q->dma_drain_size && blk_rq_bytes(rq) && 2421 !(rq->cmd_flags & REQ_DONTPREP)) { 2422 /* 2423 * remove the space for the drain we added 2424 * so that we don't add it again 2425 */ 2426 --rq->nr_phys_segments; 2427 } 2428 2429 rq = NULL; 2430 break; 2431 } else if (ret == BLKPREP_KILL) { 2432 rq->cmd_flags |= REQ_QUIET; 2433 /* 2434 * Mark this request as started so we don't trigger 2435 * any debug logic in the end I/O path. 2436 */ 2437 blk_start_request(rq); 2438 __blk_end_request_all(rq, -EIO); 2439 } else { 2440 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 2441 break; 2442 } 2443 } 2444 2445 return rq; 2446 } 2447 EXPORT_SYMBOL(blk_peek_request); 2448 2449 void blk_dequeue_request(struct request *rq) 2450 { 2451 struct request_queue *q = rq->q; 2452 2453 BUG_ON(list_empty(&rq->queuelist)); 2454 BUG_ON(ELV_ON_HASH(rq)); 2455 2456 list_del_init(&rq->queuelist); 2457 2458 /* 2459 * The time frame between a request being removed from the lists 2460 * and when it is freed is accounted as I/O that is in progress at 2461 * the driver side. 2462 */ 2463 if (blk_account_rq(rq)) { 2464 q->in_flight[rq_is_sync(rq)]++; 2465 set_io_start_time_ns(rq); 2466 } 2467 } 2468 2469 /** 2470 * blk_start_request - start request processing on the driver 2471 * @req: request to dequeue 2472 * 2473 * Description: 2474 * Dequeue @req and start the timeout timer on it. This hands off the 2475 * request to the driver. 2476 * 2477 * Block internal functions which don't want to start the timer should 2478 * call blk_dequeue_request(). 2479 * 2480 * Context: 2481 * queue_lock must be held. 2482 */ 2483 void blk_start_request(struct request *req) 2484 { 2485 blk_dequeue_request(req); 2486 2487 /* 2488 * We are now handing the request to the hardware, initialize 2489 * resid_len to full count and add the timeout handler. 2490 */ 2491 req->resid_len = blk_rq_bytes(req); 2492 if (unlikely(blk_bidi_rq(req))) 2493 req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 2494 2495 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); 2496 blk_add_timer(req); 2497 } 2498 EXPORT_SYMBOL(blk_start_request); 2499
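/*
 * Editorial sketch (not part of the original file): a legacy
 * (non-multiqueue) driver's request_fn typically drains the queue with
 * blk_fetch_request(), documented below; the my_hw_* names are
 * hypothetical, and requeueing would go through blk_requeue_request().
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (my_hw_queue_full()) {
 *				... requeue rq and return ...
 *			}
 *			my_hw_issue(rq);
 *		}
 *	}
 */
2500 /** 2501 * blk_fetch_request - fetch a request from a request queue 2502 * @q: request queue to fetch a request from 2503 * 2504 * Description: 2505 * Return the request at the top of @q.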
The request is started on 2506 * return and the LLD can start processing it immediately. 2507 * 2508 * Return: 2509 * Pointer to the request at the top of @q if available. Null 2510 * otherwise. 2511 * 2512 * Context: 2513 * queue_lock must be held. 2514 */ 2515 struct request *blk_fetch_request(struct request_queue *q) 2516 { 2517 struct request *rq; 2518 2519 rq = blk_peek_request(q); 2520 if (rq) 2521 blk_start_request(rq); 2522 return rq; 2523 } 2524 EXPORT_SYMBOL(blk_fetch_request); 2525 2526 /** 2527 * blk_update_request - Special helper function for request stacking drivers 2528 * @req: the request being processed 2529 * @error: %0 for success, < %0 for error 2530 * @nr_bytes: number of bytes to complete @req 2531 * 2532 * Description: 2533 * Ends I/O on a number of bytes attached to @req, but doesn't complete 2534 * the request structure even if @req doesn't have any leftover. 2535 * If @req has leftover, sets it up for the next range of segments. 2536 * 2537 * This special helper function is only for request stacking drivers 2538 * (e.g. request-based dm) so that they can handle partial completion. 2539 * Actual device drivers should use blk_end_request instead. 2540 * 2541 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2542 * a %false return from this function. 2543 * 2544 * Return: 2545 * %false - this request doesn't have any more data 2546 * %true - this request has more data 2547 **/ 2548 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 2549 { 2550 int total_bytes; 2551 2552 trace_block_rq_complete(req->q, req, nr_bytes); 2553 2554 if (!req->bio) 2555 return false; 2556 2557 /* 2558 * For fs requests, rq is just a carrier of independent bios 2559 * and each partial completion should be handled separately. 2560 * Reset per-request error on each partial completion. 2561 * 2562 * TODO: tj: This is too subtle. It would be better to let 2563 * low level drivers do what they see fit. 2564 */ 2565 if (req->cmd_type == REQ_TYPE_FS) 2566 req->errors = 0; 2567 2568 if (error && req->cmd_type == REQ_TYPE_FS && 2569 !(req->cmd_flags & REQ_QUIET)) { 2570 char *error_type; 2571 2572 switch (error) { 2573 case -ENOLINK: 2574 error_type = "recoverable transport"; 2575 break; 2576 case -EREMOTEIO: 2577 error_type = "critical target"; 2578 break; 2579 case -EBADE: 2580 error_type = "critical nexus"; 2581 break; 2582 case -ETIMEDOUT: 2583 error_type = "timeout"; 2584 break; 2585 case -ENOSPC: 2586 error_type = "critical space allocation"; 2587 break; 2588 case -ENODATA: 2589 error_type = "critical medium"; 2590 break; 2591 case -EIO: 2592 default: 2593 error_type = "I/O"; 2594 break; 2595 } 2596 printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n", 2597 __func__, error_type, req->rq_disk ? 2598 req->rq_disk->disk_name : "?", 2599 (unsigned long long)blk_rq_pos(req)); 2600 2601 } 2602 2603 blk_account_io_completion(req, nr_bytes); 2604 2605 total_bytes = 0; 2606 while (req->bio) { 2607 struct bio *bio = req->bio; 2608 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 2609 2610 if (bio_bytes == bio->bi_iter.bi_size) 2611 req->bio = bio->bi_next; 2612 2613 req_bio_endio(req, bio, bio_bytes, error); 2614 2615 total_bytes += bio_bytes; 2616 nr_bytes -= bio_bytes; 2617 2618 if (!nr_bytes) 2619 break; 2620 } 2621 2622 /* 2623 * completely done 2624 */ 2625 if (!req->bio) { 2626 /* 2627 * Reset counters so that the request stacking driver 2628 * can find how many bytes remain in the request 2629 * later.
2630 */ 2631 req->__data_len = 0; 2632 return false; 2633 } 2634 2635 req->__data_len -= total_bytes; 2636 2637 /* update sector only for requests with a clear definition of sector */ 2638 if (req->cmd_type == REQ_TYPE_FS) 2639 req->__sector += total_bytes >> 9; 2640 2641 /* mixed attributes always follow the first bio */ 2642 if (req->cmd_flags & REQ_MIXED_MERGE) { 2643 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2644 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; 2645 } 2646 2647 /* 2648 * If the total number of sectors is less than the first segment 2649 * size, something has gone terribly wrong. 2650 */ 2651 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2652 blk_dump_rq_flags(req, "request botched"); 2653 req->__data_len = blk_rq_cur_bytes(req); 2654 } 2655 2656 /* recalculate the number of segments */ 2657 blk_recalc_rq_segments(req); 2658 2659 return true; 2660 } 2661 EXPORT_SYMBOL_GPL(blk_update_request); 2662 2663 static bool blk_update_bidi_request(struct request *rq, int error, 2664 unsigned int nr_bytes, 2665 unsigned int bidi_bytes) 2666 { 2667 if (blk_update_request(rq, error, nr_bytes)) 2668 return true; 2669 2670 /* Bidi request must be completed as a whole */ 2671 if (unlikely(blk_bidi_rq(rq)) && 2672 blk_update_request(rq->next_rq, error, bidi_bytes)) 2673 return true; 2674 2675 if (blk_queue_add_random(rq->q)) 2676 add_disk_randomness(rq->rq_disk); 2677 2678 return false; 2679 } 2680 2681 /** 2682 * blk_unprep_request - unprepare a request 2683 * @req: the request 2684 * 2685 * This function makes a request ready for complete resubmission (or 2686 * completion). It happens only after all error handling is complete, 2687 * so represents the appropriate moment to deallocate any resources 2688 * that were allocated to the request in the prep_rq_fn. The queue 2689 * lock is held when calling this. 2690 */ 2691 void blk_unprep_request(struct request *req) 2692 { 2693 struct request_queue *q = req->q; 2694 2695 req->cmd_flags &= ~REQ_DONTPREP; 2696 if (q->unprep_rq_fn) 2697 q->unprep_rq_fn(q, req); 2698 } 2699 EXPORT_SYMBOL_GPL(blk_unprep_request); 2700 2701 /* 2702 * queue lock must be held 2703 */ 2704 void blk_finish_request(struct request *req, int error) 2705 { 2706 if (req->cmd_flags & REQ_QUEUED) 2707 blk_queue_end_tag(req->q, req); 2708 2709 BUG_ON(blk_queued_rq(req)); 2710 2711 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2712 laptop_io_completion(&req->q->backing_dev_info); 2713 2714 blk_delete_timer(req); 2715 2716 if (req->cmd_flags & REQ_DONTPREP) 2717 blk_unprep_request(req); 2718 2719 blk_account_io_done(req); 2720 2721 if (req->end_io) 2722 req->end_io(req, error); 2723 else { 2724 if (blk_bidi_rq(req)) 2725 __blk_put_request(req->next_rq->q, req->next_rq); 2726 2727 __blk_put_request(req->q, req); 2728 } 2729 } 2730 EXPORT_SYMBOL(blk_finish_request); 2731
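/*
 * Editorial sketch (not part of the original file): a stacking driver
 * completing the first "done" bytes of a request per the
 * blk_update_request() contract above; locals and error handling are
 * elided for brevity.
 *
 *	if (!blk_update_request(rq, error, done)) {
 *		... no bytes remain; finish under the queue lock ...
 *		spin_lock_irqsave(rq->q->queue_lock, flags);
 *		blk_finish_request(rq, error);
 *		spin_unlock_irqrestore(rq->q->queue_lock, flags);
 *	}
 */
2732 /** 2733 * blk_end_bidi_request - Complete a bidi request 2734 * @rq: the request to complete 2735 * @error: %0 for success, < %0 for error 2736 * @nr_bytes: number of bytes to complete @rq 2737 * @bidi_bytes: number of bytes to complete @rq->next_rq 2738 * 2739 * Description: 2740 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2741 * Drivers that support bidi can safely call this function for any 2742 * type of request, bidi or uni. In the latter case @bidi_bytes is 2743 * just ignored.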
2744 * 2745 * Return: 2746 * %false - we are done with this request 2747 * %true - still buffers pending for this request 2748 **/ 2749 static bool blk_end_bidi_request(struct request *rq, int error, 2750 unsigned int nr_bytes, unsigned int bidi_bytes) 2751 { 2752 struct request_queue *q = rq->q; 2753 unsigned long flags; 2754 2755 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2756 return true; 2757 2758 spin_lock_irqsave(q->queue_lock, flags); 2759 blk_finish_request(rq, error); 2760 spin_unlock_irqrestore(q->queue_lock, flags); 2761 2762 return false; 2763 } 2764 2765 /** 2766 * __blk_end_bidi_request - Complete a bidi request with queue lock held 2767 * @rq: the request to complete 2768 * @error: %0 for success, < %0 for error 2769 * @nr_bytes: number of bytes to complete @rq 2770 * @bidi_bytes: number of bytes to complete @rq->next_rq 2771 * 2772 * Description: 2773 * Identical to blk_end_bidi_request() except that queue lock is 2774 * assumed to be locked on entry and remains so on return. 2775 * 2776 * Return: 2777 * %false - we are done with this request 2778 * %true - still buffers pending for this request 2779 **/ 2780 bool __blk_end_bidi_request(struct request *rq, int error, 2781 unsigned int nr_bytes, unsigned int bidi_bytes) 2782 { 2783 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2784 return true; 2785 2786 blk_finish_request(rq, error); 2787 2788 return false; 2789 } 2790 2791 /** 2792 * blk_end_request - Helper function for drivers to complete the request. 2793 * @rq: the request being processed 2794 * @error: %0 for success, < %0 for error 2795 * @nr_bytes: number of bytes to complete 2796 * 2797 * Description: 2798 * Ends I/O on a number of bytes attached to @rq. 2799 * If @rq has leftover, sets it up for the next range of segments. 2800 * 2801 * Return: 2802 * %false - we are done with this request 2803 * %true - still buffers pending for this request 2804 **/ 2805 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2806 { 2807 return blk_end_bidi_request(rq, error, nr_bytes, 0); 2808 } 2809 EXPORT_SYMBOL(blk_end_request); 2810 2811 /** 2812 * blk_end_request_all - Helper function for drivers to finish the request. 2813 * @rq: the request to finish 2814 * @error: %0 for success, < %0 for error 2815 * 2816 * Description: 2817 * Completely finish @rq. 2818 */ 2819 void blk_end_request_all(struct request *rq, int error) 2820 { 2821 bool pending; 2822 unsigned int bidi_bytes = 0; 2823 2824 if (unlikely(blk_bidi_rq(rq))) 2825 bidi_bytes = blk_rq_bytes(rq->next_rq); 2826 2827 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2828 BUG_ON(pending); 2829 } 2830 EXPORT_SYMBOL(blk_end_request_all); 2831 2832 /** 2833 * blk_end_request_cur - Helper function to finish the current request chunk. 2834 * @rq: the request to finish the current chunk for 2835 * @error: %0 for success, < %0 for error 2836 * 2837 * Description: 2838 * Complete the current consecutively mapped chunk from @rq. 2839 * 2840 * Return: 2841 * %false - we are done with this request 2842 * %true - still buffers pending for this request 2843 */ 2844 bool blk_end_request_cur(struct request *rq, int error) 2845 { 2846 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2847 } 2848 EXPORT_SYMBOL(blk_end_request_cur); 2849
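/*
 * Editorial sketch (not part of the original file): a typical driver
 * completion path using the helpers above, where the hardware has
 * finished "bytes" bytes of "rq" with status "error".
 *
 *	if (!blk_end_request(rq, error, bytes))
 *		... the whole request is now completed ...
 *	else
 *		... partial completion; rq still has data pending ...
 */
2850 /** 2851 * blk_end_request_err - Finish a request till the next failure boundary.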
2852 * @rq: the request to finish till the next failure boundary for 2853 * @error: must be negative errno 2854 * 2855 * Description: 2856 * Complete @rq till the next failure boundary. 2857 * 2858 * Return: 2859 * %false - we are done with this request 2860 * %true - still buffers pending for this request 2861 */ 2862 bool blk_end_request_err(struct request *rq, int error) 2863 { 2864 WARN_ON(error >= 0); 2865 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2866 } 2867 EXPORT_SYMBOL_GPL(blk_end_request_err); 2868 2869 /** 2870 * __blk_end_request - Helper function for drivers to complete the request. 2871 * @rq: the request being processed 2872 * @error: %0 for success, < %0 for error 2873 * @nr_bytes: number of bytes to complete 2874 * 2875 * Description: 2876 * Must be called with queue lock held unlike blk_end_request(). 2877 * 2878 * Return: 2879 * %false - we are done with this request 2880 * %true - still buffers pending for this request 2881 **/ 2882 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2883 { 2884 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 2885 } 2886 EXPORT_SYMBOL(__blk_end_request); 2887 2888 /** 2889 * __blk_end_request_all - Helper function for drivers to finish the request. 2890 * @rq: the request to finish 2891 * @error: %0 for success, < %0 for error 2892 * 2893 * Description: 2894 * Completely finish @rq. Must be called with queue lock held. 2895 */ 2896 void __blk_end_request_all(struct request *rq, int error) 2897 { 2898 bool pending; 2899 unsigned int bidi_bytes = 0; 2900 2901 if (unlikely(blk_bidi_rq(rq))) 2902 bidi_bytes = blk_rq_bytes(rq->next_rq); 2903 2904 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2905 BUG_ON(pending); 2906 } 2907 EXPORT_SYMBOL(__blk_end_request_all); 2908 2909 /** 2910 * __blk_end_request_cur - Helper function to finish the current request chunk. 2911 * @rq: the request to finish the current chunk for 2912 * @error: %0 for success, < %0 for error 2913 * 2914 * Description: 2915 * Complete the current consecutively mapped chunk from @rq. Must 2916 * be called with queue lock held. 2917 * 2918 * Return: 2919 * %false - we are done with this request 2920 * %true - still buffers pending for this request 2921 */ 2922 bool __blk_end_request_cur(struct request *rq, int error) 2923 { 2924 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2925 } 2926 EXPORT_SYMBOL(__blk_end_request_cur); 2927 2928 /** 2929 * __blk_end_request_err - Finish a request till the next failure boundary. 2930 * @rq: the request to finish till the next failure boundary for 2931 * @error: must be negative errno 2932 * 2933 * Description: 2934 * Complete @rq till the next failure boundary. Must be called 2935 * with queue lock held.
2936 * 2937 * Return: 2938 * %false - we are done with this request 2939 * %true - still buffers pending for this request 2940 */ 2941 bool __blk_end_request_err(struct request *rq, int error) 2942 { 2943 WARN_ON(error >= 0); 2944 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2945 } 2946 EXPORT_SYMBOL_GPL(__blk_end_request_err); 2947 2948 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2949 struct bio *bio) 2950 { 2951 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2952 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; 2953 2954 if (bio_has_data(bio)) 2955 rq->nr_phys_segments = bio_phys_segments(q, bio); 2956 2957 rq->__data_len = bio->bi_iter.bi_size; 2958 rq->bio = rq->biotail = bio; 2959 2960 if (bio->bi_bdev) 2961 rq->rq_disk = bio->bi_bdev->bd_disk; 2962 } 2963 2964 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 2965 /** 2966 * rq_flush_dcache_pages - Helper function to flush all pages in a request 2967 * @rq: the request to be flushed 2968 * 2969 * Description: 2970 * Flush all pages in @rq. 2971 */ 2972 void rq_flush_dcache_pages(struct request *rq) 2973 { 2974 struct req_iterator iter; 2975 struct bio_vec bvec; 2976 2977 rq_for_each_segment(bvec, rq, iter) 2978 flush_dcache_page(bvec.bv_page); 2979 } 2980 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2981 #endif 2982 2983 /** 2984 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 2985 * @q : the queue of the device being checked 2986 * 2987 * Description: 2988 * Check if underlying low-level drivers of a device are busy. 2989 * If the drivers want to export their busy state, they must set their own 2990 * exporting function using blk_queue_lld_busy() first. 2991 * 2992 * Basically, this function is used only by request stacking drivers 2993 * to stop dispatching requests to underlying devices when underlying 2994 * devices are busy. This behavior helps I/O merging on the queue 2995 * of the request stacking driver and prevents I/O throughput regression 2996 * on burst I/O load. 2997 * 2998 * Return: 2999 * 0 - Not busy (The request stacking driver should dispatch request) 3000 * 1 - Busy (The request stacking driver should stop dispatching request) 3001 */ 3002 int blk_lld_busy(struct request_queue *q) 3003 { 3004 if (q->lld_busy_fn) 3005 return q->lld_busy_fn(q); 3006 3007 return 0; 3008 } 3009 EXPORT_SYMBOL_GPL(blk_lld_busy); 3010 3011 /** 3012 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 3013 * @rq: the clone request to be cleaned up 3014 * 3015 * Description: 3016 * Free all bios in @rq for a cloned request. 3017 */ 3018 void blk_rq_unprep_clone(struct request *rq) 3019 { 3020 struct bio *bio; 3021 3022 while ((bio = rq->bio) != NULL) { 3023 rq->bio = bio->bi_next; 3024 3025 bio_put(bio); 3026 } 3027 } 3028 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 3029
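/*
 * Editorial sketch (not part of the original file): the typical pairing
 * of blk_rq_prep_clone() (below) with blk_rq_unprep_clone() above in a
 * hypothetical stacking driver; "clone", "orig" and my_bio_set are
 * illustrative names.
 *
 *	if (blk_rq_prep_clone(clone, orig, my_bio_set, GFP_ATOMIC,
 *			      NULL, NULL))
 *		... -ENOMEM; the clone's bios have already been freed ...
 *	...
 *	blk_rq_unprep_clone(clone);	... once done with the clone ...
 */
3030 /* 3031 * Copy attributes of the original request to the clone request. 3032 * The actual data parts (e.g. ->cmd, ->sense) are not copied.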
3033 */ 3034 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 3035 { 3036 dst->cpu = src->cpu; 3037 dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; 3038 dst->cmd_type = src->cmd_type; 3039 dst->__sector = blk_rq_pos(src); 3040 dst->__data_len = blk_rq_bytes(src); 3041 dst->nr_phys_segments = src->nr_phys_segments; 3042 dst->ioprio = src->ioprio; 3043 dst->extra_len = src->extra_len; 3044 } 3045 3046 /** 3047 * blk_rq_prep_clone - Helper function to set up a clone request 3048 * @rq: the request to be set up 3049 * @rq_src: original request to be cloned 3050 * @bs: bio_set that bios for clone are allocated from 3051 * @gfp_mask: memory allocation mask for bio 3052 * @bio_ctr: setup function to be called for each clone bio. 3053 * Returns %0 for success, non %0 for failure. 3054 * @data: private data to be passed to @bio_ctr 3055 * 3056 * Description: 3057 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 3058 * The actual data parts of @rq_src (e.g. ->cmd, ->sense) 3059 * are not copied, and copying such parts is the caller's responsibility. 3060 * Also, pages which the original bios are pointing to are not copied 3061 * and the cloned bios just point to the same pages. 3062 * So cloned bios must be completed before original bios, which means 3063 * the caller must complete @rq before @rq_src. 3064 */ 3065 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 3066 struct bio_set *bs, gfp_t gfp_mask, 3067 int (*bio_ctr)(struct bio *, struct bio *, void *), 3068 void *data) 3069 { 3070 struct bio *bio, *bio_src; 3071 3072 if (!bs) 3073 bs = fs_bio_set; 3074 3075 __rq_for_each_bio(bio_src, rq_src) { 3076 bio = bio_clone_fast(bio_src, gfp_mask, bs); 3077 if (!bio) 3078 goto free_and_out; 3079 3080 if (bio_ctr && bio_ctr(bio, bio_src, data)) 3081 goto free_and_out; 3082 3083 if (rq->bio) { 3084 rq->biotail->bi_next = bio; 3085 rq->biotail = bio; 3086 } else 3087 rq->bio = rq->biotail = bio; 3088 } 3089 3090 __blk_rq_prep_clone(rq, rq_src); 3091 3092 return 0; 3093 3094 free_and_out: 3095 if (bio) 3096 bio_put(bio); 3097 blk_rq_unprep_clone(rq); 3098 3099 return -ENOMEM; 3100 } 3101 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3102 3103 int kblockd_schedule_work(struct work_struct *work) 3104 { 3105 return queue_work(kblockd_workqueue, work); 3106 } 3107 EXPORT_SYMBOL(kblockd_schedule_work); 3108 3109 int kblockd_schedule_delayed_work(struct delayed_work *dwork, 3110 unsigned long delay) 3111 { 3112 return queue_delayed_work(kblockd_workqueue, dwork, delay); 3113 } 3114 EXPORT_SYMBOL(kblockd_schedule_delayed_work); 3115 3116 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 3117 unsigned long delay) 3118 { 3119 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); 3120 } 3121 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); 3122
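/*
 * Editorial sketch (not part of the original file): batching several
 * submissions under one plug, per the description below; "nr_bios" and
 * the "bios" array are hypothetical.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(WRITE, bios[i]);
 *	blk_finish_plug(&plug);
 */
3123 /** 3124 * blk_start_plug - initialize blk_plug and track it inside the task_struct 3125 * @plug: The &struct blk_plug that needs to be initialized 3126 * 3127 * Description: 3128 * Tracking blk_plug inside the task_struct will help with auto-flushing the 3129 * pending I/O should the task end up blocking between blk_start_plug() and 3130 * blk_finish_plug(). This is important from a performance perspective, but 3131 * also ensures that we don't deadlock. For instance, if the task is blocking 3132 * for a memory allocation, memory reclaim could end up wanting to free a 3133 * page belonging to that request that is currently residing in our private 3134 * plug.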
By flushing the pending I/O when the process goes to sleep, we avoid 3135 * this kind of deadlock. 3136 */ 3137 void blk_start_plug(struct blk_plug *plug) 3138 { 3139 struct task_struct *tsk = current; 3140 3141 /* 3142 * If this is a nested plug, don't actually assign it. 3143 */ 3144 if (tsk->plug) 3145 return; 3146 3147 INIT_LIST_HEAD(&plug->list); 3148 INIT_LIST_HEAD(&plug->mq_list); 3149 INIT_LIST_HEAD(&plug->cb_list); 3150 /* 3151 * Store ordering should not be needed here, since a potential 3152 * preempt will imply a full memory barrier 3153 */ 3154 tsk->plug = plug; 3155 } 3156 EXPORT_SYMBOL(blk_start_plug); 3157 3158 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 3159 { 3160 struct request *rqa = container_of(a, struct request, queuelist); 3161 struct request *rqb = container_of(b, struct request, queuelist); 3162 3163 return !(rqa->q < rqb->q || 3164 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); 3165 } 3166 3167 /* 3168 * If 'from_schedule' is true, then postpone the dispatch of requests 3169 * until a safe kblockd context. We do this to avoid accidental big 3170 * additional stack usage in driver dispatch, in places where the original 3171 * plugger did not intend it. 3172 */ 3173 static void queue_unplugged(struct request_queue *q, unsigned int depth, 3174 bool from_schedule) 3175 __releases(q->queue_lock) 3176 { 3177 trace_block_unplug(q, depth, !from_schedule); 3178 3179 if (from_schedule) 3180 blk_run_queue_async(q); 3181 else 3182 __blk_run_queue(q); 3183 spin_unlock(q->queue_lock); 3184 } 3185 3186 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) 3187 { 3188 LIST_HEAD(callbacks); 3189 3190 while (!list_empty(&plug->cb_list)) { 3191 list_splice_init(&plug->cb_list, &callbacks); 3192 3193 while (!list_empty(&callbacks)) { 3194 struct blk_plug_cb *cb = list_first_entry(&callbacks, 3195 struct blk_plug_cb, 3196 list); 3197 list_del(&cb->list); 3198 cb->callback(cb, from_schedule); 3199 } 3200 } 3201 } 3202 3203 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, 3204 int size) 3205 { 3206 struct blk_plug *plug = current->plug; 3207 struct blk_plug_cb *cb; 3208 3209 if (!plug) 3210 return NULL; 3211 3212 list_for_each_entry(cb, &plug->cb_list, list) 3213 if (cb->callback == unplug && cb->data == data) 3214 return cb; 3215 3216 /* Not currently on the callback list */ 3217 BUG_ON(size < sizeof(*cb)); 3218 cb = kzalloc(size, GFP_ATOMIC); 3219 if (cb) { 3220 cb->data = data; 3221 cb->callback = unplug; 3222 list_add(&cb->list, &plug->cb_list); 3223 } 3224 return cb; 3225 } 3226 EXPORT_SYMBOL(blk_check_plugged); 3227 3228 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 3229 { 3230 struct request_queue *q; 3231 unsigned long flags; 3232 struct request *rq; 3233 LIST_HEAD(list); 3234 unsigned int depth; 3235 3236 flush_plug_callbacks(plug, from_schedule); 3237 3238 if (!list_empty(&plug->mq_list)) 3239 blk_mq_flush_plug_list(plug, from_schedule); 3240 3241 if (list_empty(&plug->list)) 3242 return; 3243 3244 list_splice_init(&plug->list, &list); 3245 3246 list_sort(NULL, &list, plug_rq_cmp); 3247 3248 q = NULL; 3249 depth = 0; 3250 3251 /* 3252 * Save and disable interrupts here, to avoid doing it for every 3253 * queue lock we have to take.
3254 */ 3255 local_irq_save(flags); 3256 while (!list_empty(&list)) { 3257 rq = list_entry_rq(list.next); 3258 list_del_init(&rq->queuelist); 3259 BUG_ON(!rq->q); 3260 if (rq->q != q) { 3261 /* 3262 * This drops the queue lock 3263 */ 3264 if (q) 3265 queue_unplugged(q, depth, from_schedule); 3266 q = rq->q; 3267 depth = 0; 3268 spin_lock(q->queue_lock); 3269 } 3270 3271 /* 3272 * Short-circuit if @q is dead 3273 */ 3274 if (unlikely(blk_queue_dying(q))) { 3275 __blk_end_request_all(rq, -ENODEV); 3276 continue; 3277 } 3278 3279 /* 3280 * rq is already accounted, so use raw insert 3281 */ 3282 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) 3283 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 3284 else 3285 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 3286 3287 depth++; 3288 } 3289 3290 /* 3291 * This drops the queue lock 3292 */ 3293 if (q) 3294 queue_unplugged(q, depth, from_schedule); 3295 3296 local_irq_restore(flags); 3297 } 3298 3299 void blk_finish_plug(struct blk_plug *plug) 3300 { 3301 if (plug != current->plug) 3302 return; 3303 blk_flush_plug_list(plug, false); 3304 3305 current->plug = NULL; 3306 } 3307 EXPORT_SYMBOL(blk_finish_plug); 3308 3309 #ifdef CONFIG_PM 3310 /** 3311 * blk_pm_runtime_init - Block layer runtime PM initialization routine 3312 * @q: the queue of the device 3313 * @dev: the device the queue belongs to 3314 * 3315 * Description: 3316 * Initialize runtime-PM-related fields for @q and start auto suspend for 3317 * @dev. Drivers that want to take advantage of request-based runtime PM 3318 * should call this function after @dev has been initialized, and its 3319 * request queue @q has been allocated, while runtime PM for it cannot yet 3320 * happen (either because it is disabled/forbidden or because its usage_count > 0). In most 3321 * cases, the driver should call this function before any I/O has taken place. 3322 * 3323 * This function takes care of setting up autosuspend for the device; 3324 * the autosuspend delay is set to -1 to make runtime suspend impossible 3325 * until an updated value is either set by user or by driver. Drivers do 3326 * not need to touch other autosuspend settings. 3327 * 3328 * The block layer runtime PM is request based, so it only works for drivers 3329 * that use requests as their I/O unit instead of those that directly use bios. 3330 */ 3331 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) 3332 { 3333 q->dev = dev; 3334 q->rpm_status = RPM_ACTIVE; 3335 pm_runtime_set_autosuspend_delay(q->dev, -1); 3336 pm_runtime_use_autosuspend(q->dev); 3337 } 3338 EXPORT_SYMBOL(blk_pm_runtime_init); 3339
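/*
 * Editorial sketch (not part of the original file): the intended shape
 * of a driver's runtime_suspend callback using the helpers documented
 * below; my_hw_suspend is hypothetical, and "q" would come from the
 * driver's private data.
 *
 *	static int my_runtime_suspend(struct device *dev)
 *	{
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(q);
 *		if (err)
 *			return err;
 *		err = my_hw_suspend(dev);
 *		blk_post_runtime_suspend(q, err);
 *		return err;
 *	}
 */
3340 /** 3341 * blk_pre_runtime_suspend - Pre runtime suspend check 3342 * @q: the queue of the device 3343 * 3344 * Description: 3345 * This function will check if runtime suspend is allowed for the device 3346 * by examining if there are any requests pending in the queue. If there 3347 * are requests pending, the device cannot be runtime suspended; otherwise, 3348 * the queue's status will be updated to SUSPENDING and the driver can 3349 * proceed to suspend the device. 3350 * 3351 * For the not allowed case, we mark last busy for the device so that 3352 * runtime PM core will try to autosuspend it some time later. 3353 * 3354 * This function should be called near the start of the device's 3355 * runtime_suspend callback.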
3356 * 3357 * Return: 3358 * 0 - OK to runtime suspend the device 3359 * -EBUSY - Device should not be runtime suspended 3360 */ 3361 int blk_pre_runtime_suspend(struct request_queue *q) 3362 { 3363 int ret = 0; 3364 3365 spin_lock_irq(q->queue_lock); 3366 if (q->nr_pending) { 3367 ret = -EBUSY; 3368 pm_runtime_mark_last_busy(q->dev); 3369 } else { 3370 q->rpm_status = RPM_SUSPENDING; 3371 } 3372 spin_unlock_irq(q->queue_lock); 3373 return ret; 3374 } 3375 EXPORT_SYMBOL(blk_pre_runtime_suspend); 3376 3377 /** 3378 * blk_post_runtime_suspend - Post runtime suspend processing 3379 * @q: the queue of the device 3380 * @err: return value of the device's runtime_suspend function 3381 * 3382 * Description: 3383 * Update the queue's runtime status according to the return value of the 3384 * device's runtime suspend function and mark last busy for the device so 3385 * that PM core will try to auto suspend the device at a later time. 3386 * 3387 * This function should be called near the end of the device's 3388 * runtime_suspend callback. 3389 */ 3390 void blk_post_runtime_suspend(struct request_queue *q, int err) 3391 { 3392 spin_lock_irq(q->queue_lock); 3393 if (!err) { 3394 q->rpm_status = RPM_SUSPENDED; 3395 } else { 3396 q->rpm_status = RPM_ACTIVE; 3397 pm_runtime_mark_last_busy(q->dev); 3398 } 3399 spin_unlock_irq(q->queue_lock); 3400 } 3401 EXPORT_SYMBOL(blk_post_runtime_suspend); 3402 3403 /** 3404 * blk_pre_runtime_resume - Pre runtime resume processing 3405 * @q: the queue of the device 3406 * 3407 * Description: 3408 * Update the queue's runtime status to RESUMING in preparation for the 3409 * runtime resume of the device. 3410 * 3411 * This function should be called near the start of the device's 3412 * runtime_resume callback. 3413 */ 3414 void blk_pre_runtime_resume(struct request_queue *q) 3415 { 3416 spin_lock_irq(q->queue_lock); 3417 q->rpm_status = RPM_RESUMING; 3418 spin_unlock_irq(q->queue_lock); 3419 } 3420 EXPORT_SYMBOL(blk_pre_runtime_resume); 3421 3422 /** 3423 * blk_post_runtime_resume - Post runtime resume processing 3424 * @q: the queue of the device 3425 * @err: return value of the device's runtime_resume function 3426 * 3427 * Description: 3428 * Update the queue's runtime status according to the return value of the 3429 * device's runtime_resume function. If it is successfully resumed, process 3430 * the requests that are queued into the device's queue when it is resuming 3431 * and then mark last busy and initiate autosuspend for it. 3432 * 3433 * This function should be called near the end of the device's 3434 * runtime_resume callback. 
3435 */ 3436 void blk_post_runtime_resume(struct request_queue *q, int err) 3437 { 3438 spin_lock_irq(q->queue_lock); 3439 if (!err) { 3440 q->rpm_status = RPM_ACTIVE; 3441 __blk_run_queue(q); 3442 pm_runtime_mark_last_busy(q->dev); 3443 pm_request_autosuspend(q->dev); 3444 } else { 3445 q->rpm_status = RPM_SUSPENDED; 3446 } 3447 spin_unlock_irq(q->queue_lock); 3448 } 3449 EXPORT_SYMBOL(blk_post_runtime_resume); 3450 #endif 3451 3452 int __init blk_dev_init(void) 3453 { 3454 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 3455 FIELD_SIZEOF(struct request, cmd_flags)); 3456 3457 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3458 kblockd_workqueue = alloc_workqueue("kblockd", 3459 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 3460 if (!kblockd_workqueue) 3461 panic("Failed to create kblockd\n"); 3462 3463 request_cachep = kmem_cache_create("blkdev_requests", 3464 sizeof(struct request), 0, SLAB_PANIC, NULL); 3465 3466 blk_requestq_cachep = kmem_cache_create("blkdev_queue", 3467 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 3468 3469 return 0; 3470 } 3471