1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> 7 * - July2000 8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 9 */ 10 11 /* 12 * This handles all read/write requests to block devices 13 */ 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/backing-dev.h> 17 #include <linux/bio.h> 18 #include <linux/blkdev.h> 19 #include <linux/blk-mq.h> 20 #include <linux/highmem.h> 21 #include <linux/mm.h> 22 #include <linux/kernel_stat.h> 23 #include <linux/string.h> 24 #include <linux/init.h> 25 #include <linux/completion.h> 26 #include <linux/slab.h> 27 #include <linux/swap.h> 28 #include <linux/writeback.h> 29 #include <linux/task_io_accounting_ops.h> 30 #include <linux/fault-inject.h> 31 #include <linux/list_sort.h> 32 #include <linux/delay.h> 33 #include <linux/ratelimit.h> 34 #include <linux/pm_runtime.h> 35 #include <linux/blk-cgroup.h> 36 37 #define CREATE_TRACE_POINTS 38 #include <trace/events/block.h> 39 40 #include "blk.h" 41 #include "blk-mq.h" 42 #include "blk-wbt.h" 43 44 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); 45 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); 46 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 47 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split); 48 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); 49 50 DEFINE_IDA(blk_queue_ida); 51 52 /* 53 * For the allocated request tables 54 */ 55 struct kmem_cache *request_cachep; 56 57 /* 58 * For queue allocation 59 */ 60 struct kmem_cache *blk_requestq_cachep; 61 62 /* 63 * Controlling structure to kblockd 64 */ 65 static struct workqueue_struct *kblockd_workqueue; 66 67 static void blk_clear_congested(struct request_list *rl, int sync) 68 { 69 #ifdef CONFIG_CGROUP_WRITEBACK 70 clear_wb_congested(rl->blkg->wb_congested, sync); 71 #else 72 /* 73 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't 74 * flip its congestion state for events on other blkcgs. 75 */ 76 if (rl == &rl->q->root_rl) 77 clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync); 78 #endif 79 } 80 81 static void blk_set_congested(struct request_list *rl, int sync) 82 { 83 #ifdef CONFIG_CGROUP_WRITEBACK 84 set_wb_congested(rl->blkg->wb_congested, sync); 85 #else 86 /* see blk_clear_congested() */ 87 if (rl == &rl->q->root_rl) 88 set_wb_congested(rl->q->backing_dev_info.wb.congested, sync); 89 #endif 90 } 91 92 void blk_queue_congestion_threshold(struct request_queue *q) 93 { 94 int nr; 95 96 nr = q->nr_requests - (q->nr_requests / 8) + 1; 97 if (nr > q->nr_requests) 98 nr = q->nr_requests; 99 q->nr_congestion_on = nr; 100 101 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; 102 if (nr < 1) 103 nr = 1; 104 q->nr_congestion_off = nr; 105 } 106 107 /** 108 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info 109 * @bdev: device 110 * 111 * Locates the passed device's request queue and returns the address of its 112 * backing_dev_info. This function can only be called if @bdev is opened 113 * and the return value is never NULL. 114 */ 115 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) 116 { 117 struct request_queue *q = bdev_get_queue(bdev); 118 119 return &q->backing_dev_info; 120 } 121 EXPORT_SYMBOL(blk_get_backing_dev_info); 122 123 void blk_rq_init(struct request_queue *q, struct request *rq) 124 { 125 memset(rq, 0, sizeof(*rq)); 126 127 INIT_LIST_HEAD(&rq->queuelist); 128 INIT_LIST_HEAD(&rq->timeout_list); 129 rq->cpu = -1; 130 rq->q = q; 131 rq->__sector = (sector_t) -1; 132 INIT_HLIST_NODE(&rq->hash); 133 RB_CLEAR_NODE(&rq->rb_node); 134 rq->cmd = rq->__cmd; 135 rq->cmd_len = BLK_MAX_CDB; 136 rq->tag = -1; 137 rq->start_time = jiffies; 138 set_start_time_ns(rq); 139 rq->part = NULL; 140 } 141 EXPORT_SYMBOL(blk_rq_init); 142 143 static void req_bio_endio(struct request *rq, struct bio *bio, 144 unsigned int nbytes, int error) 145 { 146 if (error) 147 bio->bi_error = error; 148 149 if (unlikely(rq->rq_flags & RQF_QUIET)) 150 bio_set_flag(bio, BIO_QUIET); 151 152 bio_advance(bio, nbytes); 153 154 /* don't actually finish bio if it's part of flush sequence */ 155 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) 156 bio_endio(bio); 157 } 158 159 void blk_dump_rq_flags(struct request *rq, char *msg) 160 { 161 int bit; 162 163 printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg, 164 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 165 (unsigned long long) rq->cmd_flags); 166 167 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 168 (unsigned long long)blk_rq_pos(rq), 169 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 170 printk(KERN_INFO " bio %p, biotail %p, len %u\n", 171 rq->bio, rq->biotail, blk_rq_bytes(rq)); 172 173 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 174 printk(KERN_INFO " cdb: "); 175 for (bit = 0; bit < BLK_MAX_CDB; bit++) 176 printk("%02x ", rq->cmd[bit]); 177 printk("\n"); 178 } 179 } 180 EXPORT_SYMBOL(blk_dump_rq_flags); 181 182 static void blk_delay_work(struct work_struct *work) 183 { 184 struct request_queue *q; 185 186 q = container_of(work, struct request_queue, delay_work.work); 187 spin_lock_irq(q->queue_lock); 188 __blk_run_queue(q); 189 spin_unlock_irq(q->queue_lock); 190 } 191 192 /** 193 * blk_delay_queue - restart queueing after defined interval 194 * @q: The &struct request_queue in question 195 * @msecs: Delay in msecs 196 * 197 * Description: 198 * Sometimes queueing needs to be postponed for a little while, to allow 199 * resources to come back. This function will make sure that queueing is 200 * restarted around the specified time. Queue lock must be held. 201 */ 202 void blk_delay_queue(struct request_queue *q, unsigned long msecs) 203 { 204 if (likely(!blk_queue_dead(q))) 205 queue_delayed_work(kblockd_workqueue, &q->delay_work, 206 msecs_to_jiffies(msecs)); 207 } 208 EXPORT_SYMBOL(blk_delay_queue); 209 210 /** 211 * blk_start_queue_async - asynchronously restart a previously stopped queue 212 * @q: The &struct request_queue in question 213 * 214 * Description: 215 * blk_start_queue_async() will clear the stop flag on the queue, and 216 * ensure that the request_fn for the queue is run from an async 217 * context. 218 **/ 219 void blk_start_queue_async(struct request_queue *q) 220 { 221 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 222 blk_run_queue_async(q); 223 } 224 EXPORT_SYMBOL(blk_start_queue_async); 225 226 /** 227 * blk_start_queue - restart a previously stopped queue 228 * @q: The &struct request_queue in question 229 * 230 * Description: 231 * blk_start_queue() will clear the stop flag on the queue, and call 232 * the request_fn for the queue if it was in a stopped state when 233 * entered. Also see blk_stop_queue(). Queue lock must be held. 234 **/ 235 void blk_start_queue(struct request_queue *q) 236 { 237 WARN_ON(!irqs_disabled()); 238 239 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 240 __blk_run_queue(q); 241 } 242 EXPORT_SYMBOL(blk_start_queue); 243 244 /** 245 * blk_stop_queue - stop a queue 246 * @q: The &struct request_queue in question 247 * 248 * Description: 249 * The Linux block layer assumes that a block driver will consume all 250 * entries on the request queue when the request_fn strategy is called. 251 * Often this will not happen, because of hardware limitations (queue 252 * depth settings). If a device driver gets a 'queue full' response, 253 * or if it simply chooses not to queue more I/O at one point, it can 254 * call this function to prevent the request_fn from being called until 255 * the driver has signalled it's ready to go again. This happens by calling 256 * blk_start_queue() to restart queue operations. Queue lock must be held. 257 **/ 258 void blk_stop_queue(struct request_queue *q) 259 { 260 cancel_delayed_work(&q->delay_work); 261 queue_flag_set(QUEUE_FLAG_STOPPED, q); 262 } 263 EXPORT_SYMBOL(blk_stop_queue); 264 265 /** 266 * blk_sync_queue - cancel any pending callbacks on a queue 267 * @q: the queue 268 * 269 * Description: 270 * The block layer may perform asynchronous callback activity 271 * on a queue, such as calling the unplug function after a timeout. 272 * A block device may call blk_sync_queue to ensure that any 273 * such activity is cancelled, thus allowing it to release resources 274 * that the callbacks might use. The caller must already have made sure 275 * that its ->make_request_fn will not re-add plugging prior to calling 276 * this function. 277 * 278 * This function does not cancel any asynchronous activity arising 279 * out of elevator or throttling code. That would require elevator_exit() 280 * and blkcg_exit_queue() to be called with queue lock initialized. 281 * 282 */ 283 void blk_sync_queue(struct request_queue *q) 284 { 285 del_timer_sync(&q->timeout); 286 287 if (q->mq_ops) { 288 struct blk_mq_hw_ctx *hctx; 289 int i; 290 291 queue_for_each_hw_ctx(q, hctx, i) { 292 cancel_work_sync(&hctx->run_work); 293 cancel_delayed_work_sync(&hctx->delay_work); 294 } 295 } else { 296 cancel_delayed_work_sync(&q->delay_work); 297 } 298 } 299 EXPORT_SYMBOL(blk_sync_queue); 300 301 /** 302 * __blk_run_queue_uncond - run a queue whether or not it has been stopped 303 * @q: The queue to run 304 * 305 * Description: 306 * Invoke request handling on a queue if there are any pending requests. 307 * May be used to restart request handling after a request has completed. 308 * This variant runs the queue whether or not the queue has been 309 * stopped. Must be called with the queue lock held and interrupts 310 * disabled. See also @blk_run_queue. 311 */ 312 inline void __blk_run_queue_uncond(struct request_queue *q) 313 { 314 if (unlikely(blk_queue_dead(q))) 315 return; 316 317 /* 318 * Some request_fn implementations, e.g. scsi_request_fn(), unlock 319 * the queue lock internally. As a result multiple threads may be 320 * running such a request function concurrently. Keep track of the 321 * number of active request_fn invocations such that blk_drain_queue() 322 * can wait until all these request_fn calls have finished. 323 */ 324 q->request_fn_active++; 325 q->request_fn(q); 326 q->request_fn_active--; 327 } 328 EXPORT_SYMBOL_GPL(__blk_run_queue_uncond); 329 330 /** 331 * __blk_run_queue - run a single device queue 332 * @q: The queue to run 333 * 334 * Description: 335 * See @blk_run_queue. This variant must be called with the queue lock 336 * held and interrupts disabled. 337 */ 338 void __blk_run_queue(struct request_queue *q) 339 { 340 if (unlikely(blk_queue_stopped(q))) 341 return; 342 343 __blk_run_queue_uncond(q); 344 } 345 EXPORT_SYMBOL(__blk_run_queue); 346 347 /** 348 * blk_run_queue_async - run a single device queue in workqueue context 349 * @q: The queue to run 350 * 351 * Description: 352 * Tells kblockd to perform the equivalent of @blk_run_queue on behalf 353 * of us. The caller must hold the queue lock. 354 */ 355 void blk_run_queue_async(struct request_queue *q) 356 { 357 if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q))) 358 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0); 359 } 360 EXPORT_SYMBOL(blk_run_queue_async); 361 362 /** 363 * blk_run_queue - run a single device queue 364 * @q: The queue to run 365 * 366 * Description: 367 * Invoke request handling on this queue, if it has pending work to do. 368 * May be used to restart queueing when a request has completed. 369 */ 370 void blk_run_queue(struct request_queue *q) 371 { 372 unsigned long flags; 373 374 spin_lock_irqsave(q->queue_lock, flags); 375 __blk_run_queue(q); 376 spin_unlock_irqrestore(q->queue_lock, flags); 377 } 378 EXPORT_SYMBOL(blk_run_queue); 379 380 void blk_put_queue(struct request_queue *q) 381 { 382 kobject_put(&q->kobj); 383 } 384 EXPORT_SYMBOL(blk_put_queue); 385 386 /** 387 * __blk_drain_queue - drain requests from request_queue 388 * @q: queue to drain 389 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV 390 * 391 * Drain requests from @q. If @drain_all is set, all requests are drained. 392 * If not, only ELVPRIV requests are drained. The caller is responsible 393 * for ensuring that no new requests which need to be drained are queued. 394 */ 395 static void __blk_drain_queue(struct request_queue *q, bool drain_all) 396 __releases(q->queue_lock) 397 __acquires(q->queue_lock) 398 { 399 int i; 400 401 lockdep_assert_held(q->queue_lock); 402 403 while (true) { 404 bool drain = false; 405 406 /* 407 * The caller might be trying to drain @q before its 408 * elevator is initialized. 409 */ 410 if (q->elevator) 411 elv_drain_elevator(q); 412 413 blkcg_drain_queue(q); 414 415 /* 416 * This function might be called on a queue which failed 417 * driver init after queue creation or is not yet fully 418 * active yet. Some drivers (e.g. fd and loop) get unhappy 419 * in such cases. Kick queue iff dispatch queue has 420 * something on it and @q has request_fn set. 421 */ 422 if (!list_empty(&q->queue_head) && q->request_fn) 423 __blk_run_queue(q); 424 425 drain |= q->nr_rqs_elvpriv; 426 drain |= q->request_fn_active; 427 428 /* 429 * Unfortunately, requests are queued at and tracked from 430 * multiple places and there's no single counter which can 431 * be drained. Check all the queues and counters. 432 */ 433 if (drain_all) { 434 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); 435 drain |= !list_empty(&q->queue_head); 436 for (i = 0; i < 2; i++) { 437 drain |= q->nr_rqs[i]; 438 drain |= q->in_flight[i]; 439 if (fq) 440 drain |= !list_empty(&fq->flush_queue[i]); 441 } 442 } 443 444 if (!drain) 445 break; 446 447 spin_unlock_irq(q->queue_lock); 448 449 msleep(10); 450 451 spin_lock_irq(q->queue_lock); 452 } 453 454 /* 455 * With queue marked dead, any woken up waiter will fail the 456 * allocation path, so the wakeup chaining is lost and we're 457 * left with hung waiters. We need to wake up those waiters. 458 */ 459 if (q->request_fn) { 460 struct request_list *rl; 461 462 blk_queue_for_each_rl(rl, q) 463 for (i = 0; i < ARRAY_SIZE(rl->wait); i++) 464 wake_up_all(&rl->wait[i]); 465 } 466 } 467 468 /** 469 * blk_queue_bypass_start - enter queue bypass mode 470 * @q: queue of interest 471 * 472 * In bypass mode, only the dispatch FIFO queue of @q is used. This 473 * function makes @q enter bypass mode and drains all requests which were 474 * throttled or issued before. On return, it's guaranteed that no request 475 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true 476 * inside queue or RCU read lock. 477 */ 478 void blk_queue_bypass_start(struct request_queue *q) 479 { 480 spin_lock_irq(q->queue_lock); 481 q->bypass_depth++; 482 queue_flag_set(QUEUE_FLAG_BYPASS, q); 483 spin_unlock_irq(q->queue_lock); 484 485 /* 486 * Queues start drained. Skip actual draining till init is 487 * complete. This avoids lenghty delays during queue init which 488 * can happen many times during boot. 489 */ 490 if (blk_queue_init_done(q)) { 491 spin_lock_irq(q->queue_lock); 492 __blk_drain_queue(q, false); 493 spin_unlock_irq(q->queue_lock); 494 495 /* ensure blk_queue_bypass() is %true inside RCU read lock */ 496 synchronize_rcu(); 497 } 498 } 499 EXPORT_SYMBOL_GPL(blk_queue_bypass_start); 500 501 /** 502 * blk_queue_bypass_end - leave queue bypass mode 503 * @q: queue of interest 504 * 505 * Leave bypass mode and restore the normal queueing behavior. 506 */ 507 void blk_queue_bypass_end(struct request_queue *q) 508 { 509 spin_lock_irq(q->queue_lock); 510 if (!--q->bypass_depth) 511 queue_flag_clear(QUEUE_FLAG_BYPASS, q); 512 WARN_ON_ONCE(q->bypass_depth < 0); 513 spin_unlock_irq(q->queue_lock); 514 } 515 EXPORT_SYMBOL_GPL(blk_queue_bypass_end); 516 517 void blk_set_queue_dying(struct request_queue *q) 518 { 519 spin_lock_irq(q->queue_lock); 520 queue_flag_set(QUEUE_FLAG_DYING, q); 521 spin_unlock_irq(q->queue_lock); 522 523 if (q->mq_ops) 524 blk_mq_wake_waiters(q); 525 else { 526 struct request_list *rl; 527 528 blk_queue_for_each_rl(rl, q) { 529 if (rl->rq_pool) { 530 wake_up(&rl->wait[BLK_RW_SYNC]); 531 wake_up(&rl->wait[BLK_RW_ASYNC]); 532 } 533 } 534 } 535 } 536 EXPORT_SYMBOL_GPL(blk_set_queue_dying); 537 538 /** 539 * blk_cleanup_queue - shutdown a request queue 540 * @q: request queue to shutdown 541 * 542 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and 543 * put it. All future requests will be failed immediately with -ENODEV. 544 */ 545 void blk_cleanup_queue(struct request_queue *q) 546 { 547 spinlock_t *lock = q->queue_lock; 548 549 /* mark @q DYING, no new request or merges will be allowed afterwards */ 550 mutex_lock(&q->sysfs_lock); 551 blk_set_queue_dying(q); 552 spin_lock_irq(lock); 553 554 /* 555 * A dying queue is permanently in bypass mode till released. Note 556 * that, unlike blk_queue_bypass_start(), we aren't performing 557 * synchronize_rcu() after entering bypass mode to avoid the delay 558 * as some drivers create and destroy a lot of queues while 559 * probing. This is still safe because blk_release_queue() will be 560 * called only after the queue refcnt drops to zero and nothing, 561 * RCU or not, would be traversing the queue by then. 562 */ 563 q->bypass_depth++; 564 queue_flag_set(QUEUE_FLAG_BYPASS, q); 565 566 queue_flag_set(QUEUE_FLAG_NOMERGES, q); 567 queue_flag_set(QUEUE_FLAG_NOXMERGES, q); 568 queue_flag_set(QUEUE_FLAG_DYING, q); 569 spin_unlock_irq(lock); 570 mutex_unlock(&q->sysfs_lock); 571 572 /* 573 * Drain all requests queued before DYING marking. Set DEAD flag to 574 * prevent that q->request_fn() gets invoked after draining finished. 575 */ 576 blk_freeze_queue(q); 577 spin_lock_irq(lock); 578 if (!q->mq_ops) 579 __blk_drain_queue(q, true); 580 queue_flag_set(QUEUE_FLAG_DEAD, q); 581 spin_unlock_irq(lock); 582 583 /* for synchronous bio-based driver finish in-flight integrity i/o */ 584 blk_flush_integrity(); 585 586 /* @q won't process any more request, flush async actions */ 587 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); 588 blk_sync_queue(q); 589 590 if (q->mq_ops) 591 blk_mq_free_queue(q); 592 percpu_ref_exit(&q->q_usage_counter); 593 594 spin_lock_irq(lock); 595 if (q->queue_lock != &q->__queue_lock) 596 q->queue_lock = &q->__queue_lock; 597 spin_unlock_irq(lock); 598 599 bdi_unregister(&q->backing_dev_info); 600 601 /* @q is and will stay empty, shutdown and put */ 602 blk_put_queue(q); 603 } 604 EXPORT_SYMBOL(blk_cleanup_queue); 605 606 /* Allocate memory local to the request queue */ 607 static void *alloc_request_struct(gfp_t gfp_mask, void *data) 608 { 609 int nid = (int)(long)data; 610 return kmem_cache_alloc_node(request_cachep, gfp_mask, nid); 611 } 612 613 static void free_request_struct(void *element, void *unused) 614 { 615 kmem_cache_free(request_cachep, element); 616 } 617 618 int blk_init_rl(struct request_list *rl, struct request_queue *q, 619 gfp_t gfp_mask) 620 { 621 if (unlikely(rl->rq_pool)) 622 return 0; 623 624 rl->q = q; 625 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; 626 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; 627 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); 628 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); 629 630 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct, 631 free_request_struct, 632 (void *)(long)q->node, gfp_mask, 633 q->node); 634 if (!rl->rq_pool) 635 return -ENOMEM; 636 637 return 0; 638 } 639 640 void blk_exit_rl(struct request_list *rl) 641 { 642 if (rl->rq_pool) 643 mempool_destroy(rl->rq_pool); 644 } 645 646 struct request_queue *blk_alloc_queue(gfp_t gfp_mask) 647 { 648 return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE); 649 } 650 EXPORT_SYMBOL(blk_alloc_queue); 651 652 int blk_queue_enter(struct request_queue *q, bool nowait) 653 { 654 while (true) { 655 int ret; 656 657 if (percpu_ref_tryget_live(&q->q_usage_counter)) 658 return 0; 659 660 if (nowait) 661 return -EBUSY; 662 663 ret = wait_event_interruptible(q->mq_freeze_wq, 664 !atomic_read(&q->mq_freeze_depth) || 665 blk_queue_dying(q)); 666 if (blk_queue_dying(q)) 667 return -ENODEV; 668 if (ret) 669 return ret; 670 } 671 } 672 673 void blk_queue_exit(struct request_queue *q) 674 { 675 percpu_ref_put(&q->q_usage_counter); 676 } 677 678 static void blk_queue_usage_counter_release(struct percpu_ref *ref) 679 { 680 struct request_queue *q = 681 container_of(ref, struct request_queue, q_usage_counter); 682 683 wake_up_all(&q->mq_freeze_wq); 684 } 685 686 static void blk_rq_timed_out_timer(unsigned long data) 687 { 688 struct request_queue *q = (struct request_queue *)data; 689 690 kblockd_schedule_work(&q->timeout_work); 691 } 692 693 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 694 { 695 struct request_queue *q; 696 int err; 697 698 q = kmem_cache_alloc_node(blk_requestq_cachep, 699 gfp_mask | __GFP_ZERO, node_id); 700 if (!q) 701 return NULL; 702 703 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); 704 if (q->id < 0) 705 goto fail_q; 706 707 q->bio_split = bioset_create(BIO_POOL_SIZE, 0); 708 if (!q->bio_split) 709 goto fail_id; 710 711 q->backing_dev_info.ra_pages = 712 (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; 713 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK; 714 q->backing_dev_info.name = "block"; 715 q->node = node_id; 716 717 err = bdi_init(&q->backing_dev_info); 718 if (err) 719 goto fail_split; 720 721 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, 722 laptop_mode_timer_fn, (unsigned long) q); 723 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); 724 INIT_LIST_HEAD(&q->queue_head); 725 INIT_LIST_HEAD(&q->timeout_list); 726 INIT_LIST_HEAD(&q->icq_list); 727 #ifdef CONFIG_BLK_CGROUP 728 INIT_LIST_HEAD(&q->blkg_list); 729 #endif 730 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); 731 732 kobject_init(&q->kobj, &blk_queue_ktype); 733 734 mutex_init(&q->sysfs_lock); 735 spin_lock_init(&q->__queue_lock); 736 737 /* 738 * By default initialize queue_lock to internal lock and driver can 739 * override it later if need be. 740 */ 741 q->queue_lock = &q->__queue_lock; 742 743 /* 744 * A queue starts its life with bypass turned on to avoid 745 * unnecessary bypass on/off overhead and nasty surprises during 746 * init. The initial bypass will be finished when the queue is 747 * registered by blk_register_queue(). 748 */ 749 q->bypass_depth = 1; 750 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); 751 752 init_waitqueue_head(&q->mq_freeze_wq); 753 754 /* 755 * Init percpu_ref in atomic mode so that it's faster to shutdown. 756 * See blk_register_queue() for details. 757 */ 758 if (percpu_ref_init(&q->q_usage_counter, 759 blk_queue_usage_counter_release, 760 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) 761 goto fail_bdi; 762 763 if (blkcg_init_queue(q)) 764 goto fail_ref; 765 766 return q; 767 768 fail_ref: 769 percpu_ref_exit(&q->q_usage_counter); 770 fail_bdi: 771 bdi_destroy(&q->backing_dev_info); 772 fail_split: 773 bioset_free(q->bio_split); 774 fail_id: 775 ida_simple_remove(&blk_queue_ida, q->id); 776 fail_q: 777 kmem_cache_free(blk_requestq_cachep, q); 778 return NULL; 779 } 780 EXPORT_SYMBOL(blk_alloc_queue_node); 781 782 /** 783 * blk_init_queue - prepare a request queue for use with a block device 784 * @rfn: The function to be called to process requests that have been 785 * placed on the queue. 786 * @lock: Request queue spin lock 787 * 788 * Description: 789 * If a block device wishes to use the standard request handling procedures, 790 * which sorts requests and coalesces adjacent requests, then it must 791 * call blk_init_queue(). The function @rfn will be called when there 792 * are requests on the queue that need to be processed. If the device 793 * supports plugging, then @rfn may not be called immediately when requests 794 * are available on the queue, but may be called at some time later instead. 795 * Plugged queues are generally unplugged when a buffer belonging to one 796 * of the requests on the queue is needed, or due to memory pressure. 797 * 798 * @rfn is not required, or even expected, to remove all requests off the 799 * queue, but only as many as it can handle at a time. If it does leave 800 * requests on the queue, it is responsible for arranging that the requests 801 * get dealt with eventually. 802 * 803 * The queue spin lock must be held while manipulating the requests on the 804 * request queue; this lock will be taken also from interrupt context, so irq 805 * disabling is needed for it. 806 * 807 * Function returns a pointer to the initialized request queue, or %NULL if 808 * it didn't succeed. 809 * 810 * Note: 811 * blk_init_queue() must be paired with a blk_cleanup_queue() call 812 * when the block device is deactivated (such as at module unload). 813 **/ 814 815 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 816 { 817 return blk_init_queue_node(rfn, lock, NUMA_NO_NODE); 818 } 819 EXPORT_SYMBOL(blk_init_queue); 820 821 struct request_queue * 822 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 823 { 824 struct request_queue *uninit_q, *q; 825 826 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); 827 if (!uninit_q) 828 return NULL; 829 830 q = blk_init_allocated_queue(uninit_q, rfn, lock); 831 if (!q) 832 blk_cleanup_queue(uninit_q); 833 834 return q; 835 } 836 EXPORT_SYMBOL(blk_init_queue_node); 837 838 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio); 839 840 struct request_queue * 841 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, 842 spinlock_t *lock) 843 { 844 if (!q) 845 return NULL; 846 847 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0); 848 if (!q->fq) 849 return NULL; 850 851 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) 852 goto fail; 853 854 INIT_WORK(&q->timeout_work, blk_timeout_work); 855 q->request_fn = rfn; 856 q->prep_rq_fn = NULL; 857 q->unprep_rq_fn = NULL; 858 q->queue_flags |= QUEUE_FLAG_DEFAULT; 859 860 /* Override internal queue lock with supplied lock pointer */ 861 if (lock) 862 q->queue_lock = lock; 863 864 /* 865 * This also sets hw/phys segments, boundary and size 866 */ 867 blk_queue_make_request(q, blk_queue_bio); 868 869 q->sg_reserved_size = INT_MAX; 870 871 /* Protect q->elevator from elevator_change */ 872 mutex_lock(&q->sysfs_lock); 873 874 /* init elevator */ 875 if (elevator_init(q, NULL)) { 876 mutex_unlock(&q->sysfs_lock); 877 goto fail; 878 } 879 880 mutex_unlock(&q->sysfs_lock); 881 882 return q; 883 884 fail: 885 blk_free_flush_queue(q->fq); 886 wbt_exit(q); 887 return NULL; 888 } 889 EXPORT_SYMBOL(blk_init_allocated_queue); 890 891 bool blk_get_queue(struct request_queue *q) 892 { 893 if (likely(!blk_queue_dying(q))) { 894 __blk_get_queue(q); 895 return true; 896 } 897 898 return false; 899 } 900 EXPORT_SYMBOL(blk_get_queue); 901 902 static inline void blk_free_request(struct request_list *rl, struct request *rq) 903 { 904 if (rq->rq_flags & RQF_ELVPRIV) { 905 elv_put_request(rl->q, rq); 906 if (rq->elv.icq) 907 put_io_context(rq->elv.icq->ioc); 908 } 909 910 mempool_free(rq, rl->rq_pool); 911 } 912 913 /* 914 * ioc_batching returns true if the ioc is a valid batching request and 915 * should be given priority access to a request. 916 */ 917 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) 918 { 919 if (!ioc) 920 return 0; 921 922 /* 923 * Make sure the process is able to allocate at least 1 request 924 * even if the batch times out, otherwise we could theoretically 925 * lose wakeups. 926 */ 927 return ioc->nr_batch_requests == q->nr_batching || 928 (ioc->nr_batch_requests > 0 929 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); 930 } 931 932 /* 933 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This 934 * will cause the process to be a "batcher" on all queues in the system. This 935 * is the behaviour we want though - once it gets a wakeup it should be given 936 * a nice run. 937 */ 938 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) 939 { 940 if (!ioc || ioc_batching(q, ioc)) 941 return; 942 943 ioc->nr_batch_requests = q->nr_batching; 944 ioc->last_waited = jiffies; 945 } 946 947 static void __freed_request(struct request_list *rl, int sync) 948 { 949 struct request_queue *q = rl->q; 950 951 if (rl->count[sync] < queue_congestion_off_threshold(q)) 952 blk_clear_congested(rl, sync); 953 954 if (rl->count[sync] + 1 <= q->nr_requests) { 955 if (waitqueue_active(&rl->wait[sync])) 956 wake_up(&rl->wait[sync]); 957 958 blk_clear_rl_full(rl, sync); 959 } 960 } 961 962 /* 963 * A request has just been released. Account for it, update the full and 964 * congestion status, wake up any waiters. Called under q->queue_lock. 965 */ 966 static void freed_request(struct request_list *rl, bool sync, 967 req_flags_t rq_flags) 968 { 969 struct request_queue *q = rl->q; 970 971 q->nr_rqs[sync]--; 972 rl->count[sync]--; 973 if (rq_flags & RQF_ELVPRIV) 974 q->nr_rqs_elvpriv--; 975 976 __freed_request(rl, sync); 977 978 if (unlikely(rl->starved[sync ^ 1])) 979 __freed_request(rl, sync ^ 1); 980 } 981 982 int blk_update_nr_requests(struct request_queue *q, unsigned int nr) 983 { 984 struct request_list *rl; 985 int on_thresh, off_thresh; 986 987 spin_lock_irq(q->queue_lock); 988 q->nr_requests = nr; 989 blk_queue_congestion_threshold(q); 990 on_thresh = queue_congestion_on_threshold(q); 991 off_thresh = queue_congestion_off_threshold(q); 992 993 blk_queue_for_each_rl(rl, q) { 994 if (rl->count[BLK_RW_SYNC] >= on_thresh) 995 blk_set_congested(rl, BLK_RW_SYNC); 996 else if (rl->count[BLK_RW_SYNC] < off_thresh) 997 blk_clear_congested(rl, BLK_RW_SYNC); 998 999 if (rl->count[BLK_RW_ASYNC] >= on_thresh) 1000 blk_set_congested(rl, BLK_RW_ASYNC); 1001 else if (rl->count[BLK_RW_ASYNC] < off_thresh) 1002 blk_clear_congested(rl, BLK_RW_ASYNC); 1003 1004 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { 1005 blk_set_rl_full(rl, BLK_RW_SYNC); 1006 } else { 1007 blk_clear_rl_full(rl, BLK_RW_SYNC); 1008 wake_up(&rl->wait[BLK_RW_SYNC]); 1009 } 1010 1011 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { 1012 blk_set_rl_full(rl, BLK_RW_ASYNC); 1013 } else { 1014 blk_clear_rl_full(rl, BLK_RW_ASYNC); 1015 wake_up(&rl->wait[BLK_RW_ASYNC]); 1016 } 1017 } 1018 1019 spin_unlock_irq(q->queue_lock); 1020 return 0; 1021 } 1022 1023 /* 1024 * Determine if elevator data should be initialized when allocating the 1025 * request associated with @bio. 1026 */ 1027 static bool blk_rq_should_init_elevator(struct bio *bio) 1028 { 1029 if (!bio) 1030 return true; 1031 1032 /* 1033 * Flush requests do not use the elevator so skip initialization. 1034 * This allows a request to share the flush and elevator data. 1035 */ 1036 if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) 1037 return false; 1038 1039 return true; 1040 } 1041 1042 /** 1043 * rq_ioc - determine io_context for request allocation 1044 * @bio: request being allocated is for this bio (can be %NULL) 1045 * 1046 * Determine io_context to use for request allocation for @bio. May return 1047 * %NULL if %current->io_context doesn't exist. 1048 */ 1049 static struct io_context *rq_ioc(struct bio *bio) 1050 { 1051 #ifdef CONFIG_BLK_CGROUP 1052 if (bio && bio->bi_ioc) 1053 return bio->bi_ioc; 1054 #endif 1055 return current->io_context; 1056 } 1057 1058 /** 1059 * __get_request - get a free request 1060 * @rl: request list to allocate from 1061 * @op: operation and flags 1062 * @bio: bio to allocate request for (can be %NULL) 1063 * @gfp_mask: allocation mask 1064 * 1065 * Get a free request from @q. This function may fail under memory 1066 * pressure or if @q is dead. 1067 * 1068 * Must be called with @q->queue_lock held and, 1069 * Returns ERR_PTR on failure, with @q->queue_lock held. 1070 * Returns request pointer on success, with @q->queue_lock *not held*. 1071 */ 1072 static struct request *__get_request(struct request_list *rl, unsigned int op, 1073 struct bio *bio, gfp_t gfp_mask) 1074 { 1075 struct request_queue *q = rl->q; 1076 struct request *rq; 1077 struct elevator_type *et = q->elevator->type; 1078 struct io_context *ioc = rq_ioc(bio); 1079 struct io_cq *icq = NULL; 1080 const bool is_sync = op_is_sync(op); 1081 int may_queue; 1082 req_flags_t rq_flags = RQF_ALLOCED; 1083 1084 if (unlikely(blk_queue_dying(q))) 1085 return ERR_PTR(-ENODEV); 1086 1087 may_queue = elv_may_queue(q, op); 1088 if (may_queue == ELV_MQUEUE_NO) 1089 goto rq_starved; 1090 1091 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { 1092 if (rl->count[is_sync]+1 >= q->nr_requests) { 1093 /* 1094 * The queue will fill after this allocation, so set 1095 * it as full, and mark this process as "batching". 1096 * This process will be allowed to complete a batch of 1097 * requests, others will be blocked. 1098 */ 1099 if (!blk_rl_full(rl, is_sync)) { 1100 ioc_set_batching(q, ioc); 1101 blk_set_rl_full(rl, is_sync); 1102 } else { 1103 if (may_queue != ELV_MQUEUE_MUST 1104 && !ioc_batching(q, ioc)) { 1105 /* 1106 * The queue is full and the allocating 1107 * process is not a "batcher", and not 1108 * exempted by the IO scheduler 1109 */ 1110 return ERR_PTR(-ENOMEM); 1111 } 1112 } 1113 } 1114 blk_set_congested(rl, is_sync); 1115 } 1116 1117 /* 1118 * Only allow batching queuers to allocate up to 50% over the defined 1119 * limit of requests, otherwise we could have thousands of requests 1120 * allocated with any setting of ->nr_requests 1121 */ 1122 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 1123 return ERR_PTR(-ENOMEM); 1124 1125 q->nr_rqs[is_sync]++; 1126 rl->count[is_sync]++; 1127 rl->starved[is_sync] = 0; 1128 1129 /* 1130 * Decide whether the new request will be managed by elevator. If 1131 * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will 1132 * prevent the current elevator from being destroyed until the new 1133 * request is freed. This guarantees icq's won't be destroyed and 1134 * makes creating new ones safe. 1135 * 1136 * Also, lookup icq while holding queue_lock. If it doesn't exist, 1137 * it will be created after releasing queue_lock. 1138 */ 1139 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { 1140 rq_flags |= RQF_ELVPRIV; 1141 q->nr_rqs_elvpriv++; 1142 if (et->icq_cache && ioc) 1143 icq = ioc_lookup_icq(ioc, q); 1144 } 1145 1146 if (blk_queue_io_stat(q)) 1147 rq_flags |= RQF_IO_STAT; 1148 spin_unlock_irq(q->queue_lock); 1149 1150 /* allocate and init request */ 1151 rq = mempool_alloc(rl->rq_pool, gfp_mask); 1152 if (!rq) 1153 goto fail_alloc; 1154 1155 blk_rq_init(q, rq); 1156 blk_rq_set_rl(rq, rl); 1157 rq->cmd_flags = op; 1158 rq->rq_flags = rq_flags; 1159 1160 /* init elvpriv */ 1161 if (rq_flags & RQF_ELVPRIV) { 1162 if (unlikely(et->icq_cache && !icq)) { 1163 if (ioc) 1164 icq = ioc_create_icq(ioc, q, gfp_mask); 1165 if (!icq) 1166 goto fail_elvpriv; 1167 } 1168 1169 rq->elv.icq = icq; 1170 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) 1171 goto fail_elvpriv; 1172 1173 /* @rq->elv.icq holds io_context until @rq is freed */ 1174 if (icq) 1175 get_io_context(icq->ioc); 1176 } 1177 out: 1178 /* 1179 * ioc may be NULL here, and ioc_batching will be false. That's 1180 * OK, if the queue is under the request limit then requests need 1181 * not count toward the nr_batch_requests limit. There will always 1182 * be some limit enforced by BLK_BATCH_TIME. 1183 */ 1184 if (ioc_batching(q, ioc)) 1185 ioc->nr_batch_requests--; 1186 1187 trace_block_getrq(q, bio, op); 1188 return rq; 1189 1190 fail_elvpriv: 1191 /* 1192 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed 1193 * and may fail indefinitely under memory pressure and thus 1194 * shouldn't stall IO. Treat this request as !elvpriv. This will 1195 * disturb iosched and blkcg but weird is bettern than dead. 1196 */ 1197 printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n", 1198 __func__, dev_name(q->backing_dev_info.dev)); 1199 1200 rq->rq_flags &= ~RQF_ELVPRIV; 1201 rq->elv.icq = NULL; 1202 1203 spin_lock_irq(q->queue_lock); 1204 q->nr_rqs_elvpriv--; 1205 spin_unlock_irq(q->queue_lock); 1206 goto out; 1207 1208 fail_alloc: 1209 /* 1210 * Allocation failed presumably due to memory. Undo anything we 1211 * might have messed up. 1212 * 1213 * Allocating task should really be put onto the front of the wait 1214 * queue, but this is pretty rare. 1215 */ 1216 spin_lock_irq(q->queue_lock); 1217 freed_request(rl, is_sync, rq_flags); 1218 1219 /* 1220 * in the very unlikely event that allocation failed and no 1221 * requests for this direction was pending, mark us starved so that 1222 * freeing of a request in the other direction will notice 1223 * us. another possible fix would be to split the rq mempool into 1224 * READ and WRITE 1225 */ 1226 rq_starved: 1227 if (unlikely(rl->count[is_sync] == 0)) 1228 rl->starved[is_sync] = 1; 1229 return ERR_PTR(-ENOMEM); 1230 } 1231 1232 /** 1233 * get_request - get a free request 1234 * @q: request_queue to allocate request from 1235 * @op: operation and flags 1236 * @bio: bio to allocate request for (can be %NULL) 1237 * @gfp_mask: allocation mask 1238 * 1239 * Get a free request from @q. If %__GFP_DIRECT_RECLAIM is set in @gfp_mask, 1240 * this function keeps retrying under memory pressure and fails iff @q is dead. 1241 * 1242 * Must be called with @q->queue_lock held and, 1243 * Returns ERR_PTR on failure, with @q->queue_lock held. 1244 * Returns request pointer on success, with @q->queue_lock *not held*. 1245 */ 1246 static struct request *get_request(struct request_queue *q, unsigned int op, 1247 struct bio *bio, gfp_t gfp_mask) 1248 { 1249 const bool is_sync = op_is_sync(op); 1250 DEFINE_WAIT(wait); 1251 struct request_list *rl; 1252 struct request *rq; 1253 1254 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ 1255 retry: 1256 rq = __get_request(rl, op, bio, gfp_mask); 1257 if (!IS_ERR(rq)) 1258 return rq; 1259 1260 if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) { 1261 blk_put_rl(rl); 1262 return rq; 1263 } 1264 1265 /* wait on @rl and retry */ 1266 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, 1267 TASK_UNINTERRUPTIBLE); 1268 1269 trace_block_sleeprq(q, bio, op); 1270 1271 spin_unlock_irq(q->queue_lock); 1272 io_schedule(); 1273 1274 /* 1275 * After sleeping, we become a "batching" process and will be able 1276 * to allocate at least one request, and up to a big batch of them 1277 * for a small period time. See ioc_batching, ioc_set_batching 1278 */ 1279 ioc_set_batching(q, current->io_context); 1280 1281 spin_lock_irq(q->queue_lock); 1282 finish_wait(&rl->wait[is_sync], &wait); 1283 1284 goto retry; 1285 } 1286 1287 static struct request *blk_old_get_request(struct request_queue *q, int rw, 1288 gfp_t gfp_mask) 1289 { 1290 struct request *rq; 1291 1292 BUG_ON(rw != READ && rw != WRITE); 1293 1294 /* create ioc upfront */ 1295 create_io_context(gfp_mask, q->node); 1296 1297 spin_lock_irq(q->queue_lock); 1298 rq = get_request(q, rw, NULL, gfp_mask); 1299 if (IS_ERR(rq)) { 1300 spin_unlock_irq(q->queue_lock); 1301 return rq; 1302 } 1303 1304 /* q->queue_lock is unlocked at this point */ 1305 rq->__data_len = 0; 1306 rq->__sector = (sector_t) -1; 1307 rq->bio = rq->biotail = NULL; 1308 return rq; 1309 } 1310 1311 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) 1312 { 1313 if (q->mq_ops) 1314 return blk_mq_alloc_request(q, rw, 1315 (gfp_mask & __GFP_DIRECT_RECLAIM) ? 1316 0 : BLK_MQ_REQ_NOWAIT); 1317 else 1318 return blk_old_get_request(q, rw, gfp_mask); 1319 } 1320 EXPORT_SYMBOL(blk_get_request); 1321 1322 /** 1323 * blk_rq_set_block_pc - initialize a request to type BLOCK_PC 1324 * @rq: request to be initialized 1325 * 1326 */ 1327 void blk_rq_set_block_pc(struct request *rq) 1328 { 1329 rq->cmd_type = REQ_TYPE_BLOCK_PC; 1330 memset(rq->__cmd, 0, sizeof(rq->__cmd)); 1331 } 1332 EXPORT_SYMBOL(blk_rq_set_block_pc); 1333 1334 /** 1335 * blk_requeue_request - put a request back on queue 1336 * @q: request queue where request should be inserted 1337 * @rq: request to be inserted 1338 * 1339 * Description: 1340 * Drivers often keep queueing requests until the hardware cannot accept 1341 * more, when that condition happens we need to put the request back 1342 * on the queue. Must be called with queue lock held. 1343 */ 1344 void blk_requeue_request(struct request_queue *q, struct request *rq) 1345 { 1346 blk_delete_timer(rq); 1347 blk_clear_rq_complete(rq); 1348 trace_block_rq_requeue(q, rq); 1349 wbt_requeue(q->rq_wb, &rq->issue_stat); 1350 1351 if (rq->rq_flags & RQF_QUEUED) 1352 blk_queue_end_tag(q, rq); 1353 1354 BUG_ON(blk_queued_rq(rq)); 1355 1356 elv_requeue_request(q, rq); 1357 } 1358 EXPORT_SYMBOL(blk_requeue_request); 1359 1360 static void add_acct_request(struct request_queue *q, struct request *rq, 1361 int where) 1362 { 1363 blk_account_io_start(rq, true); 1364 __elv_add_request(q, rq, where); 1365 } 1366 1367 static void part_round_stats_single(int cpu, struct hd_struct *part, 1368 unsigned long now) 1369 { 1370 int inflight; 1371 1372 if (now == part->stamp) 1373 return; 1374 1375 inflight = part_in_flight(part); 1376 if (inflight) { 1377 __part_stat_add(cpu, part, time_in_queue, 1378 inflight * (now - part->stamp)); 1379 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1380 } 1381 part->stamp = now; 1382 } 1383 1384 /** 1385 * part_round_stats() - Round off the performance stats on a struct disk_stats. 1386 * @cpu: cpu number for stats access 1387 * @part: target partition 1388 * 1389 * The average IO queue length and utilisation statistics are maintained 1390 * by observing the current state of the queue length and the amount of 1391 * time it has been in this state for. 1392 * 1393 * Normally, that accounting is done on IO completion, but that can result 1394 * in more than a second's worth of IO being accounted for within any one 1395 * second, leading to >100% utilisation. To deal with that, we call this 1396 * function to do a round-off before returning the results when reading 1397 * /proc/diskstats. This accounts immediately for all queue usage up to 1398 * the current jiffies and restarts the counters again. 1399 */ 1400 void part_round_stats(int cpu, struct hd_struct *part) 1401 { 1402 unsigned long now = jiffies; 1403 1404 if (part->partno) 1405 part_round_stats_single(cpu, &part_to_disk(part)->part0, now); 1406 part_round_stats_single(cpu, part, now); 1407 } 1408 EXPORT_SYMBOL_GPL(part_round_stats); 1409 1410 #ifdef CONFIG_PM 1411 static void blk_pm_put_request(struct request *rq) 1412 { 1413 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending) 1414 pm_runtime_mark_last_busy(rq->q->dev); 1415 } 1416 #else 1417 static inline void blk_pm_put_request(struct request *rq) {} 1418 #endif 1419 1420 /* 1421 * queue lock must be held 1422 */ 1423 void __blk_put_request(struct request_queue *q, struct request *req) 1424 { 1425 req_flags_t rq_flags = req->rq_flags; 1426 1427 if (unlikely(!q)) 1428 return; 1429 1430 if (q->mq_ops) { 1431 blk_mq_free_request(req); 1432 return; 1433 } 1434 1435 blk_pm_put_request(req); 1436 1437 elv_completed_request(q, req); 1438 1439 /* this is a bio leak */ 1440 WARN_ON(req->bio != NULL); 1441 1442 wbt_done(q->rq_wb, &req->issue_stat); 1443 1444 /* 1445 * Request may not have originated from ll_rw_blk. if not, 1446 * it didn't come out of our reserved rq pools 1447 */ 1448 if (rq_flags & RQF_ALLOCED) { 1449 struct request_list *rl = blk_rq_rl(req); 1450 bool sync = op_is_sync(req->cmd_flags); 1451 1452 BUG_ON(!list_empty(&req->queuelist)); 1453 BUG_ON(ELV_ON_HASH(req)); 1454 1455 blk_free_request(rl, req); 1456 freed_request(rl, sync, rq_flags); 1457 blk_put_rl(rl); 1458 } 1459 } 1460 EXPORT_SYMBOL_GPL(__blk_put_request); 1461 1462 void blk_put_request(struct request *req) 1463 { 1464 struct request_queue *q = req->q; 1465 1466 if (q->mq_ops) 1467 blk_mq_free_request(req); 1468 else { 1469 unsigned long flags; 1470 1471 spin_lock_irqsave(q->queue_lock, flags); 1472 __blk_put_request(q, req); 1473 spin_unlock_irqrestore(q->queue_lock, flags); 1474 } 1475 } 1476 EXPORT_SYMBOL(blk_put_request); 1477 1478 /** 1479 * blk_add_request_payload - add a payload to a request 1480 * @rq: request to update 1481 * @page: page backing the payload 1482 * @offset: offset in page 1483 * @len: length of the payload. 1484 * 1485 * This allows to later add a payload to an already submitted request by 1486 * a block driver. The driver needs to take care of freeing the payload 1487 * itself. 1488 * 1489 * Note that this is a quite horrible hack and nothing but handling of 1490 * discard requests should ever use it. 1491 */ 1492 void blk_add_request_payload(struct request *rq, struct page *page, 1493 int offset, unsigned int len) 1494 { 1495 struct bio *bio = rq->bio; 1496 1497 bio->bi_io_vec->bv_page = page; 1498 bio->bi_io_vec->bv_offset = offset; 1499 bio->bi_io_vec->bv_len = len; 1500 1501 bio->bi_iter.bi_size = len; 1502 bio->bi_vcnt = 1; 1503 bio->bi_phys_segments = 1; 1504 1505 rq->__data_len = rq->resid_len = len; 1506 rq->nr_phys_segments = 1; 1507 } 1508 EXPORT_SYMBOL_GPL(blk_add_request_payload); 1509 1510 bool bio_attempt_back_merge(struct request_queue *q, struct request *req, 1511 struct bio *bio) 1512 { 1513 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; 1514 1515 if (!ll_back_merge_fn(q, req, bio)) 1516 return false; 1517 1518 trace_block_bio_backmerge(q, req, bio); 1519 1520 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1521 blk_rq_set_mixed_merge(req); 1522 1523 req->biotail->bi_next = bio; 1524 req->biotail = bio; 1525 req->__data_len += bio->bi_iter.bi_size; 1526 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1527 1528 blk_account_io_start(req, false); 1529 return true; 1530 } 1531 1532 bool bio_attempt_front_merge(struct request_queue *q, struct request *req, 1533 struct bio *bio) 1534 { 1535 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; 1536 1537 if (!ll_front_merge_fn(q, req, bio)) 1538 return false; 1539 1540 trace_block_bio_frontmerge(q, req, bio); 1541 1542 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1543 blk_rq_set_mixed_merge(req); 1544 1545 bio->bi_next = req->bio; 1546 req->bio = bio; 1547 1548 req->__sector = bio->bi_iter.bi_sector; 1549 req->__data_len += bio->bi_iter.bi_size; 1550 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1551 1552 blk_account_io_start(req, false); 1553 return true; 1554 } 1555 1556 /** 1557 * blk_attempt_plug_merge - try to merge with %current's plugged list 1558 * @q: request_queue new bio is being queued at 1559 * @bio: new bio being queued 1560 * @request_count: out parameter for number of traversed plugged requests 1561 * @same_queue_rq: pointer to &struct request that gets filled in when 1562 * another request associated with @q is found on the plug list 1563 * (optional, may be %NULL) 1564 * 1565 * Determine whether @bio being queued on @q can be merged with a request 1566 * on %current's plugged list. Returns %true if merge was successful, 1567 * otherwise %false. 1568 * 1569 * Plugging coalesces IOs from the same issuer for the same purpose without 1570 * going through @q->queue_lock. As such it's more of an issuing mechanism 1571 * than scheduling, and the request, while may have elvpriv data, is not 1572 * added on the elevator at this point. In addition, we don't have 1573 * reliable access to the elevator outside queue lock. Only check basic 1574 * merging parameters without querying the elevator. 1575 * 1576 * Caller must ensure !blk_queue_nomerges(q) beforehand. 1577 */ 1578 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, 1579 unsigned int *request_count, 1580 struct request **same_queue_rq) 1581 { 1582 struct blk_plug *plug; 1583 struct request *rq; 1584 bool ret = false; 1585 struct list_head *plug_list; 1586 1587 plug = current->plug; 1588 if (!plug) 1589 goto out; 1590 *request_count = 0; 1591 1592 if (q->mq_ops) 1593 plug_list = &plug->mq_list; 1594 else 1595 plug_list = &plug->list; 1596 1597 list_for_each_entry_reverse(rq, plug_list, queuelist) { 1598 int el_ret; 1599 1600 if (rq->q == q) { 1601 (*request_count)++; 1602 /* 1603 * Only blk-mq multiple hardware queues case checks the 1604 * rq in the same queue, there should be only one such 1605 * rq in a queue 1606 **/ 1607 if (same_queue_rq) 1608 *same_queue_rq = rq; 1609 } 1610 1611 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) 1612 continue; 1613 1614 el_ret = blk_try_merge(rq, bio); 1615 if (el_ret == ELEVATOR_BACK_MERGE) { 1616 ret = bio_attempt_back_merge(q, rq, bio); 1617 if (ret) 1618 break; 1619 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1620 ret = bio_attempt_front_merge(q, rq, bio); 1621 if (ret) 1622 break; 1623 } 1624 } 1625 out: 1626 return ret; 1627 } 1628 1629 unsigned int blk_plug_queued_count(struct request_queue *q) 1630 { 1631 struct blk_plug *plug; 1632 struct request *rq; 1633 struct list_head *plug_list; 1634 unsigned int ret = 0; 1635 1636 plug = current->plug; 1637 if (!plug) 1638 goto out; 1639 1640 if (q->mq_ops) 1641 plug_list = &plug->mq_list; 1642 else 1643 plug_list = &plug->list; 1644 1645 list_for_each_entry(rq, plug_list, queuelist) { 1646 if (rq->q == q) 1647 ret++; 1648 } 1649 out: 1650 return ret; 1651 } 1652 1653 void init_request_from_bio(struct request *req, struct bio *bio) 1654 { 1655 req->cmd_type = REQ_TYPE_FS; 1656 if (bio->bi_opf & REQ_RAHEAD) 1657 req->cmd_flags |= REQ_FAILFAST_MASK; 1658 1659 req->errors = 0; 1660 req->__sector = bio->bi_iter.bi_sector; 1661 req->ioprio = bio_prio(bio); 1662 blk_rq_bio_prep(req->q, req, bio); 1663 } 1664 1665 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) 1666 { 1667 struct blk_plug *plug; 1668 int el_ret, where = ELEVATOR_INSERT_SORT; 1669 struct request *req; 1670 unsigned int request_count = 0; 1671 unsigned int wb_acct; 1672 1673 /* 1674 * low level driver can indicate that it wants pages above a 1675 * certain limit bounced to low memory (ie for highmem, or even 1676 * ISA dma in theory) 1677 */ 1678 blk_queue_bounce(q, &bio); 1679 1680 blk_queue_split(q, &bio, q->bio_split); 1681 1682 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1683 bio->bi_error = -EIO; 1684 bio_endio(bio); 1685 return BLK_QC_T_NONE; 1686 } 1687 1688 if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) { 1689 spin_lock_irq(q->queue_lock); 1690 where = ELEVATOR_INSERT_FLUSH; 1691 goto get_rq; 1692 } 1693 1694 /* 1695 * Check if we can merge with the plugged list before grabbing 1696 * any locks. 1697 */ 1698 if (!blk_queue_nomerges(q)) { 1699 if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) 1700 return BLK_QC_T_NONE; 1701 } else 1702 request_count = blk_plug_queued_count(q); 1703 1704 spin_lock_irq(q->queue_lock); 1705 1706 el_ret = elv_merge(q, &req, bio); 1707 if (el_ret == ELEVATOR_BACK_MERGE) { 1708 if (bio_attempt_back_merge(q, req, bio)) { 1709 elv_bio_merged(q, req, bio); 1710 if (!attempt_back_merge(q, req)) 1711 elv_merged_request(q, req, el_ret); 1712 goto out_unlock; 1713 } 1714 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1715 if (bio_attempt_front_merge(q, req, bio)) { 1716 elv_bio_merged(q, req, bio); 1717 if (!attempt_front_merge(q, req)) 1718 elv_merged_request(q, req, el_ret); 1719 goto out_unlock; 1720 } 1721 } 1722 1723 get_rq: 1724 wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock); 1725 1726 /* 1727 * Grab a free request. This is might sleep but can not fail. 1728 * Returns with the queue unlocked. 1729 */ 1730 req = get_request(q, bio->bi_opf, bio, GFP_NOIO); 1731 if (IS_ERR(req)) { 1732 __wbt_done(q->rq_wb, wb_acct); 1733 bio->bi_error = PTR_ERR(req); 1734 bio_endio(bio); 1735 goto out_unlock; 1736 } 1737 1738 wbt_track(&req->issue_stat, wb_acct); 1739 1740 /* 1741 * After dropping the lock and possibly sleeping here, our request 1742 * may now be mergeable after it had proven unmergeable (above). 1743 * We don't worry about that case for efficiency. It won't happen 1744 * often, and the elevators are able to handle it. 1745 */ 1746 init_request_from_bio(req, bio); 1747 1748 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) 1749 req->cpu = raw_smp_processor_id(); 1750 1751 plug = current->plug; 1752 if (plug) { 1753 /* 1754 * If this is the first request added after a plug, fire 1755 * of a plug trace. 1756 * 1757 * @request_count may become stale because of schedule 1758 * out, so check plug list again. 1759 */ 1760 if (!request_count || list_empty(&plug->list)) 1761 trace_block_plug(q); 1762 else { 1763 struct request *last = list_entry_rq(plug->list.prev); 1764 if (request_count >= BLK_MAX_REQUEST_COUNT || 1765 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) { 1766 blk_flush_plug_list(plug, false); 1767 trace_block_plug(q); 1768 } 1769 } 1770 list_add_tail(&req->queuelist, &plug->list); 1771 blk_account_io_start(req, true); 1772 } else { 1773 spin_lock_irq(q->queue_lock); 1774 add_acct_request(q, req, where); 1775 __blk_run_queue(q); 1776 out_unlock: 1777 spin_unlock_irq(q->queue_lock); 1778 } 1779 1780 return BLK_QC_T_NONE; 1781 } 1782 1783 /* 1784 * If bio->bi_dev is a partition, remap the location 1785 */ 1786 static inline void blk_partition_remap(struct bio *bio) 1787 { 1788 struct block_device *bdev = bio->bi_bdev; 1789 1790 /* 1791 * Zone reset does not include bi_size so bio_sectors() is always 0. 1792 * Include a test for the reset op code and perform the remap if needed. 1793 */ 1794 if (bdev != bdev->bd_contains && 1795 (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) { 1796 struct hd_struct *p = bdev->bd_part; 1797 1798 bio->bi_iter.bi_sector += p->start_sect; 1799 bio->bi_bdev = bdev->bd_contains; 1800 1801 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1802 bdev->bd_dev, 1803 bio->bi_iter.bi_sector - p->start_sect); 1804 } 1805 } 1806 1807 static void handle_bad_sector(struct bio *bio) 1808 { 1809 char b[BDEVNAME_SIZE]; 1810 1811 printk(KERN_INFO "attempt to access beyond end of device\n"); 1812 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", 1813 bdevname(bio->bi_bdev, b), 1814 bio->bi_opf, 1815 (unsigned long long)bio_end_sector(bio), 1816 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); 1817 } 1818 1819 #ifdef CONFIG_FAIL_MAKE_REQUEST 1820 1821 static DECLARE_FAULT_ATTR(fail_make_request); 1822 1823 static int __init setup_fail_make_request(char *str) 1824 { 1825 return setup_fault_attr(&fail_make_request, str); 1826 } 1827 __setup("fail_make_request=", setup_fail_make_request); 1828 1829 static bool should_fail_request(struct hd_struct *part, unsigned int bytes) 1830 { 1831 return part->make_it_fail && should_fail(&fail_make_request, bytes); 1832 } 1833 1834 static int __init fail_make_request_debugfs(void) 1835 { 1836 struct dentry *dir = fault_create_debugfs_attr("fail_make_request", 1837 NULL, &fail_make_request); 1838 1839 return PTR_ERR_OR_ZERO(dir); 1840 } 1841 1842 late_initcall(fail_make_request_debugfs); 1843 1844 #else /* CONFIG_FAIL_MAKE_REQUEST */ 1845 1846 static inline bool should_fail_request(struct hd_struct *part, 1847 unsigned int bytes) 1848 { 1849 return false; 1850 } 1851 1852 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 1853 1854 /* 1855 * Check whether this bio extends beyond the end of the device. 1856 */ 1857 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) 1858 { 1859 sector_t maxsector; 1860 1861 if (!nr_sectors) 1862 return 0; 1863 1864 /* Test device or partition size, when known. */ 1865 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1866 if (maxsector) { 1867 sector_t sector = bio->bi_iter.bi_sector; 1868 1869 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 1870 /* 1871 * This may well happen - the kernel calls bread() 1872 * without checking the size of the device, e.g., when 1873 * mounting a device. 1874 */ 1875 handle_bad_sector(bio); 1876 return 1; 1877 } 1878 } 1879 1880 return 0; 1881 } 1882 1883 static noinline_for_stack bool 1884 generic_make_request_checks(struct bio *bio) 1885 { 1886 struct request_queue *q; 1887 int nr_sectors = bio_sectors(bio); 1888 int err = -EIO; 1889 char b[BDEVNAME_SIZE]; 1890 struct hd_struct *part; 1891 1892 might_sleep(); 1893 1894 if (bio_check_eod(bio, nr_sectors)) 1895 goto end_io; 1896 1897 q = bdev_get_queue(bio->bi_bdev); 1898 if (unlikely(!q)) { 1899 printk(KERN_ERR 1900 "generic_make_request: Trying to access " 1901 "nonexistent block-device %s (%Lu)\n", 1902 bdevname(bio->bi_bdev, b), 1903 (long long) bio->bi_iter.bi_sector); 1904 goto end_io; 1905 } 1906 1907 part = bio->bi_bdev->bd_part; 1908 if (should_fail_request(part, bio->bi_iter.bi_size) || 1909 should_fail_request(&part_to_disk(part)->part0, 1910 bio->bi_iter.bi_size)) 1911 goto end_io; 1912 1913 /* 1914 * If this device has partitions, remap block n 1915 * of partition p to block n+start(p) of the disk. 1916 */ 1917 blk_partition_remap(bio); 1918 1919 if (bio_check_eod(bio, nr_sectors)) 1920 goto end_io; 1921 1922 /* 1923 * Filter flush bio's early so that make_request based 1924 * drivers without flush support don't have to worry 1925 * about them. 1926 */ 1927 if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) && 1928 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { 1929 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); 1930 if (!nr_sectors) { 1931 err = 0; 1932 goto end_io; 1933 } 1934 } 1935 1936 switch (bio_op(bio)) { 1937 case REQ_OP_DISCARD: 1938 if (!blk_queue_discard(q)) 1939 goto not_supported; 1940 break; 1941 case REQ_OP_SECURE_ERASE: 1942 if (!blk_queue_secure_erase(q)) 1943 goto not_supported; 1944 break; 1945 case REQ_OP_WRITE_SAME: 1946 if (!bdev_write_same(bio->bi_bdev)) 1947 goto not_supported; 1948 case REQ_OP_ZONE_REPORT: 1949 case REQ_OP_ZONE_RESET: 1950 if (!bdev_is_zoned(bio->bi_bdev)) 1951 goto not_supported; 1952 break; 1953 case REQ_OP_WRITE_ZEROES: 1954 if (!bdev_write_zeroes_sectors(bio->bi_bdev)) 1955 goto not_supported; 1956 break; 1957 default: 1958 break; 1959 } 1960 1961 /* 1962 * Various block parts want %current->io_context and lazy ioc 1963 * allocation ends up trading a lot of pain for a small amount of 1964 * memory. Just allocate it upfront. This may fail and block 1965 * layer knows how to live with it. 1966 */ 1967 create_io_context(GFP_ATOMIC, q->node); 1968 1969 if (!blkcg_bio_issue_check(q, bio)) 1970 return false; 1971 1972 trace_block_bio_queue(q, bio); 1973 return true; 1974 1975 not_supported: 1976 err = -EOPNOTSUPP; 1977 end_io: 1978 bio->bi_error = err; 1979 bio_endio(bio); 1980 return false; 1981 } 1982 1983 /** 1984 * generic_make_request - hand a buffer to its device driver for I/O 1985 * @bio: The bio describing the location in memory and on the device. 1986 * 1987 * generic_make_request() is used to make I/O requests of block 1988 * devices. It is passed a &struct bio, which describes the I/O that needs 1989 * to be done. 1990 * 1991 * generic_make_request() does not return any status. The 1992 * success/failure status of the request, along with notification of 1993 * completion, is delivered asynchronously through the bio->bi_end_io 1994 * function described (one day) else where. 1995 * 1996 * The caller of generic_make_request must make sure that bi_io_vec 1997 * are set to describe the memory buffer, and that bi_dev and bi_sector are 1998 * set to describe the device address, and the 1999 * bi_end_io and optionally bi_private are set to describe how 2000 * completion notification should be signaled. 2001 * 2002 * generic_make_request and the drivers it calls may use bi_next if this 2003 * bio happens to be merged with someone else, and may resubmit the bio to 2004 * a lower device by calling into generic_make_request recursively, which 2005 * means the bio should NOT be touched after the call to ->make_request_fn. 2006 */ 2007 blk_qc_t generic_make_request(struct bio *bio) 2008 { 2009 struct bio_list bio_list_on_stack; 2010 blk_qc_t ret = BLK_QC_T_NONE; 2011 2012 if (!generic_make_request_checks(bio)) 2013 goto out; 2014 2015 /* 2016 * We only want one ->make_request_fn to be active at a time, else 2017 * stack usage with stacked devices could be a problem. So use 2018 * current->bio_list to keep a list of requests submited by a 2019 * make_request_fn function. current->bio_list is also used as a 2020 * flag to say if generic_make_request is currently active in this 2021 * task or not. If it is NULL, then no make_request is active. If 2022 * it is non-NULL, then a make_request is active, and new requests 2023 * should be added at the tail 2024 */ 2025 if (current->bio_list) { 2026 bio_list_add(current->bio_list, bio); 2027 goto out; 2028 } 2029 2030 /* following loop may be a bit non-obvious, and so deserves some 2031 * explanation. 2032 * Before entering the loop, bio->bi_next is NULL (as all callers 2033 * ensure that) so we have a list with a single bio. 2034 * We pretend that we have just taken it off a longer list, so 2035 * we assign bio_list to a pointer to the bio_list_on_stack, 2036 * thus initialising the bio_list of new bios to be 2037 * added. ->make_request() may indeed add some more bios 2038 * through a recursive call to generic_make_request. If it 2039 * did, we find a non-NULL value in bio_list and re-enter the loop 2040 * from the top. In this case we really did just take the bio 2041 * of the top of the list (no pretending) and so remove it from 2042 * bio_list, and call into ->make_request() again. 2043 */ 2044 BUG_ON(bio->bi_next); 2045 bio_list_init(&bio_list_on_stack); 2046 current->bio_list = &bio_list_on_stack; 2047 do { 2048 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2049 2050 if (likely(blk_queue_enter(q, false) == 0)) { 2051 ret = q->make_request_fn(q, bio); 2052 2053 blk_queue_exit(q); 2054 2055 bio = bio_list_pop(current->bio_list); 2056 } else { 2057 struct bio *bio_next = bio_list_pop(current->bio_list); 2058 2059 bio_io_error(bio); 2060 bio = bio_next; 2061 } 2062 } while (bio); 2063 current->bio_list = NULL; /* deactivate */ 2064 2065 out: 2066 return ret; 2067 } 2068 EXPORT_SYMBOL(generic_make_request); 2069 2070 /** 2071 * submit_bio - submit a bio to the block device layer for I/O 2072 * @bio: The &struct bio which describes the I/O 2073 * 2074 * submit_bio() is very similar in purpose to generic_make_request(), and 2075 * uses that function to do most of the work. Both are fairly rough 2076 * interfaces; @bio must be presetup and ready for I/O. 2077 * 2078 */ 2079 blk_qc_t submit_bio(struct bio *bio) 2080 { 2081 /* 2082 * If it's a regular read/write or a barrier with data attached, 2083 * go through the normal accounting stuff before submission. 2084 */ 2085 if (bio_has_data(bio)) { 2086 unsigned int count; 2087 2088 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 2089 count = bdev_logical_block_size(bio->bi_bdev) >> 9; 2090 else 2091 count = bio_sectors(bio); 2092 2093 if (op_is_write(bio_op(bio))) { 2094 count_vm_events(PGPGOUT, count); 2095 } else { 2096 task_io_account_read(bio->bi_iter.bi_size); 2097 count_vm_events(PGPGIN, count); 2098 } 2099 2100 if (unlikely(block_dump)) { 2101 char b[BDEVNAME_SIZE]; 2102 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 2103 current->comm, task_pid_nr(current), 2104 op_is_write(bio_op(bio)) ? "WRITE" : "READ", 2105 (unsigned long long)bio->bi_iter.bi_sector, 2106 bdevname(bio->bi_bdev, b), 2107 count); 2108 } 2109 } 2110 2111 return generic_make_request(bio); 2112 } 2113 EXPORT_SYMBOL(submit_bio); 2114 2115 /** 2116 * blk_cloned_rq_check_limits - Helper function to check a cloned request 2117 * for new the queue limits 2118 * @q: the queue 2119 * @rq: the request being checked 2120 * 2121 * Description: 2122 * @rq may have been made based on weaker limitations of upper-level queues 2123 * in request stacking drivers, and it may violate the limitation of @q. 2124 * Since the block layer and the underlying device driver trust @rq 2125 * after it is inserted to @q, it should be checked against @q before 2126 * the insertion using this generic function. 2127 * 2128 * Request stacking drivers like request-based dm may change the queue 2129 * limits when retrying requests on other queues. Those requests need 2130 * to be checked against the new queue limits again during dispatch. 2131 */ 2132 static int blk_cloned_rq_check_limits(struct request_queue *q, 2133 struct request *rq) 2134 { 2135 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) { 2136 printk(KERN_ERR "%s: over max size limit.\n", __func__); 2137 return -EIO; 2138 } 2139 2140 /* 2141 * queue's settings related to segment counting like q->bounce_pfn 2142 * may differ from that of other stacking queues. 2143 * Recalculate it to check the request correctly on this queue's 2144 * limitation. 2145 */ 2146 blk_recalc_rq_segments(rq); 2147 if (rq->nr_phys_segments > queue_max_segments(q)) { 2148 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 2149 return -EIO; 2150 } 2151 2152 return 0; 2153 } 2154 2155 /** 2156 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2157 * @q: the queue to submit the request 2158 * @rq: the request being queued 2159 */ 2160 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 2161 { 2162 unsigned long flags; 2163 int where = ELEVATOR_INSERT_BACK; 2164 2165 if (blk_cloned_rq_check_limits(q, rq)) 2166 return -EIO; 2167 2168 if (rq->rq_disk && 2169 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 2170 return -EIO; 2171 2172 if (q->mq_ops) { 2173 if (blk_queue_io_stat(q)) 2174 blk_account_io_start(rq, true); 2175 blk_mq_insert_request(rq, false, true, false); 2176 return 0; 2177 } 2178 2179 spin_lock_irqsave(q->queue_lock, flags); 2180 if (unlikely(blk_queue_dying(q))) { 2181 spin_unlock_irqrestore(q->queue_lock, flags); 2182 return -ENODEV; 2183 } 2184 2185 /* 2186 * Submitting request must be dequeued before calling this function 2187 * because it will be linked to another request_queue 2188 */ 2189 BUG_ON(blk_queued_rq(rq)); 2190 2191 if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA)) 2192 where = ELEVATOR_INSERT_FLUSH; 2193 2194 add_acct_request(q, rq, where); 2195 if (where == ELEVATOR_INSERT_FLUSH) 2196 __blk_run_queue(q); 2197 spin_unlock_irqrestore(q->queue_lock, flags); 2198 2199 return 0; 2200 } 2201 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2202 2203 /** 2204 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 2205 * @rq: request to examine 2206 * 2207 * Description: 2208 * A request could be merge of IOs which require different failure 2209 * handling. This function determines the number of bytes which 2210 * can be failed from the beginning of the request without 2211 * crossing into area which need to be retried further. 2212 * 2213 * Return: 2214 * The number of bytes to fail. 2215 * 2216 * Context: 2217 * queue_lock must be held. 2218 */ 2219 unsigned int blk_rq_err_bytes(const struct request *rq) 2220 { 2221 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 2222 unsigned int bytes = 0; 2223 struct bio *bio; 2224 2225 if (!(rq->rq_flags & RQF_MIXED_MERGE)) 2226 return blk_rq_bytes(rq); 2227 2228 /* 2229 * Currently the only 'mixing' which can happen is between 2230 * different fastfail types. We can safely fail portions 2231 * which have all the failfast bits that the first one has - 2232 * the ones which are at least as eager to fail as the first 2233 * one. 2234 */ 2235 for (bio = rq->bio; bio; bio = bio->bi_next) { 2236 if ((bio->bi_opf & ff) != ff) 2237 break; 2238 bytes += bio->bi_iter.bi_size; 2239 } 2240 2241 /* this could lead to infinite loop */ 2242 BUG_ON(blk_rq_bytes(rq) && !bytes); 2243 return bytes; 2244 } 2245 EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 2246 2247 void blk_account_io_completion(struct request *req, unsigned int bytes) 2248 { 2249 if (blk_do_io_stat(req)) { 2250 const int rw = rq_data_dir(req); 2251 struct hd_struct *part; 2252 int cpu; 2253 2254 cpu = part_stat_lock(); 2255 part = req->part; 2256 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 2257 part_stat_unlock(); 2258 } 2259 } 2260 2261 void blk_account_io_done(struct request *req) 2262 { 2263 /* 2264 * Account IO completion. flush_rq isn't accounted as a 2265 * normal IO on queueing nor completion. Accounting the 2266 * containing request is enough. 2267 */ 2268 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { 2269 unsigned long duration = jiffies - req->start_time; 2270 const int rw = rq_data_dir(req); 2271 struct hd_struct *part; 2272 int cpu; 2273 2274 cpu = part_stat_lock(); 2275 part = req->part; 2276 2277 part_stat_inc(cpu, part, ios[rw]); 2278 part_stat_add(cpu, part, ticks[rw], duration); 2279 part_round_stats(cpu, part); 2280 part_dec_in_flight(part, rw); 2281 2282 hd_struct_put(part); 2283 part_stat_unlock(); 2284 } 2285 } 2286 2287 #ifdef CONFIG_PM 2288 /* 2289 * Don't process normal requests when queue is suspended 2290 * or in the process of suspending/resuming 2291 */ 2292 static struct request *blk_pm_peek_request(struct request_queue *q, 2293 struct request *rq) 2294 { 2295 if (q->dev && (q->rpm_status == RPM_SUSPENDED || 2296 (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM)))) 2297 return NULL; 2298 else 2299 return rq; 2300 } 2301 #else 2302 static inline struct request *blk_pm_peek_request(struct request_queue *q, 2303 struct request *rq) 2304 { 2305 return rq; 2306 } 2307 #endif 2308 2309 void blk_account_io_start(struct request *rq, bool new_io) 2310 { 2311 struct hd_struct *part; 2312 int rw = rq_data_dir(rq); 2313 int cpu; 2314 2315 if (!blk_do_io_stat(rq)) 2316 return; 2317 2318 cpu = part_stat_lock(); 2319 2320 if (!new_io) { 2321 part = rq->part; 2322 part_stat_inc(cpu, part, merges[rw]); 2323 } else { 2324 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 2325 if (!hd_struct_try_get(part)) { 2326 /* 2327 * The partition is already being removed, 2328 * the request will be accounted on the disk only 2329 * 2330 * We take a reference on disk->part0 although that 2331 * partition will never be deleted, so we can treat 2332 * it as any other partition. 2333 */ 2334 part = &rq->rq_disk->part0; 2335 hd_struct_get(part); 2336 } 2337 part_round_stats(cpu, part); 2338 part_inc_in_flight(part, rw); 2339 rq->part = part; 2340 } 2341 2342 part_stat_unlock(); 2343 } 2344 2345 /** 2346 * blk_peek_request - peek at the top of a request queue 2347 * @q: request queue to peek at 2348 * 2349 * Description: 2350 * Return the request at the top of @q. The returned request 2351 * should be started using blk_start_request() before LLD starts 2352 * processing it. 2353 * 2354 * Return: 2355 * Pointer to the request at the top of @q if available. Null 2356 * otherwise. 2357 * 2358 * Context: 2359 * queue_lock must be held. 2360 */ 2361 struct request *blk_peek_request(struct request_queue *q) 2362 { 2363 struct request *rq; 2364 int ret; 2365 2366 while ((rq = __elv_next_request(q)) != NULL) { 2367 2368 rq = blk_pm_peek_request(q, rq); 2369 if (!rq) 2370 break; 2371 2372 if (!(rq->rq_flags & RQF_STARTED)) { 2373 /* 2374 * This is the first time the device driver 2375 * sees this request (possibly after 2376 * requeueing). Notify IO scheduler. 2377 */ 2378 if (rq->rq_flags & RQF_SORTED) 2379 elv_activate_rq(q, rq); 2380 2381 /* 2382 * just mark as started even if we don't start 2383 * it, a request that has been delayed should 2384 * not be passed by new incoming requests 2385 */ 2386 rq->rq_flags |= RQF_STARTED; 2387 trace_block_rq_issue(q, rq); 2388 } 2389 2390 if (!q->boundary_rq || q->boundary_rq == rq) { 2391 q->end_sector = rq_end_sector(rq); 2392 q->boundary_rq = NULL; 2393 } 2394 2395 if (rq->rq_flags & RQF_DONTPREP) 2396 break; 2397 2398 if (q->dma_drain_size && blk_rq_bytes(rq)) { 2399 /* 2400 * make sure space for the drain appears we 2401 * know we can do this because max_hw_segments 2402 * has been adjusted to be one fewer than the 2403 * device can handle 2404 */ 2405 rq->nr_phys_segments++; 2406 } 2407 2408 if (!q->prep_rq_fn) 2409 break; 2410 2411 ret = q->prep_rq_fn(q, rq); 2412 if (ret == BLKPREP_OK) { 2413 break; 2414 } else if (ret == BLKPREP_DEFER) { 2415 /* 2416 * the request may have been (partially) prepped. 2417 * we need to keep this request in the front to 2418 * avoid resource deadlock. RQF_STARTED will 2419 * prevent other fs requests from passing this one. 2420 */ 2421 if (q->dma_drain_size && blk_rq_bytes(rq) && 2422 !(rq->rq_flags & RQF_DONTPREP)) { 2423 /* 2424 * remove the space for the drain we added 2425 * so that we don't add it again 2426 */ 2427 --rq->nr_phys_segments; 2428 } 2429 2430 rq = NULL; 2431 break; 2432 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) { 2433 int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO; 2434 2435 rq->rq_flags |= RQF_QUIET; 2436 /* 2437 * Mark this request as started so we don't trigger 2438 * any debug logic in the end I/O path. 2439 */ 2440 blk_start_request(rq); 2441 __blk_end_request_all(rq, err); 2442 } else { 2443 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 2444 break; 2445 } 2446 } 2447 2448 return rq; 2449 } 2450 EXPORT_SYMBOL(blk_peek_request); 2451 2452 void blk_dequeue_request(struct request *rq) 2453 { 2454 struct request_queue *q = rq->q; 2455 2456 BUG_ON(list_empty(&rq->queuelist)); 2457 BUG_ON(ELV_ON_HASH(rq)); 2458 2459 list_del_init(&rq->queuelist); 2460 2461 /* 2462 * the time frame between a request being removed from the lists 2463 * and to it is freed is accounted as io that is in progress at 2464 * the driver side. 2465 */ 2466 if (blk_account_rq(rq)) { 2467 q->in_flight[rq_is_sync(rq)]++; 2468 set_io_start_time_ns(rq); 2469 } 2470 } 2471 2472 /** 2473 * blk_start_request - start request processing on the driver 2474 * @req: request to dequeue 2475 * 2476 * Description: 2477 * Dequeue @req and start timeout timer on it. This hands off the 2478 * request to the driver. 2479 * 2480 * Block internal functions which don't want to start timer should 2481 * call blk_dequeue_request(). 2482 * 2483 * Context: 2484 * queue_lock must be held. 2485 */ 2486 void blk_start_request(struct request *req) 2487 { 2488 blk_dequeue_request(req); 2489 2490 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) { 2491 blk_stat_set_issue_time(&req->issue_stat); 2492 req->rq_flags |= RQF_STATS; 2493 wbt_issue(req->q->rq_wb, &req->issue_stat); 2494 } 2495 2496 /* 2497 * We are now handing the request to the hardware, initialize 2498 * resid_len to full count and add the timeout handler. 2499 */ 2500 req->resid_len = blk_rq_bytes(req); 2501 if (unlikely(blk_bidi_rq(req))) 2502 req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 2503 2504 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); 2505 blk_add_timer(req); 2506 } 2507 EXPORT_SYMBOL(blk_start_request); 2508 2509 /** 2510 * blk_fetch_request - fetch a request from a request queue 2511 * @q: request queue to fetch a request from 2512 * 2513 * Description: 2514 * Return the request at the top of @q. The request is started on 2515 * return and LLD can start processing it immediately. 2516 * 2517 * Return: 2518 * Pointer to the request at the top of @q if available. Null 2519 * otherwise. 2520 * 2521 * Context: 2522 * queue_lock must be held. 2523 */ 2524 struct request *blk_fetch_request(struct request_queue *q) 2525 { 2526 struct request *rq; 2527 2528 rq = blk_peek_request(q); 2529 if (rq) 2530 blk_start_request(rq); 2531 return rq; 2532 } 2533 EXPORT_SYMBOL(blk_fetch_request); 2534 2535 /** 2536 * blk_update_request - Special helper function for request stacking drivers 2537 * @req: the request being processed 2538 * @error: %0 for success, < %0 for error 2539 * @nr_bytes: number of bytes to complete @req 2540 * 2541 * Description: 2542 * Ends I/O on a number of bytes attached to @req, but doesn't complete 2543 * the request structure even if @req doesn't have leftover. 2544 * If @req has leftover, sets it up for the next range of segments. 2545 * 2546 * This special helper function is only for request stacking drivers 2547 * (e.g. request-based dm) so that they can handle partial completion. 2548 * Actual device drivers should use blk_end_request instead. 2549 * 2550 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2551 * %false return from this function. 2552 * 2553 * Return: 2554 * %false - this request doesn't have any more data 2555 * %true - this request has more data 2556 **/ 2557 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 2558 { 2559 int total_bytes; 2560 2561 trace_block_rq_complete(req->q, req, nr_bytes); 2562 2563 if (!req->bio) 2564 return false; 2565 2566 /* 2567 * For fs requests, rq is just carrier of independent bio's 2568 * and each partial completion should be handled separately. 2569 * Reset per-request error on each partial completion. 2570 * 2571 * TODO: tj: This is too subtle. It would be better to let 2572 * low level drivers do what they see fit. 2573 */ 2574 if (req->cmd_type == REQ_TYPE_FS) 2575 req->errors = 0; 2576 2577 if (error && req->cmd_type == REQ_TYPE_FS && 2578 !(req->rq_flags & RQF_QUIET)) { 2579 char *error_type; 2580 2581 switch (error) { 2582 case -ENOLINK: 2583 error_type = "recoverable transport"; 2584 break; 2585 case -EREMOTEIO: 2586 error_type = "critical target"; 2587 break; 2588 case -EBADE: 2589 error_type = "critical nexus"; 2590 break; 2591 case -ETIMEDOUT: 2592 error_type = "timeout"; 2593 break; 2594 case -ENOSPC: 2595 error_type = "critical space allocation"; 2596 break; 2597 case -ENODATA: 2598 error_type = "critical medium"; 2599 break; 2600 case -EIO: 2601 default: 2602 error_type = "I/O"; 2603 break; 2604 } 2605 printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n", 2606 __func__, error_type, req->rq_disk ? 2607 req->rq_disk->disk_name : "?", 2608 (unsigned long long)blk_rq_pos(req)); 2609 2610 } 2611 2612 blk_account_io_completion(req, nr_bytes); 2613 2614 total_bytes = 0; 2615 while (req->bio) { 2616 struct bio *bio = req->bio; 2617 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 2618 2619 if (bio_bytes == bio->bi_iter.bi_size) 2620 req->bio = bio->bi_next; 2621 2622 req_bio_endio(req, bio, bio_bytes, error); 2623 2624 total_bytes += bio_bytes; 2625 nr_bytes -= bio_bytes; 2626 2627 if (!nr_bytes) 2628 break; 2629 } 2630 2631 /* 2632 * completely done 2633 */ 2634 if (!req->bio) { 2635 /* 2636 * Reset counters so that the request stacking driver 2637 * can find how many bytes remain in the request 2638 * later. 2639 */ 2640 req->__data_len = 0; 2641 return false; 2642 } 2643 2644 req->__data_len -= total_bytes; 2645 2646 /* update sector only for requests with clear definition of sector */ 2647 if (req->cmd_type == REQ_TYPE_FS) 2648 req->__sector += total_bytes >> 9; 2649 2650 /* mixed attributes always follow the first bio */ 2651 if (req->rq_flags & RQF_MIXED_MERGE) { 2652 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2653 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; 2654 } 2655 2656 /* 2657 * If total number of sectors is less than the first segment 2658 * size, something has gone terribly wrong. 2659 */ 2660 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2661 blk_dump_rq_flags(req, "request botched"); 2662 req->__data_len = blk_rq_cur_bytes(req); 2663 } 2664 2665 /* recalculate the number of segments */ 2666 blk_recalc_rq_segments(req); 2667 2668 return true; 2669 } 2670 EXPORT_SYMBOL_GPL(blk_update_request); 2671 2672 static bool blk_update_bidi_request(struct request *rq, int error, 2673 unsigned int nr_bytes, 2674 unsigned int bidi_bytes) 2675 { 2676 if (blk_update_request(rq, error, nr_bytes)) 2677 return true; 2678 2679 /* Bidi request must be completed as a whole */ 2680 if (unlikely(blk_bidi_rq(rq)) && 2681 blk_update_request(rq->next_rq, error, bidi_bytes)) 2682 return true; 2683 2684 if (blk_queue_add_random(rq->q)) 2685 add_disk_randomness(rq->rq_disk); 2686 2687 return false; 2688 } 2689 2690 /** 2691 * blk_unprep_request - unprepare a request 2692 * @req: the request 2693 * 2694 * This function makes a request ready for complete resubmission (or 2695 * completion). It happens only after all error handling is complete, 2696 * so represents the appropriate moment to deallocate any resources 2697 * that were allocated to the request in the prep_rq_fn. The queue 2698 * lock is held when calling this. 2699 */ 2700 void blk_unprep_request(struct request *req) 2701 { 2702 struct request_queue *q = req->q; 2703 2704 req->rq_flags &= ~RQF_DONTPREP; 2705 if (q->unprep_rq_fn) 2706 q->unprep_rq_fn(q, req); 2707 } 2708 EXPORT_SYMBOL_GPL(blk_unprep_request); 2709 2710 /* 2711 * queue lock must be held 2712 */ 2713 void blk_finish_request(struct request *req, int error) 2714 { 2715 struct request_queue *q = req->q; 2716 2717 if (req->rq_flags & RQF_STATS) 2718 blk_stat_add(&q->rq_stats[rq_data_dir(req)], req); 2719 2720 if (req->rq_flags & RQF_QUEUED) 2721 blk_queue_end_tag(q, req); 2722 2723 BUG_ON(blk_queued_rq(req)); 2724 2725 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2726 laptop_io_completion(&req->q->backing_dev_info); 2727 2728 blk_delete_timer(req); 2729 2730 if (req->rq_flags & RQF_DONTPREP) 2731 blk_unprep_request(req); 2732 2733 blk_account_io_done(req); 2734 2735 if (req->end_io) { 2736 wbt_done(req->q->rq_wb, &req->issue_stat); 2737 req->end_io(req, error); 2738 } else { 2739 if (blk_bidi_rq(req)) 2740 __blk_put_request(req->next_rq->q, req->next_rq); 2741 2742 __blk_put_request(q, req); 2743 } 2744 } 2745 EXPORT_SYMBOL(blk_finish_request); 2746 2747 /** 2748 * blk_end_bidi_request - Complete a bidi request 2749 * @rq: the request to complete 2750 * @error: %0 for success, < %0 for error 2751 * @nr_bytes: number of bytes to complete @rq 2752 * @bidi_bytes: number of bytes to complete @rq->next_rq 2753 * 2754 * Description: 2755 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2756 * Drivers that supports bidi can safely call this member for any 2757 * type of request, bidi or uni. In the later case @bidi_bytes is 2758 * just ignored. 2759 * 2760 * Return: 2761 * %false - we are done with this request 2762 * %true - still buffers pending for this request 2763 **/ 2764 static bool blk_end_bidi_request(struct request *rq, int error, 2765 unsigned int nr_bytes, unsigned int bidi_bytes) 2766 { 2767 struct request_queue *q = rq->q; 2768 unsigned long flags; 2769 2770 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2771 return true; 2772 2773 spin_lock_irqsave(q->queue_lock, flags); 2774 blk_finish_request(rq, error); 2775 spin_unlock_irqrestore(q->queue_lock, flags); 2776 2777 return false; 2778 } 2779 2780 /** 2781 * __blk_end_bidi_request - Complete a bidi request with queue lock held 2782 * @rq: the request to complete 2783 * @error: %0 for success, < %0 for error 2784 * @nr_bytes: number of bytes to complete @rq 2785 * @bidi_bytes: number of bytes to complete @rq->next_rq 2786 * 2787 * Description: 2788 * Identical to blk_end_bidi_request() except that queue lock is 2789 * assumed to be locked on entry and remains so on return. 2790 * 2791 * Return: 2792 * %false - we are done with this request 2793 * %true - still buffers pending for this request 2794 **/ 2795 bool __blk_end_bidi_request(struct request *rq, int error, 2796 unsigned int nr_bytes, unsigned int bidi_bytes) 2797 { 2798 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2799 return true; 2800 2801 blk_finish_request(rq, error); 2802 2803 return false; 2804 } 2805 2806 /** 2807 * blk_end_request - Helper function for drivers to complete the request. 2808 * @rq: the request being processed 2809 * @error: %0 for success, < %0 for error 2810 * @nr_bytes: number of bytes to complete 2811 * 2812 * Description: 2813 * Ends I/O on a number of bytes attached to @rq. 2814 * If @rq has leftover, sets it up for the next range of segments. 2815 * 2816 * Return: 2817 * %false - we are done with this request 2818 * %true - still buffers pending for this request 2819 **/ 2820 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2821 { 2822 return blk_end_bidi_request(rq, error, nr_bytes, 0); 2823 } 2824 EXPORT_SYMBOL(blk_end_request); 2825 2826 /** 2827 * blk_end_request_all - Helper function for drives to finish the request. 2828 * @rq: the request to finish 2829 * @error: %0 for success, < %0 for error 2830 * 2831 * Description: 2832 * Completely finish @rq. 2833 */ 2834 void blk_end_request_all(struct request *rq, int error) 2835 { 2836 bool pending; 2837 unsigned int bidi_bytes = 0; 2838 2839 if (unlikely(blk_bidi_rq(rq))) 2840 bidi_bytes = blk_rq_bytes(rq->next_rq); 2841 2842 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2843 BUG_ON(pending); 2844 } 2845 EXPORT_SYMBOL(blk_end_request_all); 2846 2847 /** 2848 * blk_end_request_cur - Helper function to finish the current request chunk. 2849 * @rq: the request to finish the current chunk for 2850 * @error: %0 for success, < %0 for error 2851 * 2852 * Description: 2853 * Complete the current consecutively mapped chunk from @rq. 2854 * 2855 * Return: 2856 * %false - we are done with this request 2857 * %true - still buffers pending for this request 2858 */ 2859 bool blk_end_request_cur(struct request *rq, int error) 2860 { 2861 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2862 } 2863 EXPORT_SYMBOL(blk_end_request_cur); 2864 2865 /** 2866 * blk_end_request_err - Finish a request till the next failure boundary. 2867 * @rq: the request to finish till the next failure boundary for 2868 * @error: must be negative errno 2869 * 2870 * Description: 2871 * Complete @rq till the next failure boundary. 2872 * 2873 * Return: 2874 * %false - we are done with this request 2875 * %true - still buffers pending for this request 2876 */ 2877 bool blk_end_request_err(struct request *rq, int error) 2878 { 2879 WARN_ON(error >= 0); 2880 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2881 } 2882 EXPORT_SYMBOL_GPL(blk_end_request_err); 2883 2884 /** 2885 * __blk_end_request - Helper function for drivers to complete the request. 2886 * @rq: the request being processed 2887 * @error: %0 for success, < %0 for error 2888 * @nr_bytes: number of bytes to complete 2889 * 2890 * Description: 2891 * Must be called with queue lock held unlike blk_end_request(). 2892 * 2893 * Return: 2894 * %false - we are done with this request 2895 * %true - still buffers pending for this request 2896 **/ 2897 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2898 { 2899 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 2900 } 2901 EXPORT_SYMBOL(__blk_end_request); 2902 2903 /** 2904 * __blk_end_request_all - Helper function for drives to finish the request. 2905 * @rq: the request to finish 2906 * @error: %0 for success, < %0 for error 2907 * 2908 * Description: 2909 * Completely finish @rq. Must be called with queue lock held. 2910 */ 2911 void __blk_end_request_all(struct request *rq, int error) 2912 { 2913 bool pending; 2914 unsigned int bidi_bytes = 0; 2915 2916 if (unlikely(blk_bidi_rq(rq))) 2917 bidi_bytes = blk_rq_bytes(rq->next_rq); 2918 2919 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2920 BUG_ON(pending); 2921 } 2922 EXPORT_SYMBOL(__blk_end_request_all); 2923 2924 /** 2925 * __blk_end_request_cur - Helper function to finish the current request chunk. 2926 * @rq: the request to finish the current chunk for 2927 * @error: %0 for success, < %0 for error 2928 * 2929 * Description: 2930 * Complete the current consecutively mapped chunk from @rq. Must 2931 * be called with queue lock held. 2932 * 2933 * Return: 2934 * %false - we are done with this request 2935 * %true - still buffers pending for this request 2936 */ 2937 bool __blk_end_request_cur(struct request *rq, int error) 2938 { 2939 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2940 } 2941 EXPORT_SYMBOL(__blk_end_request_cur); 2942 2943 /** 2944 * __blk_end_request_err - Finish a request till the next failure boundary. 2945 * @rq: the request to finish till the next failure boundary for 2946 * @error: must be negative errno 2947 * 2948 * Description: 2949 * Complete @rq till the next failure boundary. Must be called 2950 * with queue lock held. 2951 * 2952 * Return: 2953 * %false - we are done with this request 2954 * %true - still buffers pending for this request 2955 */ 2956 bool __blk_end_request_err(struct request *rq, int error) 2957 { 2958 WARN_ON(error >= 0); 2959 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2960 } 2961 EXPORT_SYMBOL_GPL(__blk_end_request_err); 2962 2963 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2964 struct bio *bio) 2965 { 2966 if (bio_has_data(bio)) 2967 rq->nr_phys_segments = bio_phys_segments(q, bio); 2968 2969 rq->__data_len = bio->bi_iter.bi_size; 2970 rq->bio = rq->biotail = bio; 2971 2972 if (bio->bi_bdev) 2973 rq->rq_disk = bio->bi_bdev->bd_disk; 2974 } 2975 2976 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 2977 /** 2978 * rq_flush_dcache_pages - Helper function to flush all pages in a request 2979 * @rq: the request to be flushed 2980 * 2981 * Description: 2982 * Flush all pages in @rq. 2983 */ 2984 void rq_flush_dcache_pages(struct request *rq) 2985 { 2986 struct req_iterator iter; 2987 struct bio_vec bvec; 2988 2989 rq_for_each_segment(bvec, rq, iter) 2990 flush_dcache_page(bvec.bv_page); 2991 } 2992 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2993 #endif 2994 2995 /** 2996 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 2997 * @q : the queue of the device being checked 2998 * 2999 * Description: 3000 * Check if underlying low-level drivers of a device are busy. 3001 * If the drivers want to export their busy state, they must set own 3002 * exporting function using blk_queue_lld_busy() first. 3003 * 3004 * Basically, this function is used only by request stacking drivers 3005 * to stop dispatching requests to underlying devices when underlying 3006 * devices are busy. This behavior helps more I/O merging on the queue 3007 * of the request stacking driver and prevents I/O throughput regression 3008 * on burst I/O load. 3009 * 3010 * Return: 3011 * 0 - Not busy (The request stacking driver should dispatch request) 3012 * 1 - Busy (The request stacking driver should stop dispatching request) 3013 */ 3014 int blk_lld_busy(struct request_queue *q) 3015 { 3016 if (q->lld_busy_fn) 3017 return q->lld_busy_fn(q); 3018 3019 return 0; 3020 } 3021 EXPORT_SYMBOL_GPL(blk_lld_busy); 3022 3023 /** 3024 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 3025 * @rq: the clone request to be cleaned up 3026 * 3027 * Description: 3028 * Free all bios in @rq for a cloned request. 3029 */ 3030 void blk_rq_unprep_clone(struct request *rq) 3031 { 3032 struct bio *bio; 3033 3034 while ((bio = rq->bio) != NULL) { 3035 rq->bio = bio->bi_next; 3036 3037 bio_put(bio); 3038 } 3039 } 3040 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 3041 3042 /* 3043 * Copy attributes of the original request to the clone request. 3044 * The actual data parts (e.g. ->cmd, ->sense) are not copied. 3045 */ 3046 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 3047 { 3048 dst->cpu = src->cpu; 3049 dst->cmd_flags = src->cmd_flags | REQ_NOMERGE; 3050 dst->cmd_type = src->cmd_type; 3051 dst->__sector = blk_rq_pos(src); 3052 dst->__data_len = blk_rq_bytes(src); 3053 dst->nr_phys_segments = src->nr_phys_segments; 3054 dst->ioprio = src->ioprio; 3055 dst->extra_len = src->extra_len; 3056 } 3057 3058 /** 3059 * blk_rq_prep_clone - Helper function to setup clone request 3060 * @rq: the request to be setup 3061 * @rq_src: original request to be cloned 3062 * @bs: bio_set that bios for clone are allocated from 3063 * @gfp_mask: memory allocation mask for bio 3064 * @bio_ctr: setup function to be called for each clone bio. 3065 * Returns %0 for success, non %0 for failure. 3066 * @data: private data to be passed to @bio_ctr 3067 * 3068 * Description: 3069 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 3070 * The actual data parts of @rq_src (e.g. ->cmd, ->sense) 3071 * are not copied, and copying such parts is the caller's responsibility. 3072 * Also, pages which the original bios are pointing to are not copied 3073 * and the cloned bios just point same pages. 3074 * So cloned bios must be completed before original bios, which means 3075 * the caller must complete @rq before @rq_src. 3076 */ 3077 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 3078 struct bio_set *bs, gfp_t gfp_mask, 3079 int (*bio_ctr)(struct bio *, struct bio *, void *), 3080 void *data) 3081 { 3082 struct bio *bio, *bio_src; 3083 3084 if (!bs) 3085 bs = fs_bio_set; 3086 3087 __rq_for_each_bio(bio_src, rq_src) { 3088 bio = bio_clone_fast(bio_src, gfp_mask, bs); 3089 if (!bio) 3090 goto free_and_out; 3091 3092 if (bio_ctr && bio_ctr(bio, bio_src, data)) 3093 goto free_and_out; 3094 3095 if (rq->bio) { 3096 rq->biotail->bi_next = bio; 3097 rq->biotail = bio; 3098 } else 3099 rq->bio = rq->biotail = bio; 3100 } 3101 3102 __blk_rq_prep_clone(rq, rq_src); 3103 3104 return 0; 3105 3106 free_and_out: 3107 if (bio) 3108 bio_put(bio); 3109 blk_rq_unprep_clone(rq); 3110 3111 return -ENOMEM; 3112 } 3113 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3114 3115 int kblockd_schedule_work(struct work_struct *work) 3116 { 3117 return queue_work(kblockd_workqueue, work); 3118 } 3119 EXPORT_SYMBOL(kblockd_schedule_work); 3120 3121 int kblockd_schedule_work_on(int cpu, struct work_struct *work) 3122 { 3123 return queue_work_on(cpu, kblockd_workqueue, work); 3124 } 3125 EXPORT_SYMBOL(kblockd_schedule_work_on); 3126 3127 int kblockd_schedule_delayed_work(struct delayed_work *dwork, 3128 unsigned long delay) 3129 { 3130 return queue_delayed_work(kblockd_workqueue, dwork, delay); 3131 } 3132 EXPORT_SYMBOL(kblockd_schedule_delayed_work); 3133 3134 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 3135 unsigned long delay) 3136 { 3137 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); 3138 } 3139 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); 3140 3141 /** 3142 * blk_start_plug - initialize blk_plug and track it inside the task_struct 3143 * @plug: The &struct blk_plug that needs to be initialized 3144 * 3145 * Description: 3146 * Tracking blk_plug inside the task_struct will help with auto-flushing the 3147 * pending I/O should the task end up blocking between blk_start_plug() and 3148 * blk_finish_plug(). This is important from a performance perspective, but 3149 * also ensures that we don't deadlock. For instance, if the task is blocking 3150 * for a memory allocation, memory reclaim could end up wanting to free a 3151 * page belonging to that request that is currently residing in our private 3152 * plug. By flushing the pending I/O when the process goes to sleep, we avoid 3153 * this kind of deadlock. 3154 */ 3155 void blk_start_plug(struct blk_plug *plug) 3156 { 3157 struct task_struct *tsk = current; 3158 3159 /* 3160 * If this is a nested plug, don't actually assign it. 3161 */ 3162 if (tsk->plug) 3163 return; 3164 3165 INIT_LIST_HEAD(&plug->list); 3166 INIT_LIST_HEAD(&plug->mq_list); 3167 INIT_LIST_HEAD(&plug->cb_list); 3168 /* 3169 * Store ordering should not be needed here, since a potential 3170 * preempt will imply a full memory barrier 3171 */ 3172 tsk->plug = plug; 3173 } 3174 EXPORT_SYMBOL(blk_start_plug); 3175 3176 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 3177 { 3178 struct request *rqa = container_of(a, struct request, queuelist); 3179 struct request *rqb = container_of(b, struct request, queuelist); 3180 3181 return !(rqa->q < rqb->q || 3182 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); 3183 } 3184 3185 /* 3186 * If 'from_schedule' is true, then postpone the dispatch of requests 3187 * until a safe kblockd context. We due this to avoid accidental big 3188 * additional stack usage in driver dispatch, in places where the originally 3189 * plugger did not intend it. 3190 */ 3191 static void queue_unplugged(struct request_queue *q, unsigned int depth, 3192 bool from_schedule) 3193 __releases(q->queue_lock) 3194 { 3195 trace_block_unplug(q, depth, !from_schedule); 3196 3197 if (from_schedule) 3198 blk_run_queue_async(q); 3199 else 3200 __blk_run_queue(q); 3201 spin_unlock(q->queue_lock); 3202 } 3203 3204 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) 3205 { 3206 LIST_HEAD(callbacks); 3207 3208 while (!list_empty(&plug->cb_list)) { 3209 list_splice_init(&plug->cb_list, &callbacks); 3210 3211 while (!list_empty(&callbacks)) { 3212 struct blk_plug_cb *cb = list_first_entry(&callbacks, 3213 struct blk_plug_cb, 3214 list); 3215 list_del(&cb->list); 3216 cb->callback(cb, from_schedule); 3217 } 3218 } 3219 } 3220 3221 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, 3222 int size) 3223 { 3224 struct blk_plug *plug = current->plug; 3225 struct blk_plug_cb *cb; 3226 3227 if (!plug) 3228 return NULL; 3229 3230 list_for_each_entry(cb, &plug->cb_list, list) 3231 if (cb->callback == unplug && cb->data == data) 3232 return cb; 3233 3234 /* Not currently on the callback list */ 3235 BUG_ON(size < sizeof(*cb)); 3236 cb = kzalloc(size, GFP_ATOMIC); 3237 if (cb) { 3238 cb->data = data; 3239 cb->callback = unplug; 3240 list_add(&cb->list, &plug->cb_list); 3241 } 3242 return cb; 3243 } 3244 EXPORT_SYMBOL(blk_check_plugged); 3245 3246 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 3247 { 3248 struct request_queue *q; 3249 unsigned long flags; 3250 struct request *rq; 3251 LIST_HEAD(list); 3252 unsigned int depth; 3253 3254 flush_plug_callbacks(plug, from_schedule); 3255 3256 if (!list_empty(&plug->mq_list)) 3257 blk_mq_flush_plug_list(plug, from_schedule); 3258 3259 if (list_empty(&plug->list)) 3260 return; 3261 3262 list_splice_init(&plug->list, &list); 3263 3264 list_sort(NULL, &list, plug_rq_cmp); 3265 3266 q = NULL; 3267 depth = 0; 3268 3269 /* 3270 * Save and disable interrupts here, to avoid doing it for every 3271 * queue lock we have to take. 3272 */ 3273 local_irq_save(flags); 3274 while (!list_empty(&list)) { 3275 rq = list_entry_rq(list.next); 3276 list_del_init(&rq->queuelist); 3277 BUG_ON(!rq->q); 3278 if (rq->q != q) { 3279 /* 3280 * This drops the queue lock 3281 */ 3282 if (q) 3283 queue_unplugged(q, depth, from_schedule); 3284 q = rq->q; 3285 depth = 0; 3286 spin_lock(q->queue_lock); 3287 } 3288 3289 /* 3290 * Short-circuit if @q is dead 3291 */ 3292 if (unlikely(blk_queue_dying(q))) { 3293 __blk_end_request_all(rq, -ENODEV); 3294 continue; 3295 } 3296 3297 /* 3298 * rq is already accounted, so use raw insert 3299 */ 3300 if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA)) 3301 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 3302 else 3303 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 3304 3305 depth++; 3306 } 3307 3308 /* 3309 * This drops the queue lock 3310 */ 3311 if (q) 3312 queue_unplugged(q, depth, from_schedule); 3313 3314 local_irq_restore(flags); 3315 } 3316 3317 void blk_finish_plug(struct blk_plug *plug) 3318 { 3319 if (plug != current->plug) 3320 return; 3321 blk_flush_plug_list(plug, false); 3322 3323 current->plug = NULL; 3324 } 3325 EXPORT_SYMBOL(blk_finish_plug); 3326 3327 #ifdef CONFIG_PM 3328 /** 3329 * blk_pm_runtime_init - Block layer runtime PM initialization routine 3330 * @q: the queue of the device 3331 * @dev: the device the queue belongs to 3332 * 3333 * Description: 3334 * Initialize runtime-PM-related fields for @q and start auto suspend for 3335 * @dev. Drivers that want to take advantage of request-based runtime PM 3336 * should call this function after @dev has been initialized, and its 3337 * request queue @q has been allocated, and runtime PM for it can not happen 3338 * yet(either due to disabled/forbidden or its usage_count > 0). In most 3339 * cases, driver should call this function before any I/O has taken place. 3340 * 3341 * This function takes care of setting up using auto suspend for the device, 3342 * the autosuspend delay is set to -1 to make runtime suspend impossible 3343 * until an updated value is either set by user or by driver. Drivers do 3344 * not need to touch other autosuspend settings. 3345 * 3346 * The block layer runtime PM is request based, so only works for drivers 3347 * that use request as their IO unit instead of those directly use bio's. 3348 */ 3349 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) 3350 { 3351 q->dev = dev; 3352 q->rpm_status = RPM_ACTIVE; 3353 pm_runtime_set_autosuspend_delay(q->dev, -1); 3354 pm_runtime_use_autosuspend(q->dev); 3355 } 3356 EXPORT_SYMBOL(blk_pm_runtime_init); 3357 3358 /** 3359 * blk_pre_runtime_suspend - Pre runtime suspend check 3360 * @q: the queue of the device 3361 * 3362 * Description: 3363 * This function will check if runtime suspend is allowed for the device 3364 * by examining if there are any requests pending in the queue. If there 3365 * are requests pending, the device can not be runtime suspended; otherwise, 3366 * the queue's status will be updated to SUSPENDING and the driver can 3367 * proceed to suspend the device. 3368 * 3369 * For the not allowed case, we mark last busy for the device so that 3370 * runtime PM core will try to autosuspend it some time later. 3371 * 3372 * This function should be called near the start of the device's 3373 * runtime_suspend callback. 3374 * 3375 * Return: 3376 * 0 - OK to runtime suspend the device 3377 * -EBUSY - Device should not be runtime suspended 3378 */ 3379 int blk_pre_runtime_suspend(struct request_queue *q) 3380 { 3381 int ret = 0; 3382 3383 if (!q->dev) 3384 return ret; 3385 3386 spin_lock_irq(q->queue_lock); 3387 if (q->nr_pending) { 3388 ret = -EBUSY; 3389 pm_runtime_mark_last_busy(q->dev); 3390 } else { 3391 q->rpm_status = RPM_SUSPENDING; 3392 } 3393 spin_unlock_irq(q->queue_lock); 3394 return ret; 3395 } 3396 EXPORT_SYMBOL(blk_pre_runtime_suspend); 3397 3398 /** 3399 * blk_post_runtime_suspend - Post runtime suspend processing 3400 * @q: the queue of the device 3401 * @err: return value of the device's runtime_suspend function 3402 * 3403 * Description: 3404 * Update the queue's runtime status according to the return value of the 3405 * device's runtime suspend function and mark last busy for the device so 3406 * that PM core will try to auto suspend the device at a later time. 3407 * 3408 * This function should be called near the end of the device's 3409 * runtime_suspend callback. 3410 */ 3411 void blk_post_runtime_suspend(struct request_queue *q, int err) 3412 { 3413 if (!q->dev) 3414 return; 3415 3416 spin_lock_irq(q->queue_lock); 3417 if (!err) { 3418 q->rpm_status = RPM_SUSPENDED; 3419 } else { 3420 q->rpm_status = RPM_ACTIVE; 3421 pm_runtime_mark_last_busy(q->dev); 3422 } 3423 spin_unlock_irq(q->queue_lock); 3424 } 3425 EXPORT_SYMBOL(blk_post_runtime_suspend); 3426 3427 /** 3428 * blk_pre_runtime_resume - Pre runtime resume processing 3429 * @q: the queue of the device 3430 * 3431 * Description: 3432 * Update the queue's runtime status to RESUMING in preparation for the 3433 * runtime resume of the device. 3434 * 3435 * This function should be called near the start of the device's 3436 * runtime_resume callback. 3437 */ 3438 void blk_pre_runtime_resume(struct request_queue *q) 3439 { 3440 if (!q->dev) 3441 return; 3442 3443 spin_lock_irq(q->queue_lock); 3444 q->rpm_status = RPM_RESUMING; 3445 spin_unlock_irq(q->queue_lock); 3446 } 3447 EXPORT_SYMBOL(blk_pre_runtime_resume); 3448 3449 /** 3450 * blk_post_runtime_resume - Post runtime resume processing 3451 * @q: the queue of the device 3452 * @err: return value of the device's runtime_resume function 3453 * 3454 * Description: 3455 * Update the queue's runtime status according to the return value of the 3456 * device's runtime_resume function. If it is successfully resumed, process 3457 * the requests that are queued into the device's queue when it is resuming 3458 * and then mark last busy and initiate autosuspend for it. 3459 * 3460 * This function should be called near the end of the device's 3461 * runtime_resume callback. 3462 */ 3463 void blk_post_runtime_resume(struct request_queue *q, int err) 3464 { 3465 if (!q->dev) 3466 return; 3467 3468 spin_lock_irq(q->queue_lock); 3469 if (!err) { 3470 q->rpm_status = RPM_ACTIVE; 3471 __blk_run_queue(q); 3472 pm_runtime_mark_last_busy(q->dev); 3473 pm_request_autosuspend(q->dev); 3474 } else { 3475 q->rpm_status = RPM_SUSPENDED; 3476 } 3477 spin_unlock_irq(q->queue_lock); 3478 } 3479 EXPORT_SYMBOL(blk_post_runtime_resume); 3480 3481 /** 3482 * blk_set_runtime_active - Force runtime status of the queue to be active 3483 * @q: the queue of the device 3484 * 3485 * If the device is left runtime suspended during system suspend the resume 3486 * hook typically resumes the device and corrects runtime status 3487 * accordingly. However, that does not affect the queue runtime PM status 3488 * which is still "suspended". This prevents processing requests from the 3489 * queue. 3490 * 3491 * This function can be used in driver's resume hook to correct queue 3492 * runtime PM status and re-enable peeking requests from the queue. It 3493 * should be called before first request is added to the queue. 3494 */ 3495 void blk_set_runtime_active(struct request_queue *q) 3496 { 3497 spin_lock_irq(q->queue_lock); 3498 q->rpm_status = RPM_ACTIVE; 3499 pm_runtime_mark_last_busy(q->dev); 3500 pm_request_autosuspend(q->dev); 3501 spin_unlock_irq(q->queue_lock); 3502 } 3503 EXPORT_SYMBOL(blk_set_runtime_active); 3504 #endif 3505 3506 int __init blk_dev_init(void) 3507 { 3508 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS)); 3509 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 3510 FIELD_SIZEOF(struct request, cmd_flags)); 3511 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 3512 FIELD_SIZEOF(struct bio, bi_opf)); 3513 3514 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3515 kblockd_workqueue = alloc_workqueue("kblockd", 3516 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 3517 if (!kblockd_workqueue) 3518 panic("Failed to create kblockd\n"); 3519 3520 request_cachep = kmem_cache_create("blkdev_requests", 3521 sizeof(struct request), 0, SLAB_PANIC, NULL); 3522 3523 blk_requestq_cachep = kmem_cache_create("request_queue", 3524 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 3525 3526 return 0; 3527 } 3528