// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
#include "blk-ioprio.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

static DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
static struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
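
/*
 * Illustrative use of the two helpers above (a sketch, not code from this
 * file; QUEUE_FLAG_NOMERGES is just one example flag):
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	...
 *	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 *
 * Both helpers are atomic bitops on q->queue_flags, so no additional
 * locking is needed around individual flag updates.
 */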

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing a bio or request. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(enum req_op op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_RESV_CONFLICT]	= { -EBADE,	"reservation conflict" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* Command duration limit device-side timeout */
	[BLK_STS_DURATION_LIMIT]	= { -ETIME, "duration limit exceeded" },

	[BLK_STS_INVAL]		= { -EINVAL,	"invalid" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}
EXPORT_SYMBOL_GPL(blk_status_to_str);
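
/*
 * Illustrative round trip through the blk_errors[] table above (a sketch,
 * not code from this file):
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);  // BLK_STS_NOSPC
 *	int err = blk_status_to_errno(sts);               // -ENOSPC again
 *
 * Errnos without an entry in blk_errors[] collapse to BLK_STS_IOERR, so the
 * mapping only round-trips for the listed values.
 */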

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 */
void blk_sync_queue(struct request_queue *q)
{
	timer_delete_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head,
			struct request_queue, rcu_head);

	percpu_ref_exit(&q->q_usage_counter);
	kmem_cache_free(blk_requestq_cachep, q);
}

static void blk_free_queue(struct request_queue *q)
{
	blk_free_queue_stats(q->stats);
	if (queue_is_mq(q))
		blk_mq_release(q);

	ida_free(&blk_queue_ida, q->id);
	lockdep_unregister_key(&q->io_lock_cls_key);
	lockdep_unregister_key(&q->q_lock_cls_key);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue and frees it when the
 * refcount reaches 0.
 */
void blk_put_queue(struct request_queue *q)
{
	if (refcount_dec_and_test(&q->refs))
		blk_free_queue(q);
}
EXPORT_SYMBOL(blk_put_queue);

bool blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	bool freeze = __blk_freeze_queue_start(q, current);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);

	return freeze;
}

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EAGAIN;

		/*
		 * This read pairs with the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	rwsem_acquire_read(&q->q_lockdep_map, 0, 0, _RET_IP_);
	rwsem_release(&q->q_lockdep_map, _RET_IP_);
	return 0;
}
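
/*
 * Illustrative pairing for blk_queue_enter() above (a sketch, not code from
 * this file): every successful enter must be balanced by blk_queue_exit(),
 * defined below:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT) == 0) {
 *		... issue work against q ...
 *		blk_queue_exit(q);
 *	} else {
 *		... queue is frozen, dying or PM-suspended ...
 *	}
 */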

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EAGAIN;
		}

		/*
		 * This read pairs with the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
	rwsem_release(&q->io_lockdep_map, _RET_IP_);
	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = timer_container_of(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}
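
/*
 * Illustrative flow for the two helpers above (a sketch only; the arming
 * side lives in blk_add_timer() in blk-timeout.c): the per-queue timer is
 * armed when a request is started, and expiry merely punts the timeout scan
 * to kblockd:
 *
 *	mod_timer(&q->timeout, jiffies + q->rq_timeout);  // arm (simplified)
 *	... timer fires in softirq context ...
 *	kblockd_schedule_work(&q->timeout_work);          // the handler above
 */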

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
{
	struct request_queue *q;
	int error;

	q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
				  node_id);
	if (!q)
		return ERR_PTR(-ENOMEM);

	q->last_merge = NULL;

	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
	if (q->id < 0) {
		error = q->id;
		goto fail_q;
	}

	q->stats = blk_alloc_queue_stats();
	if (!q->stats) {
		error = -ENOMEM;
		goto fail_id;
	}

	error = blk_set_default_limits(lim);
	if (error)
		goto fail_stats;
	q->limits = *lim;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);

	refcount_set(&q->refs, 1);
	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->elevator_lock);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->limits_lock);
	mutex_init(&q->rq_qos_mutex);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	blkg_init_queue(q);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	error = percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
	if (error)
		goto fail_stats;
	lockdep_register_key(&q->io_lock_cls_key);
	lockdep_register_key(&q->q_lock_cls_key);
	lockdep_init_map(&q->io_lockdep_map, "&q->q_usage_counter(io)",
			 &q->io_lock_cls_key, 0);
	lockdep_init_map(&q->q_lockdep_map, "&q->q_usage_counter(queue)",
			 &q->q_lock_cls_key, 0);

	/* Teach lockdep about lock ordering (reclaim WRT queue freeze lock). */
	fs_reclaim_acquire(GFP_KERNEL);
	rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
	rwsem_release(&q->io_lockdep_map, _RET_IP_);
	fs_reclaim_release(GFP_KERNEL);

	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_stats:
	blk_free_queue_stats(q->stats);
fail_id:
	ida_free(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return ERR_PTR(error);
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dying(q)))
		return false;
	refcount_inc(&q->refs);
	return true;
}
EXPORT_SYMBOL(blk_get_queue);

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return bdev_test_flag(part, BD_MAKE_IT_FAIL) &&
		should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline void bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return;

		if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))
			return;

		bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);

		/*
		 * Using an ioctl to set the underlying disk of a raid/dm
		 * device to read-only will trigger this.
		 */
		pr_warn("Trying to write to read-only block-device %pg\n",
			bio->bi_bdev);
	}
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
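
/*
 * Illustrative usage of the fault injection hooks above (a sketch based on
 * the fault-injection documentation; "sda" is a hypothetical device): the
 * attributes can be seeded on the kernel command line as
 *
 *	fail_make_request=<interval>,<probability>,<space>,<times>
 *
 * and then enabled per block device via sysfs:
 *
 *	echo 1 > /sys/block/sda/make-it-fail
 */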

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		if (!maxsector)
			return -EIO;
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
				    current->comm, bio->bi_bdev, bio->bi_opf,
				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!bdev_is_zoned(bio->bi_bdev))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static void __submit_bio(struct bio *bio)
{
	/* If plug is not used, add new plug here to cache nsecs time. */
	struct blk_plug plug;

	if (unlikely(!blk_crypto_bio_prep(&bio)))
		return;

	blk_start_plug(&plug);

	if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
		blk_mq_submit_bio(bio);
	} else if (likely(bio_queue_enter(bio) == 0)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if ((bio->bi_opf & REQ_POLLED) &&
		    !(disk->queue->limits.features & BLK_FEAT_POLL)) {
			bio->bi_status = BLK_STS_NOTSUPP;
			bio_endio(bio);
		} else {
			disk->fops->submit_bio(bio);
		}
		blk_queue_exit(disk->queue);
	}

	blk_finish_plug(&plug);
}
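
/*
 * Illustrative stacking driver, to motivate the loop explained below (a
 * sketch; struct my_dev, md->lower_bdev and md->start_sect are hypothetical):
 *
 *	static void my_stacked_submit_bio(struct bio *bio)
 *	{
 *		struct my_dev *md = bio->bi_bdev->bd_disk->private_data;
 *
 *		bio_set_dev(bio, md->lower_bdev);
 *		bio->bi_iter.bi_sector += md->start_sect;
 *		submit_bio_noacct(bio);
 *	}
 *
 * The recursive submit_bio_noacct() call does not recurse into ->submit_bio:
 * it only queues the bio on current->bio_list, bounding stack usage.
 */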

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added. ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct. If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 * ->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

void submit_bio_noacct_nocheck(struct bio *bio)
{
	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/*
		 * Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem. Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it has returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}

static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
						      struct bio *bio)
{
	if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
		return BLK_STS_INVAL;

	if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
		return BLK_STS_INVAL;

	return BLK_STS_OK;
}
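
/*
 * Worked example for the checks above (illustrative numbers): with
 * queue_atomic_write_unit_min_bytes() == 4096 and
 * queue_atomic_write_unit_max_bytes() == 65536, a 16 KiB atomic write bio
 * passes both checks, a 6 KiB bio fails the modulo check (6144 % 4096 != 0)
 * and a 128 KiB bio exceeds the maximum; both failing bios are completed
 * with BLK_STS_INVAL by the caller below.
 */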

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers. All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;

	might_sleep();

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if the queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	bio_check_ro(bio);
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev_is_partition(bdev) &&
		    unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bios early so that bio-based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf)) {
		if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
				 bio_op(bio) != REQ_OP_ZONE_APPEND))
			goto end_io;
		if (!bdev_write_cache(bdev)) {
			bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
			if (!bio_sectors(bio)) {
				status = BLK_STS_OK;
				goto end_io;
			}
		}
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		break;
	case REQ_OP_WRITE:
		if (bio->bi_opf & REQ_ATOMIC) {
			status = blk_validate_atomic_write_op_size(q, bio);
			if (status != BLK_STS_OK)
				goto end_io;
		}
		break;
	case REQ_OP_FLUSH:
		/*
		 * REQ_OP_FLUSH can't be submitted through bios, it is only
		 * synthesized in struct request by the flush state machine.
		 */
		goto not_supported;
	case REQ_OP_DISCARD:
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!bdev_max_secure_erase_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
	case REQ_OP_ZONE_RESET_ALL:
		if (!bdev_is_zoned(bio->bi_bdev))
			goto not_supported;
		break;
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/*
		 * Driver private operations are only used with passthrough
		 * requests.
		 */
		fallthrough;
	default:
		goto not_supported;
	}

	if (blk_throtl_bio(bio))
		return;
	submit_bio_noacct_nocheck(bio);
	return;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

static void bio_set_ioprio(struct bio *bio)
{
	/* Nobody set ioprio so far? Initialize it based on task's nice value */
	if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
		bio->bi_ioprio = get_current_ioprio();
	blkcg_set_ioprio(bio);
}
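
/*
 * Illustrative caller-side sequence for submit_bio(), documented below (a
 * sketch; my_end_io is a hypothetical completion callback):
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;	// typically also drops the bio ref
 *	submit_bio(bio);
 */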

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices. It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done. The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_READ) {
		task_io_account_read(bio->bi_iter.bi_size);
		count_vm_events(PGPGIN, bio_sectors(bio));
	} else if (bio_op(bio) == REQ_OP_WRITE) {
		count_vm_events(PGPGOUT, bio_sectors(bio));
	}

	bio_set_ioprio(bio);
	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on the queue associated with the bio. Returns the
 * number of completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in an RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	struct block_device *bdev;
	struct request_queue *q;
	int ret = 0;

	bdev = READ_ONCE(bio->bi_bdev);
	if (!bdev)
		return 0;

	q = bdev_get_queue(bdev);
	if (cookie == BLK_QC_T_NONE)
		return 0;

	blk_flush_plug(current->plug, false);

	/*
	 * We need to be able to enter a frozen queue, similar to how
	 * timeouts also need to do that. If that is blocked, then we can
	 * have pending IO when a queue freeze is started, and then the
	 * wait for the freeze to finish will wait for polled requests to
	 * timeout as the poller is prevented from entering the queue and
	 * completing them. As long as we prevent new IO from being queued,
	 * that should be all that matters.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return 0;
	if (queue_is_mq(q)) {
		ret = blk_mq_poll(q, cookie, iob, flags);
	} else {
		struct gendisk *disk = q->disk;

		if ((q->limits.features & BLK_FEAT_POLL) && disk &&
		    disk->fops->poll_bio)
			ret = disk->fops->poll_bio(bio, iob, flags);
	}
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);
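
/*
 * Illustrative polled-I/O loop for bio_poll() above (a sketch; "done" would
 * be set from the bio's completion handler):
 *
 *	bio->bi_opf |= REQ_POLLED;
 *	submit_bio(bio);
 *	while (!READ_ONCE(done))
 *		bio_poll(bio, NULL, 0);
 */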

/*
 * Helper to implement file_operations.iopoll. Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point. If that happens
	 * we have a few cases to consider:
	 *
	 * 1) the bio is being initialized and bi_bdev is NULL. We simply do
	 *    nothing in this case.
	 * 2) the bio points to a device that is not poll enabled. bio_poll()
	 *    will catch this and return 0.
	 * 3) the bio points to a poll capable device, including but not
	 *    limited to the one that the original bio pointed to. In this
	 *    case we will call into the actual poll method and poll for I/O,
	 *    even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid. Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp)) &&
	    likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
	    (end || bdev_count_inflight(part)))
		__part_stat_add(part, io_ticks, now - stamp);

	if (bdev_is_partition(part)) {
		part = bdev_whole(part);
		goto again;
	}
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time)
{
	part_stat_lock();
	update_io_ticks(bdev, start_time, false);
	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}
EXPORT_SYMBOL(bdev_start_io_acct);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(bdev, now, true);
	part_stat_inc(bdev, ios[sgrp]);
	part_stat_add(bdev, sectors[sgrp], sectors);
	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(bdev_end_io_acct);

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
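
/*
 * Illustrative accounting pattern for a bio-based driver using the helpers
 * above (a sketch; the split across submission and completion paths is the
 * point):
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	... perform the I/O; then, from the completion path ...
 *	bio_end_io_acct(bio, start);
 */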

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must first set
 *    their own exporting function using blk_queue_lld_busy().
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy. This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->cur_ktime = 0;
	rq_list_init(&plug->mq_list);
	rq_list_init(&plug->cached_rqs);
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
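
/*
 * Typical plugging pattern around batched submission (a sketch, not code
 * from this file):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (...)
 *		submit_bio(bio);	// batched on the plug
 *	blk_finish_plug(&plug);		// flushes the batch to the driver
 */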

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch. The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called. However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 * Tracking blk_plug inside the task_struct will help with auto-flushing the
 * pending I/O should the task end up blocking between blk_start_plug() and
 * blk_finish_plug(). This is important from a performance perspective, but
 * also ensures that we don't deadlock. For instance, if the task is blocking
 * for a memory allocation, memory reclaim could end up wanting to free a
 * page belonging to that request that is currently residing in our private
 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
 * this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
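
/*
 * Illustrative use of blk_check_plugged() above (a sketch; my_unplug and
 * my_dev are hypothetical stand-ins for a stacking driver's callback and
 * state):
 *
 *	struct blk_plug_cb *cb = blk_check_plugged(my_unplug, my_dev,
 *						   sizeof(*cb));
 *	if (!cb)
 *		... no plug active, dispatch immediately ...
 *
 * Otherwise my_unplug() runs when the task's plug is flushed.
 */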

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we now hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(&plug->cached_rqs)))
		blk_mq_free_plug_rqs(plug);

	plug->cur_ktime = 0;
	current->flags &= ~PF_BLOCK_TS;
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete. This function
 * must be paired with an initial call to blk_start_plug(). The intent
 * is to allow the block layer to optimize I/O submission. See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		__blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = KMEM_CACHE(request_queue, SLAB_PANIC);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}