// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;
struct kmem_cache *blk_requestq_srcu_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
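
/*
 * Illustrative note (not taken from this file): drivers typically use these
 * helpers at probe time to advertise queue features, e.g. a driver for a
 * non-rotational device might do:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *
 * Both helpers are atomic bitops on q->queue_flags, so no extra locking is
 * needed around them.
 */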

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG,	"dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}
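
/*
 * Illustrative note (not part of this file): request-based drivers usually
 * complete requests with a blk_status_t and convert from an errno at the
 * boundary, e.g.:
 *
 *	blk_mq_end_request(rq, errno_to_blk_status(err));
 *
 * while upper layers map the status back with blk_status_to_errno() when
 * reporting the result to user space.
 */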

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 * atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
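
/*
 * Mark the queue as draining: start a queue freeze and wake up any waiters
 * so that blk_queue_enter() re-examines the DYING flag.
 */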
void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it. All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
	blk_queue_start_drain(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	/* cleanup rq qos structures for queue without disk */
	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	blk_sync_queue(q);
	if (queue_is_mq(q)) {
		blk_mq_cancel_work_sync(q);
		blk_mq_exit_queue(q);
	}

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_rqs(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * Read memory barrier pairing with the one in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}
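
/*
 * Bio submission variant of blk_queue_enter(): waits for a queue freeze to
 * end unless the bio carries REQ_NOWAIT, and fails the bio with an error if
 * the disk has gone away in the meantime.
 */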
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EBUSY;
		}

		/*
		 * Read memory barrier pairing with the one in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}
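
/*
 * Drop the queue usage reference taken by a successful blk_queue_enter() or
 * bio_queue_enter() call.
 */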
void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
			GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	if (alloc_srcu) {
		blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
		if (init_srcu_struct(q->srcu) != 0)
			goto fail_q;
	}

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_srcu;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_split;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_stats:
	blk_free_queue_stats(q->stats);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_srcu:
	if (alloc_srcu)
		cleanup_srcu_struct(q->srcu);
fail_q:
	kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("%s: attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    current->comm,
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						       NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
			  "Trying to write to read-only block-device %s (partno %d)\n",
			  bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}
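
/*
 * Validate a bio before it is submitted: apply fault injection, warn on
 * writes to read-only devices, reject I/O beyond the end of the device,
 * remap partition offsets, filter out operations the queue does not support,
 * and run the bio through throttling and cgroup accounting. Returns true if
 * the bio may be submitted, false if it has already been completed with an
 * error (or consumed by throttling).
 */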
noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bios early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return false;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}
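
/*
 * Submit a bio to a bio-based driver via its ->submit_bio method, with
 * blk-crypto preparation done first and a queue usage reference held across
 * the call.
 */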
static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
	if (blk_crypto_bio_prep(&bio)) {
		if (likely(bio_queue_enter(bio) == 0)) {
			disk->fops->submit_bio(bio);
			blk_queue_exit(disk->queue);
		}
	}
}

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (unlikely(!submit_bio_checks(bio)))
		return;

	if (!disk->fops->submit_bio)
		blk_mq_submit_bio(bio);
	else
		__submit_bio_fops(disk, bio);
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added. ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct. If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}
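
/*
 * Illustrative note (not part of this file): a stacking driver such as dm or
 * md typically redirects a cloned bio to one of its underlying devices and
 * resubmits it without accounting it a second time, roughly:
 *
 *	bio_set_dev(clone, lower_bdev);
 *	submit_bio_noacct(clone);
 */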

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers. All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem. Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices. It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done. The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bdev_get_queue(bio->bi_bdev)) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall. When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
		     bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;

		psi_memstall_enter(&pflags);
		submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);
		return;
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in an RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	int ret;

	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	if (current->plug)
		blk_flush_plug(current->plug, false);

	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
		return 0;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		ret = 0;	/* not yet implemented, should not happen */
	else
		ret = blk_mq_poll(q, cookie, iob, flags);
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

/*
 * Helper to implement file_operations.iopoll. Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point. If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL. We can simply
	 *     do nothing in this case
	 *  2) the bio points to a not poll enabled device. bio_poll will catch
	 *     this and return 0
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to. In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid. Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op,
					  unsigned long start_time)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();
	update_io_ticks(part, start_time, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}
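
/*
 * Illustrative note (not part of this file): bio based drivers account their
 * I/O by bracketing the life of a bio with these helpers, roughly:
 *
 *	start = bio_start_io_acct(bio);
 *	... process the bio ...
 *	bio_end_io_acct(bio, start);
 */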

/**
 * bio_start_io_acct_time - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 * @start_time: start time that should be passed back to bio_end_io_acct().
 */
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
{
	__part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
			     bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct_time);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
				    bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op, jiffies);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q: the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy. This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
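
/*
 * Illustrative note (not part of this file): submitters batch I/O by wrapping
 * their submission loop in a plug, roughly:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (...)
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */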

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch. The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called. However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 * Tracking blk_plug inside the task_struct will help with auto-flushing the
 * pending I/O should the task end up blocking between blk_start_plug() and
 * blk_finish_plug(). This is important from a performance perspective, but
 * also ensures that we don't deadlock. For instance, if the task is blocking
 * for a memory allocation, memory reclaim could end up wanting to free a
 * page belonging to that request that is currently residing in our private
 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
 * this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
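
/*
 * Flush everything attached to a plug: run the plug callbacks, dispatch the
 * plugged requests, and release any cached requests. Called both from
 * blk_finish_plug() and from the scheduler when a plugged task blocks.
 */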
void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	if (!rq_list_empty(plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we now hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete. This function
 * must be paired with an initial call to blk_start_plug(). The intent
 * is to allow the block layer to optimize I/O submission. See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));
	BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
			   __alignof__(struct request_queue)) !=
		     sizeof(struct request_queue));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
			sizeof(struct request_queue) +
			sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}