1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk> 4 */ 5 #include <linux/mm.h> 6 #include <linux/swap.h> 7 #include <linux/bio-integrity.h> 8 #include <linux/blkdev.h> 9 #include <linux/uio.h> 10 #include <linux/iocontext.h> 11 #include <linux/slab.h> 12 #include <linux/init.h> 13 #include <linux/kernel.h> 14 #include <linux/export.h> 15 #include <linux/mempool.h> 16 #include <linux/workqueue.h> 17 #include <linux/cgroup.h> 18 #include <linux/highmem.h> 19 #include <linux/blk-crypto.h> 20 #include <linux/xarray.h> 21 22 #include <trace/events/block.h> 23 #include "blk.h" 24 #include "blk-rq-qos.h" 25 #include "blk-cgroup.h" 26 27 #define ALLOC_CACHE_THRESHOLD 16 28 #define ALLOC_CACHE_MAX 256 29 30 struct bio_alloc_cache { 31 struct bio *free_list; 32 struct bio *free_list_irq; 33 unsigned int nr; 34 unsigned int nr_irq; 35 }; 36 37 #define BIO_INLINE_VECS 4 38 39 static struct biovec_slab { 40 int nr_vecs; 41 char *name; 42 struct kmem_cache *slab; 43 } bvec_slabs[] __read_mostly = { 44 { .nr_vecs = 16, .name = "biovec-16" }, 45 { .nr_vecs = 64, .name = "biovec-64" }, 46 { .nr_vecs = 128, .name = "biovec-128" }, 47 { .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" }, 48 }; 49 50 static struct biovec_slab *biovec_slab(unsigned short nr_vecs) 51 { 52 switch (nr_vecs) { 53 /* smaller bios use inline vecs */ 54 case 5 ... 16: 55 return &bvec_slabs[0]; 56 case 17 ... 64: 57 return &bvec_slabs[1]; 58 case 65 ... 128: 59 return &bvec_slabs[2]; 60 case 129 ... BIO_MAX_VECS: 61 return &bvec_slabs[3]; 62 default: 63 BUG(); 64 return NULL; 65 } 66 } 67 68 /* 69 * fs_bio_set is the bio_set containing bio and iovec memory pools used by 70 * IO code that does not need private memory pools. 71 */ 72 struct bio_set fs_bio_set; 73 EXPORT_SYMBOL(fs_bio_set); 74 75 /* 76 * Our slab pool management 77 */ 78 struct bio_slab { 79 struct kmem_cache *slab; 80 unsigned int slab_ref; 81 unsigned int slab_size; 82 char name[12]; 83 }; 84 static DEFINE_MUTEX(bio_slab_lock); 85 static DEFINE_XARRAY(bio_slabs); 86 87 static struct bio_slab *create_bio_slab(unsigned int size) 88 { 89 struct bio_slab *bslab = kzalloc_obj(*bslab); 90 91 if (!bslab) 92 return NULL; 93 94 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size); 95 bslab->slab = kmem_cache_create(bslab->name, size, 96 ARCH_KMALLOC_MINALIGN, 97 SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL); 98 if (!bslab->slab) 99 goto fail_alloc_slab; 100 101 bslab->slab_ref = 1; 102 bslab->slab_size = size; 103 104 if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL))) 105 return bslab; 106 107 kmem_cache_destroy(bslab->slab); 108 109 fail_alloc_slab: 110 kfree(bslab); 111 return NULL; 112 } 113 114 static inline unsigned int bs_bio_slab_size(struct bio_set *bs) 115 { 116 return bs->front_pad + sizeof(struct bio) + bs->back_pad; 117 } 118 119 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs) 120 { 121 unsigned int size = bs_bio_slab_size(bs); 122 struct bio_slab *bslab; 123 124 mutex_lock(&bio_slab_lock); 125 bslab = xa_load(&bio_slabs, size); 126 if (bslab) 127 bslab->slab_ref++; 128 else 129 bslab = create_bio_slab(size); 130 mutex_unlock(&bio_slab_lock); 131 132 if (bslab) 133 return bslab->slab; 134 return NULL; 135 } 136 137 static void bio_put_slab(struct bio_set *bs) 138 { 139 struct bio_slab *bslab = NULL; 140 unsigned int slab_size = bs_bio_slab_size(bs); 141 142 mutex_lock(&bio_slab_lock); 143 144 bslab = xa_load(&bio_slabs, slab_size); 145 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) 146 goto out; 147 148 WARN_ON_ONCE(bslab->slab != bs->bio_slab); 149 150 WARN_ON(!bslab->slab_ref); 151 152 if (--bslab->slab_ref) 153 goto out; 154 155 xa_erase(&bio_slabs, slab_size); 156 157 kmem_cache_destroy(bslab->slab); 158 kfree(bslab); 159 160 out: 161 mutex_unlock(&bio_slab_lock); 162 } 163 164 /* 165 * Make the first allocation restricted and don't dump info on allocation 166 * failures, since we'll fall back to the mempool in case of failure. 167 */ 168 static inline gfp_t try_alloc_gfp(gfp_t gfp) 169 { 170 return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) | 171 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; 172 } 173 174 void bio_uninit(struct bio *bio) 175 { 176 #ifdef CONFIG_BLK_CGROUP 177 if (bio->bi_blkg) { 178 blkg_put(bio->bi_blkg); 179 bio->bi_blkg = NULL; 180 } 181 #endif 182 if (bio_integrity(bio)) 183 bio_integrity_free(bio); 184 185 bio_crypt_free_ctx(bio); 186 } 187 EXPORT_SYMBOL(bio_uninit); 188 189 static void bio_free(struct bio *bio) 190 { 191 struct bio_set *bs = bio->bi_pool; 192 void *p = bio; 193 194 WARN_ON_ONCE(!bs); 195 WARN_ON_ONCE(bio->bi_max_vecs > BIO_MAX_VECS); 196 197 bio_uninit(bio); 198 if (bio->bi_max_vecs == BIO_MAX_VECS) 199 mempool_free(bio->bi_io_vec, &bs->bvec_pool); 200 else if (bio->bi_max_vecs > BIO_INLINE_VECS) 201 kmem_cache_free(biovec_slab(bio->bi_max_vecs)->slab, 202 bio->bi_io_vec); 203 mempool_free(p - bs->front_pad, &bs->bio_pool); 204 } 205 206 /* 207 * Users of this function have their own bio allocation. Subsequently, 208 * they must remember to pair any call to bio_init() with bio_uninit() 209 * when IO has completed, or when the bio is released. 210 */ 211 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, 212 unsigned short max_vecs, blk_opf_t opf) 213 { 214 bio->bi_next = NULL; 215 bio->bi_bdev = bdev; 216 bio->bi_opf = opf; 217 bio->bi_flags = 0; 218 bio->bi_ioprio = 0; 219 bio->bi_write_hint = 0; 220 bio->bi_write_stream = 0; 221 bio->bi_status = 0; 222 bio->bi_bvec_gap_bit = 0; 223 bio->bi_iter.bi_sector = 0; 224 bio->bi_iter.bi_size = 0; 225 bio->bi_iter.bi_idx = 0; 226 bio->bi_iter.bi_bvec_done = 0; 227 bio->bi_end_io = NULL; 228 bio->bi_private = NULL; 229 #ifdef CONFIG_BLK_CGROUP 230 bio->bi_blkg = NULL; 231 bio->issue_time_ns = 0; 232 if (bdev) 233 bio_associate_blkg(bio); 234 #ifdef CONFIG_BLK_CGROUP_IOCOST 235 bio->bi_iocost_cost = 0; 236 #endif 237 #endif 238 #ifdef CONFIG_BLK_INLINE_ENCRYPTION 239 bio->bi_crypt_context = NULL; 240 #endif 241 #ifdef CONFIG_BLK_DEV_INTEGRITY 242 bio->bi_integrity = NULL; 243 #endif 244 bio->bi_vcnt = 0; 245 246 atomic_set(&bio->__bi_remaining, 1); 247 atomic_set(&bio->__bi_cnt, 1); 248 bio->bi_cookie = BLK_QC_T_NONE; 249 250 bio->bi_max_vecs = max_vecs; 251 bio->bi_io_vec = table; 252 bio->bi_pool = NULL; 253 } 254 EXPORT_SYMBOL(bio_init); 255 256 /** 257 * bio_reset - reinitialize a bio 258 * @bio: bio to reset 259 * @bdev: block device to use the bio for 260 * @opf: operation and flags for bio 261 * 262 * Description: 263 * After calling bio_reset(), @bio will be in the same state as a freshly 264 * allocated bio returned bio bio_alloc_bioset() - the only fields that are 265 * preserved are the ones that are initialized by bio_alloc_bioset(). See 266 * comment in struct bio. 267 */ 268 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf) 269 { 270 struct bio_vec *bv = bio->bi_io_vec; 271 272 bio_uninit(bio); 273 memset(bio, 0, BIO_RESET_BYTES); 274 atomic_set(&bio->__bi_remaining, 1); 275 bio->bi_io_vec = bv; 276 bio->bi_bdev = bdev; 277 if (bio->bi_bdev) 278 bio_associate_blkg(bio); 279 bio->bi_opf = opf; 280 } 281 EXPORT_SYMBOL(bio_reset); 282 283 /** 284 * bio_reuse - reuse a bio with the payload left intact 285 * @bio: bio to reuse 286 * @opf: operation and flags for the next I/O 287 * 288 * Allow reusing an existing bio for another operation with all set up 289 * fields including the payload, device and end_io handler left intact. 290 * 291 * Typically used when @bio is first used to read data which is then written 292 * to another location without modification. @bio must not be in-flight and 293 * owned by the caller. Can't be used for cloned bios. 294 * 295 * Note: Can't be used when @bio has integrity or blk-crypto contexts for now. 296 * Feel free to add that support when you need it, though. 297 */ 298 void bio_reuse(struct bio *bio, blk_opf_t opf) 299 { 300 unsigned short vcnt = bio->bi_vcnt, i; 301 bio_end_io_t *end_io = bio->bi_end_io; 302 void *private = bio->bi_private; 303 304 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); 305 WARN_ON_ONCE(bio_integrity(bio)); 306 WARN_ON_ONCE(bio_has_crypt_ctx(bio)); 307 308 bio_reset(bio, bio->bi_bdev, opf); 309 for (i = 0; i < vcnt; i++) 310 bio->bi_iter.bi_size += bio->bi_io_vec[i].bv_len; 311 bio->bi_vcnt = vcnt; 312 bio->bi_private = private; 313 bio->bi_end_io = end_io; 314 } 315 EXPORT_SYMBOL_GPL(bio_reuse); 316 317 static struct bio *__bio_chain_endio(struct bio *bio) 318 { 319 struct bio *parent = bio->bi_private; 320 321 if (bio->bi_status && !parent->bi_status) 322 parent->bi_status = bio->bi_status; 323 bio_put(bio); 324 return parent; 325 } 326 327 /* 328 * This function should only be used as a flag and must never be called. 329 * If execution reaches here, it indicates a serious programming error. 330 */ 331 static void bio_chain_endio(struct bio *bio) 332 { 333 BUG(); 334 } 335 336 /** 337 * bio_chain - chain bio completions 338 * @bio: the target bio 339 * @parent: the parent bio of @bio 340 * 341 * The caller won't have a bi_end_io called when @bio completes - instead, 342 * @parent's bi_end_io won't be called until both @parent and @bio have 343 * completed; the chained bio will also be freed when it completes. 344 * 345 * The caller must not set bi_private or bi_end_io in @bio. 346 */ 347 void bio_chain(struct bio *bio, struct bio *parent) 348 { 349 BUG_ON(bio->bi_private || bio->bi_end_io); 350 351 bio->bi_private = parent; 352 bio->bi_end_io = bio_chain_endio; 353 bio_inc_remaining(parent); 354 } 355 EXPORT_SYMBOL(bio_chain); 356 357 /** 358 * bio_chain_and_submit - submit a bio after chaining it to another one 359 * @prev: bio to chain and submit 360 * @new: bio to chain to 361 * 362 * If @prev is non-NULL, chain it to @new and submit it. 363 * 364 * Return: @new. 365 */ 366 struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new) 367 { 368 if (prev) { 369 bio_chain(prev, new); 370 submit_bio(prev); 371 } 372 return new; 373 } 374 375 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, 376 unsigned int nr_pages, blk_opf_t opf, gfp_t gfp) 377 { 378 return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp)); 379 } 380 EXPORT_SYMBOL_GPL(blk_next_bio); 381 382 static void bio_alloc_rescue(struct work_struct *work) 383 { 384 struct bio_set *bs = container_of(work, struct bio_set, rescue_work); 385 struct bio *bio; 386 387 while (1) { 388 spin_lock(&bs->rescue_lock); 389 bio = bio_list_pop(&bs->rescue_list); 390 spin_unlock(&bs->rescue_lock); 391 392 if (!bio) 393 break; 394 395 submit_bio_noacct(bio); 396 } 397 } 398 399 /* 400 * submit_bio_noacct() converts recursion to iteration; this means if we're 401 * running beneath it, any bios we allocate and submit will not be submitted 402 * (and thus freed) until after we return. 403 * 404 * This exposes us to a potential deadlock if we allocate multiple bios from the 405 * same bio_set while running underneath submit_bio_noacct(). If we were to 406 * allocate multiple bios (say a stacking block driver that was splitting bios), 407 * we would deadlock if we exhausted the mempool's reserve. 408 * 409 * We solve this, and guarantee forward progress by punting the bios on 410 * current->bio_list to a per bio_set rescuer workqueue before blocking to wait 411 * for elements being returned to the mempool. 412 */ 413 static void punt_bios_to_rescuer(struct bio_set *bs) 414 { 415 struct bio_list punt, nopunt; 416 struct bio *bio; 417 418 if (!current->bio_list || !bs->rescue_workqueue) 419 return; 420 if (bio_list_empty(¤t->bio_list[0]) && 421 bio_list_empty(¤t->bio_list[1])) 422 return; 423 424 /* 425 * In order to guarantee forward progress we must punt only bios that 426 * were allocated from this bio_set; otherwise, if there was a bio on 427 * there for a stacking driver higher up in the stack, processing it 428 * could require allocating bios from this bio_set, and doing that from 429 * our own rescuer would be bad. 430 * 431 * Since bio lists are singly linked, pop them all instead of trying to 432 * remove from the middle of the list: 433 */ 434 435 bio_list_init(&punt); 436 bio_list_init(&nopunt); 437 438 while ((bio = bio_list_pop(¤t->bio_list[0]))) 439 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); 440 current->bio_list[0] = nopunt; 441 442 bio_list_init(&nopunt); 443 while ((bio = bio_list_pop(¤t->bio_list[1]))) 444 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); 445 current->bio_list[1] = nopunt; 446 447 spin_lock(&bs->rescue_lock); 448 bio_list_merge(&bs->rescue_list, &punt); 449 spin_unlock(&bs->rescue_lock); 450 451 queue_work(bs->rescue_workqueue, &bs->rescue_work); 452 } 453 454 static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache) 455 { 456 unsigned long flags; 457 458 /* cache->free_list must be empty */ 459 if (WARN_ON_ONCE(cache->free_list)) 460 return; 461 462 local_irq_save(flags); 463 cache->free_list = cache->free_list_irq; 464 cache->free_list_irq = NULL; 465 cache->nr += cache->nr_irq; 466 cache->nr_irq = 0; 467 local_irq_restore(flags); 468 } 469 470 static struct bio *bio_alloc_percpu_cache(struct bio_set *bs) 471 { 472 struct bio_alloc_cache *cache; 473 struct bio *bio; 474 475 cache = per_cpu_ptr(bs->cache, get_cpu()); 476 if (!cache->free_list) { 477 if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD) 478 bio_alloc_irq_cache_splice(cache); 479 if (!cache->free_list) { 480 put_cpu(); 481 return NULL; 482 } 483 } 484 bio = cache->free_list; 485 cache->free_list = bio->bi_next; 486 cache->nr--; 487 put_cpu(); 488 bio->bi_pool = bs; 489 return bio; 490 } 491 492 /** 493 * bio_alloc_bioset - allocate a bio for I/O 494 * @bdev: block device to allocate the bio for (can be %NULL) 495 * @nr_vecs: number of bvecs to pre-allocate 496 * @opf: operation and flags for bio 497 * @gfp: the GFP_* mask given to the slab allocator 498 * @bs: the bio_set to allocate from. 499 * 500 * Allocate a bio from the mempools in @bs. 501 * 502 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to 503 * allocate a bio. This is due to the mempool guarantees. To make this work, 504 * callers must never allocate more than 1 bio at a time from the general pool. 505 * Callers that need to allocate more than 1 bio must always submit the 506 * previously allocated bio for IO before attempting to allocate a new one. 507 * Failure to do so can cause deadlocks under memory pressure. 508 * 509 * Note that when running under submit_bio_noacct() (i.e. any block driver), 510 * bios are not submitted until after you return - see the code in 511 * submit_bio_noacct() that converts recursion into iteration, to prevent 512 * stack overflows. 513 * 514 * This would normally mean allocating multiple bios under submit_bio_noacct() 515 * would be susceptible to deadlocks, but we have 516 * deadlock avoidance code that resubmits any blocked bios from a rescuer 517 * thread. 518 * 519 * However, we do not guarantee forward progress for allocations from other 520 * mempools. Doing multiple allocations from the same mempool under 521 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad 522 * for per bio allocations. 523 * 524 * Returns: Pointer to new bio on success, NULL on failure. 525 */ 526 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, 527 blk_opf_t opf, gfp_t gfp, struct bio_set *bs) 528 { 529 struct bio_vec *bvecs = NULL; 530 struct bio *bio = NULL; 531 gfp_t saved_gfp = gfp; 532 void *p; 533 534 /* should not use nobvec bioset for nr_vecs > 0 */ 535 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) 536 return NULL; 537 538 gfp = try_alloc_gfp(gfp); 539 if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { 540 /* 541 * Set REQ_ALLOC_CACHE even if no cached bio is available to 542 * return the allocated bio to the percpu cache when done. 543 */ 544 opf |= REQ_ALLOC_CACHE; 545 bio = bio_alloc_percpu_cache(bs); 546 } else { 547 opf &= ~REQ_ALLOC_CACHE; 548 p = kmem_cache_alloc(bs->bio_slab, gfp); 549 if (p) 550 bio = p + bs->front_pad; 551 } 552 553 if (bio && nr_vecs > BIO_INLINE_VECS) { 554 struct biovec_slab *bvs = biovec_slab(nr_vecs); 555 556 /* 557 * Upgrade nr_vecs to take full advantage of the allocation. 558 * We also rely on this in bio_free(). 559 */ 560 nr_vecs = bvs->nr_vecs; 561 bvecs = kmem_cache_alloc(bvs->slab, gfp); 562 if (unlikely(!bvecs)) { 563 kmem_cache_free(bs->bio_slab, p); 564 bio = NULL; 565 } 566 } 567 568 if (unlikely(!bio)) { 569 /* 570 * Give up if we are not allow to sleep as non-blocking mempool 571 * allocations just go back to the slab allocation. 572 */ 573 if (!(saved_gfp & __GFP_DIRECT_RECLAIM)) 574 return NULL; 575 576 punt_bios_to_rescuer(bs); 577 578 /* 579 * Don't rob the mempools by returning to the per-CPU cache if 580 * we're tight on memory. 581 */ 582 opf &= ~REQ_ALLOC_CACHE; 583 584 p = mempool_alloc(&bs->bio_pool, saved_gfp); 585 bio = p + bs->front_pad; 586 if (nr_vecs > BIO_INLINE_VECS) { 587 nr_vecs = BIO_MAX_VECS; 588 bvecs = mempool_alloc(&bs->bvec_pool, saved_gfp); 589 } 590 } 591 592 if (nr_vecs && nr_vecs <= BIO_INLINE_VECS) 593 bio_init_inline(bio, bdev, nr_vecs, opf); 594 else 595 bio_init(bio, bdev, bvecs, nr_vecs, opf); 596 bio->bi_pool = bs; 597 return bio; 598 } 599 EXPORT_SYMBOL(bio_alloc_bioset); 600 601 /** 602 * bio_kmalloc - kmalloc a bio 603 * @nr_vecs: number of bio_vecs to allocate 604 * @gfp_mask: the GFP_* mask given to the slab allocator 605 * 606 * Use kmalloc to allocate a bio (including bvecs). The bio must be initialized 607 * using bio_init() before use. To free a bio returned from this function use 608 * kfree() after calling bio_uninit(). A bio returned from this function can 609 * be reused by calling bio_uninit() before calling bio_init() again. 610 * 611 * Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this 612 * function are not backed by a mempool can fail. Do not use this function 613 * for allocations in the file system I/O path. 614 * 615 * Returns: Pointer to new bio on success, NULL on failure. 616 */ 617 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) 618 { 619 struct bio *bio; 620 621 if (nr_vecs > BIO_MAX_INLINE_VECS) 622 return NULL; 623 return kmalloc(sizeof(*bio) + nr_vecs * sizeof(struct bio_vec), 624 gfp_mask); 625 } 626 EXPORT_SYMBOL(bio_kmalloc); 627 628 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) 629 { 630 struct bio_vec bv; 631 struct bvec_iter iter; 632 633 __bio_for_each_segment(bv, bio, iter, start) 634 memzero_bvec(&bv); 635 } 636 EXPORT_SYMBOL(zero_fill_bio_iter); 637 638 /** 639 * bio_truncate - truncate the bio to small size of @new_size 640 * @bio: the bio to be truncated 641 * @new_size: new size for truncating the bio 642 * 643 * Description: 644 * Truncate the bio to new size of @new_size. If bio_op(bio) is 645 * REQ_OP_READ, zero the truncated part. This function should only 646 * be used for handling corner cases, such as bio eod. 647 */ 648 static void bio_truncate(struct bio *bio, unsigned new_size) 649 { 650 struct bio_vec bv; 651 struct bvec_iter iter; 652 unsigned int done = 0; 653 bool truncated = false; 654 655 if (new_size >= bio->bi_iter.bi_size) 656 return; 657 658 if (bio_op(bio) != REQ_OP_READ) 659 goto exit; 660 661 bio_for_each_segment(bv, bio, iter) { 662 if (done + bv.bv_len > new_size) { 663 size_t offset; 664 665 if (!truncated) 666 offset = new_size - done; 667 else 668 offset = 0; 669 memzero_page(bv.bv_page, bv.bv_offset + offset, 670 bv.bv_len - offset); 671 truncated = true; 672 } 673 done += bv.bv_len; 674 } 675 676 exit: 677 /* 678 * Don't touch bvec table here and make it really immutable, since 679 * fs bio user has to retrieve all pages via bio_for_each_segment_all 680 * in its .end_bio() callback. 681 * 682 * It is enough to truncate bio by updating .bi_size since we can make 683 * correct bvec with the updated .bi_size for drivers. 684 */ 685 bio->bi_iter.bi_size = new_size; 686 } 687 688 /** 689 * guard_bio_eod - truncate a BIO to fit the block device 690 * @bio: bio to truncate 691 * 692 * This allows us to do IO even on the odd last sectors of a device, even if the 693 * block size is some multiple of the physical sector size. 694 * 695 * We'll just truncate the bio to the size of the device, and clear the end of 696 * the buffer head manually. Truly out-of-range accesses will turn into actual 697 * I/O errors, this only handles the "we need to be able to do I/O at the final 698 * sector" case. 699 */ 700 void guard_bio_eod(struct bio *bio) 701 { 702 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); 703 704 if (!maxsector) 705 return; 706 707 /* 708 * If the *whole* IO is past the end of the device, 709 * let it through, and the IO layer will turn it into 710 * an EIO. 711 */ 712 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) 713 return; 714 715 maxsector -= bio->bi_iter.bi_sector; 716 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) 717 return; 718 719 bio_truncate(bio, maxsector << 9); 720 } 721 722 static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache, 723 unsigned int nr) 724 { 725 unsigned int i = 0; 726 struct bio *bio; 727 728 while ((bio = cache->free_list) != NULL) { 729 cache->free_list = bio->bi_next; 730 cache->nr--; 731 bio_free(bio); 732 if (++i == nr) 733 break; 734 } 735 return i; 736 } 737 738 static void bio_alloc_cache_prune(struct bio_alloc_cache *cache, 739 unsigned int nr) 740 { 741 nr -= __bio_alloc_cache_prune(cache, nr); 742 if (!READ_ONCE(cache->free_list)) { 743 bio_alloc_irq_cache_splice(cache); 744 __bio_alloc_cache_prune(cache, nr); 745 } 746 } 747 748 static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node) 749 { 750 struct bio_set *bs; 751 752 bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead); 753 if (bs->cache) { 754 struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); 755 756 bio_alloc_cache_prune(cache, -1U); 757 } 758 return 0; 759 } 760 761 static void bio_alloc_cache_destroy(struct bio_set *bs) 762 { 763 int cpu; 764 765 if (!bs->cache) 766 return; 767 768 cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); 769 for_each_possible_cpu(cpu) { 770 struct bio_alloc_cache *cache; 771 772 cache = per_cpu_ptr(bs->cache, cpu); 773 bio_alloc_cache_prune(cache, -1U); 774 } 775 free_percpu(bs->cache); 776 bs->cache = NULL; 777 } 778 779 static inline void bio_put_percpu_cache(struct bio *bio) 780 { 781 struct bio_alloc_cache *cache; 782 783 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); 784 if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) 785 goto out_free; 786 787 if (in_task()) { 788 bio_uninit(bio); 789 bio->bi_next = cache->free_list; 790 /* Not necessary but helps not to iopoll already freed bios */ 791 bio->bi_bdev = NULL; 792 cache->free_list = bio; 793 cache->nr++; 794 } else if (in_hardirq()) { 795 lockdep_assert_irqs_disabled(); 796 797 bio_uninit(bio); 798 bio->bi_next = cache->free_list_irq; 799 cache->free_list_irq = bio; 800 cache->nr_irq++; 801 } else { 802 goto out_free; 803 } 804 put_cpu(); 805 return; 806 out_free: 807 put_cpu(); 808 bio_free(bio); 809 } 810 811 /** 812 * bio_put - release a reference to a bio 813 * @bio: bio to release reference to 814 * 815 * Description: 816 * Put a reference to a &struct bio, either one you have gotten with 817 * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it. 818 **/ 819 void bio_put(struct bio *bio) 820 { 821 if (unlikely(bio_flagged(bio, BIO_REFFED))) { 822 BUG_ON(!atomic_read(&bio->__bi_cnt)); 823 if (!atomic_dec_and_test(&bio->__bi_cnt)) 824 return; 825 } 826 if (bio->bi_opf & REQ_ALLOC_CACHE) 827 bio_put_percpu_cache(bio); 828 else 829 bio_free(bio); 830 } 831 EXPORT_SYMBOL(bio_put); 832 833 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) 834 { 835 bio_set_flag(bio, BIO_CLONED); 836 bio->bi_ioprio = bio_src->bi_ioprio; 837 bio->bi_write_hint = bio_src->bi_write_hint; 838 bio->bi_write_stream = bio_src->bi_write_stream; 839 bio->bi_iter = bio_src->bi_iter; 840 841 if (bio->bi_bdev) { 842 if (bio->bi_bdev == bio_src->bi_bdev && 843 bio_flagged(bio_src, BIO_REMAPPED)) 844 bio_set_flag(bio, BIO_REMAPPED); 845 bio_clone_blkg_association(bio, bio_src); 846 } 847 848 if (bio_crypt_clone(bio, bio_src, gfp) < 0) 849 return -ENOMEM; 850 if (bio_integrity(bio_src) && 851 bio_integrity_clone(bio, bio_src, gfp) < 0) 852 return -ENOMEM; 853 return 0; 854 } 855 856 /** 857 * bio_alloc_clone - clone a bio that shares the original bio's biovec 858 * @bdev: block_device to clone onto 859 * @bio_src: bio to clone from 860 * @gfp: allocation priority 861 * @bs: bio_set to allocate from 862 * 863 * Allocate a new bio that is a clone of @bio_src. This reuses the bio_vecs 864 * pointed to by @bio_src->bi_io_vec, and clones the iterator pointing to 865 * the current position in it. The caller owns the returned bio, but not 866 * the bio_vecs, and must ensure the bio is freed before the memory 867 * pointed to by @bio_Src->bi_io_vecs. 868 */ 869 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, 870 gfp_t gfp, struct bio_set *bs) 871 { 872 struct bio *bio; 873 874 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); 875 if (!bio) 876 return NULL; 877 878 if (__bio_clone(bio, bio_src, gfp) < 0) { 879 bio_put(bio); 880 return NULL; 881 } 882 bio->bi_io_vec = bio_src->bi_io_vec; 883 884 return bio; 885 } 886 EXPORT_SYMBOL(bio_alloc_clone); 887 888 /** 889 * bio_init_clone - clone a bio that shares the original bio's biovec 890 * @bdev: block_device to clone onto 891 * @bio: bio to clone into 892 * @bio_src: bio to clone from 893 * @gfp: allocation priority 894 * 895 * Initialize a new bio in caller provided memory that is a clone of @bio_src. 896 * The same bio_vecs reuse and bio lifetime rules as bio_alloc_clone() apply. 897 */ 898 int bio_init_clone(struct block_device *bdev, struct bio *bio, 899 struct bio *bio_src, gfp_t gfp) 900 { 901 int ret; 902 903 bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); 904 ret = __bio_clone(bio, bio_src, gfp); 905 if (ret) 906 bio_uninit(bio); 907 return ret; 908 } 909 EXPORT_SYMBOL(bio_init_clone); 910 911 /** 912 * bio_full - check if the bio is full 913 * @bio: bio to check 914 * @len: length of one segment to be added 915 * 916 * Return true if @bio is full and one segment with @len bytes can't be 917 * added to the bio, otherwise return false 918 */ 919 static inline bool bio_full(struct bio *bio, unsigned len) 920 { 921 if (bio->bi_vcnt >= bio->bi_max_vecs) 922 return true; 923 if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len) 924 return true; 925 return false; 926 } 927 928 static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page, 929 unsigned int len, unsigned int off) 930 { 931 size_t bv_end = bv->bv_offset + bv->bv_len; 932 phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; 933 phys_addr_t page_addr = page_to_phys(page); 934 935 if (vec_end_addr + 1 != page_addr + off) 936 return false; 937 if (xen_domain() && !xen_biovec_phys_mergeable(bv, page)) 938 return false; 939 940 if ((vec_end_addr & PAGE_MASK) != ((page_addr + off) & PAGE_MASK)) { 941 if (IS_ENABLED(CONFIG_KMSAN)) 942 return false; 943 if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE) 944 return false; 945 } 946 947 bv->bv_len += len; 948 return true; 949 } 950 951 /* 952 * Try to merge a page into a segment, while obeying the hardware segment 953 * size limit. 954 * 955 * This is kept around for the integrity metadata, which is still tries 956 * to build the initial bio to the hardware limit and doesn't have proper 957 * helpers to split. Hopefully this will go away soon. 958 */ 959 bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv, 960 struct page *page, unsigned len, unsigned offset) 961 { 962 unsigned long mask = queue_segment_boundary(q); 963 phys_addr_t addr1 = bvec_phys(bv); 964 phys_addr_t addr2 = page_to_phys(page) + offset + len - 1; 965 966 if ((addr1 | mask) != (addr2 | mask)) 967 return false; 968 if (len > queue_max_segment_size(q) - bv->bv_len) 969 return false; 970 return bvec_try_merge_page(bv, page, len, offset); 971 } 972 973 /** 974 * __bio_add_page - add page(s) to a bio in a new segment 975 * @bio: destination bio 976 * @page: start page to add 977 * @len: length of the data to add, may cross pages 978 * @off: offset of the data relative to @page, may cross pages 979 * 980 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure 981 * that @bio has space for another bvec. 982 */ 983 void __bio_add_page(struct bio *bio, struct page *page, 984 unsigned int len, unsigned int off) 985 { 986 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); 987 WARN_ON_ONCE(bio_full(bio, len)); 988 989 if (is_pci_p2pdma_page(page)) 990 bio->bi_opf |= REQ_NOMERGE; 991 992 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off); 993 bio->bi_iter.bi_size += len; 994 bio->bi_vcnt++; 995 } 996 EXPORT_SYMBOL_GPL(__bio_add_page); 997 998 /** 999 * bio_add_virt_nofail - add data in the direct kernel mapping to a bio 1000 * @bio: destination bio 1001 * @vaddr: data to add 1002 * @len: length of the data to add, may cross pages 1003 * 1004 * Add the data at @vaddr to @bio. The caller must have ensure a segment 1005 * is available for the added data. No merging into an existing segment 1006 * will be performed. 1007 */ 1008 void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len) 1009 { 1010 __bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr)); 1011 } 1012 EXPORT_SYMBOL_GPL(bio_add_virt_nofail); 1013 1014 /** 1015 * bio_add_page - attempt to add page(s) to bio 1016 * @bio: destination bio 1017 * @page: start page to add 1018 * @len: vec entry length, may cross pages 1019 * @offset: vec entry offset relative to @page, may cross pages 1020 * 1021 * Attempt to add page(s) to the bio_vec maplist. This will only fail 1022 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio. 1023 */ 1024 int bio_add_page(struct bio *bio, struct page *page, 1025 unsigned int len, unsigned int offset) 1026 { 1027 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) 1028 return 0; 1029 if (WARN_ON_ONCE(len == 0)) 1030 return 0; 1031 if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len) 1032 return 0; 1033 1034 if (bio->bi_vcnt > 0) { 1035 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; 1036 1037 if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) 1038 return 0; 1039 1040 if (bvec_try_merge_page(bv, page, len, offset)) { 1041 bio->bi_iter.bi_size += len; 1042 return len; 1043 } 1044 } 1045 1046 if (bio->bi_vcnt >= bio->bi_max_vecs) 1047 return 0; 1048 __bio_add_page(bio, page, len, offset); 1049 return len; 1050 } 1051 EXPORT_SYMBOL(bio_add_page); 1052 1053 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, 1054 size_t off) 1055 { 1056 unsigned long nr = off / PAGE_SIZE; 1057 1058 WARN_ON_ONCE(len > BIO_MAX_SIZE); 1059 __bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE); 1060 } 1061 EXPORT_SYMBOL_GPL(bio_add_folio_nofail); 1062 1063 /** 1064 * bio_add_folio - Attempt to add part of a folio to a bio. 1065 * @bio: BIO to add to. 1066 * @folio: Folio to add. 1067 * @len: How many bytes from the folio to add. 1068 * @off: First byte in this folio to add. 1069 * 1070 * Filesystems that use folios can call this function instead of calling 1071 * bio_add_page() for each page in the folio. If @off is bigger than 1072 * PAGE_SIZE, this function can create a bio_vec that starts in a page 1073 * after the bv_page. BIOs do not support folios that are 4GiB or larger. 1074 * 1075 * Return: Whether the addition was successful. 1076 */ 1077 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, 1078 size_t off) 1079 { 1080 unsigned long nr = off / PAGE_SIZE; 1081 1082 if (len > BIO_MAX_SIZE) 1083 return false; 1084 return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0; 1085 } 1086 EXPORT_SYMBOL(bio_add_folio); 1087 1088 /** 1089 * bio_add_vmalloc_chunk - add a vmalloc chunk to a bio 1090 * @bio: destination bio 1091 * @vaddr: vmalloc address to add 1092 * @len: total length in bytes of the data to add 1093 * 1094 * Add data starting at @vaddr to @bio and return how many bytes were added. 1095 * This may be less than the amount originally asked. Returns 0 if no data 1096 * could be added to @bio. 1097 * 1098 * This helper calls flush_kernel_vmap_range() for the range added. For reads 1099 * the caller still needs to manually call invalidate_kernel_vmap_range() in 1100 * the completion handler. 1101 */ 1102 unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len) 1103 { 1104 unsigned int offset = offset_in_page(vaddr); 1105 1106 len = min(len, PAGE_SIZE - offset); 1107 if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len) 1108 return 0; 1109 if (op_is_write(bio_op(bio))) 1110 flush_kernel_vmap_range(vaddr, len); 1111 return len; 1112 } 1113 EXPORT_SYMBOL_GPL(bio_add_vmalloc_chunk); 1114 1115 /** 1116 * bio_add_vmalloc - add a vmalloc region to a bio 1117 * @bio: destination bio 1118 * @vaddr: vmalloc address to add 1119 * @len: total length in bytes of the data to add 1120 * 1121 * Add data starting at @vaddr to @bio. Return %true on success or %false if 1122 * @bio does not have enough space for the payload. 1123 * 1124 * This helper calls flush_kernel_vmap_range() for the range added. For reads 1125 * the caller still needs to manually call invalidate_kernel_vmap_range() in 1126 * the completion handler. 1127 */ 1128 bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len) 1129 { 1130 do { 1131 unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len); 1132 1133 if (!added) 1134 return false; 1135 vaddr += added; 1136 len -= added; 1137 } while (len); 1138 1139 return true; 1140 } 1141 EXPORT_SYMBOL_GPL(bio_add_vmalloc); 1142 1143 void __bio_release_pages(struct bio *bio, bool mark_dirty) 1144 { 1145 struct folio_iter fi; 1146 1147 bio_for_each_folio_all(fi, bio) { 1148 size_t nr_pages; 1149 1150 if (mark_dirty) { 1151 folio_lock(fi.folio); 1152 folio_mark_dirty(fi.folio); 1153 folio_unlock(fi.folio); 1154 } 1155 nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE - 1156 fi.offset / PAGE_SIZE + 1; 1157 unpin_user_folio(fi.folio, nr_pages); 1158 } 1159 } 1160 EXPORT_SYMBOL_GPL(__bio_release_pages); 1161 1162 void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter) 1163 { 1164 WARN_ON_ONCE(bio->bi_max_vecs); 1165 1166 bio->bi_io_vec = (struct bio_vec *)iter->bvec; 1167 bio->bi_iter.bi_idx = 0; 1168 bio->bi_iter.bi_bvec_done = iter->iov_offset; 1169 bio->bi_iter.bi_size = iov_iter_count(iter); 1170 bio_set_flag(bio, BIO_CLONED); 1171 } 1172 1173 /* 1174 * Aligns the bio size to the len_align_mask, releasing excessive bio vecs that 1175 * __bio_iov_iter_get_pages may have inserted, and reverts the trimmed length 1176 * for the next iteration. 1177 */ 1178 static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter, 1179 unsigned len_align_mask) 1180 { 1181 size_t nbytes = bio->bi_iter.bi_size & len_align_mask; 1182 1183 if (!nbytes) 1184 return 0; 1185 1186 iov_iter_revert(iter, nbytes); 1187 bio->bi_iter.bi_size -= nbytes; 1188 do { 1189 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; 1190 1191 if (nbytes < bv->bv_len) { 1192 bv->bv_len -= nbytes; 1193 break; 1194 } 1195 1196 if (bio_flagged(bio, BIO_PAGE_PINNED)) 1197 unpin_user_page(bv->bv_page); 1198 1199 bio->bi_vcnt--; 1200 nbytes -= bv->bv_len; 1201 } while (nbytes); 1202 1203 if (!bio->bi_vcnt) 1204 return -EFAULT; 1205 return 0; 1206 } 1207 1208 /** 1209 * bio_iov_iter_get_pages - add user or kernel pages to a bio 1210 * @bio: bio to add pages to 1211 * @iter: iov iterator describing the region to be added 1212 * @len_align_mask: the mask to align the total size to, 0 for any length 1213 * 1214 * This takes either an iterator pointing to user memory, or one pointing to 1215 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and 1216 * map them into the kernel. On IO completion, the caller should put those 1217 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided 1218 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs 1219 * to ensure the bvecs and pages stay referenced until the submitted I/O is 1220 * completed by a call to ->ki_complete() or returns with an error other than 1221 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF 1222 * on IO completion. If it isn't, then pages should be released. 1223 * 1224 * The function tries, but does not guarantee, to pin as many pages as 1225 * fit into the bio, or are requested in @iter, whatever is smaller. If 1226 * MM encounters an error pinning the requested pages, it stops. Error 1227 * is returned only if 0 pages could be pinned. 1228 */ 1229 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter, 1230 unsigned len_align_mask) 1231 { 1232 iov_iter_extraction_t flags = 0; 1233 1234 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) 1235 return -EIO; 1236 1237 if (iov_iter_is_bvec(iter)) { 1238 bio_iov_bvec_set(bio, iter); 1239 iov_iter_advance(iter, bio->bi_iter.bi_size); 1240 return 0; 1241 } 1242 1243 if (iov_iter_extract_will_pin(iter)) 1244 bio_set_flag(bio, BIO_PAGE_PINNED); 1245 if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue)) 1246 flags |= ITER_ALLOW_P2PDMA; 1247 1248 do { 1249 ssize_t ret; 1250 1251 ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec, 1252 BIO_MAX_SIZE - bio->bi_iter.bi_size, 1253 &bio->bi_vcnt, bio->bi_max_vecs, flags); 1254 if (ret <= 0) { 1255 if (!bio->bi_vcnt) 1256 return ret; 1257 break; 1258 } 1259 bio->bi_iter.bi_size += ret; 1260 } while (iov_iter_count(iter) && !bio_full(bio, 0)); 1261 1262 if (is_pci_p2pdma_page(bio->bi_io_vec->bv_page)) 1263 bio->bi_opf |= REQ_NOMERGE; 1264 return bio_iov_iter_align_down(bio, iter, len_align_mask); 1265 } 1266 1267 static struct folio *folio_alloc_greedy(gfp_t gfp, size_t *size) 1268 { 1269 struct folio *folio; 1270 1271 while (*size > PAGE_SIZE) { 1272 folio = folio_alloc(gfp | __GFP_NORETRY, get_order(*size)); 1273 if (folio) 1274 return folio; 1275 *size = rounddown_pow_of_two(*size - 1); 1276 } 1277 1278 return folio_alloc(gfp, get_order(*size)); 1279 } 1280 1281 static void bio_free_folios(struct bio *bio) 1282 { 1283 struct bio_vec *bv; 1284 int i; 1285 1286 bio_for_each_bvec_all(bv, bio, i) { 1287 struct folio *folio = page_folio(bv->bv_page); 1288 1289 if (!is_zero_folio(folio)) 1290 folio_put(folio); 1291 } 1292 } 1293 1294 static int bio_iov_iter_bounce_write(struct bio *bio, struct iov_iter *iter, 1295 size_t maxlen) 1296 { 1297 size_t total_len = min(maxlen, iov_iter_count(iter)); 1298 1299 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) 1300 return -EINVAL; 1301 if (WARN_ON_ONCE(bio->bi_iter.bi_size)) 1302 return -EINVAL; 1303 if (WARN_ON_ONCE(bio->bi_vcnt >= bio->bi_max_vecs)) 1304 return -EINVAL; 1305 1306 do { 1307 size_t this_len = min(total_len, SZ_1M); 1308 struct folio *folio; 1309 1310 if (this_len > PAGE_SIZE * 2) 1311 this_len = rounddown_pow_of_two(this_len); 1312 1313 if (bio->bi_iter.bi_size > BIO_MAX_SIZE - this_len) 1314 break; 1315 1316 folio = folio_alloc_greedy(GFP_KERNEL, &this_len); 1317 if (!folio) 1318 break; 1319 bio_add_folio_nofail(bio, folio, this_len, 0); 1320 1321 if (copy_from_iter(folio_address(folio), this_len, iter) != 1322 this_len) { 1323 bio_free_folios(bio); 1324 return -EFAULT; 1325 } 1326 1327 total_len -= this_len; 1328 } while (total_len && bio->bi_vcnt < bio->bi_max_vecs); 1329 1330 if (!bio->bi_iter.bi_size) 1331 return -ENOMEM; 1332 return 0; 1333 } 1334 1335 static int bio_iov_iter_bounce_read(struct bio *bio, struct iov_iter *iter, 1336 size_t maxlen) 1337 { 1338 size_t len = min3(iov_iter_count(iter), maxlen, SZ_1M); 1339 struct folio *folio; 1340 1341 folio = folio_alloc_greedy(GFP_KERNEL, &len); 1342 if (!folio) 1343 return -ENOMEM; 1344 1345 do { 1346 ssize_t ret; 1347 1348 ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec + 1, len, 1349 &bio->bi_vcnt, bio->bi_max_vecs - 1, 0); 1350 if (ret <= 0) { 1351 if (!bio->bi_vcnt) { 1352 folio_put(folio); 1353 return ret; 1354 } 1355 break; 1356 } 1357 len -= ret; 1358 bio->bi_iter.bi_size += ret; 1359 } while (len && bio->bi_vcnt < bio->bi_max_vecs - 1); 1360 1361 /* 1362 * Set the folio directly here. The above loop has already calculated 1363 * the correct bi_size, and we use bi_vcnt for the user buffers. That 1364 * is safe as bi_vcnt is only used by the submitter and not the actual 1365 * I/O path. 1366 */ 1367 bvec_set_folio(&bio->bi_io_vec[0], folio, bio->bi_iter.bi_size, 0); 1368 if (iov_iter_extract_will_pin(iter)) 1369 bio_set_flag(bio, BIO_PAGE_PINNED); 1370 return 0; 1371 } 1372 1373 /** 1374 * bio_iov_iter_bounce - bounce buffer data from an iter into a bio 1375 * @bio: bio to send 1376 * @iter: iter to read from / write into 1377 * @maxlen: maximum size to bounce 1378 * 1379 * Helper for direct I/O implementations that need to bounce buffer because 1380 * we need to checksum the data or perform other operations that require 1381 * consistency. Allocates folios to back the bounce buffer, and for writes 1382 * copies the data into it. Needs to be paired with bio_iov_iter_unbounce() 1383 * called on completion. 1384 */ 1385 int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter, size_t maxlen) 1386 { 1387 if (op_is_write(bio_op(bio))) 1388 return bio_iov_iter_bounce_write(bio, iter, maxlen); 1389 return bio_iov_iter_bounce_read(bio, iter, maxlen); 1390 } 1391 1392 static void bvec_unpin(struct bio_vec *bv, bool mark_dirty) 1393 { 1394 struct folio *folio = page_folio(bv->bv_page); 1395 size_t nr_pages = (bv->bv_offset + bv->bv_len - 1) / PAGE_SIZE - 1396 bv->bv_offset / PAGE_SIZE + 1; 1397 1398 if (mark_dirty) 1399 folio_mark_dirty_lock(folio); 1400 unpin_user_folio(folio, nr_pages); 1401 } 1402 1403 static void bio_iov_iter_unbounce_read(struct bio *bio, bool is_error, 1404 bool mark_dirty) 1405 { 1406 unsigned int len = bio->bi_io_vec[0].bv_len; 1407 1408 if (likely(!is_error)) { 1409 void *buf = bvec_virt(&bio->bi_io_vec[0]); 1410 struct iov_iter to; 1411 1412 iov_iter_bvec(&to, ITER_DEST, bio->bi_io_vec + 1, bio->bi_vcnt, 1413 len); 1414 /* copying to pinned pages should always work */ 1415 WARN_ON_ONCE(copy_to_iter(buf, len, &to) != len); 1416 } else { 1417 /* No need to mark folios dirty if never copied to them */ 1418 mark_dirty = false; 1419 } 1420 1421 if (bio_flagged(bio, BIO_PAGE_PINNED)) { 1422 int i; 1423 1424 for (i = 0; i < bio->bi_vcnt; i++) 1425 bvec_unpin(&bio->bi_io_vec[1 + i], mark_dirty); 1426 } 1427 1428 folio_put(page_folio(bio->bi_io_vec[0].bv_page)); 1429 } 1430 1431 /** 1432 * bio_iov_iter_unbounce - finish a bounce buffer operation 1433 * @bio: completed bio 1434 * @is_error: %true if an I/O error occurred and data should not be copied 1435 * @mark_dirty: If %true, folios will be marked dirty. 1436 * 1437 * Helper for direct I/O implementations that need to bounce buffer because 1438 * we need to checksum the data or perform other operations that require 1439 * consistency. Called to complete a bio set up by bio_iov_iter_bounce(). 1440 * Copies data back for reads, and marks the original folios dirty if 1441 * requested and then frees the bounce buffer. 1442 */ 1443 void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty) 1444 { 1445 if (op_is_write(bio_op(bio))) 1446 bio_free_folios(bio); 1447 else 1448 bio_iov_iter_unbounce_read(bio, is_error, mark_dirty); 1449 } 1450 1451 static void submit_bio_wait_endio(struct bio *bio) 1452 { 1453 complete(bio->bi_private); 1454 } 1455 1456 /** 1457 * submit_bio_wait - submit a bio, and wait until it completes 1458 * @bio: The &struct bio which describes the I/O 1459 * 1460 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from 1461 * bio_endio() on failure. 1462 * 1463 * WARNING: Unlike to how submit_bio() is usually used, this function does not 1464 * result in bio reference to be consumed. The caller must drop the reference 1465 * on his own. 1466 */ 1467 int submit_bio_wait(struct bio *bio) 1468 { 1469 DECLARE_COMPLETION_ONSTACK_MAP(done, 1470 bio->bi_bdev->bd_disk->lockdep_map); 1471 1472 bio->bi_private = &done; 1473 bio->bi_end_io = submit_bio_wait_endio; 1474 bio->bi_opf |= REQ_SYNC; 1475 submit_bio(bio); 1476 blk_wait_io(&done); 1477 1478 return blk_status_to_errno(bio->bi_status); 1479 } 1480 EXPORT_SYMBOL(submit_bio_wait); 1481 1482 /** 1483 * bdev_rw_virt - synchronously read into / write from kernel mapping 1484 * @bdev: block device to access 1485 * @sector: sector to access 1486 * @data: data to read/write 1487 * @len: length in byte to read/write 1488 * @op: operation (e.g. REQ_OP_READ/REQ_OP_WRITE) 1489 * 1490 * Performs synchronous I/O to @bdev for @data/@len. @data must be in 1491 * the kernel direct mapping and not a vmalloc address. 1492 */ 1493 int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data, 1494 size_t len, enum req_op op) 1495 { 1496 struct bio_vec bv; 1497 struct bio bio; 1498 int error; 1499 1500 if (WARN_ON_ONCE(is_vmalloc_addr(data))) 1501 return -EIO; 1502 1503 bio_init(&bio, bdev, &bv, 1, op); 1504 bio.bi_iter.bi_sector = sector; 1505 bio_add_virt_nofail(&bio, data, len); 1506 error = submit_bio_wait(&bio); 1507 bio_uninit(&bio); 1508 return error; 1509 } 1510 EXPORT_SYMBOL_GPL(bdev_rw_virt); 1511 1512 static void bio_wait_end_io(struct bio *bio) 1513 { 1514 complete(bio->bi_private); 1515 bio_put(bio); 1516 } 1517 1518 /* 1519 * bio_await_chain - ends @bio and waits for every chained bio to complete 1520 */ 1521 void bio_await_chain(struct bio *bio) 1522 { 1523 DECLARE_COMPLETION_ONSTACK_MAP(done, 1524 bio->bi_bdev->bd_disk->lockdep_map); 1525 1526 bio->bi_private = &done; 1527 bio->bi_end_io = bio_wait_end_io; 1528 bio_endio(bio); 1529 blk_wait_io(&done); 1530 } 1531 1532 void __bio_advance(struct bio *bio, unsigned bytes) 1533 { 1534 if (bio_integrity(bio)) 1535 bio_integrity_advance(bio, bytes); 1536 1537 bio_crypt_advance(bio, bytes); 1538 bio_advance_iter(bio, &bio->bi_iter, bytes); 1539 } 1540 EXPORT_SYMBOL(__bio_advance); 1541 1542 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, 1543 struct bio *src, struct bvec_iter *src_iter) 1544 { 1545 while (src_iter->bi_size && dst_iter->bi_size) { 1546 struct bio_vec src_bv = bio_iter_iovec(src, *src_iter); 1547 struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter); 1548 unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); 1549 void *src_buf = bvec_kmap_local(&src_bv); 1550 void *dst_buf = bvec_kmap_local(&dst_bv); 1551 1552 memcpy(dst_buf, src_buf, bytes); 1553 1554 kunmap_local(dst_buf); 1555 kunmap_local(src_buf); 1556 1557 bio_advance_iter_single(src, src_iter, bytes); 1558 bio_advance_iter_single(dst, dst_iter, bytes); 1559 } 1560 } 1561 EXPORT_SYMBOL(bio_copy_data_iter); 1562 1563 /** 1564 * bio_copy_data - copy contents of data buffers from one bio to another 1565 * @src: source bio 1566 * @dst: destination bio 1567 * 1568 * Stops when it reaches the end of either @src or @dst - that is, copies 1569 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). 1570 */ 1571 void bio_copy_data(struct bio *dst, struct bio *src) 1572 { 1573 struct bvec_iter src_iter = src->bi_iter; 1574 struct bvec_iter dst_iter = dst->bi_iter; 1575 1576 bio_copy_data_iter(dst, &dst_iter, src, &src_iter); 1577 } 1578 EXPORT_SYMBOL(bio_copy_data); 1579 1580 void bio_free_pages(struct bio *bio) 1581 { 1582 struct bio_vec *bvec; 1583 struct bvec_iter_all iter_all; 1584 1585 bio_for_each_segment_all(bvec, bio, iter_all) 1586 __free_page(bvec->bv_page); 1587 } 1588 EXPORT_SYMBOL(bio_free_pages); 1589 1590 /* 1591 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions 1592 * for performing direct-IO in BIOs. 1593 * 1594 * The problem is that we cannot run folio_mark_dirty() from interrupt context 1595 * because the required locks are not interrupt-safe. So what we can do is to 1596 * mark the pages dirty _before_ performing IO. And in interrupt context, 1597 * check that the pages are still dirty. If so, fine. If not, redirty them 1598 * in process context. 1599 * 1600 * Note that this code is very hard to test under normal circumstances because 1601 * direct-io pins the pages with get_user_pages(). This makes 1602 * is_page_cache_freeable return false, and the VM will not clean the pages. 1603 * But other code (eg, flusher threads) could clean the pages if they are mapped 1604 * pagecache. 1605 * 1606 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the 1607 * deferred bio dirtying paths. 1608 */ 1609 1610 /* 1611 * bio_set_pages_dirty() will mark all the bio's pages as dirty. 1612 */ 1613 void bio_set_pages_dirty(struct bio *bio) 1614 { 1615 struct folio_iter fi; 1616 1617 bio_for_each_folio_all(fi, bio) { 1618 folio_lock(fi.folio); 1619 folio_mark_dirty(fi.folio); 1620 folio_unlock(fi.folio); 1621 } 1622 } 1623 EXPORT_SYMBOL_GPL(bio_set_pages_dirty); 1624 1625 /* 1626 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. 1627 * If they are, then fine. If, however, some pages are clean then they must 1628 * have been written out during the direct-IO read. So we take another ref on 1629 * the BIO and re-dirty the pages in process context. 1630 * 1631 * It is expected that bio_check_pages_dirty() will wholly own the BIO from 1632 * here on. It will unpin each page and will run one bio_put() against the 1633 * BIO. 1634 */ 1635 1636 static void bio_dirty_fn(struct work_struct *work); 1637 1638 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); 1639 static DEFINE_SPINLOCK(bio_dirty_lock); 1640 static struct bio *bio_dirty_list; 1641 1642 /* 1643 * This runs in process context 1644 */ 1645 static void bio_dirty_fn(struct work_struct *work) 1646 { 1647 struct bio *bio, *next; 1648 1649 spin_lock_irq(&bio_dirty_lock); 1650 next = bio_dirty_list; 1651 bio_dirty_list = NULL; 1652 spin_unlock_irq(&bio_dirty_lock); 1653 1654 while ((bio = next) != NULL) { 1655 next = bio->bi_private; 1656 1657 bio_release_pages(bio, true); 1658 bio_put(bio); 1659 } 1660 } 1661 1662 void bio_check_pages_dirty(struct bio *bio) 1663 { 1664 struct folio_iter fi; 1665 unsigned long flags; 1666 1667 bio_for_each_folio_all(fi, bio) { 1668 if (!folio_test_dirty(fi.folio)) 1669 goto defer; 1670 } 1671 1672 bio_release_pages(bio, false); 1673 bio_put(bio); 1674 return; 1675 defer: 1676 spin_lock_irqsave(&bio_dirty_lock, flags); 1677 bio->bi_private = bio_dirty_list; 1678 bio_dirty_list = bio; 1679 spin_unlock_irqrestore(&bio_dirty_lock, flags); 1680 schedule_work(&bio_dirty_work); 1681 } 1682 EXPORT_SYMBOL_GPL(bio_check_pages_dirty); 1683 1684 static inline bool bio_remaining_done(struct bio *bio) 1685 { 1686 /* 1687 * If we're not chaining, then ->__bi_remaining is always 1 and 1688 * we always end io on the first invocation. 1689 */ 1690 if (!bio_flagged(bio, BIO_CHAIN)) 1691 return true; 1692 1693 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); 1694 1695 if (atomic_dec_and_test(&bio->__bi_remaining)) { 1696 bio_clear_flag(bio, BIO_CHAIN); 1697 return true; 1698 } 1699 1700 return false; 1701 } 1702 1703 /** 1704 * bio_endio - end I/O on a bio 1705 * @bio: bio 1706 * 1707 * Description: 1708 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred 1709 * way to end I/O on a bio. No one should call bi_end_io() directly on a 1710 * bio unless they own it and thus know that it has an end_io function. 1711 * 1712 * bio_endio() can be called several times on a bio that has been chained 1713 * using bio_chain(). The ->bi_end_io() function will only be called the 1714 * last time. 1715 **/ 1716 void bio_endio(struct bio *bio) 1717 { 1718 again: 1719 if (!bio_remaining_done(bio)) 1720 return; 1721 if (!bio_integrity_endio(bio)) 1722 return; 1723 1724 blk_zone_bio_endio(bio); 1725 1726 rq_qos_done_bio(bio); 1727 1728 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { 1729 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); 1730 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 1731 } 1732 1733 /* 1734 * Need to have a real endio function for chained bios, otherwise 1735 * various corner cases will break (like stacking block devices that 1736 * save/restore bi_end_io) - however, we want to avoid unbounded 1737 * recursion and blowing the stack. Tail call optimization would 1738 * handle this, but compiling with frame pointers also disables 1739 * gcc's sibling call optimization. 1740 */ 1741 if (bio->bi_end_io == bio_chain_endio) { 1742 bio = __bio_chain_endio(bio); 1743 goto again; 1744 } 1745 1746 #ifdef CONFIG_BLK_CGROUP 1747 /* 1748 * Release cgroup info. We shouldn't have to do this here, but quite 1749 * a few callers of bio_init fail to call bio_uninit, so we cover up 1750 * for that here at least for now. 1751 */ 1752 if (bio->bi_blkg) { 1753 blkg_put(bio->bi_blkg); 1754 bio->bi_blkg = NULL; 1755 } 1756 #endif 1757 1758 if (bio->bi_end_io) 1759 bio->bi_end_io(bio); 1760 } 1761 EXPORT_SYMBOL(bio_endio); 1762 1763 /** 1764 * bio_split - split a bio 1765 * @bio: bio to split 1766 * @sectors: number of sectors to split from the front of @bio 1767 * @gfp: gfp mask 1768 * @bs: bio set to allocate from 1769 * 1770 * Allocates and returns a new bio which represents @sectors from the start of 1771 * @bio, and updates @bio to represent the remaining sectors. 1772 * 1773 * Unless this is a discard request the newly allocated bio will point 1774 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that 1775 * neither @bio nor @bs are freed before the split bio. 1776 */ 1777 struct bio *bio_split(struct bio *bio, int sectors, 1778 gfp_t gfp, struct bio_set *bs) 1779 { 1780 struct bio *split; 1781 1782 if (WARN_ON_ONCE(sectors <= 0)) 1783 return ERR_PTR(-EINVAL); 1784 if (WARN_ON_ONCE(sectors >= bio_sectors(bio))) 1785 return ERR_PTR(-EINVAL); 1786 1787 /* Zone append commands cannot be split */ 1788 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) 1789 return ERR_PTR(-EINVAL); 1790 1791 /* atomic writes cannot be split */ 1792 if (bio->bi_opf & REQ_ATOMIC) 1793 return ERR_PTR(-EINVAL); 1794 1795 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); 1796 if (!split) 1797 return ERR_PTR(-ENOMEM); 1798 1799 split->bi_iter.bi_size = sectors << 9; 1800 1801 if (bio_integrity(split)) 1802 bio_integrity_trim(split); 1803 1804 bio_advance(bio, split->bi_iter.bi_size); 1805 1806 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) 1807 bio_set_flag(split, BIO_TRACE_COMPLETION); 1808 1809 return split; 1810 } 1811 EXPORT_SYMBOL(bio_split); 1812 1813 /** 1814 * bio_trim - trim a bio 1815 * @bio: bio to trim 1816 * @offset: number of sectors to trim from the front of @bio 1817 * @size: size we want to trim @bio to, in sectors 1818 * 1819 * This function is typically used for bios that are cloned and submitted 1820 * to the underlying device in parts. 1821 */ 1822 void bio_trim(struct bio *bio, sector_t offset, sector_t size) 1823 { 1824 /* We should never trim an atomic write */ 1825 if (WARN_ON_ONCE(bio->bi_opf & REQ_ATOMIC && size)) 1826 return; 1827 1828 if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS || 1829 offset + size > bio_sectors(bio))) 1830 return; 1831 1832 size <<= 9; 1833 if (offset == 0 && size == bio->bi_iter.bi_size) 1834 return; 1835 1836 bio_advance(bio, offset << 9); 1837 bio->bi_iter.bi_size = size; 1838 1839 if (bio_integrity(bio)) 1840 bio_integrity_trim(bio); 1841 } 1842 EXPORT_SYMBOL_GPL(bio_trim); 1843 1844 /* 1845 * create memory pools for biovec's in a bio_set. 1846 * use the global biovec slabs created for general use. 1847 */ 1848 int biovec_init_pool(mempool_t *pool, int pool_entries) 1849 { 1850 struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1; 1851 1852 return mempool_init_slab_pool(pool, pool_entries, bp->slab); 1853 } 1854 1855 /* 1856 * bioset_exit - exit a bioset initialized with bioset_init() 1857 * 1858 * May be called on a zeroed but uninitialized bioset (i.e. allocated with 1859 * kzalloc()). 1860 */ 1861 void bioset_exit(struct bio_set *bs) 1862 { 1863 bio_alloc_cache_destroy(bs); 1864 if (bs->rescue_workqueue) 1865 destroy_workqueue(bs->rescue_workqueue); 1866 bs->rescue_workqueue = NULL; 1867 1868 mempool_exit(&bs->bio_pool); 1869 mempool_exit(&bs->bvec_pool); 1870 1871 if (bs->bio_slab) 1872 bio_put_slab(bs); 1873 bs->bio_slab = NULL; 1874 } 1875 EXPORT_SYMBOL(bioset_exit); 1876 1877 /** 1878 * bioset_init - Initialize a bio_set 1879 * @bs: pool to initialize 1880 * @pool_size: Number of bio and bio_vecs to cache in the mempool 1881 * @front_pad: Number of bytes to allocate in front of the returned bio 1882 * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS 1883 * and %BIOSET_NEED_RESCUER 1884 * 1885 * Description: 1886 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller 1887 * to ask for a number of bytes to be allocated in front of the bio. 1888 * Front pad allocation is useful for embedding the bio inside 1889 * another structure, to avoid allocating extra data to go with the bio. 1890 * Note that the bio must be embedded at the END of that structure always, 1891 * or things will break badly. 1892 * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated 1893 * for allocating iovecs. This pool is not needed e.g. for bio_init_clone(). 1894 * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used 1895 * to dispatch queued requests when the mempool runs out of space. 1896 * 1897 */ 1898 int bioset_init(struct bio_set *bs, 1899 unsigned int pool_size, 1900 unsigned int front_pad, 1901 int flags) 1902 { 1903 bs->front_pad = front_pad; 1904 if (flags & BIOSET_NEED_BVECS) 1905 bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); 1906 else 1907 bs->back_pad = 0; 1908 1909 spin_lock_init(&bs->rescue_lock); 1910 bio_list_init(&bs->rescue_list); 1911 INIT_WORK(&bs->rescue_work, bio_alloc_rescue); 1912 1913 bs->bio_slab = bio_find_or_create_slab(bs); 1914 if (!bs->bio_slab) 1915 return -ENOMEM; 1916 1917 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) 1918 goto bad; 1919 1920 if ((flags & BIOSET_NEED_BVECS) && 1921 biovec_init_pool(&bs->bvec_pool, pool_size)) 1922 goto bad; 1923 1924 if (flags & BIOSET_NEED_RESCUER) { 1925 bs->rescue_workqueue = alloc_workqueue("bioset", 1926 WQ_MEM_RECLAIM, 0); 1927 if (!bs->rescue_workqueue) 1928 goto bad; 1929 } 1930 if (flags & BIOSET_PERCPU_CACHE) { 1931 bs->cache = alloc_percpu(struct bio_alloc_cache); 1932 if (!bs->cache) 1933 goto bad; 1934 cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); 1935 } 1936 1937 return 0; 1938 bad: 1939 bioset_exit(bs); 1940 return -ENOMEM; 1941 } 1942 EXPORT_SYMBOL(bioset_init); 1943 1944 static int __init init_bio(void) 1945 { 1946 int i; 1947 1948 BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags)); 1949 1950 for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) { 1951 struct biovec_slab *bvs = bvec_slabs + i; 1952 1953 bvs->slab = kmem_cache_create(bvs->name, 1954 bvs->nr_vecs * sizeof(struct bio_vec), 0, 1955 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 1956 } 1957 1958 cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL, 1959 bio_cpu_dead); 1960 1961 if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, 1962 BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE)) 1963 panic("bio: can't allocate bios\n"); 1964 1965 return 0; 1966 } 1967 subsys_initcall(init_bio); 1968