// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio-integrity.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/highmem.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-cgroup.h"

#define ALLOC_CACHE_THRESHOLD	16
#define ALLOC_CACHE_MAX		256

struct bio_alloc_cache {
	struct bio		*free_list;
	struct bio		*free_list_irq;
	unsigned int		nr;
	unsigned int		nr_irq;
};

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[12];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN,
			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}
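/*
 * Illustrative sketch (editorial addition, not kernel documentation): how the
 * per-bio_set slab size is derived.  For a hypothetical bio_set initialized
 * with front_pad = 32 and BIOSET_NEED_BVECS (which sets back_pad to
 * BIO_INLINE_VECS * sizeof(struct bio_vec)), bs_bio_slab_size() yields:
 *
 *	size = 32 + sizeof(struct bio) +
 *	       BIO_INLINE_VECS * sizeof(struct bio_vec);
 *
 * and bio_find_or_create_slab() below reuses or creates a kmem_cache named
 * "bio-<size>" shared by all bio_sets that happen to compute the same size.
 */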
static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations.  If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}
void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p = bio;

	WARN_ON_ONCE(!bs);

	bio_uninit(bio);
	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
	mempool_free(p - bs->front_pad, &bs->bio_pool);
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf)
{
	bio->bi_next = NULL;
	bio->bi_bdev = bdev;
	bio->bi_opf = opf;
	bio->bi_flags = 0;
	bio->bi_ioprio = 0;
	bio->bi_write_hint = 0;
	bio->bi_write_stream = 0;
	bio->bi_status = 0;
	bio->bi_bvec_gap_bit = 0;
	bio->bi_iter.bi_sector = 0;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_idx = 0;
	bio->bi_iter.bi_bvec_done = 0;
	bio->bi_end_io = NULL;
	bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
	bio->bi_blkg = NULL;
	bio->issue_time_ns = 0;
	if (bdev)
		bio_associate_blkg(bio);
#ifdef CONFIG_BLK_CGROUP_IOCOST
	bio->bi_iocost_cost = 0;
#endif
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	bio->bi_crypt_context = NULL;
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	bio->bi_integrity = NULL;
#endif
	bio->bi_vcnt = 0;

	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
	bio->bi_cookie = BLK_QC_T_NONE;

	bio->bi_max_vecs = max_vecs;
	bio->bi_io_vec = table;
	bio->bi_pool = NULL;
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 * @bdev:	block device to use the bio for
 * @opf:	operation and flags for bio
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
	bio->bi_bdev = bdev;
	if (bio->bi_bdev)
		bio_associate_blkg(bio);
	bio->bi_opf = opf;
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
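/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * a caller that wants a single completion for a payload issued as two bios
 * can chain the extra bio to a parent.  "split" and "parent" are hypothetical
 * caller-side bios that were already set up and filled elsewhere:
 *
 *	bio_chain(split, parent);
 *	submit_bio(split);
 *	submit_bio(parent);
 *
 * parent->bi_end_io runs only after both bios have completed, and the first
 * error status seen is propagated to parent->bi_status.
 */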
/**
 * bio_chain_and_submit - submit a bio after chaining it to another one
 * @prev: bio to chain and submit
 * @new: bio to chain to
 *
 * If @prev is non-NULL, chain it to @new and submit it.
 *
 * Return: @new.
 */
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new)
{
	if (prev) {
		bio_chain(prev, new);
		submit_bio(prev);
	}
	return new;
}

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
{
	return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));
}
EXPORT_SYMBOL_GPL(blk_next_bio);
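/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * blk_next_bio() is typically called in a loop that builds a chain of bios
 * covering a large range while only keeping the most recent bio around.
 * "bdev", "sector" and "nr_sects" are hypothetical caller-side values:
 *
 *	struct bio *bio = NULL;
 *
 *	while (nr_sects) {
 *		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES,
 *				   GFP_KERNEL);
 *		bio->bi_iter.bi_sector = sector;
 *		// ... cap this chunk and advance sector/nr_sects ...
 *	}
 *	if (bio)
 *		submit_bio_wait(bio);
 *
 * Each earlier bio is chained to the newer one and submitted, so waiting on
 * the last bio waits for the whole chain.
 */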
static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
{
	unsigned long flags;

	/* cache->free_list must be empty */
	if (WARN_ON_ONCE(cache->free_list))
		return;

	local_irq_save(flags);
	cache->free_list = cache->free_list_irq;
	cache->free_list_irq = NULL;
	cache->nr += cache->nr_irq;
	cache->nr_irq = 0;
	local_irq_restore(flags);
}

static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
		struct bio_set *bs)
{
	struct bio_alloc_cache *cache;
	struct bio *bio;

	cache = per_cpu_ptr(bs->cache, get_cpu());
	if (!cache->free_list) {
		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
			bio_alloc_irq_cache_splice(cache);
		if (!cache->free_list) {
			put_cpu();
			return NULL;
		}
	}
	bio = cache->free_list;
	cache->free_list = bio->bi_next;
	cache->nr--;
	put_cpu();

	if (nr_vecs)
		bio_init_inline(bio, bdev, nr_vecs, opf);
	else
		bio_init(bio, bdev, NULL, nr_vecs, opf);
	bio->bi_pool = bs;
	return bio;
}
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @bdev:	block device to allocate the bio for (can be %NULL)
 * @nr_vecs:	number of bvecs to pre-allocate
 * @opf:	operation and flags for bio
 * @gfp_mask:	the GFP_* mask given to the slab allocator
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio. This is due to the mempool guarantees. To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have
 * deadlock avoidance code that resubmits any blocked bios from a rescuer
 * thread.
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_vecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
		return NULL;

	if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
		opf |= REQ_ALLOC_CACHE;
		bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
					     gfp_mask, bs);
		if (bio)
			return bio;
		/*
		 * No cached bio available, bio returned below marked with
		 * REQ_ALLOC_CACHE to participate in per-cpu alloc cache.
		 */
	} else
		opf &= ~REQ_ALLOC_CACHE;

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;
	if (!mempool_is_saturated(&bs->bio_pool))
		opf &= ~REQ_ALLOC_CACHE;

	bio = p + bs->front_pad;
	if (nr_vecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bdev, bvl, nr_vecs, opf);
	} else if (nr_vecs) {
		bio_init_inline(bio, bdev, BIO_INLINE_VECS, opf);
	} else {
		bio_init(bio, bdev, NULL, 0, opf);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
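/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * honoring the "one bio at a time per bio_set" rule described above.
 * "my_bio_set" is a hypothetical driver-owned bio_set, and "more_work()" and
 * "build_one_bio()" are hypothetical helpers that fill in the next chunk:
 *
 *	while (more_work()) {
 *		struct bio *bio;
 *
 *		bio = bio_alloc_bioset(bdev, 4, REQ_OP_WRITE, GFP_NOIO,
 *				       &my_bio_set);
 *		build_one_bio(bio);
 *		submit_bio(bio);	// submit before allocating the next one
 *	}
 *
 * With GFP_NOIO (which includes __GFP_DIRECT_RECLAIM) each allocation will
 * eventually succeed, but only because the previously allocated bio has
 * already been submitted and can complete and return to the mempool.
 */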
/**
 * bio_kmalloc - kmalloc a bio
 * @nr_vecs: number of bio_vecs to allocate
 * @gfp_mask: the GFP_* mask given to the slab allocator
 *
 * Use kmalloc to allocate a bio (including bvecs).  The bio must be
 * initialized using bio_init() before use.  To free a bio returned from this
 * function use kfree() after calling bio_uninit().  A bio returned from this
 * function can be reused by calling bio_uninit() before calling bio_init()
 * again.
 *
 * Note that unlike bio_alloc() or bio_alloc_bioset(), allocations from this
 * function are not backed by a mempool and can fail.  Do not use this
 * function for allocations in the file system I/O path.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (nr_vecs > BIO_MAX_INLINE_VECS)
		return NULL;
	return kmalloc(sizeof(*bio) + nr_vecs * sizeof(struct bio_vec),
			gfp_mask);
}
EXPORT_SYMBOL(bio_kmalloc);
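/*
 * Illustrative usage sketch (editorial addition, not kernel documentation) of
 * the lifecycle described above, assuming a caller outside the file system
 * I/O path:
 *
 *	struct bio *bio;
 *
 *	bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
 *	if (!bio)
 *		return -ENOMEM;
 *	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
 *	// ... add pages and submit ...
 *	bio_uninit(bio);
 *	kfree(bio);
 */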
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start)
		memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_truncate - truncate the bio to the new size @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to new size of @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
static void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			size_t offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			memzero_page(bv.bv_page, bv.bv_offset + offset,
				     bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

exit:
	/*
	 * Don't touch bvec table here and make it really immutable, since
	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .end_bio() callback.
	 *
	 * It is enough to truncate bio by updating .bi_size since we can make
	 * correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				   unsigned int nr)
{
	unsigned int i = 0;
	struct bio *bio;

	while ((bio = cache->free_list) != NULL) {
		cache->free_list = bio->bi_next;
		cache->nr--;
		bio_free(bio);
		if (++i == nr)
			break;
	}
	return i;
}

static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				  unsigned int nr)
{
	nr -= __bio_alloc_cache_prune(cache, nr);
	if (!READ_ONCE(cache->free_list)) {
		bio_alloc_irq_cache_splice(cache);
		__bio_alloc_cache_prune(cache, nr);
	}
}

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct bio_set *bs;

	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
	if (bs->cache) {
		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

		bio_alloc_cache_prune(cache, -1U);
	}
	return 0;
}

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
	int cpu;

	if (!bs->cache)
		return;

	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	for_each_possible_cpu(cpu) {
		struct bio_alloc_cache *cache;

		cache = per_cpu_ptr(bs->cache, cpu);
		bio_alloc_cache_prune(cache, -1U);
	}
	free_percpu(bs->cache);
	bs->cache = NULL;
}

static inline void bio_put_percpu_cache(struct bio *bio)
{
	struct bio_alloc_cache *cache;

	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX)
		goto out_free;

	if (in_task()) {
		bio_uninit(bio);
		bio->bi_next = cache->free_list;
		/* Not necessary but helps not to iopoll already freed bios */
		bio->bi_bdev = NULL;
		cache->free_list = bio;
		cache->nr++;
	} else if (in_hardirq()) {
		lockdep_assert_irqs_disabled();

		bio_uninit(bio);
		bio->bi_next = cache->free_list_irq;
		cache->free_list_irq = bio;
		cache->nr_irq++;
	} else {
		goto out_free;
	}
	put_cpu();
	return;
out_free:
	put_cpu();
	bio_free(bio);
}
/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
		BUG_ON(!atomic_read(&bio->__bi_cnt));
		if (!atomic_dec_and_test(&bio->__bi_cnt))
			return;
	}
	if (bio->bi_opf & REQ_ALLOC_CACHE)
		bio_put_percpu_cache(bio);
	else
		bio_free(bio);
}
EXPORT_SYMBOL(bio_put);

static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_write_stream = bio_src->bi_write_stream;
	bio->bi_iter = bio_src->bi_iter;

	if (bio->bi_bdev) {
		if (bio->bi_bdev == bio_src->bi_bdev &&
		    bio_flagged(bio_src, BIO_REMAPPED))
			bio_set_flag(bio, BIO_REMAPPED);
		bio_clone_blkg_association(bio, bio_src);
	}

	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	return 0;
}

/**
 * bio_alloc_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 * @bs: bio_set to allocate from
 *
 * Allocate a new bio that is a clone of @bio_src. The caller owns the
 * returned bio, but not the actual data it points to.
 *
 * The caller must ensure that the returned bio is not freed before @bio_src.
 */
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs)
{
	struct bio *bio;

	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
	if (!bio)
		return NULL;

	if (__bio_clone(bio, bio_src, gfp) < 0) {
		bio_put(bio);
		return NULL;
	}
	bio->bi_io_vec = bio_src->bi_io_vec;

	return bio;
}
EXPORT_SYMBOL(bio_alloc_clone);
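/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * a stacking driver redirecting I/O to a lower device typically clones the
 * incoming bio, points the clone at the lower bdev, and submits it while
 * keeping the original alive until the clone completes.  "lower_bdev",
 * "clone_end_io" and "my_bio_set" are hypothetical driver-side names:
 *
 *	struct bio *clone;
 *
 *	clone = bio_alloc_clone(lower_bdev, bio, GFP_NOIO, &my_bio_set);
 *	if (!clone)
 *		return -ENOMEM;
 *	clone->bi_private = bio;
 *	clone->bi_end_io = clone_end_io;	// completes the original bio
 *	submit_bio_noacct(clone);
 */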
/**
 * bio_init_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio: bio to clone into
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 *
 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
 * The caller owns the returned bio, but not the actual data it points to.
 *
 * The caller must ensure that @bio_src is not freed before @bio.
 */
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp)
{
	int ret;

	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
	ret = __bio_clone(bio, bio_src, gfp);
	if (ret)
		bio_uninit(bio);
	return ret;
}
EXPORT_SYMBOL(bio_init_clone);

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false.
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
		unsigned int len, unsigned int off)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	if ((vec_end_addr & PAGE_MASK) != ((page_addr + off) & PAGE_MASK)) {
		if (IS_ENABLED(CONFIG_KMSAN))
			return false;
		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
			return false;
	}

	bv->bv_len += len;
	return true;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit.
 *
 * This is kept around for the integrity metadata, which still tries to build
 * the initial bio to the hardware limit and doesn't have proper helpers to
 * split.  Hopefully this will go away soon.
 */
bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(bv);
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (len > queue_max_segment_size(q) - bv->bv_len)
		return false;
	return bvec_try_merge_page(bv, page, len, offset);
}

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	if (is_pci_p2pdma_page(page))
		bio->bi_opf |= REQ_NOMERGE;

	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;
}
EXPORT_SYMBOL_GPL(__bio_add_page);
/**
 * bio_add_virt_nofail - add data in the direct kernel mapping to a bio
 * @bio: destination bio
 * @vaddr: data to add
 * @len: length of the data to add, may cross pages
 *
 * Add the data at @vaddr to @bio.  The caller must have ensured that a
 * segment is available for the added data.  No merging into an existing
 * segment will be performed.
 */
void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len)
{
	__bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
}
EXPORT_SYMBOL_GPL(bio_add_virt_nofail);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return 0;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
			return 0;

		if (bvec_try_merge_page(bv, page, len, offset)) {
			bio->bi_iter.bi_size += len;
			return len;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;
	__bio_add_page(bio, page, len, offset);
	return len;
}
EXPORT_SYMBOL(bio_add_page);
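/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * building a bio from an array of pages, stopping when the bio runs out of
 * bvec slots.  "pages" and "nr_pages" are hypothetical caller-side values:
 *
 *	struct bio *bio = bio_alloc(bdev, nr_pages, REQ_OP_READ, GFP_KERNEL);
 *	unsigned int i;
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;	// bio is full, submit and start a new one
 *	}
 *	submit_bio(bio);
 *
 * Because physically contiguous pages are merged into the previous bvec, the
 * number of segments consumed can be smaller than the number of pages added.
 */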
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
			  size_t off)
{
	unsigned long nr = off / PAGE_SIZE;

	WARN_ON_ONCE(len > UINT_MAX);
	__bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(bio_add_folio_nofail);

/**
 * bio_add_folio - Attempt to add part of a folio to a bio.
 * @bio: BIO to add to.
 * @folio: Folio to add.
 * @len: How many bytes from the folio to add.
 * @off: First byte in this folio to add.
 *
 * Filesystems that use folios can call this function instead of calling
 * bio_add_page() for each page in the folio.  If @off is bigger than
 * PAGE_SIZE, this function can create a bio_vec that starts in a page
 * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
 *
 * Return: Whether the addition was successful.
 */
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
		   size_t off)
{
	unsigned long nr = off / PAGE_SIZE;

	if (len > UINT_MAX)
		return false;
	return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
}
EXPORT_SYMBOL(bio_add_folio);

/**
 * bio_add_vmalloc_chunk - add a vmalloc chunk to a bio
 * @bio: destination bio
 * @vaddr: vmalloc address to add
 * @len: total length in bytes of the data to add
 *
 * Add data starting at @vaddr to @bio and return how many bytes were added.
 * This may be less than the amount originally asked.  Returns 0 if no data
 * could be added to @bio.
 *
 * This helper calls flush_kernel_vmap_range() for the range added.  For reads
 * the caller still needs to manually call invalidate_kernel_vmap_range() in
 * the completion handler.
 */
unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len)
{
	unsigned int offset = offset_in_page(vaddr);

	len = min(len, PAGE_SIZE - offset);
	if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len)
		return 0;
	if (op_is_write(bio_op(bio)))
		flush_kernel_vmap_range(vaddr, len);
	return len;
}
EXPORT_SYMBOL_GPL(bio_add_vmalloc_chunk);

/**
 * bio_add_vmalloc - add a vmalloc region to a bio
 * @bio: destination bio
 * @vaddr: vmalloc address to add
 * @len: total length in bytes of the data to add
 *
 * Add data starting at @vaddr to @bio.  Return %true on success or %false if
 * @bio does not have enough space for the payload.
 *
 * This helper calls flush_kernel_vmap_range() for the range added.  For reads
 * the caller still needs to manually call invalidate_kernel_vmap_range() in
 * the completion handler.
 */
bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len)
{
	do {
		unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len);

		if (!added)
			return false;
		vaddr += added;
		len -= added;
	} while (len);

	return true;
}
EXPORT_SYMBOL_GPL(bio_add_vmalloc);
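/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * reading into a vmalloc'ed buffer, including the cache maintenance noted
 * above.  "buf", "size" and "sector" are hypothetical caller-side values and
 * the bio is assumed to have enough bvec slots for the whole range:
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	bio = bio_alloc(bdev, DIV_ROUND_UP(size, PAGE_SIZE), REQ_OP_READ,
 *			GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	if (!bio_add_vmalloc(bio, buf, size)) {
 *		bio_put(bio);
 *		return -EIO;
 *	}
 *	ret = submit_bio_wait(bio);
 *	invalidate_kernel_vmap_range(buf, size);	// required for reads
 *	bio_put(bio);
 */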
void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		size_t nr_pages;

		if (mark_dirty) {
			folio_lock(fi.folio);
			folio_mark_dirty(fi.folio);
			folio_unlock(fi.folio);
		}
		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
			   fi.offset / PAGE_SIZE + 1;
		unpin_user_folio(fi.folio, nr_pages);
	}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);

void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
{
	WARN_ON_ONCE(bio->bi_max_vecs);

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = iov_iter_count(iter);
	bio_set_flag(bio, BIO_CLONED);
}

static unsigned int get_contig_folio_len(unsigned int *num_pages,
					 struct page **pages, unsigned int i,
					 struct folio *folio, size_t left,
					 size_t offset)
{
	size_t bytes = left;
	size_t contig_sz = min_t(size_t, PAGE_SIZE - offset, bytes);
	unsigned int j;

	/*
	 * We might COW a single page in the middle of
	 * a large folio, so we have to check that all
	 * pages belong to the same folio.
	 */
	bytes -= contig_sz;
	for (j = i + 1; j < i + *num_pages; j++) {
		size_t next = min_t(size_t, PAGE_SIZE, bytes);

		if (page_folio(pages[j]) != folio ||
		    pages[j] != pages[j - 1] + 1) {
			break;
		}
		contig_sz += next;
		bytes -= next;
	}
	*num_pages = j - i;

	return contig_sz;
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Extracts pages from *iter and appends them to @bio's bvec array.  The pages
 * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag.
 * For a multi-segment *iter, this function only adds pages from the next
 * non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	iov_iter_extraction_t extraction_flags = 0;
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size;
	unsigned int num_pages, i = 0;
	size_t offset, folio_offset, left, len;
	int ret = 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
		extraction_flags |= ITER_ALLOW_P2PDMA;

	size = iov_iter_extract_pages(iter, &pages,
				      UINT_MAX - bio->bi_iter.bi_size,
				      nr_pages, extraction_flags, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
	for (left = size, i = 0; left > 0; left -= len, i += num_pages) {
		struct page *page = pages[i];
		struct folio *folio = page_folio(page);
		unsigned int old_vcnt = bio->bi_vcnt;

		folio_offset = ((size_t)folio_page_idx(folio, page) <<
			       PAGE_SHIFT) + offset;

		len = min(folio_size(folio) - folio_offset, left);

		num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);

		if (num_pages > 1)
			len = get_contig_folio_len(&num_pages, pages, i,
						   folio, left, offset);

		if (!bio_add_folio(bio, folio, len, folio_offset)) {
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		if (bio_flagged(bio, BIO_PAGE_PINNED)) {
			/*
			 * We're adding another fragment of a page that already
			 * was part of the last segment.  Undo our pin as the
			 * page was pinned when an earlier fragment of it was
			 * added to the bio and __bio_release_pages expects a
			 * single pin per page.
			 */
			if (offset && bio->bi_vcnt == old_vcnt)
				unpin_user_folio(folio, 1);
		}
		offset = 0;
	}

	iov_iter_revert(iter, left);
out:
	while (i < nr_pages)
		bio_release_page(bio, pages[i++]);

	return ret;
}
/*
 * Aligns the bio size to the len_align_mask, releasing excessive bio vecs that
 * __bio_iov_iter_get_pages may have inserted, and reverts the trimmed length
 * for the next iteration.
 */
static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
				   unsigned len_align_mask)
{
	size_t nbytes = bio->bi_iter.bi_size & len_align_mask;

	if (!nbytes)
		return 0;

	iov_iter_revert(iter, nbytes);
	bio->bi_iter.bi_size -= nbytes;
	do {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (nbytes < bv->bv_len) {
			bv->bv_len -= nbytes;
			break;
		}

		bio_release_page(bio, bv->bv_page);
		bio->bi_vcnt--;
		nbytes -= bv->bv_len;
	} while (nbytes);

	if (!bio->bi_vcnt)
		return -EFAULT;
	return 0;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 * @len_align_mask: the mask to align the total size to, 0 for any length
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
 * to ensure the bvecs and pages stay referenced until the submitted I/O is
 * completed by a call to ->ki_complete() or returns with an error other than
 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
 * on IO completion. If it isn't, then pages should be released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
		unsigned len_align_mask)
{
	int ret = 0;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return -EIO;

	if (iov_iter_is_bvec(iter)) {
		bio_iov_bvec_set(bio, iter);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
		return 0;
	}

	if (iov_iter_extract_will_pin(iter))
		bio_set_flag(bio, BIO_PAGE_PINNED);
	do {
		ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	if (bio->bi_vcnt)
		return bio_iov_iter_align_down(bio, iter, len_align_mask);
	return ret;
}
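/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * a direct I/O path mapping a user iov_iter into a bio and releasing the
 * pages on completion.  "dio_end_io" is a hypothetical completion handler:
 *
 *	bio = bio_alloc(bdev, bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
 *			REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
 *	ret = bio_iov_iter_get_pages(bio, iter, 0);
 *	if (ret) {
 *		bio_put(bio);
 *		return ret;
 *	}
 *	bio->bi_end_io = dio_end_io;	// calls bio_release_pages() + bio_put()
 *	submit_bio(bio);
 */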
static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume a reference to the bio. The caller must drop the reference on their
 * own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	blk_wait_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
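/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * because submit_bio_wait() does not consume the bio reference, the caller
 * still owns the bio afterwards and must drop it explicitly:
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	int ret;
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);		// still required after submit_bio_wait()
 */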
/**
 * bdev_rw_virt - synchronously read into / write from kernel mapping
 * @bdev:	block device to access
 * @sector:	sector to access
 * @data:	data to read/write
 * @len:	length in bytes to read/write
 * @op:		operation (e.g. REQ_OP_READ/REQ_OP_WRITE)
 *
 * Performs synchronous I/O to @bdev for @data/@len.  @data must be in
 * the kernel direct mapping and not a vmalloc address.
 */
int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
		size_t len, enum req_op op)
{
	struct bio_vec bv;
	struct bio bio;
	int error;

	if (WARN_ON_ONCE(is_vmalloc_addr(data)))
		return -EIO;

	bio_init(&bio, bdev, &bv, 1, op);
	bio.bi_iter.bi_sector = sector;
	bio_add_virt_nofail(&bio, data, len);
	error = submit_bio_wait(&bio);
	bio_uninit(&bio);
	return error;
}
EXPORT_SYMBOL_GPL(bdev_rw_virt);

static void bio_wait_end_io(struct bio *bio)
{
	complete(bio->bi_private);
	bio_put(bio);
}

/*
 * bio_await_chain - ends @bio and waits for every chained bio to complete
 */
void bio_await_chain(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = bio_wait_end_io;
	bio_endio(bio);
	blk_wait_io(&done);
}

void __bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(__bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	while (src_iter->bi_size && dst_iter->bi_size) {
		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
		void *src_buf = bvec_kmap_local(&src_bv);
		void *dst_buf = bvec_kmap_local(&dst_bv);

		memcpy(dst_buf, src_buf, bytes);

		kunmap_local(dst_buf);
		kunmap_local(src_buf);

		bio_advance_iter_single(src, src_iter, bytes);
		bio_advance_iter_single(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run folio_mark_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are
 * mapped pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		folio_lock(fi.folio);
		folio_mark_dirty(fi.folio);
		folio_unlock(fi.folio);
	}
}
EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
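/*
 * Illustrative usage sketch (editorial addition, not kernel documentation) of
 * the deferred dirtying scheme described above, as used by a direct-IO read
 * path.  "dio_bio_end_io" is a hypothetical completion handler:
 *
 *	// submission path, process context
 *	bio_set_pages_dirty(bio);
 *	submit_bio(bio);
 *
 *	// completion path, possibly interrupt context
 *	static void dio_bio_end_io(struct bio *bio)
 *	{
 *		bio_check_pages_dirty(bio);	// unpins pages, puts the bio
 *	}
 *
 * Pages found clean at completion time are redirtied later from a workqueue,
 * since folio_mark_dirty() cannot be called from interrupt context.
 */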
/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will unpin each page and will run one bio_put() against the
 * BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct folio_iter fi;
	unsigned long flags;

	bio_for_each_folio_all(fi, bio) {
		if (!folio_test_dirty(fi.folio))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}
EXPORT_SYMBOL_GPL(bio_check_pages_dirty);

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain().  The ->bi_end_io() function will only be called the
 *   last time.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	blk_zone_bio_endio(bio);

	rq_qos_done_bio(bio);

	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack.  Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

#ifdef CONFIG_BLK_CGROUP
	/*
	 * Release cgroup info.  We shouldn't have to do this here, but quite
	 * a few callers of bio_init fail to call bio_uninit, so we cover up
	 * for that here at least for now.
	 */
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
 * neither @bio nor @bs are freed before the split bio.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	if (WARN_ON_ONCE(sectors <= 0))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))
		return ERR_PTR(-EINVAL);

	/* Zone append commands cannot be split */
	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
		return ERR_PTR(-EINVAL);

	/* atomic writes cannot be split */
	if (bio->bi_opf & REQ_ATOMIC)
		return ERR_PTR(-EINVAL);

	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
	if (!split)
		return ERR_PTR(-ENOMEM);

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);
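/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * a driver that can only handle "max_sectors" per request splits the front
 * off an incoming bio, chains the remainder, and resubmits it.  "max_sectors"
 * is a hypothetical per-device limit and "my_bio_set" a hypothetical
 * driver-owned bio_set:
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split;
 *
 *		split = bio_split(bio, max_sectors, GFP_NOIO, &my_bio_set);
 *		if (IS_ERR(split))
 *			return PTR_ERR(split);
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);	// resubmit the remainder
 *		bio = split;		// process the front part now
 *	}
 */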
/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 *
 * This function is typically used for bios that are cloned and submitted
 * to the underlying device in parts.
 */
void bio_trim(struct bio *bio, sector_t offset, sector_t size)
{
	/* We should never trim an atomic write */
	if (WARN_ON_ONCE(bio->bi_opf & REQ_ATOMIC && size))
		return;

	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
			 offset + size > bio_sectors(bio)))
		return;

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	bio_alloc_cache_destroy(bs);
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *		and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
 *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
 *    to dispatch queued requests when the mempool runs out of space.
 *
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	bs->front_pad = front_pad;
	if (flags & BIOSET_NEED_BVECS)
		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	else
		bs->back_pad = 0;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(bs);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (flags & BIOSET_NEED_RESCUER) {
		bs->rescue_workqueue = alloc_workqueue("bioset",
							WQ_MEM_RECLAIM, 0);
		if (!bs->rescue_workqueue)
			goto bad;
	}
	if (flags & BIOSET_PERCPU_CACHE) {
		bs->cache = alloc_percpu(struct bio_alloc_cache);
		if (!bs->cache)
			goto bad;
		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	}

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);
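/*
 * Illustrative usage sketch (editorial addition, not kernel documentation):
 * using @front_pad to embed a bio at the end of a driver-private structure so
 * that no separate per-I/O allocation is needed.  "struct my_io" and
 * "my_bio_set" are hypothetical driver-side names:
 *
 *	struct my_io {
 *		void		*payload;
 *		struct bio	bio;	// must be the last member
 *	};
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 *	bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO, &my_bio_set);
 *	io = container_of(bio, struct my_io, bio);
 */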
static int __init init_bio(void)
{
	int i;

	BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));

	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
		struct biovec_slab *bvs = bvec_slabs + i;

		bvs->slab = kmem_cache_create(bvs->name,
				bvs->nr_vecs * sizeof(struct bio_vec), 0,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	}

	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
					bio_cpu_dead);

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
		panic("bio: can't allocate bios\n");

	return 0;
}
subsys_initcall(init_bio);