// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-cgroup.h"

struct bio_alloc_cache {
	struct bio		*free_list;
	unsigned int		nr;
};

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN,
			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations.  If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
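 *
 * As an illustrative sketch only (the structure and field names below are
 * hypothetical, not taken from an in-tree user), a driver embedding a bio in
 * its own request structure might do:
 *
 *	struct foo_request {
 *		struct bio bio;
 *		struct bio_vec inline_vecs[4];
 *	};
 *
 *	bio_init(&req->bio, bdev, req->inline_vecs, 4, REQ_OP_READ);
 *	...
 *	bio_uninit(&req->bio);	(before freeing the containing structure)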
 */
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, unsigned int opf)
{
	bio->bi_next = NULL;
	bio->bi_bdev = bdev;
	bio->bi_opf = opf;
	bio->bi_flags = 0;
	bio->bi_ioprio = 0;
	bio->bi_write_hint = 0;
	bio->bi_status = 0;
	bio->bi_iter.bi_sector = 0;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_idx = 0;
	bio->bi_iter.bi_bvec_done = 0;
	bio->bi_end_io = NULL;
	bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
	bio->bi_blkg = NULL;
	bio->bi_issue.value = 0;
	if (bdev)
		bio_associate_blkg(bio);
#ifdef CONFIG_BLK_CGROUP_IOCOST
	bio->bi_iocost_cost = 0;
#endif
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	bio->bi_crypt_context = NULL;
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	bio->bi_integrity = NULL;
#endif
	bio->bi_vcnt = 0;

	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
	bio->bi_cookie = BLK_QC_T_NONE;

	bio->bi_max_vecs = max_vecs;
	bio->bi_io_vec = table;
	bio->bi_pool = NULL;
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 * @bdev:	block device to use the bio for
 * @opf:	operation and flags for bio
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
	bio->bi_bdev = bdev;
	if (bio->bi_bdev)
		bio_associate_blkg(bio);
	bio->bi_opf = opf;
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
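 *
 * For illustration only (not part of the interface documentation proper), a
 * common pattern is to carve a piece off a bio and chain it back to the
 * remainder before submitting it:
 *
 *	split = bio_split(bio, sectors, GFP_NOIO, &fs_bio_set);
 *	bio_chain(split, bio);
 *	submit_bio_noacct(split);
 *
 * so that @bio's completion also waits for @split.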
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, unsigned int opf, gfp_t gfp)
{
	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @bdev:	block device to allocate the bio for (can be %NULL)
 * @nr_vecs:	number of bvecs to pre-allocate
 * @opf:	operation and flags for bio
 * @gfp_mask:	the GFP_* mask given to the slab allocator
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio.  This is due to the mempool guarantees.  To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have
 * deadlock avoidance code that resubmits any blocked bios from a rescuer
 * thread.
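 *
 * For example (illustrative sketch only), a caller that builds bios in a loop
 * from this bio_set must submit each one before allocating the next:
 *
 *	bio = bio_alloc_bioset(bdev, nr_vecs, REQ_OP_WRITE, GFP_NOIO, bs);
 *	(fill the bio with pages)
 *	submit_bio(bio);
 *	(only now allocate the next bio from @bs)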
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     unsigned int opf, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_vecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
		return NULL;

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;

	bio = p + bs->front_pad;
	if (nr_vecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bdev, bvl, nr_vecs, opf);
	} else if (nr_vecs) {
		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
	} else {
		bio_init(bio, bdev, NULL, 0, opf);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

/**
 * bio_kmalloc - kmalloc a bio for I/O
 * @gfp_mask:	the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 *
 * Use kmalloc to allocate and initialize a bio.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
	struct bio *bio;

	if (nr_iovecs > UIO_MAXIOV)
		return NULL;

	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
	if (unlikely(!bio))
		return NULL;
	bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs,
		 0);
	bio->bi_pool = NULL;
	return bio;
}
EXPORT_SYMBOL(bio_kmalloc);

void zero_fill_bio(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_truncate - truncate the bio to @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size of @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
static void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, bv.bv_offset + offset,
				  bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch bvec table here and make it really immutable, since
	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .end_bio() callback.
	 *
	 * It is enough to truncate bio by updating .bi_size since we can make
	 * correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

#define ALLOC_CACHE_MAX		512
#define ALLOC_CACHE_SLACK	 64

static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				  unsigned int nr)
{
	unsigned int i = 0;
	struct bio *bio;

	while ((bio = cache->free_list) != NULL) {
		cache->free_list = bio->bi_next;
		cache->nr--;
		bio_free(bio);
		if (++i == nr)
			break;
	}
}

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct bio_set *bs;

	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
	if (bs->cache) {
		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

		bio_alloc_cache_prune(cache, -1U);
	}
	return 0;
}

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
	int cpu;

	if (!bs->cache)
		return;

	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	for_each_possible_cpu(cpu) {
		struct bio_alloc_cache *cache;

		cache = per_cpu_ptr(bs->cache, cpu);
		bio_alloc_cache_prune(cache, -1U);
	}
	free_percpu(bs->cache);
}

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
		BUG_ON(!atomic_read(&bio->__bi_cnt));
		if (!atomic_dec_and_test(&bio->__bi_cnt))
			return;
	}

	if (bio_flagged(bio, BIO_PERCPU_CACHE)) {
		struct bio_alloc_cache *cache;

		bio_uninit(bio);
		cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
		bio->bi_next = cache->free_list;
		cache->free_list = bio;
		if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
		put_cpu();
	} else {
		bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	if (bio->bi_bdev == bio_src->bi_bdev &&
	    bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	return 0;
}

/**
 * bio_alloc_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 * @bs: bio_set to allocate from
 *
 * Allocate a new bio that is a clone of @bio_src. The caller owns the
 * returned bio, but not the actual data it points to.
 *
 * The caller must ensure that the returned bio is not freed before @bio_src.
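 *
 * A minimal illustrative use, e.g. in a stacking driver that redirects I/O to
 * a lower device (the names lower_bdev and foo_clone_endio are hypothetical):
 *
 *	clone = bio_alloc_clone(lower_bdev, bio, GFP_NOIO, &fs_bio_set);
 *	if (!clone)
 *		return -ENOMEM;
 *	clone->bi_end_io = foo_clone_endio;
 *	clone->bi_private = bio;
 *	submit_bio(clone);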
 */
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs)
{
	struct bio *bio;

	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
	if (!bio)
		return NULL;

	if (__bio_clone(bio, bio_src, gfp) < 0) {
		bio_put(bio);
		return NULL;
	}
	bio->bi_io_vec = bio_src->bi_io_vec;

	return bio;
}
EXPORT_SYMBOL(bio_alloc_clone);

/**
 * bio_init_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio: bio to clone into
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 *
 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
 * The caller owns the returned bio, but not the actual data it points to.
 *
 * The caller must ensure that @bio_src is not freed before @bio.
 */
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp)
{
	int ret;

	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
	ret = __bio_clone(bio, bio_src, gfp);
	if (ret)
		bio_uninit(bio);
	return ret;
}
EXPORT_SYMBOL(bio_init_clone);

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
static bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit.  This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned len,
		unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_zone_append_page - attempt to add page to zone-append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
 * for a zone-append request. This can fail for a number of reasons, such as the
 * bio being full, the target block device not being a zoned block device, or
 * other limitations of the target block device. The target block device must
 * allow bio's up to PAGE_SIZE, so it is always possible to add a single page
 * to an empty bio.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	bool same_page = false;

	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return 0;

	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);

/**
 * bio_add_folio - Attempt to add part of a folio to a bio.
 * @bio: BIO to add to.
 * @folio: Folio to add.
 * @len: How many bytes from the folio to add.
 * @off: First byte in this folio to add.
 *
 * Filesystems that use folios can call this function instead of calling
 * bio_add_page() for each page in the folio.  If @off is bigger than
 * PAGE_SIZE, this function can create a bio_vec that starts in a page
 * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
 *
 * Return: Whether the addition was successful.
 */
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
		   size_t off)
{
	if (len > UINT_MAX || off > UINT_MAX)
		return false;
	return bio_add_page(bio, &folio->page, len, off) > 0;
}

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);

void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	size_t size = iov_iter_count(iter);

	WARN_ON_ONCE(bio->bi_max_vecs);

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		size_t max_sectors = queue_max_zone_append_sectors(q);

		size = min(size, max_sectors << SECTOR_SHIFT);
	}

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = size;
	bio_set_flag(bio, BIO_NO_PAGE_REF);
	bio_set_flag(bio, BIO_CLONED);
}

static void bio_put_pages(struct page **pages, size_t size, size_t off)
{
	size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);

	for (i = 0; i < nr; i++)
		put_page(pages[i]);
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	bool same_page = false;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);

		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
			if (same_page)
				put_page(page);
		} else {
			if (WARN_ON_ONCE(bio_full(bio, len))) {
				bio_put_pages(pages + i, left, offset);
				return -EINVAL;
			}
			__bio_add_page(bio, page, len, offset);
		}
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;
	int ret = 0;

	if (WARN_ON_ONCE(!max_append_sectors))
		return 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];
		bool same_page = false;

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_add_hw_page(q, bio, page, len, offset,
				max_append_sectors, &same_page) != len) {
			bio_put_pages(pages + i, left, offset);
			ret = -EINVAL;
			break;
		}
		if (same_page)
			put_page(page);
		offset = 0;
	}

	iov_iter_advance(iter, size - left);
	return ret;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
 * to ensure the bvecs and pages stay referenced until the submitted I/O is
 * completed by a call to ->ki_complete() or returns with an error other than
 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
 * on IO completion. If it isn't, then pages should be released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 *
 * It's intended for direct IO, so doesn't do PSI tracking, the caller is
 * responsible for setting BIO_WORKINGSET if necessary.
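 *
 * A rough sketch of the usual direct I/O pattern (simplified; error handling
 * and iteration over multiple bios omitted):
 *
 *	bio = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_KERNEL);
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (ret)
 *		bio_put(bio);
 *	else
 *		submit_bio(bio);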
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	int ret = 0;

	if (iov_iter_is_bvec(iter)) {
		bio_iov_bvec_set(bio, iter);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
		return 0;
	}

	do {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			ret = __bio_iov_append_get_pages(bio, iter);
		else
			ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	/* don't account direct I/O as memory stall */
	bio_clear_flag(bio, BIO_WORKINGSET);
	return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume the bio reference. The caller must drop the reference on its own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);
	unsigned long hang_check;

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&done,
					hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);

void __bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(__bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	while (src_iter->bi_size && dst_iter->bi_size) {
		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
		void *src_buf;

		src_buf = bvec_kmap_local(&src_bv);
		memcpy_to_bvec(&dst_bv, src_buf);
		kunmap_local(src_buf);

		bio_advance_iter_single(src, src_iter, bytes);
		bio_advance_iter_single(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
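 *
 * For instance (illustrative only), a bounce-buffer style completion handler
 * could copy a read payload back into the original bio with:
 *
 *	bio_copy_data(orig_bio, bounce_bio);
 *
 * where orig_bio and bounce_bio are hypothetical names for the original and
 * the bounce bio.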
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain().  The ->bi_end_io() function will only be called the
 *   last time.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	rq_qos_done_bio(bio);

	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack.  Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
 * neither @bio nor @bs are freed before the split bio.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/* Zone append commands cannot be split */
	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
		return NULL;

	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);

/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 *
 * This function is typically used for bios that are cloned and submitted
 * to the underlying device in parts.
 */
void bio_trim(struct bio *bio, sector_t offset, sector_t size)
{
	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
			 offset + size > bio->bi_iter.bi_size))
		return;

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
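 *
 * Typical pairing with bioset_init(), sketched for illustration (the
 * surrounding foo structure is hypothetical):
 *
 *	if (bioset_init(&foo->bs, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
 *		return -ENOMEM;
 *	...
 *	bioset_exit(&foo->bs);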
 */
void bioset_exit(struct bio_set *bs)
{
	bio_alloc_cache_destroy(bs);
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	bioset_integrity_free(bs);
	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *		and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
 *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
 *    to dispatch queued requests when the mempool runs out of space.
 *
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	bs->front_pad = front_pad;
	if (flags & BIOSET_NEED_BVECS)
		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	else
		bs->back_pad = 0;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(bs);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (flags & BIOSET_NEED_RESCUER) {
		bs->rescue_workqueue = alloc_workqueue("bioset",
							WQ_MEM_RECLAIM, 0);
		if (!bs->rescue_workqueue)
			goto bad;
	}
	if (flags & BIOSET_PERCPU_CACHE) {
		bs->cache = alloc_percpu(struct bio_alloc_cache);
		if (!bs->cache)
			goto bad;
		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	}

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);

/*
 * Initialize and setup a new bio_set, based on the settings from
 * another bio_set.
 */
int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
{
	int flags;

	flags = 0;
	if (src->bvec_pool.min_nr)
		flags |= BIOSET_NEED_BVECS;
	if (src->rescue_workqueue)
		flags |= BIOSET_NEED_RESCUER;

	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
}
EXPORT_SYMBOL(bioset_init_from_src);

/**
 * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
 * @kiocb:	kiocb describing the IO
 * @bdev:	block device to allocate the bio for (can be %NULL)
 * @nr_vecs:	number of iovecs to pre-allocate
 * @opf:	operation and flags for bio
 * @bs:		bio_set to allocate from
 *
 * Description:
 *    Like @bio_alloc_bioset, but pass in the kiocb. The kiocb is only
 *    used to check if we should dip into the per-cpu bio_set allocation
 *    cache. The allocation uses GFP_KERNEL internally. On return, the
 *    bio is marked BIO_PERCPU_CACHE, and the final put of the bio MUST
 *    be done from process context, not hard/soft IRQ.
 *
 */
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
		unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
{
	struct bio_alloc_cache *cache;
	struct bio *bio;

	if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
		return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);

	cache = per_cpu_ptr(bs->cache, get_cpu());
	if (cache->free_list) {
		bio = cache->free_list;
		cache->free_list = bio->bi_next;
		cache->nr--;
		put_cpu();
		bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
			 nr_vecs, opf);
		bio->bi_pool = bs;
		bio_set_flag(bio, BIO_PERCPU_CACHE);
		return bio;
	}
	put_cpu();
	bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
	bio_set_flag(bio, BIO_PERCPU_CACHE);
	return bio;
}
EXPORT_SYMBOL_GPL(bio_alloc_kiocb);

static int __init init_bio(void)
{
	int i;

	bio_integrity_init();

	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
		struct biovec_slab *bvs = bvec_slabs + i;

		bvs->slab = kmem_cache_create(bvs->name,
				bvs->nr_vecs * sizeof(struct bio_vec), 0,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	}

	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
					bio_cpu_dead);

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);