// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[--idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

void bio_uninit(struct bio *bio)
{
	bio_disassociate_blkg(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
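
/*
 * A minimal usage sketch of bio_init() for a caller-owned, on-stack bio with
 * an inline bvec table, e.g. a small synchronous read. This is illustrative
 * only; "bdev", "page" and "sector" are assumed to be provided by the caller
 * and are not part of this file:
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *
 *	bio_init(&bio, &bvec, 1);
 *	bio_set_dev(&bio, bdev);
 *	bio.bi_iter.bi_sector = sector;
 *	bio.bi_opf = REQ_OP_READ;
 *	bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */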

/**
 * bio_reset - reinitialize a bio
 * @bio: bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
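
/*
 * A minimal sketch of the usual bio_chain() pattern, as used by stacking
 * drivers that split a bio. This is an illustrative example, not code from
 * this file; "bio", "sectors" and "bs" are assumed to exist in the caller:
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *	bio_chain(split, bio);
 *	generic_make_request(bio);
 *	bio = split;
 *
 * generic_make_request() re-queues the remainder; the caller keeps working on
 * @split, and the parent's ->bi_end_io() only runs once both have completed.
 */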

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask: the GFP_* mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 * @bs: the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 * RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
				 nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
		    bs->rescue_workqueue)
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(&bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(&bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_truncate - truncate the bio to @new_size
 * @bio: the bio to be truncated
 * @new_size: new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, offset, bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch the bvec table here and make it really immutable, since
	 * fs bio users have to retrieve all pages via bio_for_each_segment_all
	 * in their .bi_end_io() callback.
	 *
	 * It is enough to truncate the bio by updating .bi_size, since drivers
	 * can build correct bvecs from the updated .bi_size.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio: bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually. Truly out-of-range accesses will turn into actual
 * I/O errors; this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector;
	struct hd_struct *part;

	rcu_read_lock();
	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (part)
		maxsector = part_nr_sects_read(part);
	else
		maxsector = get_capacity(bio->bi_disk);
	rcu_read_unlock();

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

/**
 * bio_put - release a reference to a bio
 * @bio: bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_disk with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_disk = bio_src->bi_disk;
	bio->bi_partno = bio_src->bi_partno;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	bio_crypt_clone(b, bio, gfp_mask);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);

const char *bio_devname(struct bio *bio, char *buf)
{
	return disk_name(bio->bi_disk, bio->bi_partno, buf);
}
EXPORT_SYMBOL(bio_devname);
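
/*
 * A rough sketch of the clone-and-remap pattern built on bio_clone_fast(),
 * similar to what stacking drivers do before passing a bio down. All names
 * here ("bio", "bs", "lower_bdev", "offset", "my_clone_endio") are
 * hypothetical and only illustrate the calling convention:
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);
 *
 *	bio_set_dev(clone, lower_bdev);
 *	clone->bi_iter.bi_sector += offset;
 *	clone->bi_end_io = my_clone_endio;
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 */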

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
		bv->bv_offset + bv->bv_len - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
		return false;
	return true;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit. This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned len,
		unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio. This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len)
				return false;
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
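
/*
 * A minimal sketch of filling a bio one page at a time with bio_add_page()
 * (hypothetical caller; "pages" and "nr_pages" are assumptions):
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;
 *	}
 *
 * A short return value means the bio is full (or cloned); the caller submits
 * it and continues with a fresh bio.
 */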

void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	if (bio_flagged(bio, BIO_NO_PAGE_REF))
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(bio_release_pages);

static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
{
	const struct bio_vec *bv = iter->bvec;
	unsigned int len;
	size_t size;

	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
		return -EINVAL;

	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
	size = bio_add_page(bio, bv->bv_page, len,
				bv->bv_offset + iter->iov_offset);
	if (unlikely(size != len))
		return -EINVAL;
	iov_iter_advance(iter, size);
	return 0;
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	bool same_page = false;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);

		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
			if (same_page)
				put_page(page);
		} else {
			if (WARN_ON_ONCE(bio_full(bio, len)))
				return -EINVAL;
			__bio_add_page(bio, page, len, offset);
		}
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct request_queue *q = bio->bi_disk->queue;
	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	if (WARN_ON_ONCE(!max_append_sectors))
		return 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];
		bool same_page = false;

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_add_hw_page(q, bio, page, len, offset,
				max_append_sectors, &same_page) != len)
			return -EINVAL;
		if (same_page)
			put_page(page);
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. If we're adding kernel pages, and the caller told us it's safe to
 * do so, we just have to add the pages to the bio directly. We don't grab an
 * extra reference to those pages (the user should already have that), and we
 * don't put the page on IO completion. The caller needs to check if the bio is
 * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
 * released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in *iter, whichever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	const bool is_bvec = iov_iter_is_bvec(iter);
	int ret;

	if (WARN_ON_ONCE(bio->bi_vcnt))
		return -EINVAL;

	do {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			if (WARN_ON_ONCE(is_bvec))
				return -EINVAL;
			ret = __bio_iov_append_get_pages(bio, iter);
		} else {
			if (is_bvec)
				ret = __bio_iov_bvec_add_pages(bio, iter);
			else
				ret = __bio_iov_iter_get_pages(bio, iter);
		}
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	if (is_bvec)
		bio_set_flag(bio, BIO_NO_PAGE_REF);
	return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
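
/*
 * Sketch of a typical direct-IO style caller of bio_iov_iter_get_pages()
 * (illustrative only; "bdev", "sector" and "iter" are assumed, and error
 * handling is omitted):
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	if (!bio_iov_iter_get_pages(bio, iter))
 *		submit_bio(bio);
 *
 * On completion the pages are dropped with bio_release_pages() unless
 * BIO_NO_PAGE_REF is set (BVEC iterators), as described above.
 */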

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume a reference to the bio. The caller must drop the reference on their
 * own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
	unsigned long hang_check;

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&done,
					hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio: bio to advance
 * @bytes: number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	while (src_iter->bi_size && dst_iter->bi_size) {
		src_bv = bio_iter_iovec(src, *src_iter);
		dst_bv = bio_iter_iovec(dst, *dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		flush_dcache_page(dst_bv.bv_page);

		bio_advance_iter(src, src_iter, bytes);
		bio_advance_iter(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
EXPORT_SYMBOL(bio_list_copy_data);

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages. The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all. So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}
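
/*
 * Sketch of how a direct-IO read completion typically uses the two helpers
 * above (a hypothetical ->bi_end_io; "dio_should_dirty" stands in for the
 * caller's "pages came from get_user_pages() on a user buffer" condition):
 *
 *	static void my_dio_read_end_io(struct bio *bio)
 *	{
 *		if (dio_should_dirty)
 *			bio_check_pages_dirty(bio);
 *		else {
 *			bio_release_pages(bio, false);
 *			bio_put(bio);
 *		}
 *	}
 *
 * bio_check_pages_dirty() re-dirties any cleaned pages in process context and
 * then drops both the page references and the bio itself.
 */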

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio: bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain(). The ->bi_end_io() function will only be called the
 *   last time. At this point the BLK_TA_COMPLETE tracing event will be
 *   generated if BIO_TRACE_COMPLETION is set.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	if (bio->bi_disk)
		rq_qos_done_bio(bio->bi_disk->queue, bio);

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bio->bi_disk->queue, bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio: bio to split
 * @sectors: number of sectors to split from the front of @bio
 * @gfp: gfp mask
 * @bs: bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
 * neither @bio nor @bs are freed before the split bio.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/* Zone append commands cannot be split */
	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
		return NULL;

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);

/**
 * bio_trim - trim a bio
 * @bio: bio to trim
 * @offset: number of sectors to trim from the front of @bio
 * @size: size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);

}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	bioset_integrity_free(bs);
	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs: pool to initialize
 * @pool_size: Number of bio and bio_vecs to cache in the mempool
 * @front_pad: Number of bytes to allocate in front of the returned bio
 * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *         and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
 *    for allocating iovecs. This pool is not needed e.g. for bio_clone_fast().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
 *    dispatch queued requests when the mempool runs out of space.
 *
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (!(flags & BIOSET_NEED_RESCUER))
		return 0;

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);
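
/*
 * A minimal sketch of a driver-private bio_set that uses front_pad to embed
 * the bio in a larger per-I/O structure. "struct my_io" and "my_bio_set" are
 * hypothetical; the pattern mirrors what stacking drivers do:
 *
 *	struct my_io {
 *		void *private_data;
 *		struct bio bio;
 *	};
 *	static struct bio_set my_bio_set;
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
 *	io = container_of(bio, struct my_io, bio);
 *
 * As the description above notes, the bio must be the last member of the
 * containing structure for the front pad layout to work.
 */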

/*
 * Initialize and setup a new bio_set, based on the settings from
 * another bio_set.
 */
int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
{
	int flags;

	flags = 0;
	if (src->bvec_pool.min_nr)
		flags |= BIOSET_NEED_BVECS;
	if (src->rescue_workqueue)
		flags |= BIOSET_NEED_RESCUER;

	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
}
EXPORT_SYMBOL(bioset_init_from_src);

#ifdef CONFIG_BLK_CGROUP

/**
 * bio_disassociate_blkg - puts back the blkg reference if associated
 * @bio: target bio
 *
 * Helper to disassociate the blkg from @bio if a blkg is associated.
 */
void bio_disassociate_blkg(struct bio *bio)
{
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
}
EXPORT_SYMBOL_GPL(bio_disassociate_blkg);

/**
 * __bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 * @blkg: the blkg to associate
 *
 * This tries to associate @bio with the specified @blkg. Association failure
 * is handled by walking up the blkg tree. Therefore, the blkg associated can
 * be anything between @blkg and the root_blkg. This situation only happens
 * when a cgroup is dying and then the remaining bios will spill to the closest
 * alive blkg.
 *
 * A reference will be taken on the @blkg and will be released when @bio is
 * freed.
 */
static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
{
	bio_disassociate_blkg(bio);

	bio->bi_blkg = blkg_tryget_closest(blkg);
}

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio. This falls back to the queue's root_blkg if
 * the association fails with the css.
 */
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css)
{
	struct request_queue *q = bio->bi_disk->queue;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	if (!css || !css->parent)
		blkg = q->root_blkg;
	else
		blkg = blkg_lookup_create(css_to_blkcg(css), q);

	__bio_associate_blkg(bio, blkg);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);

#ifdef CONFIG_MEMCG
/**
 * bio_associate_blkg_from_page - associate a bio with the page's blkg
 * @bio: target bio
 * @page: the page to lookup the blkcg from
 *
 * Associate @bio with the blkg from @page's owning memcg and the respective
 * request_queue. If cgroup_e_css returns %NULL, fall back to the queue's
 * root_blkg.
 */
void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;

	if (!page->mem_cgroup)
		return;

	rcu_read_lock();

	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
#endif /* CONFIG_MEMCG */

/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is
 * already associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	if (bio->bi_blkg)
		css = &bio_blkcg(bio)->css;
	else
		css = blkcg_css();

	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
	rcu_read_lock();

	if (src->bi_blkg)
		__bio_associate_blkg(dst, src->bi_blkg);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
			    GFP_KERNEL);

	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);

	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);