Lines Matching refs:bio (references to the bio symbol in block/bio.c; each entry reads: <source line> <matching code> in <enclosing function>(), with "argument"/"local" marking whether bio is a parameter or a local variable of that function)

31 struct bio *free_list;
32 struct bio *free_list_irq;
114 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
213 void bio_uninit(struct bio *bio) in bio_uninit() argument
216 if (bio->bi_blkg) { in bio_uninit()
217 blkg_put(bio->bi_blkg); in bio_uninit()
218 bio->bi_blkg = NULL; in bio_uninit()
221 if (bio_integrity(bio)) in bio_uninit()
222 bio_integrity_free(bio); in bio_uninit()
224 bio_crypt_free_ctx(bio); in bio_uninit()
228 static void bio_free(struct bio *bio) in bio_free() argument
230 struct bio_set *bs = bio->bi_pool; in bio_free()
231 void *p = bio; in bio_free()
235 bio_uninit(bio); in bio_free()
236 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); in bio_free()
245 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, in bio_init() argument
248 bio->bi_next = NULL; in bio_init()
249 bio->bi_bdev = bdev; in bio_init()
250 bio->bi_opf = opf; in bio_init()
251 bio->bi_flags = 0; in bio_init()
252 bio->bi_ioprio = 0; in bio_init()
253 bio->bi_write_hint = 0; in bio_init()
254 bio->bi_write_stream = 0; in bio_init()
255 bio->bi_status = 0; in bio_init()
256 bio->bi_bvec_gap_bit = 0; in bio_init()
257 bio->bi_iter.bi_sector = 0; in bio_init()
258 bio->bi_iter.bi_size = 0; in bio_init()
259 bio->bi_iter.bi_idx = 0; in bio_init()
260 bio->bi_iter.bi_bvec_done = 0; in bio_init()
261 bio->bi_end_io = NULL; in bio_init()
262 bio->bi_private = NULL; in bio_init()
264 bio->bi_blkg = NULL; in bio_init()
265 bio->issue_time_ns = 0; in bio_init()
267 bio_associate_blkg(bio); in bio_init()
269 bio->bi_iocost_cost = 0; in bio_init()
273 bio->bi_crypt_context = NULL; in bio_init()
276 bio->bi_integrity = NULL; in bio_init()
278 bio->bi_vcnt = 0; in bio_init()
280 atomic_set(&bio->__bi_remaining, 1); in bio_init()
281 atomic_set(&bio->__bi_cnt, 1); in bio_init()
282 bio->bi_cookie = BLK_QC_T_NONE; in bio_init()
284 bio->bi_max_vecs = max_vecs; in bio_init()
285 bio->bi_io_vec = table; in bio_init()
286 bio->bi_pool = NULL; in bio_init()
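The bio_init() entries above cover the whole on-stack initialization path (bdev_rw_virt() further down this listing uses it the same way). As a hedged illustration only — the function and parameter names below are hypothetical — a caller doing a small synchronous read typically pairs bio_init() with bio_uninit():

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: read one page synchronously from a caller-supplied bdev. */
static int example_read_page(struct block_device *bdev, struct page *page,
			     sector_t sector)
{
	struct bio_vec bv;
	struct bio bio;
	int ret;

	/* One inline bio_vec, no bio_set: the bio lives on the stack. */
	bio_init(&bio, bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);		/* pairs with bio_init() */
	return ret;
}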
302 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf) in bio_reset() argument
304 bio_uninit(bio); in bio_reset()
305 memset(bio, 0, BIO_RESET_BYTES); in bio_reset()
306 atomic_set(&bio->__bi_remaining, 1); in bio_reset()
307 bio->bi_bdev = bdev; in bio_reset()
308 if (bio->bi_bdev) in bio_reset()
309 bio_associate_blkg(bio); in bio_reset()
310 bio->bi_opf = opf; in bio_reset()
314 static struct bio *__bio_chain_endio(struct bio *bio) in __bio_chain_endio() argument
316 struct bio *parent = bio->bi_private; in __bio_chain_endio()
318 if (bio->bi_status && !parent->bi_status) in __bio_chain_endio()
319 parent->bi_status = bio->bi_status; in __bio_chain_endio()
320 bio_put(bio); in __bio_chain_endio()
328 static void bio_chain_endio(struct bio *bio) in bio_chain_endio() argument
344 void bio_chain(struct bio *bio, struct bio *parent) in bio_chain() argument
346 BUG_ON(bio->bi_private || bio->bi_end_io); in bio_chain()
348 bio->bi_private = parent; in bio_chain()
349 bio->bi_end_io = bio_chain_endio; in bio_chain()
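bio_chain() above folds one bio's completion into another's, so a caller can submit several bios but wait only once. A minimal sketch, assuming both bios are already filled in; everything except the block-layer calls is hypothetical:

#include <linux/bio.h>

/*
 * Chain "child" to "parent": parent's completion (and therefore the
 * return of submit_bio_wait()) is held back until child finishes, and a
 * child error is propagated into parent->bi_status by __bio_chain_endio().
 */
static int example_two_part_io(struct bio *parent, struct bio *child)
{
	bio_chain(child, parent);
	submit_bio(child);
	return submit_bio_wait(parent);
}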
363 struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new) in bio_chain_and_submit()
372 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, in blk_next_bio() argument
375 return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp)); in blk_next_bio()
382 struct bio *bio; in bio_alloc_rescue() local
386 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
389 if (!bio) in bio_alloc_rescue()
392 submit_bio_noacct(bio); in bio_alloc_rescue()
399 struct bio *bio; in punt_bios_to_rescuer() local
417 while ((bio = bio_list_pop(&current->bio_list[0]))) in punt_bios_to_rescuer()
418 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
422 while ((bio = bio_list_pop(&current->bio_list[1]))) in punt_bios_to_rescuer()
423 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
449 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev, in bio_alloc_percpu_cache()
454 struct bio *bio; in bio_alloc_percpu_cache() local
465 bio = cache->free_list; in bio_alloc_percpu_cache()
466 cache->free_list = bio->bi_next; in bio_alloc_percpu_cache()
471 bio_init_inline(bio, bdev, nr_vecs, opf); in bio_alloc_percpu_cache()
473 bio_init(bio, bdev, NULL, nr_vecs, opf); in bio_alloc_percpu_cache()
474 bio->bi_pool = bs; in bio_alloc_percpu_cache()
475 return bio; in bio_alloc_percpu_cache()
512 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, in bio_alloc_bioset()
517 struct bio *bio; in bio_alloc_bioset() local
526 bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, in bio_alloc_bioset()
528 if (bio) in bio_alloc_bioset()
529 return bio; in bio_alloc_bioset()
572 bio = p + bs->front_pad; in bio_alloc_bioset()
585 bio_init(bio, bdev, bvl, nr_vecs, opf); in bio_alloc_bioset()
587 bio_init_inline(bio, bdev, BIO_INLINE_VECS, opf); in bio_alloc_bioset()
589 bio_init(bio, bdev, NULL, 0, opf); in bio_alloc_bioset()
592 bio->bi_pool = bs; in bio_alloc_bioset()
593 return bio; in bio_alloc_bioset()
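bio_alloc_bioset() above is the mempool-backed allocator: per its kerneldoc, a gfp mask that allows direct reclaim (e.g. GFP_NOIO) means the allocation may sleep but will not fail. A hedged sketch of setting up and using a private bio_set, including the per-cpu cache opt-in served by bio_alloc_percpu_cache() earlier in this listing; example_bs and the helper names are hypothetical:

#include <linux/bio.h>

static struct bio_set example_bs;	/* hypothetical driver-wide pool */

static int example_bioset_setup(void)
{
	/* BIOSET_PERCPU_CACHE enables the free_list/free_list_irq cache
	 * used by bio_alloc_percpu_cache()/bio_put_percpu_cache(). */
	return bioset_init(&example_bs, BIO_POOL_SIZE, 0,
			   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}

static struct bio *example_bioset_alloc(struct block_device *bdev)
{
	/* REQ_ALLOC_CACHE requests a bio from the per-cpu cache; with
	 * GFP_NOIO the mempool fallback guarantees success.  bio_put()
	 * later routes the bio back into the cache automatically. */
	return bio_alloc_bioset(bdev, 1, REQ_OP_READ | REQ_ALLOC_CACHE,
				GFP_NOIO, &example_bs);
}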
617 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) in bio_kmalloc()
619 struct bio *bio; in bio_kmalloc() local
623 return kmalloc(sizeof(*bio) + nr_vecs * sizeof(struct bio_vec), in bio_kmalloc()
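bio_kmalloc() above only allocates the memory; the caller is expected to initialize it with bio_init() (typically pointing the table at the trailing inline vectors) and later pair bio_uninit() with kfree(). A hedged sketch with hypothetical helper names:

#include <linux/bio.h>
#include <linux/slab.h>

static struct bio *example_kmalloc_bio(struct block_device *bdev,
				       unsigned short nr_vecs)
{
	struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);

	if (!bio)
		return NULL;
	/* The bvec table lives right after the bio in the kmalloc'ed blob. */
	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
	return bio;
}

static void example_kmalloc_bio_free(struct bio *bio)
{
	bio_uninit(bio);
	kfree(bio);		/* not bio_put(): there is no bio_set */
}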
628 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) in zero_fill_bio_iter() argument
633 __bio_for_each_segment(bv, bio, iter, start) in zero_fill_bio_iter()
648 static void bio_truncate(struct bio *bio, unsigned new_size) in bio_truncate() argument
655 if (new_size >= bio->bi_iter.bi_size) in bio_truncate()
658 if (bio_op(bio) != REQ_OP_READ) in bio_truncate()
661 bio_for_each_segment(bv, bio, iter) { in bio_truncate()
685 bio->bi_iter.bi_size = new_size; in bio_truncate()
700 void guard_bio_eod(struct bio *bio) in guard_bio_eod() argument
702 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); in guard_bio_eod()
712 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod()
715 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod()
716 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) in guard_bio_eod()
719 bio_truncate(bio, maxsector << 9); in guard_bio_eod()
726 struct bio *bio; in __bio_alloc_cache_prune() local
728 while ((bio = cache->free_list) != NULL) { in __bio_alloc_cache_prune()
729 cache->free_list = bio->bi_next; in __bio_alloc_cache_prune()
731 bio_free(bio); in __bio_alloc_cache_prune()
779 static inline void bio_put_percpu_cache(struct bio *bio) in bio_put_percpu_cache() argument
783 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); in bio_put_percpu_cache()
788 bio_uninit(bio); in bio_put_percpu_cache()
789 bio->bi_next = cache->free_list; in bio_put_percpu_cache()
791 bio->bi_bdev = NULL; in bio_put_percpu_cache()
792 cache->free_list = bio; in bio_put_percpu_cache()
797 bio_uninit(bio); in bio_put_percpu_cache()
798 bio->bi_next = cache->free_list_irq; in bio_put_percpu_cache()
799 cache->free_list_irq = bio; in bio_put_percpu_cache()
808 bio_free(bio); in bio_put_percpu_cache()
819 void bio_put(struct bio *bio) in bio_put() argument
821 if (unlikely(bio_flagged(bio, BIO_REFFED))) { in bio_put()
822 BUG_ON(!atomic_read(&bio->__bi_cnt)); in bio_put()
823 if (!atomic_dec_and_test(&bio->__bi_cnt)) in bio_put()
826 if (bio->bi_opf & REQ_ALLOC_CACHE) in bio_put()
827 bio_put_percpu_cache(bio); in bio_put()
829 bio_free(bio); in bio_put()
833 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) in __bio_clone() argument
835 bio_set_flag(bio, BIO_CLONED); in __bio_clone()
836 bio->bi_ioprio = bio_src->bi_ioprio; in __bio_clone()
837 bio->bi_write_hint = bio_src->bi_write_hint; in __bio_clone()
838 bio->bi_write_stream = bio_src->bi_write_stream; in __bio_clone()
839 bio->bi_iter = bio_src->bi_iter; in __bio_clone()
841 if (bio->bi_bdev) { in __bio_clone()
842 if (bio->bi_bdev == bio_src->bi_bdev && in __bio_clone()
844 bio_set_flag(bio, BIO_REMAPPED); in __bio_clone()
845 bio_clone_blkg_association(bio, bio_src); in __bio_clone()
848 if (bio_crypt_clone(bio, bio_src, gfp) < 0) in __bio_clone()
851 bio_integrity_clone(bio, bio_src, gfp) < 0) in __bio_clone()
868 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, in bio_alloc_clone()
871 struct bio *bio; in bio_alloc_clone() local
873 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); in bio_alloc_clone()
874 if (!bio) in bio_alloc_clone()
877 if (__bio_clone(bio, bio_src, gfp) < 0) { in bio_alloc_clone()
878 bio_put(bio); in bio_alloc_clone()
881 bio->bi_io_vec = bio_src->bi_io_vec; in bio_alloc_clone()
883 return bio; in bio_alloc_clone()
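bio_alloc_clone() above produces a clone that shares bio_src's bvec table (line 881), which is the usual stacking-driver pattern: clone, redirect to a lower device, and complete the original from the clone's end_io. A hedged sketch; example_clone_bs and the helper names are hypothetical, and the bio_set is assumed to have been initialized elsewhere:

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio_set example_clone_bs;	/* assume bioset_init() was done */

static void example_clone_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	if (clone->bi_status)
		orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);	/* complete the original I/O */
}

static void example_stack_submit(struct bio *orig,
				 struct block_device *lower_bdev)
{
	struct bio *clone;

	clone = bio_alloc_clone(lower_bdev, orig, GFP_NOIO,
				&example_clone_bs);
	if (!clone) {
		orig->bi_status = BLK_STS_RESOURCE;
		bio_endio(orig);
		return;
	}
	clone->bi_end_io = example_clone_endio;
	clone->bi_private = orig;
	submit_bio_noacct(clone);
}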
899 int bio_init_clone(struct block_device *bdev, struct bio *bio, in bio_init_clone() argument
900 struct bio *bio_src, gfp_t gfp) in bio_init_clone()
904 bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); in bio_init_clone()
905 ret = __bio_clone(bio, bio_src, gfp); in bio_init_clone()
907 bio_uninit(bio); in bio_init_clone()
920 static inline bool bio_full(struct bio *bio, unsigned len) in bio_full() argument
922 if (bio->bi_vcnt >= bio->bi_max_vecs) in bio_full()
924 if (bio->bi_iter.bi_size > UINT_MAX - len) in bio_full()
984 void __bio_add_page(struct bio *bio, struct page *page, in __bio_add_page() argument
987 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); in __bio_add_page()
988 WARN_ON_ONCE(bio_full(bio, len)); in __bio_add_page()
991 bio->bi_opf |= REQ_NOMERGE; in __bio_add_page()
993 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off); in __bio_add_page()
994 bio->bi_iter.bi_size += len; in __bio_add_page()
995 bio->bi_vcnt++; in __bio_add_page()
1009 void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len) in bio_add_virt_nofail() argument
1011 __bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr)); in bio_add_virt_nofail()
1025 int bio_add_page(struct bio *bio, struct page *page, in bio_add_page() argument
1028 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in bio_add_page()
1030 if (bio->bi_iter.bi_size > UINT_MAX - len) in bio_add_page()
1033 if (bio->bi_vcnt > 0) { in bio_add_page()
1034 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_add_page()
1040 bio->bi_iter.bi_size += len; in bio_add_page()
1045 if (bio->bi_vcnt >= bio->bi_max_vecs) in bio_add_page()
1047 __bio_add_page(bio, page, len, offset); in bio_add_page()
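bio_add_page() above returns the number of bytes actually added (merging into the previous bvec when the page is contiguous), so a return of 0 means the page could not be added, typically because the bio is full. A small hedged sketch of the usual fill loop; the names are hypothetical:

#include <linux/bio.h>

/* Add pages until the bio can take no more; returns how many were added. */
static unsigned int example_fill_bio(struct bio *bio, struct page **pages,
				     unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;
	return i;
}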
1052 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, in bio_add_folio_nofail() argument
1058 __bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE); in bio_add_folio_nofail()
1076 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, in bio_add_folio() argument
1083 return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0; in bio_add_folio()
1101 unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len) in bio_add_vmalloc_chunk() argument
1106 if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len) in bio_add_vmalloc_chunk()
1108 if (op_is_write(bio_op(bio))) in bio_add_vmalloc_chunk()
1127 bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len) in bio_add_vmalloc() argument
1130 unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len); in bio_add_vmalloc()
1142 void __bio_release_pages(struct bio *bio, bool mark_dirty) in __bio_release_pages() argument
1146 bio_for_each_folio_all(fi, bio) { in __bio_release_pages()
1161 void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter) in bio_iov_bvec_set() argument
1163 WARN_ON_ONCE(bio->bi_max_vecs); in bio_iov_bvec_set()
1165 bio->bi_vcnt = iter->nr_segs; in bio_iov_bvec_set()
1166 bio->bi_io_vec = (struct bio_vec *)iter->bvec; in bio_iov_bvec_set()
1167 bio->bi_iter.bi_bvec_done = iter->iov_offset; in bio_iov_bvec_set()
1168 bio->bi_iter.bi_size = iov_iter_count(iter); in bio_iov_bvec_set()
1169 bio_set_flag(bio, BIO_CLONED); in bio_iov_bvec_set()
1214 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_iter_get_pages() argument
1217 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1218 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1219 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; in __bio_iov_iter_get_pages()
1234 if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue)) in __bio_iov_iter_get_pages()
1238 UINT_MAX - bio->bi_iter.bi_size, in __bio_iov_iter_get_pages()
1247 unsigned int old_vcnt = bio->bi_vcnt; in __bio_iov_iter_get_pages()
1260 if (!bio_add_folio(bio, folio, len, folio_offset)) { in __bio_iov_iter_get_pages()
1266 if (bio_flagged(bio, BIO_PAGE_PINNED)) { in __bio_iov_iter_get_pages()
1274 if (offset && bio->bi_vcnt == old_vcnt) in __bio_iov_iter_get_pages()
1283 bio_release_page(bio, pages[i++]); in __bio_iov_iter_get_pages()
1293 static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter, in bio_iov_iter_align_down() argument
1296 size_t nbytes = bio->bi_iter.bi_size & len_align_mask; in bio_iov_iter_align_down()
1302 bio->bi_iter.bi_size -= nbytes; in bio_iov_iter_align_down()
1304 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_iov_iter_align_down()
1311 bio_release_page(bio, bv->bv_page); in bio_iov_iter_align_down()
1312 bio->bi_vcnt--; in bio_iov_iter_align_down()
1316 if (!bio->bi_vcnt) in bio_iov_iter_align_down()
1342 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter, in bio_iov_iter_get_pages() argument
1347 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in bio_iov_iter_get_pages()
1351 bio_iov_bvec_set(bio, iter); in bio_iov_iter_get_pages()
1352 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_iov_iter_get_pages()
1357 bio_set_flag(bio, BIO_PAGE_PINNED); in bio_iov_iter_get_pages()
1359 ret = __bio_iov_iter_get_pages(bio, iter); in bio_iov_iter_get_pages()
1360 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); in bio_iov_iter_get_pages()
1362 if (bio->bi_vcnt) in bio_iov_iter_get_pages()
1363 return bio_iov_iter_align_down(bio, iter, len_align_mask); in bio_iov_iter_get_pages()
1367 static void submit_bio_wait_endio(struct bio *bio) in submit_bio_wait_endio() argument
1369 complete(bio->bi_private); in submit_bio_wait_endio()
1383 int submit_bio_wait(struct bio *bio) in submit_bio_wait() argument
1386 bio->bi_bdev->bd_disk->lockdep_map); in submit_bio_wait()
1388 bio->bi_private = &done; in submit_bio_wait()
1389 bio->bi_end_io = submit_bio_wait_endio; in submit_bio_wait()
1390 bio->bi_opf |= REQ_SYNC; in submit_bio_wait()
1391 submit_bio(bio); in submit_bio_wait()
1394 return blk_status_to_errno(bio->bi_status); in submit_bio_wait()
1413 struct bio bio; in bdev_rw_virt() local
1419 bio_init(&bio, bdev, &bv, 1, op); in bdev_rw_virt()
1420 bio.bi_iter.bi_sector = sector; in bdev_rw_virt()
1421 bio_add_virt_nofail(&bio, data, len); in bdev_rw_virt()
1422 error = submit_bio_wait(&bio); in bdev_rw_virt()
1423 bio_uninit(&bio); in bdev_rw_virt()
1428 static void bio_wait_end_io(struct bio *bio) in bio_wait_end_io() argument
1430 complete(bio->bi_private); in bio_wait_end_io()
1431 bio_put(bio); in bio_wait_end_io()
1437 void bio_await_chain(struct bio *bio) in bio_await_chain() argument
1440 bio->bi_bdev->bd_disk->lockdep_map); in bio_await_chain()
1442 bio->bi_private = &done; in bio_await_chain()
1443 bio->bi_end_io = bio_wait_end_io; in bio_await_chain()
1444 bio_endio(bio); in bio_await_chain()
1448 void __bio_advance(struct bio *bio, unsigned bytes) in __bio_advance() argument
1450 if (bio_integrity(bio)) in __bio_advance()
1451 bio_integrity_advance(bio, bytes); in __bio_advance()
1453 bio_crypt_advance(bio, bytes); in __bio_advance()
1454 bio_advance_iter(bio, &bio->bi_iter, bytes); in __bio_advance()
1458 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, in bio_copy_data_iter()
1459 struct bio *src, struct bvec_iter *src_iter) in bio_copy_data_iter()
1487 void bio_copy_data(struct bio *dst, struct bio *src) in bio_copy_data()
1496 void bio_free_pages(struct bio *bio) in bio_free_pages() argument
1501 bio_for_each_segment_all(bvec, bio, iter_all) in bio_free_pages()
1529 void bio_set_pages_dirty(struct bio *bio) in bio_set_pages_dirty() argument
1533 bio_for_each_folio_all(fi, bio) { in bio_set_pages_dirty()
1556 static struct bio *bio_dirty_list;
1563 struct bio *bio, *next; in bio_dirty_fn() local
1570 while ((bio = next) != NULL) { in bio_dirty_fn()
1571 next = bio->bi_private; in bio_dirty_fn()
1573 bio_release_pages(bio, true); in bio_dirty_fn()
1574 bio_put(bio); in bio_dirty_fn()
1578 void bio_check_pages_dirty(struct bio *bio) in bio_check_pages_dirty() argument
1583 bio_for_each_folio_all(fi, bio) { in bio_check_pages_dirty()
1588 bio_release_pages(bio, false); in bio_check_pages_dirty()
1589 bio_put(bio); in bio_check_pages_dirty()
1593 bio->bi_private = bio_dirty_list; in bio_check_pages_dirty()
1594 bio_dirty_list = bio; in bio_check_pages_dirty()
1600 static inline bool bio_remaining_done(struct bio *bio) in bio_remaining_done() argument
1606 if (!bio_flagged(bio, BIO_CHAIN)) in bio_remaining_done()
1609 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); in bio_remaining_done()
1611 if (atomic_dec_and_test(&bio->__bi_remaining)) { in bio_remaining_done()
1612 bio_clear_flag(bio, BIO_CHAIN); in bio_remaining_done()
1632 void bio_endio(struct bio *bio) in bio_endio() argument
1635 if (!bio_remaining_done(bio)) in bio_endio()
1637 if (!bio_integrity_endio(bio)) in bio_endio()
1640 blk_zone_bio_endio(bio); in bio_endio()
1642 rq_qos_done_bio(bio); in bio_endio()
1644 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { in bio_endio()
1645 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); in bio_endio()
1646 bio_clear_flag(bio, BIO_TRACE_COMPLETION); in bio_endio()
1657 if (bio->bi_end_io == bio_chain_endio) { in bio_endio()
1658 bio = __bio_chain_endio(bio); in bio_endio()
1668 if (bio->bi_blkg) { in bio_endio()
1669 blkg_put(bio->bi_blkg); in bio_endio()
1670 bio->bi_blkg = NULL; in bio_endio()
1674 if (bio->bi_end_io) in bio_endio()
1675 bio->bi_end_io(bio); in bio_endio()
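bio_endio() above is what eventually invokes bi_end_io, once all chained or split children have completed (lines 1657-1675). A hedged sketch of the common asynchronous completion pattern, mirroring what submit_bio_wait_endio()/bio_wait_end_io() in this listing do internally; the context struct and helper names are hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

struct example_ctx {
	struct completion	done;
	blk_status_t		status;
};

static void example_end_io(struct bio *bio)
{
	struct example_ctx *ctx = bio->bi_private;

	ctx->status = bio->bi_status;	/* sample status before freeing */
	bio_put(bio);
	complete(&ctx->done);
}

static blk_status_t example_submit_and_wait(struct bio *bio,
					    struct example_ctx *ctx)
{
	init_completion(&ctx->done);
	bio->bi_private = ctx;
	bio->bi_end_io = example_end_io;
	submit_bio(bio);
	wait_for_completion(&ctx->done);
	return ctx->status;
}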
1693 struct bio *bio_split(struct bio *bio, int sectors, in bio_split() argument
1696 struct bio *split; in bio_split()
1700 if (WARN_ON_ONCE(sectors >= bio_sectors(bio))) in bio_split()
1704 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) in bio_split()
1708 if (bio->bi_opf & REQ_ATOMIC) in bio_split()
1711 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); in bio_split()
1720 bio_advance(bio, split->bi_iter.bi_size); in bio_split()
1722 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) in bio_split()
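bio_split() above peels the first "sectors" of a bio into a new one. The usual pattern, as in the block layer's own split path, chains the front piece to the remainder and resubmits the remainder. The sketch below assumes the newer ERR_PTR-returning bio_split() (older kernels returned NULL on failure); split_bs and the helper name are hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct bio *example_split_front(struct bio *bio, int sectors,
				       struct bio_set *split_bs)
{
	struct bio *split;

	split = bio_split(bio, sectors, GFP_NOIO, split_bs);
	if (IS_ERR(split))
		return split;

	/* The remainder only completes after the front piece does. */
	bio_chain(split, bio);
	/* Resubmit the trimmed remainder; the caller keeps the front. */
	submit_bio_noacct(bio);
	return split;
}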
1738 void bio_trim(struct bio *bio, sector_t offset, sector_t size) in bio_trim() argument
1741 if (WARN_ON_ONCE(bio->bi_opf & REQ_ATOMIC && size)) in bio_trim()
1745 offset + size > bio_sectors(bio))) in bio_trim()
1749 if (offset == 0 && size == bio->bi_iter.bi_size) in bio_trim()
1752 bio_advance(bio, offset << 9); in bio_trim()
1753 bio->bi_iter.bi_size = size; in bio_trim()
1755 if (bio_integrity(bio)) in bio_trim()
1756 bio_integrity_trim(bio); in bio_trim()
1864 BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags)); in init_bio()