Lines Matching defs:bio
7 #include <linux/bio-integrity.h>
31 struct bio *free_list;
32 struct bio *free_list_irq;
67 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
92 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
114 return bs->front_pad + sizeof(struct bio) + bs->back_pad;
143 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
213 void bio_uninit(struct bio *bio)
216 if (bio->bi_blkg) {
217 blkg_put(bio->bi_blkg);
218 bio->bi_blkg = NULL;
221 if (bio_integrity(bio))
222 bio_integrity_free(bio);
224 bio_crypt_free_ctx(bio);
228 static void bio_free(struct bio *bio)
230 struct bio_set *bs = bio->bi_pool;
231 void *p = bio;
235 bio_uninit(bio);
236 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
241 * Users of this function have their own bio allocation. Subsequently,
243 * when IO has completed, or when the bio is released.
245 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
248 bio->bi_next = NULL;
249 bio->bi_bdev = bdev;
250 bio->bi_opf = opf;
251 bio->bi_flags = 0;
252 bio->bi_ioprio = 0;
253 bio->bi_write_hint = 0;
254 bio->bi_write_stream = 0;
255 bio->bi_status = 0;
256 bio->bi_iter.bi_sector = 0;
257 bio->bi_iter.bi_size = 0;
258 bio->bi_iter.bi_idx = 0;
259 bio->bi_iter.bi_bvec_done = 0;
260 bio->bi_end_io = NULL;
261 bio->bi_private = NULL;
263 bio->bi_blkg = NULL;
264 bio->bi_issue.value = 0;
266 bio_associate_blkg(bio);
268 bio->bi_iocost_cost = 0;
272 bio->bi_crypt_context = NULL;
275 bio->bi_integrity = NULL;
277 bio->bi_vcnt = 0;
279 atomic_set(&bio->__bi_remaining, 1);
280 atomic_set(&bio->__bi_cnt, 1);
281 bio->bi_cookie = BLK_QC_T_NONE;
283 bio->bi_max_vecs = max_vecs;
284 bio->bi_io_vec = table;
285 bio->bi_pool = NULL;
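
The comment at lines 241-243 notes that every bio_init() must eventually be paired with bio_uninit(). A minimal sketch of that lifecycle with an on-stack bio; the helper name and parameters are assumptions, not code from bio.c:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative only: synchronously read one page through an on-stack bio. */
static int read_one_page(struct block_device *bdev, sector_t sector,
                         struct page *page)
{
        struct bio_vec bv;
        struct bio bio;
        int ret;

        bio_init(&bio, bdev, &bv, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = sector;
        __bio_add_page(&bio, page, PAGE_SIZE, 0);
        ret = submit_bio_wait(&bio);
        bio_uninit(&bio);       /* every bio_init() needs a matching bio_uninit() */
        return ret;
}
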
290 * bio_reset - reinitialize a bio
291 * @bio: bio to reset
292 * @bdev: block device to use the bio for
293 * @opf: operation and flags for bio
296 * After calling bio_reset(), @bio will be in the same state as a freshly
297 * allocated bio returned by bio_alloc_bioset() - the only fields that are
299 * comment in struct bio.
301 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
303 bio_uninit(bio);
304 memset(bio, 0, BIO_RESET_BYTES);
305 atomic_set(&bio->__bi_remaining, 1);
306 bio->bi_bdev = bdev;
307 if (bio->bi_bdev)
308 bio_associate_blkg(bio);
309 bio->bi_opf = opf;
313 static struct bio *__bio_chain_endio(struct bio *bio)
315 struct bio *parent = bio->bi_private;
317 if (bio->bi_status && !parent->bi_status)
318 parent->bi_status = bio->bi_status;
319 bio_put(bio);
323 static void bio_chain_endio(struct bio *bio)
325 bio_endio(__bio_chain_endio(bio));
329 * bio_chain - chain bio completions
330 * @bio: the target bio
331 * @parent: the parent bio of @bio
333 * The caller won't have a bi_end_io called when @bio completes - instead,
334 * @parent's bi_end_io won't be called until both @parent and @bio have
335 * completed; the chained bio will also be freed when it completes.
337 * The caller must not set bi_private or bi_end_io in @bio.
339 void bio_chain(struct bio *bio, struct bio *parent)
341 BUG_ON(bio->bi_private || bio->bi_end_io);
343 bio->bi_private = parent;
344 bio->bi_end_io = bio_chain_endio;
350 * bio_chain_and_submit - submit a bio after chaining it to another one
351 * @prev: bio to chain and submit
352 * @new: bio to chain to
358 struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new)
367 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
370 return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));
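
blk_next_bio() wraps bio_chain_and_submit() for callers that issue one long operation as a chain of bios. A sketch of that caller-side loop, loosely modeled on the write-zeroes helpers in blk-lib.c; the function name and bookkeeping are assumptions:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int zero_range(struct block_device *bdev, sector_t sector,
                      sector_t nr_sects, gfp_t gfp)
{
        /* Clamp so len << SECTOR_SHIFT always fits in the 32-bit bi_size. */
        unsigned int max = min_t(sector_t, bdev_write_zeroes_sectors(bdev),
                                 UINT_MAX >> SECTOR_SHIFT);
        struct bio *bio = NULL;
        int ret;

        if (!max)
                return -EOPNOTSUPP;

        while (nr_sects) {
                unsigned int len = min_t(sector_t, nr_sects, max);

                /* Chain and submit the previous bio, then start a new one. */
                bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp);
                bio->bi_iter.bi_sector = sector;
                bio->bi_iter.bi_size = len << SECTOR_SHIFT;
                sector += len;
                nr_sects -= len;
        }

        /* Completes only once every chained bio has completed. */
        ret = submit_bio_wait(bio);
        bio_put(bio);
        return ret;
}
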
377 struct bio *bio;
381 bio = bio_list_pop(&bs->rescue_list);
384 if (!bio)
387 submit_bio_noacct(bio);
394 struct bio *bio;
400 * were allocated from this bio_set; otherwise, if there was a bio on
405 * Since bio lists are singly linked, pop them all instead of trying to
412 while ((bio = bio_list_pop(&current->bio_list[0])))
413 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
417 while ((bio = bio_list_pop(&current->bio_list[1])))
418 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
444 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
449 struct bio *bio;
460 bio = cache->free_list;
461 cache->free_list = bio->bi_next;
465 bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
466 bio->bi_pool = bs;
467 return bio;
471 * bio_alloc_bioset - allocate a bio for I/O
472 * @bdev: block device to allocate the bio for (can be %NULL)
474 * @opf: operation and flags for bio
478 * Allocate a bio from the mempools in @bs.
481 * allocate a bio. This is due to the mempool guarantees. To make this work,
482 * callers must never allocate more than 1 bio at a time from the general pool.
483 * Callers that need to allocate more than 1 bio must always submit the
484 * previously allocated bio for IO before attempting to allocate a new one.
500 * for per bio allocations.
502 * Returns: Pointer to new bio on success, NULL on failure.
504 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
509 struct bio *bio;
518 bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
520 if (bio)
521 return bio;
523 * No cached bio available, bio returned below marked with
566 bio = p + bs->front_pad;
579 bio_init(bio, bdev, bvl, nr_vecs, opf);
581 bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
583 bio_init(bio, bdev, NULL, 0, opf);
586 bio->bi_pool = bs;
587 return bio;
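
A sketch of the asynchronous pattern this allocator is meant for: allocate from a driver-private bio_set, attach a completion callback through bi_end_io/bi_private, and submit. The helper names and the completion-based signalling are assumptions, not code from bio.c:

#include <linux/bio.h>
#include <linux/completion.h>

static void my_write_done(struct bio *bio)
{
        struct completion *done = bio->bi_private;

        complete(done);
        bio_put(bio);           /* drop the reference taken at allocation */
}

static void start_write(struct block_device *bdev, struct bio_set *bs,
                        struct page *page, sector_t sector,
                        struct completion *done)
{
        struct bio *bio;

        /* GFP_NOIO allows direct reclaim, so per the comment above the
         * mempool-backed allocation will not fail (it may block). */
        bio = bio_alloc_bioset(bdev, 1, REQ_OP_WRITE, GFP_NOIO, bs);
        bio->bi_iter.bi_sector = sector;
        __bio_add_page(bio, page, PAGE_SIZE, 0);
        bio->bi_private = done;
        bio->bi_end_io = my_write_done;
        submit_bio(bio);
}
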
596 * bio_kmalloc - kmalloc a bio
600 * Use kmalloc to allocate a bio (including bvecs). The bio must be initialized
601 * using bio_init() before use. To free a bio returned from this function use
602 * kfree() after calling bio_uninit(). A bio returned from this function can
609 * Returns: Pointer to new bio on success, NULL on failure.
611 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
613 struct bio *bio;
617 return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
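
The lifecycle spelled out in the bio_kmalloc() comment, as a short sketch (helper name assumed): kmalloc the bio plus inline vecs, bio_init() it, and free with bio_uninit() followed by kfree():

#include <linux/bio.h>
#include <linux/slab.h>

static int kmalloced_bio_read(struct block_device *bdev, struct page *page,
                              sector_t sector)
{
        struct bio *bio;
        int ret;

        bio = bio_kmalloc(1, GFP_KERNEL);
        if (!bio)
                return -ENOMEM;
        bio_init(bio, bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
        bio->bi_iter.bi_sector = sector;
        __bio_add_page(bio, page, PAGE_SIZE, 0);
        ret = submit_bio_wait(bio);
        bio_uninit(bio);
        kfree(bio);
        return ret;
}
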
621 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
626 __bio_for_each_segment(bv, bio, iter, start)
632 * bio_truncate - truncate the bio to the smaller size @new_size
633 * @bio: the bio to be truncated
634 * @new_size: new size for truncating the bio
637 * Truncate the bio to the new size @new_size. If bio_op(bio) is
639 * be used for handling corner cases, such as bio eod.
641 static void bio_truncate(struct bio *bio, unsigned new_size)
648 if (new_size >= bio->bi_iter.bi_size)
651 if (bio_op(bio) != REQ_OP_READ)
654 bio_for_each_segment(bv, bio, iter) {
672 * fs bio user has to retrieve all pages via bio_for_each_segment_all
675 * It is enough to truncate bio by updating .bi_size since we can make
678 bio->bi_iter.bi_size = new_size;
683 * @bio: bio to truncate
688 * We'll just truncate the bio to the size of the device, and clear the end of
693 void guard_bio_eod(struct bio *bio)
695 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
705 if (unlikely(bio->bi_iter.bi_sector >= maxsector))
708 maxsector -= bio->bi_iter.bi_sector;
709 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
712 bio_truncate(bio, maxsector << 9);
719 struct bio *bio;
721 while ((bio = cache->free_list) != NULL) {
722 cache->free_list = bio->bi_next;
724 bio_free(bio);
772 static inline void bio_put_percpu_cache(struct bio *bio)
776 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
781 bio_uninit(bio);
782 bio->bi_next = cache->free_list;
784 bio->bi_bdev = NULL;
785 cache->free_list = bio;
790 bio_uninit(bio);
791 bio->bi_next = cache->free_list_irq;
792 cache->free_list_irq = bio;
801 bio_free(bio);
805 * bio_put - release a reference to a bio
806 * @bio: bio to release reference to
809 * Put a reference to a &struct bio, either one you have gotten with
810 * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
812 void bio_put(struct bio *bio)
814 if (unlikely(bio_flagged(bio, BIO_REFFED))) {
815 BUG_ON(!atomic_read(&bio->__bi_cnt));
816 if (!atomic_dec_and_test(&bio->__bi_cnt))
819 if (bio->bi_opf & REQ_ALLOC_CACHE)
820 bio_put_percpu_cache(bio);
822 bio_free(bio);
826 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
828 bio_set_flag(bio, BIO_CLONED);
829 bio->bi_ioprio = bio_src->bi_ioprio;
830 bio->bi_write_hint = bio_src->bi_write_hint;
831 bio->bi_write_stream = bio_src->bi_write_stream;
832 bio->bi_iter = bio_src->bi_iter;
834 if (bio->bi_bdev) {
835 if (bio->bi_bdev == bio_src->bi_bdev &&
837 bio_set_flag(bio, BIO_REMAPPED);
838 bio_clone_blkg_association(bio, bio_src);
841 if (bio_crypt_clone(bio, bio_src, gfp) < 0)
844 bio_integrity_clone(bio, bio_src, gfp) < 0)
850 * bio_alloc_clone - clone a bio that shares the original bio's biovec
852 * @bio_src: bio to clone from
856 * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
857 * bio, but not the actual data it points to.
859 * The caller must ensure that @bio_src is not freed before the returned bio.
861 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
864 struct bio *bio;
866 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
867 if (!bio)
870 if (__bio_clone(bio, bio_src, gfp) < 0) {
871 bio_put(bio);
874 bio->bi_io_vec = bio_src->bi_io_vec;
876 return bio;
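
A sketch of the stacking-driver pattern bio_alloc_clone() is built for: clone the incoming bio onto a lower device, remap the sector, and complete the original from the clone's end_io so the original (which owns the shared biovec) outlives the clone. Names and the fixed sector offset are assumptions:

#include <linux/bio.h>

static void clone_endio(struct bio *clone)
{
        struct bio *orig = clone->bi_private;

        orig->bi_status = clone->bi_status;
        bio_put(clone);                 /* clone freed before the original */
        bio_endio(orig);
}

static void remap_and_submit(struct bio *bio, struct block_device *lower,
                             sector_t offset, struct bio_set *bs)
{
        struct bio *clone = bio_alloc_clone(lower, bio, GFP_NOIO, bs);

        if (!clone) {
                bio_io_error(bio);
                return;
        }
        clone->bi_iter.bi_sector += offset;
        clone->bi_private = bio;
        clone->bi_end_io = clone_endio;
        submit_bio_noacct(clone);
}
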
881 * bio_init_clone - clone a bio that shares the original bio's biovec
883 * @bio: bio to clone into
884 * @bio_src: bio to clone from
887 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
888 * The caller owns the returned bio, but not the actual data it points to.
890 * The caller must ensure that @bio_src is not freed before @bio.
892 int bio_init_clone(struct block_device *bdev, struct bio *bio,
893 struct bio *bio_src, gfp_t gfp)
897 bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
898 ret = __bio_clone(bio, bio_src, gfp);
900 bio_uninit(bio);
906 * bio_full - check if the bio is full
907 * @bio: bio to check
910 * Return true if @bio is full and one segment with @len bytes can't be
911 * added to the bio, otherwise return false
913 static inline bool bio_full(struct bio *bio, unsigned len)
915 if (bio->bi_vcnt >= bio->bi_max_vecs)
917 if (bio->bi_iter.bi_size > UINT_MAX - len)
950 * to build the initial bio to the hardware limit and doesn't have proper
968 * __bio_add_page - add page(s) to a bio in a new segment
969 * @bio: destination bio
974 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
975 * that @bio has space for another bvec.
977 void __bio_add_page(struct bio *bio, struct page *page,
980 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
981 WARN_ON_ONCE(bio_full(bio, len));
984 bio->bi_opf |= REQ_P2PDMA | REQ_NOMERGE;
986 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
987 bio->bi_iter.bi_size += len;
988 bio->bi_vcnt++;
993 * bio_add_virt_nofail - add data in the direct kernel mapping to a bio
994 * @bio: destination bio
998 * Add the data at @vaddr to @bio. The caller must have ensured a segment
1002 void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len)
1004 __bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
1009 * bio_add_page - attempt to add page(s) to bio
1010 * @bio: destination bio
1016 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1018 int bio_add_page(struct bio *bio, struct page *page,
1021 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1023 if (bio->bi_iter.bi_size > UINT_MAX - len)
1026 if (bio->bi_vcnt > 0) {
1027 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
1033 bio->bi_iter.bi_size += len;
1038 if (bio->bi_vcnt >= bio->bi_max_vecs)
1040 __bio_add_page(bio, page, len, offset);
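
bio_add_page() returns the number of bytes actually added, so callers typically loop until it reports a short add and then submit what they have. A minimal sketch with assumed names:

#include <linux/bio.h>

/* Add as many whole pages as fit; returns how many were added. */
static unsigned int add_pages(struct bio *bio, struct page **pages,
                              unsigned int nr_pages)
{
        unsigned int i;

        for (i = 0; i < nr_pages; i++) {
                if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
                        break;  /* bio full (or cloned): submit and start a new bio */
        }
        return i;
}
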
1045 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
1051 __bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
1056 * bio_add_folio - Attempt to add part of a folio to a bio.
1057 * @bio: BIO to add to.
1069 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1076 return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
1081 * bio_add_vmalloc_chunk - add a vmalloc chunk to a bio
1082 * @bio: destination bio
1086 * Add data starting at @vaddr to @bio and return how many bytes were added.
1088 * could be added to @bio.
1094 unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len)
1099 if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len)
1101 if (op_is_write(bio_op(bio)))
1108 * bio_add_vmalloc - add a vmalloc region to a bio
1109 * @bio: destination bio
1113 * Add data starting at @vaddr to @bio. Return %true on success or %false if
1114 * @bio does not have enough space for the payload.
1120 bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len)
1123 unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len);
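
A sketch of using bio_add_vmalloc() for a kernel buffer that is virtually but not physically contiguous; the helper name and the BIO_MAX_VECS clamp are assumptions:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int write_vmalloc_buf(struct block_device *bdev, sector_t sector,
                             void *buf, unsigned int len)
{
        unsigned int nr_vecs = min_t(unsigned int,
                                     DIV_ROUND_UP(len, PAGE_SIZE), BIO_MAX_VECS);
        struct bio *bio;
        int ret;

        bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, GFP_KERNEL);
        bio->bi_iter.bi_sector = sector;
        if (!bio_add_vmalloc(bio, buf, len)) {
                /* Payload needed more bio_vecs than we allocated. */
                bio_put(bio);
                return -EINVAL;
        }
        ret = submit_bio_wait(bio);
        bio_put(bio);
        return ret;
}
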
1135 void __bio_release_pages(struct bio *bio, bool mark_dirty)
1139 bio_for_each_folio_all(fi, bio) {
1154 void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
1156 WARN_ON_ONCE(bio->bi_max_vecs);
1158 bio->bi_vcnt = iter->nr_segs;
1159 bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1160 bio->bi_iter.bi_bvec_done = iter->iov_offset;
1161 bio->bi_iter.bi_size = iov_iter_count(iter);
1162 bio_set_flag(bio, BIO_CLONED);
1198 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1199 * @bio: bio to add pages to
1202 * Extracts pages from *iter and appends them to @bio's bvec array. The pages
1207 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1210 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1211 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1212 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1220 * Move page array up in the allocated memory for the bio vecs as far as
1227 if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1234 * result to ensure the bio's total size is correct. The remainder of
1235 * the iov data will be picked up in the next bio iteration.
1238 UINT_MAX - bio->bi_iter.bi_size,
1245 if (bio->bi_bdev) {
1246 size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
1259 unsigned int old_vcnt = bio->bi_vcnt;
1272 if (!bio_add_folio(bio, folio, len, folio_offset)) {
1278 if (bio_flagged(bio, BIO_PAGE_PINNED)) {
1283 * added to the bio and __bio_release_pages expects a
1286 if (offset && bio->bi_vcnt == old_vcnt)
1295 bio_release_page(bio, pages[i++]);
1301 * bio_iov_iter_get_pages - add user or kernel pages to a bio
1302 * @bio: bio to add pages to
1312 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1316 * fit into the bio, or are requested in @iter, whatever is smaller. If
1320 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1324 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1328 bio_iov_bvec_set(bio, iter);
1329 iov_iter_advance(iter, bio->bi_iter.bi_size);
1334 bio_set_flag(bio, BIO_PAGE_PINNED);
1336 ret = __bio_iov_iter_get_pages(bio, iter);
1337 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1339 return bio->bi_vcnt ? 0 : ret;
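
A sketch of how a direct-I/O path might drive bio_iov_iter_get_pages() (names assumed; the real users live in the blkdev and iomap direct I/O code): size the bio from the iterator, pin the pages, and hand the bio back for submission:

#include <linux/bio.h>
#include <linux/err.h>
#include <linux/uio.h>

static struct bio *dio_build_bio(struct block_device *bdev,
                                 struct iov_iter *iter, sector_t sector)
{
        unsigned short nr_vecs = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
        struct bio *bio;
        int ret;

        bio = bio_alloc(bdev, nr_vecs, REQ_OP_READ, GFP_KERNEL);
        bio->bi_iter.bi_sector = sector;
        ret = bio_iov_iter_get_pages(bio, iter);
        if (ret) {
                bio_put(bio);
                return ERR_PTR(ret);
        }
        return bio;     /* caller submits; its endio must release the pinned pages */
}
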
1343 static void submit_bio_wait_endio(struct bio *bio)
1345 complete(bio->bi_private);
1349 * submit_bio_wait - submit a bio, and wait until it completes
1350 * @bio: The &struct bio which describes the I/O
1356 * result in the bio reference being consumed. The caller must drop the reference
1359 int submit_bio_wait(struct bio *bio)
1362 bio->bi_bdev->bd_disk->lockdep_map);
1364 bio->bi_private = &done;
1365 bio->bi_end_io = submit_bio_wait_endio;
1366 bio->bi_opf |= REQ_SYNC;
1367 submit_bio(bio);
1370 return blk_status_to_errno(bio->bi_status);
1389 struct bio bio;
1395 bio_init(&bio, bdev, &bv, 1, op);
1396 bio.bi_iter.bi_sector = sector;
1397 bio_add_virt_nofail(&bio, data, len);
1398 error = submit_bio_wait(&bio);
1399 bio_uninit(&bio);
1404 static void bio_wait_end_io(struct bio *bio)
1406 complete(bio->bi_private);
1407 bio_put(bio);
1411 * bio_await_chain - ends @bio and waits for every chained bio to complete
1413 void bio_await_chain(struct bio *bio)
1416 bio->bi_bdev->bd_disk->lockdep_map);
1418 bio->bi_private = &done;
1419 bio->bi_end_io = bio_wait_end_io;
1420 bio_endio(bio);
1424 void __bio_advance(struct bio *bio, unsigned bytes)
1426 if (bio_integrity(bio))
1427 bio_integrity_advance(bio, bytes);
1429 bio_crypt_advance(bio, bytes);
1430 bio_advance_iter(bio, &bio->bi_iter, bytes);
1434 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1435 struct bio *src, struct bvec_iter *src_iter)
1456 * bio_copy_data - copy contents of data buffers from one bio to another
1457 * @src: source bio
1458 * @dst: destination bio
1463 void bio_copy_data(struct bio *dst, struct bio *src)
1472 void bio_free_pages(struct bio *bio)
1477 bio_for_each_segment_all(bvec, bio, iter_all)
1499 * deferred bio dirtying paths.
1503 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1505 void bio_set_pages_dirty(struct bio *bio)
1509 bio_for_each_folio_all(fi, bio) {
1532 static struct bio *bio_dirty_list;
1539 struct bio *bio, *next;
1546 while ((bio = next) != NULL) {
1547 next = bio->bi_private;
1549 bio_release_pages(bio, true);
1550 bio_put(bio);
1554 void bio_check_pages_dirty(struct bio *bio)
1559 bio_for_each_folio_all(fi, bio) {
1564 bio_release_pages(bio, false);
1565 bio_put(bio);
1569 bio->bi_private = bio_dirty_list;
1570 bio_dirty_list = bio;
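
bio_set_pages_dirty() and bio_check_pages_dirty() are the two halves of the deferred-dirtying scheme described in the comments above. A sketch of how a direct-read submission and completion could use them (function names assumed):

#include <linux/bio.h>

static void dio_read_endio(struct bio *bio)
{
        /* Releases the pinned pages and drops the bio; any page that was
         * cleaned while I/O was in flight is re-dirtied from a workqueue. */
        bio_check_pages_dirty(bio);
}

static void dio_submit_read(struct bio *bio)
{
        bio_set_pages_dirty(bio);       /* dirty the pages up front, from process context */
        bio->bi_end_io = dio_read_endio;
        submit_bio(bio);
}
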
1576 static inline bool bio_remaining_done(struct bio *bio)
1582 if (!bio_flagged(bio, BIO_CHAIN))
1585 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1587 if (atomic_dec_and_test(&bio->__bi_remaining)) {
1588 bio_clear_flag(bio, BIO_CHAIN);
1596 * bio_endio - end I/O on a bio
1597 * @bio: bio
1600 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1601 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1602 * bio unless they own it and thus know that it has an end_io function.
1604 * bio_endio() can be called several times on a bio that has been chained
1608 void bio_endio(struct bio *bio)
1611 if (!bio_remaining_done(bio))
1613 if (!bio_integrity_endio(bio))
1616 blk_zone_bio_endio(bio);
1618 rq_qos_done_bio(bio);
1620 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1621 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1622 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1633 if (bio->bi_end_io == bio_chain_endio) {
1634 bio = __bio_chain_endio(bio);
1644 if (bio->bi_blkg) {
1645 blkg_put(bio->bi_blkg);
1646 bio->bi_blkg = NULL;
1650 if (bio->bi_end_io)
1651 bio->bi_end_io(bio);
1656 * bio_split - split a bio
1657 * @bio: bio to split
1658 * @sectors: number of sectors to split from the front of @bio
1660 * @bs: bio set to allocate from
1662 * Allocates and returns a new bio which represents @sectors from the start of
1663 * @bio, and updates @bio to represent the remaining sectors.
1665 * Unless this is a discard request the newly allocated bio will point
1666 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1667 * neither @bio nor @bs are freed before the split bio.
1669 struct bio *bio_split(struct bio *bio, int sectors,
1672 struct bio *split;
1676 if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))
1680 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1684 if (bio->bi_opf & REQ_ATOMIC)
1687 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1696 bio_advance(bio, split->bi_iter.bi_size);
1698 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
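
A common consumer of bio_split() is the split-and-chain pattern: carve off what fits, chain it to the remainder so the original end_io runs only once both pieces finish, resubmit the remainder, and keep working on the front. A sketch with assumed names; it also assumes the recent convention that bio_split() returns an ERR_PTR on failure:

#include <linux/bio.h>
#include <linux/err.h>

static struct bio *split_front(struct bio *bio, int max_sectors,
                               struct bio_set *bs)
{
        struct bio *split;

        if (max_sectors >= bio_sectors(bio))
                return bio;                     /* already small enough */

        split = bio_split(bio, max_sectors, GFP_NOIO, bs);
        if (IS_ERR(split)) {
                bio->bi_status = errno_to_blk_status(PTR_ERR(split));
                bio_endio(bio);
                return NULL;
        }
        bio_chain(split, bio);                  /* @bio (the remainder) is the parent */
        submit_bio_noacct(bio);                 /* remainder goes back through the stack */
        return split;                           /* caller continues with the front piece */
}
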
1706 * bio_trim - trim a bio
1707 * @bio: bio to trim
1708 * @offset: number of sectors to trim from the front of @bio
1709 * @size: size we want to trim @bio to, in sectors
1714 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1717 if (WARN_ON_ONCE(bio->bi_opf & REQ_ATOMIC && size))
1721 offset + size > bio_sectors(bio)))
1725 if (offset == 0 && size == bio->bi_iter.bi_size)
1728 bio_advance(bio, offset << 9);
1729 bio->bi_iter.bi_size = size;
1731 if (bio_integrity(bio))
1732 bio_integrity_trim(bio);
1772 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1773 * @front_pad: Number of bytes to allocate in front of the returned bio
1779 * to ask for a number of bytes to be allocated in front of the bio.
1780 * Front pad allocation is useful for embedding the bio inside
1781 * another structure, to avoid allocating extra data to go with the bio.
1782 * Note that the bio must be embedded at the END of that structure always,
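
The front-pad embedding described above typically looks like the following sketch: the driver-private structure ends with the struct bio, front_pad is that member's offset, and container_of() recovers the wrapper from the bio returned by bio_alloc_bioset(). Structure and function names here are assumptions:

#include <linux/bio.h>

struct my_io {
        void            *driver_data;
        struct bio      bio;            /* must be the last member */
};

static struct bio_set my_bio_set;

static int my_driver_init(void)
{
        return bioset_init(&my_bio_set, BIO_POOL_SIZE,
                           offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
}

static struct my_io *alloc_my_io(struct block_device *bdev, blk_opf_t opf)
{
        struct bio *bio = bio_alloc_bioset(bdev, 1, opf, GFP_NOIO, &my_bio_set);

        return container_of(bio, struct my_io, bio);
}

On teardown the matching bioset_exit(&my_bio_set) releases the pools set up by bioset_init().
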
1840 BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
1850 cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1855 panic("bio: can't allocate bios\n");