| /linux/block/ |
| H A D | bio.c | 31 struct bio *free_list; 32 struct bio *free_list_irq; 114 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size() 213 void bio_uninit(struct bio *bio) in bio_uninit() argument 216 if (bio->bi_blkg) { in bio_uninit() 217 blkg_put(bio->bi_blkg); in bio_uninit() 218 bio->bi_blkg = NULL; in bio_uninit() 221 if (bio_integrity(bio)) in bio_uninit() 222 bio_integrity_free(bio); in bio_uninit() 224 bio_crypt_free_ctx(bio); in bio_uninit() [all …]
|
| H A D | blk-map.c | 40 static inline void blk_mq_map_bio_put(struct bio *bio) in blk_mq_map_bio_put() argument 42 bio_put(bio); in blk_mq_map_bio_put() 45 static struct bio *blk_rq_map_bio_alloc(struct request *rq, in blk_rq_map_bio_alloc() 49 struct bio *bio; in blk_rq_map_bio_alloc() local 51 bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask, in blk_rq_map_bio_alloc() 53 if (!bio) in blk_rq_map_bio_alloc() 56 return bio; in blk_rq_map_bio_alloc() 67 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) in bio_copy_from_iter() argument 72 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_from_iter() 98 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument [all …]
|
| H A D | blk-lib.c | 38 struct bio *blk_alloc_discard_bio(struct block_device *bdev, in blk_alloc_discard_bio() 42 struct bio *bio; in blk_alloc_discard_bio() local 47 bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask); in blk_alloc_discard_bio() 48 if (!bio) in blk_alloc_discard_bio() 50 bio->bi_iter.bi_sector = *sector; in blk_alloc_discard_bio() 51 bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT; in blk_alloc_discard_bio() 60 return bio; in blk_alloc_discard_bio() 64 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) in __blkdev_issue_discard() 66 struct bio *bio; in __blkdev_issue_discard() local 68 while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, in __blkdev_issue_discard() [all …]
|
| H A D | blk-crypto-internal.h | 32 bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio); 38 struct bio *bio) in bio_crypt_ctx_back_mergeable() argument 41 bio->bi_crypt_context); in bio_crypt_ctx_back_mergeable() 45 struct bio *bio) in bio_crypt_ctx_front_mergeable() argument 47 return bio_crypt_ctx_mergeable(bio->bi_crypt_context, in bio_crypt_ctx_front_mergeable() 48 bio->bi_iter.bi_size, req->crypt_ctx); in bio_crypt_ctx_front_mergeable() 101 struct bio *bio) in bio_crypt_rq_ctx_compatible() argument 107 struct bio *bio) in bio_crypt_ctx_front_mergeable() argument 113 struct bio *bio) in bio_crypt_ctx_back_mergeable() argument 144 void __bio_crypt_advance(struct bio *bio, unsigned int bytes); [all …]
|
| H A D | blk-rq-qos.h | 38 void (*throttle)(struct rq_qos *, struct bio *); 39 void (*track)(struct rq_qos *, struct request *, struct bio *); 40 void (*merge)(struct rq_qos *, struct request *, struct bio *); 44 void (*done_bio)(struct rq_qos *, struct bio *); 45 void (*cleanup)(struct rq_qos *, struct bio *); 103 void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio); 107 void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio); 108 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio); 109 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio); 110 void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio); [all …]
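
The `blk-rq-qos.h` hits above show the hook table a request-queue QoS policy fills in: per-bio callbacks such as `throttle`, `track`, `merge`, `done_bio` and `cleanup`. As a rough sketch only (the `example_*` names are hypothetical, and this header is private to `block/`, so a real policy would be built inside the block layer), a minimal policy wires up just the hooks it needs and leaves the rest `NULL`:

```c
#include "blk-rq-qos.h"	/* internal block-layer header; assumed build context */

/* Hypothetical no-op policy: only throttle and done_bio are implemented. */
static void example_throttle(struct rq_qos *rqos, struct bio *bio)
{
	/* called before the bio is dispatched, e.g. to delay it under pressure */
}

static void example_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	/* called when the bio completes, e.g. to account completion latency */
}

static const struct rq_qos_ops example_rq_qos_ops = {
	.throttle = example_throttle,
	.done_bio = example_done_bio,
};
```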
|
| H A D | fops.c | 46 static inline int blkdev_iov_iter_get_pages(struct bio *bio, in blkdev_iov_iter_get_pages() argument 49 return bio_iov_iter_get_pages(bio, iter, in blkdev_iov_iter_get_pages() 62 struct bio bio; in __blkdev_direct_IO_simple() local 75 bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ); in __blkdev_direct_IO_simple() 79 bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb)); in __blkdev_direct_IO_simple() 81 bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT; in __blkdev_direct_IO_simple() 82 bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint; in __blkdev_direct_IO_simple() 83 bio.bi_write_stream = iocb->ki_write_stream; in __blkdev_direct_IO_simple() 84 bio.bi_ioprio = iocb->ki_ioprio; in __blkdev_direct_IO_simple() 86 bio.bi_opf |= REQ_ATOMIC; in __blkdev_direct_IO_simple() [all …]
|
| H A D | blk-zoned.c | 270 struct bio bio; in blkdev_zone_reset_all() local 272 bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC); in blkdev_zone_reset_all() 273 trace_blkdev_zone_mgmt(&bio, 0); in blkdev_zone_reset_all() 274 return submit_bio_wait(&bio); in blkdev_zone_reset_all() 298 struct bio *bio = NULL; in blkdev_zone_mgmt() local 329 bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL); in blkdev_zone_mgmt() 330 bio->bi_iter.bi_sector = sector; in blkdev_zone_mgmt() 337 trace_blkdev_zone_mgmt(bio, nr_sectors); in blkdev_zone_mgmt() 338 ret = submit_bio_wait(bio); in blkdev_zone_mgmt() 339 bio_put(bio); in blkdev_zone_mgmt() [all …]
|
| /linux/drivers/md/bcache/ |
| H A D | request.c | 40 static void bio_csum(struct bio *bio, struct bkey *k) in bio_csum() argument 46 bio_for_each_segment(bv, bio, iter) { in bio_csum() 111 struct bio *bio = op->bio; in bch_data_invalidate() local 114 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); in bch_data_invalidate() 116 while (bio_sectors(bio)) { in bch_data_invalidate() 117 unsigned int sectors = min(bio_sectors(bio), in bch_data_invalidate() 123 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate() 124 bio->bi_iter.bi_size -= sectors << 9; in bch_data_invalidate() 128 bio->bi_iter.bi_sector, in bch_data_invalidate() 134 bio_put(bio); in bch_data_invalidate() [all …]
|
| /linux/fs/iomap/ |
| H A D | bio.c | 11 static void iomap_read_end_io(struct bio *bio) in iomap_read_end_io() argument 13 int error = blk_status_to_errno(bio->bi_status); in iomap_read_end_io() 16 bio_for_each_folio_all(fi, bio) in iomap_read_end_io() 18 bio_put(bio); in iomap_read_end_io() 23 struct bio *bio = ctx->read_ctx; in iomap_bio_submit_read() local 25 if (bio) in iomap_bio_submit_read() 26 submit_bio(bio); in iomap_bio_submit_read() 38 struct bio *bio = ctx->read_ctx; in iomap_bio_read_folio_range() local 41 if (!bio || bio_end_sector(bio) != sector || in iomap_bio_read_folio_range() 42 !bio_add_folio(bio, folio, plen, poff)) { in iomap_bio_read_folio_range() [all …]
|
| /linux/drivers/md/dm-vdo/ |
| H A D | vio.c | 41 physical_block_number_t pbn_from_vio_bio(struct bio *bio) in pbn_from_vio_bio() argument 43 struct vio *vio = bio->bi_private; in pbn_from_vio_bio() 45 physical_block_number_t pbn = bio->bi_iter.bi_sector / VDO_SECTORS_PER_BLOCK; in pbn_from_vio_bio() 50 static int create_multi_block_bio(block_count_t size, struct bio **bio_ptr) in create_multi_block_bio() 52 struct bio *bio = NULL; in create_multi_block_bio() local 55 result = vdo_allocate_extended(struct bio, size + 1, struct bio_vec, in create_multi_block_bio() 56 "bio", &bio); in create_multi_block_bio() 60 *bio_ptr = bio; in create_multi_block_bio() 64 int vdo_create_bio(struct bio **bio_ptr) in vdo_create_bio() 69 void vdo_free_bio(struct bio *bio) in vdo_free_bio() argument [all …]
|
| H A D | io-submitter.c | 74 static void count_all_bios(struct vio *vio, struct bio *bio) in count_all_bios() argument 79 vdo_count_bios(&stats->bios_out, bio); in count_all_bios() 83 vdo_count_bios(&stats->bios_meta, bio); in count_all_bios() 85 vdo_count_bios(&stats->bios_journal, bio); in count_all_bios() 87 vdo_count_bios(&stats->bios_page_cache, bio); in count_all_bios() 107 static void send_bio_to_device(struct vio *vio, struct bio *bio) in send_bio_to_device() argument 113 count_all_bios(vio, bio); in send_bio_to_device() 114 bio_set_dev(bio, vdo_get_backing_device(vdo)); in send_bio_to_device() 115 submit_bio_noacct(bio); in send_bio_to_device() 127 send_bio_to_device(vio, vio->bio); in vdo_submit_vio() [all …]
|
| /linux/fs/ |
| H A D | mpage.c | 46 static void mpage_read_end_io(struct bio *bio) in mpage_read_end_io() argument 49 int err = blk_status_to_errno(bio->bi_status); in mpage_read_end_io() 51 bio_for_each_folio_all(fi, bio) in mpage_read_end_io() 54 bio_put(bio); in mpage_read_end_io() 57 static void mpage_write_end_io(struct bio *bio) in mpage_write_end_io() argument 60 int err = blk_status_to_errno(bio->bi_status); in mpage_write_end_io() 62 bio_for_each_folio_all(fi, bio) { in mpage_write_end_io() 68 bio_put(bio); in mpage_write_end_io() 71 static struct bio *mpage_bio_submit_read(struct bio *bio) in mpage_bio_submit_read() argument 73 bio->bi_end_io = mpage_read_end_io; in mpage_bio_submit_read() [all …]
|
| /linux/drivers/md/ |
| H A D | dm-raid1.c | 126 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) in queue_bio() argument 135 bio_list_add(bl, bio); in queue_bio() 144 struct bio *bio; in dispatch_bios() local 146 while ((bio = bio_list_pop(bio_list))) in dispatch_bios() 147 queue_bio(ms, bio, WRITE); in dispatch_bios() 167 static struct mirror *bio_get_m(struct bio *bio) in bio_get_m() argument 169 return (struct mirror *) bio->bi_next; in bio_get_m() 172 static void bio_set_m(struct bio *bio, struct mirror *m) in bio_set_m() argument 174 bio->bi_next = (struct bio *) m; in bio_set_m() 444 static int mirror_available(struct mirror_set *ms, struct bio *bio) in mirror_available() argument [all …]
|
| H A D | dm-io-rewind.c | 56 static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done) in dm_bio_integrity_rewind() argument 58 struct bio_integrity_payload *bip = bio_integrity(bio); in dm_bio_integrity_rewind() 59 struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); in dm_bio_integrity_rewind() 68 static inline void dm_bio_integrity_rewind(struct bio *bio, in dm_bio_integrity_rewind() argument 94 static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes) in dm_bio_crypt_rewind() argument 96 struct bio_crypt_ctx *bc = bio->bi_crypt_context; in dm_bio_crypt_rewind() 104 static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes) in dm_bio_crypt_rewind() argument 110 static inline void dm_bio_rewind_iter(const struct bio *bio, in dm_bio_rewind_iter() argument 116 if (bio_no_advance_iter(bio)) in dm_bio_rewind_iter() 119 dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes); in dm_bio_rewind_iter() [all …]
|
| H A D | dm-zoned-target.c | 22 struct bio *bio; member 75 static inline void dmz_bio_endio(struct bio *bio, blk_status_t status) in dmz_bio_endio() argument 78 dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); in dmz_bio_endio() 80 if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK) in dmz_bio_endio() 81 bio->bi_status = status; in dmz_bio_endio() 82 if (bioctx->dev && bio->bi_status != BLK_STS_OK) in dmz_bio_endio() 89 if (bio->bi_status != BLK_STS_OK && in dmz_bio_endio() 90 bio_op(bio) == REQ_OP_WRITE && in dmz_bio_endio() 95 bio_endio(bio); in dmz_bio_endio() 103 static void dmz_clone_endio(struct bio *clone) in dmz_clone_endio() [all …]
|
| H A D | dm-bio-record.h | 33 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) in dm_bio_record() argument 35 bd->bi_bdev = bio->bi_bdev; in dm_bio_record() 36 bd->bi_flags = bio->bi_flags; in dm_bio_record() 37 bd->bi_iter = bio->bi_iter; in dm_bio_record() 38 bd->__bi_remaining = atomic_read(&bio->__bi_remaining); in dm_bio_record() 39 bd->bi_end_io = bio->bi_end_io; in dm_bio_record() 41 bd->bi_integrity = bio_integrity(bio); in dm_bio_record() 45 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) in dm_bio_restore() argument 47 bio->bi_bdev = bd->bi_bdev; in dm_bio_restore() 48 bio->bi_flags = bd->bi_flags; in dm_bio_restore() [all …]
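
`dm-bio-record.h` lets a device-mapper target snapshot the bio fields it is about to change (`bi_bdev`, `bi_iter`, the end_io hook) so the bio can later be handed back untouched. A sketch of that pattern, with hypothetical helper names and parameters (real users such as dm-raid1 keep the `dm_bio_details` in per-bio data):

```c
#include "dm-bio-record.h"	/* assumed to be built inside drivers/md/ */

/* Hypothetical: remap a bio onto another device, remembering its original state. */
static void example_remap(struct dm_bio_details *bd, struct bio *bio,
			  struct block_device *new_dev, sector_t offset)
{
	dm_bio_record(bd, bio);			/* save bi_bdev, bi_iter, bi_end_io, ... */
	bio_set_dev(bio, new_dev);		/* redirect to the new device */
	bio->bi_iter.bi_sector += offset;	/* shift into that device's range */
}

/* Hypothetical error path: undo the remap so the bio can be retried elsewhere. */
static void example_on_error(struct dm_bio_details *bd, struct bio *bio)
{
	dm_bio_restore(bd, bio);	/* bio now looks exactly as it was first submitted */
}
```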
|
| H A D | dm-ebs-target.c | 48 static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio) in __nr_blocks() argument 50 sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio); in __nr_blocks() 122 static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio) in __ebs_rw_bio() argument 128 bio_for_each_bvec(bv, bio, iter) { in __ebs_rw_bio() 143 static int __ebs_discard_bio(struct ebs_c *ec, struct bio *bio) in __ebs_discard_bio() argument 145 sector_t block, blocks, sector = bio->bi_iter.bi_sector; in __ebs_discard_bio() 148 blocks = __nr_blocks(ec, bio); in __ebs_discard_bio() 160 if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs)) in __ebs_discard_bio() 167 static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio) in __ebs_forget_bio() argument 169 sector_t blocks, sector = bio->bi_iter.bi_sector; in __ebs_forget_bio() [all …]
|
| H A D | dm-thin.c | 226 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio); 384 struct bio *parent_bio; 385 struct bio *bio; member 388 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent) in begin_discard() 395 op->bio = NULL; in begin_discard() 404 __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio); in issue_discard() 409 if (op->bio) { in end_discard() 414 bio_chain(op->bio, op->parent_bio); in end_discard() 415 op->bio->bi_opf = REQ_OP_DISCARD; in end_discard() 416 submit_bio(op->bio); in end_discard() [all …]
|
| /linux/drivers/nvme/target/ |
| H A D | io-cmd-bdev.c | 180 static void nvmet_bio_done(struct bio *bio) in nvmet_bio_done() argument 182 struct nvmet_req *req = bio->bi_private; in nvmet_bio_done() 184 nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); in nvmet_bio_done() 185 nvmet_req_bio_put(req, bio); in nvmet_bio_done() 189 static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, in nvmet_bdev_alloc_bip() argument 203 bip = bio_integrity_alloc(bio, GFP_NOIO, in nvmet_bdev_alloc_bip() 211 bip_set_seed(bip, bio->bi_iter.bi_sector >> in nvmet_bdev_alloc_bip() 214 resid = bio_integrity_bytes(bi, bio_sectors(bio)); in nvmet_bdev_alloc_bip() 217 rc = bio_integrity_add_page(bio, miter->page, len, in nvmet_bdev_alloc_bip() 234 static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, in nvmet_bdev_alloc_bip() argument [all …]
|
| /linux/fs/f2fs/ |
| H A D | iostat.h | 45 static inline void iostat_update_submit_ctx(struct bio *bio, in iostat_update_submit_ctx() argument 48 struct bio_iostat_ctx *iostat_ctx = bio->bi_private; in iostat_update_submit_ctx() 54 static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio) in get_post_read_ctx() argument 56 struct bio_iostat_ctx *iostat_ctx = bio->bi_private; in get_post_read_ctx() 61 extern void iostat_update_and_unbind_ctx(struct bio *bio); 63 struct bio *bio, struct bio_post_read_ctx *ctx); 71 static inline void iostat_update_and_unbind_ctx(struct bio *bio) {} in iostat_update_and_unbind_ctx() argument 73 struct bio *bio, struct bio_post_read_ctx *ctx) {} in iostat_alloc_and_bind_ctx() argument 74 static inline void iostat_update_submit_ctx(struct bio *bio, in iostat_update_submit_ctx() argument 76 static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio) in get_post_read_ctx() argument [all …]
|
| /linux/Documentation/translations/zh_CN/block/ |
| H A D | data-integrity.rst | 101 When CONFIG_BLK_DEV_INTEGRITY is enabled, the data integrity patches add a new 102 field to struct bio. Calling bio_integrity(bio) returns a pointer to a struct bip, which 103 holds that bio's integrity payload. In essence, a bip is a slimmed-down struct bio whose 107 A kernel subsystem can enable data integrity protection on a bio by calling bio_integrity_alloc(bio). 108 This allocates a bip and attaches it to the bio. 110 Individual pages holding the integrity metadata are then attached to the bio with bio_integrity_add_page(). 150 `bool bio_integrity_prep(bio);` 153 bio_integrity_prep(bio). 155 Before calling this function, the bio's data direction and start sector must be set, and all 156 of its data pages must already have been added. The caller must itself guarantee that, while the I/O is in progress, [all …]
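
A minimal sketch of the allocation path described in those lines, using the interfaces named there (`bio_integrity_alloc()` and `bio_integrity_add_page()`); the function name, error handling and single-page layout are illustrative assumptions, not code from any particular filesystem:

```c
#include <linux/bio.h>
#include <linux/err.h>

/* Hypothetical: attach one page of caller-generated integrity metadata to a bio. */
static int example_attach_integrity(struct bio *bio, struct page *meta_page,
				    unsigned int meta_len)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);	/* room for one integrity vec */
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	if (bio_integrity_add_page(bio, meta_page, meta_len, 0) < meta_len)
		return -ENOMEM;

	return 0;	/* the bio now carries a bip and can be submitted as usual */
}
```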
|
| /linux/mm/ |
| H A D | page_io.c | 30 static void __end_swap_bio_write(struct bio *bio) in __end_swap_bio_write() argument 32 struct folio *folio = bio_first_folio_all(bio); in __end_swap_bio_write() 34 if (bio->bi_status) { in __end_swap_bio_write() 45 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), in __end_swap_bio_write() 46 (unsigned long long)bio->bi_iter.bi_sector); in __end_swap_bio_write() 52 static void end_swap_bio_write(struct bio *bio) in end_swap_bio_write() argument 54 __end_swap_bio_write(bio); in end_swap_bio_write() 55 bio_put(bio); in end_swap_bio_write() 58 static void __end_swap_bio_read(struct bio *bio) in __end_swap_bio_read() argument 60 struct folio *folio = bio_first_folio_all(bio); in __end_swap_bio_read() [all …]
|
| /linux/fs/ext4/ |
| H A D | page-io.c | 100 static void ext4_finish_bio(struct bio *bio) in ext4_finish_bio() argument 104 bio_for_each_folio_all(fi, bio) { in ext4_finish_bio() 118 if (bio->bi_status) { in ext4_finish_bio() 119 int err = blk_status_to_errno(bio->bi_status); in ext4_finish_bio() 136 if (bio->bi_status) { in ext4_finish_bio() 151 struct bio *bio, *next_bio; in ext4_release_io_end() local 157 for (bio = io_end->bio; bio; bio = next_bio) { in ext4_release_io_end() 158 next_bio = bio->bi_private; in ext4_release_io_end() 159 ext4_finish_bio(bio); in ext4_release_io_end() 160 bio_put(bio); in ext4_release_io_end() [all …]
|
| /linux/fs/crypto/ |
| H A D | bio.c | 33 bool fscrypt_decrypt_bio(struct bio *bio) in fscrypt_decrypt_bio() argument 37 bio_for_each_folio_all(fi, bio) { in fscrypt_decrypt_bio() 42 bio->bi_status = errno_to_blk_status(err); in fscrypt_decrypt_bio() 56 struct bio *bio; in fscrypt_zeroout_range_inline_crypt() local 61 bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE, in fscrypt_zeroout_range_inline_crypt() 69 fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS); in fscrypt_zeroout_range_inline_crypt() 70 bio->bi_iter.bi_sector = in fscrypt_zeroout_range_inline_crypt() 73 ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0); in fscrypt_zeroout_range_inline_crypt() 83 !fscrypt_mergeable_bio(bio, inode, lblk)) { in fscrypt_zeroout_range_inline_crypt() 84 err = submit_bio_wait(bio); in fscrypt_zeroout_range_inline_crypt() [all …]
|
| /linux/include/trace/events/ |
| H A D | bcache.h | 11 TP_PROTO(struct bcache_device *d, struct bio *bio), 12 TP_ARGS(d, bio), 25 __entry->dev = bio_dev(bio); 28 __entry->sector = bio->bi_iter.bi_sector; 29 __entry->orig_sector = bio->bi_iter.bi_sector - 16; 30 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 31 blk_fill_rwbs(__entry->rwbs, bio->bi_opf); 81 TP_PROTO(struct bcache_device *d, struct bio *bio), 82 TP_ARGS(d, bio) 86 TP_PROTO(struct bcache_device *d, struct bio *bio), [all …]
|