Lines Matching refs:bio

46 static inline int blkdev_iov_iter_get_pages(struct bio *bio,  in blkdev_iov_iter_get_pages()  argument
49 return bio_iov_iter_get_pages(bio, iter, in blkdev_iov_iter_get_pages()
62 struct bio bio; in __blkdev_direct_IO_simple() local
75 bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ); in __blkdev_direct_IO_simple()
79 bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb)); in __blkdev_direct_IO_simple()
81 bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT; in __blkdev_direct_IO_simple()
82 bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint; in __blkdev_direct_IO_simple()
83 bio.bi_write_stream = iocb->ki_write_stream; in __blkdev_direct_IO_simple()
84 bio.bi_ioprio = iocb->ki_ioprio; in __blkdev_direct_IO_simple()
86 bio.bi_opf |= REQ_ATOMIC; in __blkdev_direct_IO_simple()
88 ret = blkdev_iov_iter_get_pages(&bio, iter, bdev); in __blkdev_direct_IO_simple()
91 ret = bio.bi_iter.bi_size; in __blkdev_direct_IO_simple()
97 bio.bi_opf |= REQ_NOWAIT; in __blkdev_direct_IO_simple()
99 submit_bio_wait(&bio); in __blkdev_direct_IO_simple()
101 bio_release_pages(&bio, should_dirty); in __blkdev_direct_IO_simple()
102 if (unlikely(bio.bi_status)) in __blkdev_direct_IO_simple()
103 ret = blk_status_to_errno(bio.bi_status); in __blkdev_direct_IO_simple()
109 bio_uninit(&bio); in __blkdev_direct_IO_simple()
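
The __blkdev_direct_IO_simple() lines above build a single bio on the stack with bio_init(), fill it from the user iterator, and wait for it with submit_bio_wait() before tearing it down with bio_uninit(). From user space, a small aligned read on a block device opened with O_DIRECT is the kind of synchronous single-bio request that can take this path. A minimal sketch follows; the device path and the 4096-byte size/alignment are assumptions, not anything the listing prescribes.

/*
 * Minimal sketch, not part of the listing above: a small, aligned O_DIRECT
 * read on a block device.  A single-segment synchronous request like this is
 * the kind of I/O the on-stack-bio path (__blkdev_direct_IO_simple) serves.
 * The device path and the 4096-byte size/alignment are assumptions; the
 * device's logical block size may differ.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        void *buf;
        int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);    /* assumed device */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* O_DIRECT wants buffer, length and offset aligned to the block size. */
        if (posix_memalign(&buf, 4096, 4096)) {
                close(fd);
                return 1;
        }

        ssize_t n = pread(fd, buf, 4096, 0);
        if (n < 0)
                perror("pread");
        else
                printf("read %zd bytes\n", n);

        free(buf);
        close(fd);
        return 0;
}
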
127 struct bio bio ____cacheline_aligned_in_smp;
132 static void blkdev_bio_end_io(struct bio *bio) in blkdev_bio_end_io() argument
134 struct blkdev_dio *dio = bio->bi_private; in blkdev_bio_end_io()
138 if (bio->bi_status && !dio->bio.bi_status) in blkdev_bio_end_io()
139 dio->bio.bi_status = bio->bi_status; in blkdev_bio_end_io()
141 if (bio_integrity(bio)) in blkdev_bio_end_io()
142 bio_integrity_unmap_user(bio); in blkdev_bio_end_io()
151 if (likely(!dio->bio.bi_status)) { in blkdev_bio_end_io()
155 ret = blk_status_to_errno(dio->bio.bi_status); in blkdev_bio_end_io()
159 bio_put(&dio->bio); in blkdev_bio_end_io()
169 bio_check_pages_dirty(bio); in blkdev_bio_end_io()
171 bio_release_pages(bio, false); in blkdev_bio_end_io()
172 bio_put(bio); in blkdev_bio_end_io()
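
blkdev_bio_end_io() above shows the shared-completion pattern for multi-bio requests: the first non-zero bi_status is copied into the parent dio, user pages are released or checked for dirtying, and the request as a whole finishes only when the last bio completes. Below is a standalone sketch of that "first error wins, complete on the last reference" idea using C11 atomics; every name in it (dio_demo, sub_io_done) is made up for illustration and is not a kernel API.

/*
 * Standalone sketch of the completion pattern blkdev_bio_end_io implements:
 * several sub-I/Os share one parent, the first non-zero status sticks, and
 * the parent is completed only when the last sub-I/O drops its reference.
 * All names here are illustrative stand-ins, not kernel APIs.
 */
#include <stdatomic.h>
#include <stdio.h>

struct dio_demo {
        atomic_int ref;         /* one reference per in-flight sub-I/O */
        atomic_int status;      /* 0 = success, first error value wins */
};

static void sub_io_done(struct dio_demo *dio, int status)
{
        int expected = 0;

        /* Record only the first error, mirroring
         * "if (bio->bi_status && !dio->bio.bi_status)". */
        if (status)
                atomic_compare_exchange_strong(&dio->status, &expected, status);

        /* The last completion finishes the whole request exactly once. */
        if (atomic_fetch_sub(&dio->ref, 1) == 1)
                printf("request complete, status=%d\n",
                       atomic_load(&dio->status));
}

int main(void)
{
        struct dio_demo dio;

        atomic_init(&dio.ref, 3);       /* three sub-I/Os in flight */
        atomic_init(&dio.status, 0);

        sub_io_done(&dio, 0);           /* first fragment succeeds */
        sub_io_done(&dio, 5);           /* second fails (stand-in error code) */
        sub_io_done(&dio, 0);           /* last fragment -> completion fires */
        return 0;
}
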
181 struct bio *bio; in __blkdev_direct_IO() local
187 bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL, in __blkdev_direct_IO()
189 dio = container_of(bio, struct blkdev_dio, bio); in __blkdev_direct_IO()
195 bio_get(bio); in __blkdev_direct_IO()
213 bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; in __blkdev_direct_IO()
214 bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint; in __blkdev_direct_IO()
215 bio->bi_write_stream = iocb->ki_write_stream; in __blkdev_direct_IO()
216 bio->bi_private = dio; in __blkdev_direct_IO()
217 bio->bi_end_io = blkdev_bio_end_io; in __blkdev_direct_IO()
218 bio->bi_ioprio = iocb->ki_ioprio; in __blkdev_direct_IO()
220 ret = blkdev_iov_iter_get_pages(bio, iter, bdev); in __blkdev_direct_IO()
222 bio->bi_status = BLK_STS_IOERR; in __blkdev_direct_IO()
223 bio_endio(bio); in __blkdev_direct_IO()
239 bio->bi_opf |= REQ_NOWAIT; in __blkdev_direct_IO()
242 ret = bio_integrity_map_iter(bio, iocb->private); in __blkdev_direct_IO()
249 bio_set_pages_dirty(bio); in __blkdev_direct_IO()
251 task_io_account_write(bio->bi_iter.bi_size); in __blkdev_direct_IO()
253 dio->size += bio->bi_iter.bi_size; in __blkdev_direct_IO()
254 pos += bio->bi_iter.bi_size; in __blkdev_direct_IO()
258 submit_bio(bio); in __blkdev_direct_IO()
262 submit_bio(bio); in __blkdev_direct_IO()
263 bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL); in __blkdev_direct_IO()
280 ret = blk_status_to_errno(dio->bio.bi_status); in __blkdev_direct_IO()
284 bio_put(&dio->bio); in __blkdev_direct_IO()
287 bio_release_pages(bio, false); in __blkdev_direct_IO()
288 bio_clear_flag(bio, BIO_REFFED); in __blkdev_direct_IO()
289 bio_put(bio); in __blkdev_direct_IO()
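
__blkdev_direct_IO() above is the multi-bio loop: it keeps allocating fresh bios with bio_alloc(), submitting each with submit_bio(), and advancing pos and dio->size until the iterator is drained. A direct I/O request too large for a single bio (more than BIO_MAX_VECS pages, commonly 1 MiB with 4 KiB pages) is what reaches this loop. A hedged user-space sketch that should be large enough to get split follows; the device path, segment count and alignment are assumptions.

/*
 * Minimal sketch: a vectored O_DIRECT read large enough (2 MiB here) that it
 * should exceed what a single bio can carry and therefore reach the
 * multi-bio loop in __blkdev_direct_IO.  Device path, segment count and
 * 4096-byte alignment are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

#define NSEGS 512               /* 512 x 4 KiB = 2 MiB total */
#define SEGSZ 4096

int main(void)
{
        struct iovec iov[NSEGS];
        int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);    /* assumed device */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        for (int i = 0; i < NSEGS; i++) {
                /* Each segment individually aligned for O_DIRECT. */
                if (posix_memalign(&iov[i].iov_base, SEGSZ, SEGSZ)) {
                        close(fd);
                        return 1;
                }
                iov[i].iov_len = SEGSZ;
        }

        ssize_t n = preadv(fd, iov, NSEGS, 0);
        if (n < 0)
                perror("preadv");
        else
                printf("read %zd bytes\n", n);

        for (int i = 0; i < NSEGS; i++)
                free(iov[i].iov_base);
        close(fd);
        return 0;
}
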
294 static void blkdev_bio_end_io_async(struct bio *bio) in blkdev_bio_end_io_async() argument
296 struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio); in blkdev_bio_end_io_async()
302 if (likely(!bio->bi_status)) { in blkdev_bio_end_io_async()
306 ret = blk_status_to_errno(bio->bi_status); in blkdev_bio_end_io_async()
309 if (bio_integrity(bio)) in blkdev_bio_end_io_async()
310 bio_integrity_unmap_user(bio); in blkdev_bio_end_io_async()
315 bio_check_pages_dirty(bio); in blkdev_bio_end_io_async()
317 bio_release_pages(bio, false); in blkdev_bio_end_io_async()
318 bio_put(bio); in blkdev_bio_end_io_async()
330 struct bio *bio; in __blkdev_direct_IO_async() local
334 bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL, in __blkdev_direct_IO_async()
336 dio = container_of(bio, struct blkdev_dio, bio); in __blkdev_direct_IO_async()
339 bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; in __blkdev_direct_IO_async()
340 bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint; in __blkdev_direct_IO_async()
341 bio->bi_write_stream = iocb->ki_write_stream; in __blkdev_direct_IO_async()
342 bio->bi_end_io = blkdev_bio_end_io_async; in __blkdev_direct_IO_async()
343 bio->bi_ioprio = iocb->ki_ioprio; in __blkdev_direct_IO_async()
352 bio_iov_bvec_set(bio, iter); in __blkdev_direct_IO_async()
354 ret = blkdev_iov_iter_get_pages(bio, iter, bdev); in __blkdev_direct_IO_async()
358 dio->size = bio->bi_iter.bi_size; in __blkdev_direct_IO_async()
363 bio_set_pages_dirty(bio); in __blkdev_direct_IO_async()
366 task_io_account_write(bio->bi_iter.bi_size); in __blkdev_direct_IO_async()
370 ret = bio_integrity_map_iter(bio, iocb->private); in __blkdev_direct_IO_async()
377 bio->bi_opf |= REQ_ATOMIC; in __blkdev_direct_IO_async()
380 bio->bi_opf |= REQ_NOWAIT; in __blkdev_direct_IO_async()
383 bio->bi_opf |= REQ_POLLED; in __blkdev_direct_IO_async()
384 submit_bio(bio); in __blkdev_direct_IO_async()
385 WRITE_ONCE(iocb->private, bio); in __blkdev_direct_IO_async()
387 submit_bio(bio); in __blkdev_direct_IO_async()
392 bio_put(bio); in __blkdev_direct_IO_async()
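
__blkdev_direct_IO_async() above handles non-synchronous kiocbs with a single bio, optionally tagging it REQ_ATOMIC, REQ_NOWAIT or REQ_POLLED before submit_bio(). The usual way to generate such a kiocb from user space is an asynchronous interface such as io_uring. The sketch below uses liburing (link with -luring); the device path and 4096-byte alignment are assumptions, and creating the ring with IORING_SETUP_IOPOLL instead of 0 sets up polled completions, which is what the REQ_POLLED branch exists for.

/*
 * Minimal sketch: an asynchronous O_DIRECT read submitted through io_uring,
 * i.e. the non-sync kiocb case that reaches __blkdev_direct_IO_async.
 * Requires liburing; device path and 4096-byte alignment are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        void *buf;
        int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);    /* assumed device */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (posix_memalign(&buf, 4096, 4096)) {
                close(fd);
                return 1;
        }
        /* Pass IORING_SETUP_IOPOLL here instead of 0 for a polled ring. */
        if (io_uring_queue_init(8, &ring, 0) < 0) {
                free(buf);
                close(fd);
                return 1;
        }

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fd, buf, 4096, 0);
        io_uring_submit(&ring);

        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read completed, res=%d\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        free(buf);
        close(fd);
        return 0;
}
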
976 offsetof(struct blkdev_dio, bio), in blkdev_init()
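
The blkdev_init() line registers the bio_set with a front pad of offsetof(struct blkdev_dio, bio). That is what lets bio_alloc_bioset() hand back a bio that is really embedded inside a larger blkdev_dio, and what lets the completion handlers recover the dio with container_of(). A standalone sketch of that embed-and-recover pattern follows; struct fake_dio, struct fake_bio and the helpers are illustrative stand-ins, not kernel code.

/*
 * Standalone sketch of the embed-and-recover pattern behind
 * bioset_init(..., offsetof(struct blkdev_dio, bio), ...) and
 * container_of(bio, struct blkdev_dio, bio): allocate the outer object,
 * hand out only a pointer to the embedded member, then get the outer
 * object back from that pointer alone.  Names are illustrative only.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_bio {
        int status;
};

struct fake_dio {
        long size;
        struct fake_bio bio;    /* embedded, like blkdev_dio::bio */
};

/*
 * Allocator that only ever returns the embedded member, the way
 * bio_alloc_bioset returns a bio carved out of a front-padded allocation.
 */
static struct fake_bio *alloc_embedded_bio(void)
{
        struct fake_dio *dio = calloc(1, sizeof(*dio));

        return dio ? &dio->bio : NULL;
}

static void end_io(struct fake_bio *bio)
{
        /* Recover the containing dio from the bio pointer alone. */
        struct fake_dio *dio = container_of(bio, struct fake_dio, bio);

        printf("dio=%p size=%ld status=%d\n",
               (void *)dio, dio->size, bio->status);
        free(dio);
}

int main(void)
{
        struct fake_bio *bio = alloc_embedded_bio();

        if (!bio)
                return 1;
        container_of(bio, struct fake_dio, bio)->size = 4096;
        bio->status = 0;
        end_io(bio);
        return 0;
}
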