Lines matching refs:bio (references to the identifier bio in block/blk-map.c)
40 static inline void blk_mq_map_bio_put(struct bio *bio) in blk_mq_map_bio_put() argument
42 bio_put(bio); in blk_mq_map_bio_put()
45 static struct bio *blk_rq_map_bio_alloc(struct request *rq, in blk_rq_map_bio_alloc()
49 struct bio *bio; in blk_rq_map_bio_alloc() local
51 bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask, in blk_rq_map_bio_alloc()
53 if (!bio) in blk_rq_map_bio_alloc()
56 return bio; in blk_rq_map_bio_alloc()
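blk_rq_map_bio_alloc() pairs a bio allocation with the request: the bio inherits rq->cmd_flags and is later dropped through blk_mq_map_bio_put(). A minimal sketch of that pairing, assuming fs_bio_set as the backing bio_set and passing a NULL bdev purely for illustration (the call above passes a real block device); demo_map_bio_alloc() and demo_map_bio_free() are hypothetical names, not kernel API:

	#include <linux/bio.h>
	#include <linux/blk-mq.h>

	/* Illustrative only: allocate a bio sized for nr_vecs segments that
	 * inherits the request's operation flags. */
	static struct bio *demo_map_bio_alloc(struct request *rq,
					      unsigned int nr_vecs, gfp_t gfp)
	{
		return bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp,
					&fs_bio_set);
	}

	/* Teardown pairs with the allocation. */
	static void demo_map_bio_free(struct bio *bio)
	{
		bio_put(bio);
	}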
67 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) in bio_copy_from_iter() argument
72 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_from_iter()
98 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument
103 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_to_iter()
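bio_copy_to_iter() walks every bounce segment and copies it into the destination iov_iter; note it takes the iter by value, so the caller's iterator is not advanced. A hedged sketch of that loop; demo_copy_to_iter() is a stand-in name:

	#include <linux/bio.h>
	#include <linux/uio.h>

	/* Copy each bounce page back into the caller's iov_iter, as the
	 * READ completion path does. */
	static int demo_copy_to_iter(struct bio *bio, struct iov_iter iter)
	{
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bvec, bio, iter_all) {
			if (copy_page_to_iter(bvec->bv_page, bvec->bv_offset,
					      bvec->bv_len, &iter) != bvec->bv_len)
				return -EFAULT;
		}
		return 0;
	}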
128 static int bio_uncopy_user(struct bio *bio) in bio_uncopy_user() argument
130 struct bio_map_data *bmd = bio->bi_private; in bio_uncopy_user()
141 else if (bio_data_dir(bio) == READ) in bio_uncopy_user()
142 ret = bio_copy_to_iter(bio, bmd->iter); in bio_uncopy_user()
144 bio_free_pages(bio); in bio_uncopy_user()
155 struct bio *bio; in bio_copy_user_iov() local
176 bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask); in bio_copy_user_iov()
177 if (!bio) in bio_copy_user_iov()
210 if (bio_add_page(bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
221 map_data->offset += bio->bi_iter.bi_size; in bio_copy_user_iov()
228 ret = bio_copy_from_iter(bio, iter); in bio_copy_user_iov()
236 ret = bio_copy_from_iter(bio, &iter2); in bio_copy_user_iov()
241 zero_fill_bio(bio); in bio_copy_user_iov()
242 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_copy_user_iov()
245 bio->bi_private = bmd; in bio_copy_user_iov()
247 ret = blk_rq_append_bio(rq, bio); in bio_copy_user_iov()
253 bio_free_pages(bio); in bio_copy_user_iov()
254 blk_mq_map_bio_put(bio); in bio_copy_user_iov()
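bio_copy_user_iov() builds a bounce bio: it allocates pages, attaches them with bio_add_page(), copies user data in for writes, and stashes the mapping state in bio->bi_private so it can be uncopied later. A hedged sketch of the fill step for one page; demo_add_bounce_page() is hypothetical, and on error the real code lets bio_free_pages() reclaim whatever was already attached:

	#include <linux/bio.h>
	#include <linux/gfp.h>
	#include <linux/minmax.h>
	#include <linux/uio.h>

	/* Allocate one bounce page, attach it to the bio, and (for a WRITE)
	 * copy the next chunk of user data into it. */
	static int demo_add_bounce_page(struct bio *bio, struct iov_iter *iter,
					gfp_t gfp)
	{
		unsigned int bytes = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
		struct page *page = alloc_page(gfp);

		if (!page)
			return -ENOMEM;
		if (bio_add_page(bio, page, bytes, 0) < bytes) {
			__free_page(page);
			return -EINVAL;
		}
		/* On failure the caller frees attached pages via bio_free_pages(). */
		if (copy_page_from_iter(page, 0, bytes, iter) != bytes)
			return -EFAULT;
		return 0;
	}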
264 struct bio *bio; in bio_map_user_iov() local
270 bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask); in bio_map_user_iov()
271 if (!bio) in bio_map_user_iov()
277 ret = bio_iov_iter_get_pages(bio, iter, 0); in bio_map_user_iov()
280 ret = blk_rq_append_bio(rq, bio); in bio_map_user_iov()
286 bio_release_pages(bio, false); in bio_map_user_iov()
288 blk_mq_map_bio_put(bio); in bio_map_user_iov()
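bio_map_user_iov() avoids copying: it pins the user pages straight into the bio with bio_iov_iter_get_pages() and unpins them with bio_release_pages() if attaching the bio to the request fails. A sketch of that pin/append/release pattern, assuming the three-argument bio_iov_iter_get_pages() shown above; demo_map_user_pages() is a stand-in name:

	#include <linux/bio.h>
	#include <linux/blk-mq.h>

	/* Pin the iterator's user pages into the bio, then hand the bio to
	 * the request; unpin (without dirtying) if the append fails. */
	static int demo_map_user_pages(struct request *rq, struct bio *bio,
				       struct iov_iter *iter)
	{
		int ret = bio_iov_iter_get_pages(bio, iter, 0);

		if (ret)
			return ret;
		ret = blk_rq_append_bio(rq, bio);
		if (ret)
			bio_release_pages(bio, false);
		return ret;
	}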
292 static void bio_invalidate_vmalloc_pages(struct bio *bio) in bio_invalidate_vmalloc_pages() argument
295 if (bio->bi_private && !op_is_write(bio_op(bio))) { in bio_invalidate_vmalloc_pages()
298 for (i = 0; i < bio->bi_vcnt; i++) in bio_invalidate_vmalloc_pages()
299 len += bio->bi_io_vec[i].bv_len; in bio_invalidate_vmalloc_pages()
300 invalidate_kernel_vmap_range(bio->bi_private, len); in bio_invalidate_vmalloc_pages()
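Vmalloc memory can alias in the CPU cache with its linear-map pages, so a buffer the device has just filled must have its vmap range invalidated before the CPU reads it; buffers the device is about to read are flushed before submission instead. A hedged sketch of that pairing; demo_vmalloc_cache_sync() is hypothetical:

	#include <linux/highmem.h>
	#include <linux/mm.h>

	/* Flush before the device reads a vmalloc buffer (WRITE), invalidate
	 * after the device has filled it (READ), as the endio path above does. */
	static void demo_vmalloc_cache_sync(void *buf, unsigned long len,
					    bool is_write)
	{
		if (!is_vmalloc_addr(buf))
			return;
		if (is_write)
			flush_kernel_vmap_range(buf, len);
		else
			invalidate_kernel_vmap_range(buf, len);
	}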
305 static void bio_map_kern_endio(struct bio *bio) in bio_map_kern_endio() argument
307 bio_invalidate_vmalloc_pages(bio); in bio_map_kern_endio()
308 blk_mq_map_bio_put(bio); in bio_map_kern_endio()
311 static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len, in bio_map_kern()
315 struct bio *bio; in bio_map_kern() local
317 bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask); in bio_map_kern()
318 if (!bio) in bio_map_kern()
322 bio->bi_private = data; in bio_map_kern()
323 if (!bio_add_vmalloc(bio, data, len)) { in bio_map_kern()
324 blk_mq_map_bio_put(bio); in bio_map_kern()
328 bio_add_virt_nofail(bio, data, len); in bio_map_kern()
330 bio->bi_end_io = bio_map_kern_endio; in bio_map_kern()
331 return bio; in bio_map_kern()
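bio_map_kern() maps a kernel buffer without copying: vmalloc memory is added page by page (bio_add_vmalloc()), while a physically contiguous buffer goes in as a single segment (bio_add_virt_nofail()), with bi_private and bi_end_io wired up so the vmap range is invalidated on completion. A hedged sketch of the classic open-coded equivalent using only bio_add_page(); demo_map_kern_buf() is hypothetical and assumes len fits in the bio:

	#include <linux/bio.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Add a kernel buffer to a bio one page at a time, translating
	 * vmalloc addresses through vmalloc_to_page(). */
	static int demo_map_kern_buf(struct bio *bio, void *data, unsigned int len)
	{
		while (len) {
			unsigned int off = offset_in_page(data);
			unsigned int bytes = min_t(unsigned int, PAGE_SIZE - off, len);
			struct page *page = is_vmalloc_addr(data) ?
					vmalloc_to_page(data) : virt_to_page(data);

			if (bio_add_page(bio, page, bytes, off) < bytes)
				return -EINVAL;
			data += bytes;
			len -= bytes;
		}
		return 0;
	}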
334 static void bio_copy_kern_endio(struct bio *bio) in bio_copy_kern_endio() argument
336 bio_free_pages(bio); in bio_copy_kern_endio()
337 blk_mq_map_bio_put(bio); in bio_copy_kern_endio()
340 static void bio_copy_kern_endio_read(struct bio *bio) in bio_copy_kern_endio_read() argument
342 char *p = bio->bi_private; in bio_copy_kern_endio_read()
346 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_kern_endio_read()
351 bio_copy_kern_endio(bio); in bio_copy_kern_endio_read()
365 static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len, in bio_copy_kern()
372 struct bio *bio; in bio_copy_kern() local
383 bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask); in bio_copy_kern()
384 if (!bio) in bio_copy_kern()
401 if (bio_add_page(bio, page, bytes, 0) < bytes) in bio_copy_kern()
409 bio->bi_end_io = bio_copy_kern_endio; in bio_copy_kern()
411 bio->bi_end_io = bio_copy_kern_endio_read; in bio_copy_kern()
412 bio->bi_private = data; in bio_copy_kern()
415 return bio; in bio_copy_kern()
418 bio_free_pages(bio); in bio_copy_kern()
419 blk_mq_map_bio_put(bio); in bio_copy_kern()
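bio_copy_kern() bounces the kernel buffer through freshly allocated pages: for writes the data is copied in before the bio is built up with bio_add_page(), and for reads bio_copy_kern_endio_read() copies the pages back out of the completed bio using the original pointer kept in bi_private. A hedged sketch of the two copy halves; the demo_* helpers are hypothetical:

	#include <linux/bio.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/* WRITE direction: fill a bounce page before submission. */
	static void demo_copy_in(struct page *page, const char *src,
				 unsigned int bytes)
	{
		memcpy(page_address(page), src, bytes);
	}

	/* READ direction: drain the bounce pages back into the caller's
	 * buffer, as the endio handler above does. */
	static void demo_copy_out(struct bio *bio, char *dst)
	{
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bvec, bio, iter_all) {
			memcpy(dst, page_address(bvec->bv_page), bvec->bv_len);
			dst += bvec->bv_len;
		}
	}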
427 int blk_rq_append_bio(struct request *rq, struct bio *bio) in blk_rq_append_bio() argument
435 ret = bio_split_io_at(bio, lim, &nr_segs, max_bytes, 0); in blk_rq_append_bio()
443 if (rq->bio) { in blk_rq_append_bio()
444 if (!ll_back_merge_fn(rq, bio, nr_segs)) in blk_rq_append_bio()
446 rq->phys_gap_bit = bio_seg_gap(rq->q, rq->biotail, bio, in blk_rq_append_bio()
448 rq->biotail->bi_next = bio; in blk_rq_append_bio()
449 rq->biotail = bio; in blk_rq_append_bio()
450 rq->__data_len += bio->bi_iter.bi_size; in blk_rq_append_bio()
451 bio_crypt_free_ctx(bio); in blk_rq_append_bio()
456 rq->bio = rq->biotail = bio; in blk_rq_append_bio()
457 rq->__data_len = bio->bi_iter.bi_size; in blk_rq_append_bio()
458 rq->phys_gap_bit = bio->bi_bvec_gap_bit; in blk_rq_append_bio()
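blk_rq_append_bio() either merges the bio onto the request's existing tail or installs it as the first bio, keeping biotail, __data_len and the gap bookkeeping in sync. A minimal caller-side sketch; demo_attach_bio() is hypothetical:

	#include <linux/bio.h>
	#include <linux/blk-mq.h>

	/* Attach one prepared bio to a passthrough request; the caller still
	 * owns the bio if the append fails. */
	static int demo_attach_bio(struct request *rq, struct bio *bio)
	{
		int ret = blk_rq_append_bio(rq, bio);

		if (ret)
			bio_put(bio);
		return ret;
	}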
467 struct bio *bio; in blk_rq_map_user_bvec() local
474 bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL); in blk_rq_map_user_bvec()
475 if (!bio) in blk_rq_map_user_bvec()
477 bio_iov_bvec_set(bio, iter); in blk_rq_map_user_bvec()
479 ret = blk_rq_append_bio(rq, bio); in blk_rq_map_user_bvec()
481 blk_mq_map_bio_put(bio); in blk_rq_map_user_bvec()
506 struct bio *bio = NULL; in blk_rq_map_user_iov() local
542 if (!bio) in blk_rq_map_user_iov()
543 bio = rq->bio; in blk_rq_map_user_iov()
549 blk_rq_unmap_user(bio); in blk_rq_map_user_iov()
551 rq->bio = NULL; in blk_rq_map_user_iov()
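blk_rq_map_user_iov() maps the iterator chunk by chunk and remembers the first mapped bio so the whole chain can be unwound with blk_rq_unmap_user() if a later chunk fails. A hedged usage sketch: import a user iovec array and map it onto an already-allocated passthrough request; demo_map_user() is hypothetical:

	#include <linux/blk-mq.h>
	#include <linux/slab.h>
	#include <linux/uio.h>

	/* Build an iov_iter from user iovecs and map it onto rq. */
	static int demo_map_user(struct request *rq,
				 const struct iovec __user *uvec, int nr_segs,
				 bool is_write)
	{
		struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
		struct iov_iter iter;
		int ret;

		ret = import_iovec(is_write ? ITER_SOURCE : ITER_DEST, uvec,
				   nr_segs, UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;
		ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
		kfree(iov);	/* NULL when the fast array was used */
		return ret;
	}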
615 int blk_rq_unmap_user(struct bio *bio) in blk_rq_unmap_user() argument
617 struct bio *next_bio; in blk_rq_unmap_user()
620 while (bio) { in blk_rq_unmap_user()
621 if (bio->bi_private) { in blk_rq_unmap_user()
622 ret2 = bio_uncopy_user(bio); in blk_rq_unmap_user()
626 bio_release_pages(bio, bio_data_dir(bio) == READ); in blk_rq_unmap_user()
629 if (bio_integrity(bio)) in blk_rq_unmap_user()
630 bio_integrity_unmap_user(bio); in blk_rq_unmap_user()
632 next_bio = bio; in blk_rq_unmap_user()
633 bio = bio->bi_next; in blk_rq_unmap_user()
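blk_rq_unmap_user() walks the bio chain and, per bio, either copies bounce data back to userspace (when bi_private is set) or releases the pinned pages, dirtying them for reads; a user-mapped integrity payload is unpinned as well. A hedged map/execute/unmap sketch; demo_passthrough() is hypothetical and assumes the current two-argument blk_execute_rq():

	#include <linux/blk-mq.h>
	#include <linux/blkdev.h>

	/* Map a user buffer, run the request synchronously, then unmap. */
	static int demo_passthrough(struct request *rq, void __user *ubuf,
				    unsigned long len)
	{
		struct bio *bio;
		int ret;

		ret = blk_rq_map_user(rq->q, rq, NULL, ubuf, len, GFP_KERNEL);
		if (ret)
			return ret;
		bio = rq->bio;			/* remember the chain for unmap */
		blk_execute_rq(rq, false);
		return blk_rq_unmap_user(bio);
	}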
657 struct bio *bio; in blk_rq_map_kern() local
666 bio = bio_copy_kern(rq, kbuf, len, gfp_mask); in blk_rq_map_kern()
668 bio = bio_map_kern(rq, kbuf, len, gfp_mask); in blk_rq_map_kern()
670 if (IS_ERR(bio)) in blk_rq_map_kern()
671 return PTR_ERR(bio); in blk_rq_map_kern()
673 ret = blk_rq_append_bio(rq, bio); in blk_rq_map_kern()
675 blk_mq_map_bio_put(bio); in blk_rq_map_kern()
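blk_rq_map_kern() picks between the two kernel-buffer paths: it copies through bio_copy_kern() when the buffer cannot be mapped directly (typically when it fails the queue's DMA alignment check or lives on the stack) and otherwise maps it in place with bio_map_kern(). A minimal caller sketch, assuming the request-based signature implied by the calls above; demo_map_kernel_buf() is hypothetical:

	#include <linux/blk-mq.h>

	/* Attach a kernel buffer to a passthrough request before execution. */
	static int demo_map_kernel_buf(struct request *rq, void *kbuf,
				       unsigned int len)
	{
		return blk_rq_map_kern(rq, kbuf, len, GFP_KERNEL);
	}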