Lines Matching refs:clone in drivers/md/dm.c

93 static inline struct dm_target_io *clone_to_tio(struct bio *clone)  in clone_to_tio()  argument
95 return container_of(clone, struct dm_target_io, clone); in clone_to_tio()
119 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
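
The two container_of() lines above work because struct dm_target_io embeds its clone bio as a member, so a pointer to the clone can be walked back to the tio that owns it. A minimal user-space sketch of that recovery pattern follows; only the embedding and the target_bio_nr field are taken from the listing, the rest of the layout is invented for the example.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel types; only the embedding matters. */
    struct bio {
        unsigned int bi_opf;
    };

    struct dm_target_io {
        unsigned int target_bio_nr;
        struct bio clone;                   /* clone bio embedded in the tio */
    };

    /* Same pointer arithmetic as the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct dm_target_io *clone_to_tio(struct bio *clone)
    {
        return container_of(clone, struct dm_target_io, clone);
    }

    int main(void)
    {
        struct dm_target_io tio = { .target_bio_nr = 3 };
        struct bio *clone = &tio.clone;

        /* Recovers &tio from the embedded clone, like dm_bio_get_target_bio_nr(). */
        printf("target_bio_nr = %u\n", clone_to_tio(clone)->target_bio_nr);
        return 0;
    }
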
540 static void dm_start_io_acct(struct dm_io *io, struct bio *clone) in dm_start_io_acct() argument
549 if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) { in dm_start_io_acct()
575 struct bio *clone; in alloc_io() local
577 clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); in alloc_io()
578 if (unlikely(!clone)) in alloc_io()
580 tio = clone_to_tio(clone); in alloc_io()
610 bio_put(&io->tio.clone); in free_io()
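
alloc_io() takes its clone from bio_alloc_clone() against the md->mempools->io_bs bio_set, and free_io() tears everything down with a single bio_put(&io->tio.clone). That only works because, in the real code, that bio_set is created with enough front padding to hold the dm_io that embeds the tio and its clone, so the clone is never a separate allocation. A toy model of that front-padded allocation; the toy_* names and pad size are invented, and bio_put() on a real bio_set performs the equivalent release internally.

    #include <stdlib.h>
    #include <stdio.h>

    struct toy_bio {
        unsigned int bi_opf;
    };

    /* Stands in for the dm_io-sized front padding of io_bs. */
    #define TOY_FRONT_PAD 64

    /* Like a bio_set with front_pad: one allocation, and the caller gets the
     * bio that sits right after the pad where the per-I/O state lives. */
    static struct toy_bio *toy_bio_alloc(void)
    {
        char *buf = calloc(1, TOY_FRONT_PAD + sizeof(struct toy_bio));

        return buf ? (struct toy_bio *)(buf + TOY_FRONT_PAD) : NULL;
    }

    /* Like free_io()'s bio_put(): releasing the bio releases the pad, and with
     * it the per-I/O struct, in one go. */
    static void toy_bio_put(struct toy_bio *bio)
    {
        if (bio)
            free((char *)bio - TOY_FRONT_PAD);
    }

    int main(void)
    {
        struct toy_bio *clone = toy_bio_alloc();

        if (!clone)
            return 1;
        printf("per-I/O state at %p, clone at %p\n",
               (void *)((char *)clone - TOY_FRONT_PAD), (void *)clone);
        toy_bio_put(clone);
        return 0;
    }
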
618 struct bio *clone; in alloc_tio() local
624 clone = &tio->clone; in alloc_tio()
626 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, in alloc_tio()
628 if (!clone) in alloc_tio()
632 clone->bi_opf &= ~REQ_DM_POLL_LIST; in alloc_tio()
634 tio = clone_to_tio(clone); in alloc_tio()
646 clone->bi_bdev = md->disk->part0; in alloc_tio()
648 bio_set_dev(clone, md->disk->part0); in alloc_tio()
651 clone->bi_iter.bi_size = to_bytes(*len); in alloc_tio()
652 if (bio_integrity(clone)) in alloc_tio()
653 bio_integrity_trim(clone); in alloc_tio()
656 return clone; in alloc_tio()
659 static void free_tio(struct bio *clone) in free_tio() argument
661 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) in free_tio()
663 bio_put(clone); in free_tio()
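
alloc_tio() has two paths: the first clone of an io reuses the tio already embedded in the dm_io (the `clone = &tio->clone;` branch), while additional clones come from bio_alloc_clone(). free_tio() therefore checks DM_TIO_INSIDE_DM_IO and only puts the clones that were allocated separately. A compact sketch of that flag-guarded release; the toy_* names and flag value are invented, only the shape of the check comes from the listing.

    #include <stdlib.h>
    #include <stdio.h>

    #define TOY_TIO_INSIDE_DM_IO (1u << 0)      /* stands in for DM_TIO_INSIDE_DM_IO */

    struct toy_tio {
        unsigned int flags;
    };

    /* First clone per I/O reuses the embedded tio; later ones are allocated. */
    static struct toy_tio *toy_alloc_tio(struct toy_tio *embedded, int first)
    {
        if (first) {
            embedded->flags |= TOY_TIO_INSIDE_DM_IO;
            return embedded;
        }
        return calloc(1, sizeof(struct toy_tio));
    }

    /* Mirror of free_tio(): an embedded tio is freed with its dm_io elsewhere,
     * so only separately allocated tios are released here. */
    static void toy_free_tio(struct toy_tio *tio)
    {
        if (!tio || (tio->flags & TOY_TIO_INSIDE_DM_IO))
            return;
        free(tio);
    }

    int main(void)
    {
        struct toy_tio inside = { 0 };
        struct toy_tio *first = toy_alloc_tio(&inside, 1);
        struct toy_tio *extra = toy_alloc_tio(&inside, 0);

        toy_free_tio(first);                    /* no-op: lives inside the dm_io */
        toy_free_tio(extra);                    /* actually freed */
        printf("freed extra tio, kept embedded one\n");
        return 0;
    }
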
1358 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) in dm_submit_bio_remap() argument
1360 struct dm_target_io *tio = clone_to_tio(clone); in dm_submit_bio_remap()
1365 tgt_clone = clone; in dm_submit_bio_remap()
1371 dm_start_io_acct(io, clone); in dm_submit_bio_remap()
1395 static void __map_bio(struct bio *clone) in __map_bio() argument
1397 struct dm_target_io *tio = clone_to_tio(clone); in __map_bio()
1403 clone->bi_end_io = clone_endio; in __map_bio()
1408 tio->old_sector = clone->bi_iter.bi_sector; in __map_bio()
1411 unlikely(swap_bios_limit(ti, clone))) { in __map_bio()
1420 r = linear_map(ti, clone); in __map_bio()
1422 r = stripe_map(ti, clone); in __map_bio()
1424 r = ti->type->map(ti, clone); in __map_bio()
1430 dm_start_io_acct(io, clone); in __map_bio()
1433 dm_submit_bio_remap(clone, NULL); in __map_bio()
1438 unlikely(swap_bios_limit(ti, clone))) in __map_bio()
1440 free_tio(clone); in __map_bio()
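
The three `r = ...` lines above are the tail of __map_bio()'s dispatch: the target's map method is compared against linear_map and stripe_map, presumably so the most common targets get a direct call, and only other targets go through the indirect call via ti->type->map. A stand-alone sketch of that compare-then-call-directly pattern; all names below are invented for the example.

    #include <stdio.h>

    struct bio;                                 /* opaque in this sketch */
    typedef int (*map_fn)(struct bio *clone);

    static int toy_linear_map(struct bio *clone) { (void)clone; return 0; }
    static int toy_stripe_map(struct bio *clone) { (void)clone; return 0; }
    static int toy_other_map(struct bio *clone)  { (void)clone; return 1; }

    /* Compare the registered method against the hot built-ins and call them
     * directly; anything else pays for the call through the function pointer. */
    static int toy_map_bio(map_fn map, struct bio *clone)
    {
        if (map == toy_linear_map)
            return toy_linear_map(clone);
        else if (map == toy_stripe_map)
            return toy_stripe_map(clone);
        else
            return map(clone);
    }

    int main(void)
    {
        printf("linear=%d other=%d\n",
               toy_map_bio(toy_linear_map, NULL),
               toy_map_bio(toy_other_map, NULL));
        return 0;
    }
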
1501 struct bio *clone; in __send_duplicate_bios() local
1516 while ((clone = bio_list_pop(&blist))) { in __send_duplicate_bios()
1518 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); in __send_duplicate_bios()
1519 __map_bio(clone); in __send_duplicate_bios()
1545 ci->io->tio.clone.bi_iter.bi_size = 0; in __send_empty_flush()
1570 struct bio *clone; in __send_empty_flush() local
1578 clone = alloc_tio(ci, NULL, 0, &len, GFP_NOIO); in __send_empty_flush()
1580 bio_set_dev(clone, dd->dm_dev->bdev); in __send_empty_flush()
1581 clone->bi_end_io = clone_endio; in __send_empty_flush()
1582 dm_submit_bio_remap(clone, NULL); in __send_empty_flush()
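
In this __send_empty_flush() path a flush carries no data: each clone is allocated with a zero length, pointed at one underlying device with bio_set_dev(), and submitted via dm_submit_bio_remap(). A small sketch of that fan-out of zero-length flushes, one per device; the device names and toy_* helpers are invented.

    #include <stdio.h>

    struct toy_flush {
        const char *bdev;                       /* which underlying device */
        unsigned int bi_size;                   /* always 0 for an empty flush */
    };

    static void toy_submit(const struct toy_flush *f)
    {
        printf("flush -> %s (bi_size=%u)\n", f->bdev, f->bi_size);
    }

    int main(void)
    {
        /* Stands in for walking the table's device list. */
        const char *devices[] = { "sda", "sdb", "sdc" };
        unsigned int i;

        for (i = 0; i < sizeof(devices) / sizeof(devices[0]); i++) {
            /* One zero-length clone per device, as in the loop above. */
            struct toy_flush clone = { .bdev = devices[i], .bi_size = 0 };

            toy_submit(&clone);
        }
        return 0;
    }
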
1721 struct bio *clone; in __split_and_process_bio() local
1752 clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); in __split_and_process_bio()
1753 if (unlikely(!clone)) in __split_and_process_bio()
1756 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); in __split_and_process_bio()
1758 __map_bio(clone); in __split_and_process_bio()
1826 struct bio *clone; in __send_zone_reset_all_emulated() local
1869 clone = bio_list_pop(&blist); in __send_zone_reset_all_emulated()
1870 clone->bi_opf &= ~REQ_OP_MASK; in __send_zone_reset_all_emulated()
1871 clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC; in __send_zone_reset_all_emulated()
1872 clone->bi_iter.bi_sector = sector; in __send_zone_reset_all_emulated()
1873 clone->bi_iter.bi_size = 0; in __send_zone_reset_all_emulated()
1874 __map_bio(clone); in __send_zone_reset_all_emulated()
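
In __send_zone_reset_all_emulated() each popped clone is retargeted in place: the low REQ_OP_MASK bits of bi_opf are cleared and replaced with REQ_OP_ZONE_RESET | REQ_SYNC while the remaining flag bits are kept, and the iterator is pointed at the zone with a zero payload. A sketch of that rewrite of the op field inside a combined op+flags word; the TOY_* constants are invented placeholders, not the real REQ_* values.

    #include <stdio.h>

    /* Invented placeholder layout: op in the low byte, flags above, mirroring
     * how bi_opf packs an REQ_OP_* value together with REQ_* flags. */
    #define TOY_OP_MASK        0xffu
    #define TOY_OP_WRITE       0x01u
    #define TOY_OP_ZONE_RESET  0x06u
    #define TOY_SYNC           (1u << 8)
    #define TOY_META           (1u << 9)

    int main(void)
    {
        unsigned int bi_opf = TOY_OP_WRITE | TOY_META;  /* original op plus a flag */

        /* Same two steps as the listing: drop the old op, install the new one. */
        bi_opf &= ~TOY_OP_MASK;
        bi_opf |= TOY_OP_ZONE_RESET | TOY_SYNC;

        printf("op=0x%02x flags=0x%04x\n",
               bi_opf & TOY_OP_MASK, bi_opf & ~TOY_OP_MASK);
        return 0;
    }
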
2100 bio_poll(&io->tio.clone, iob, flags); in dm_poll_dm_io()