| /linux/drivers/md/dm-vdo/ |
| io-submitter.c |
    149  vio->bios_merged.head->bi_iter.bi_sector);  in get_bio_list()
    151  vio->bios_merged.tail->bi_iter.bi_sector);  in get_bio_list()
    195  sector_t merge_sector = bio->bi_iter.bi_sector;  in get_mergeable_locked()
    218  return (vio_merge->bios_merged.tail->bi_iter.bi_sector == merge_sector ?  in get_mergeable_locked()
    222  return (vio_merge->bios_merged.head->bi_iter.bi_sector == merge_sector ?  in get_mergeable_locked()
    231  bio_sector = vio->bios_merged.head->bi_iter.bi_sector;  in map_merged_vio()
    236  bio_sector = vio->bios_merged.tail->bi_iter.bi_sector;  in map_merged_vio()
    243  vdo_int_map_remove(bio_map, prev_vio->bios_merged.tail->bi_iter.bi_sector);  in merge_to_prev_tail()
    256  vdo_int_map_remove(bio_map, next_vio->bios_merged.head->bi_iter.bi_sector);  in merge_to_next_head()
    293  bio->bi_iter.bi_sector,  in try_bio_map_merge()
|
| /linux/drivers/md/ |
| dm-ebs-target.c |
     50  sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);  in __nr_blocks()
     72  unsigned int buf_off = to_bytes(__block_mod(iter->bi_sector, ec->u_bs));  in __ebs_rw_bvec()
     73  sector_t block = __sector_to_block(ec, iter->bi_sector);  in __ebs_rw_bvec()
    145  sector_t block, blocks, sector = bio->bi_iter.bi_sector;  in __ebs_discard_bio()
    169  sector_t blocks, sector = bio->bi_iter.bi_sector;  in __ebs_forget_bio()
    195  block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);  in __ebs_process_bios()
    200  if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))  in __ebs_process_bios()
    366  bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);  in ebs_map()
    375  if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||  in ebs_map()
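The dm-ebs hits revolve around two small helpers that split a 512-byte sector into an emulated-block number and an offset within that block. A minimal sketch of what they plausibly look like, assuming (as dm-ebs enforces) that the emulated block size is a power of two; the struct name and field are abbreviated assumptions, not the target's full private state:

    /* offset of a sector within an emulated block of bs sectors (bs is a power of 2) */
    #define __block_mod(sector, bs)  ((sector) & ((bs) - 1))

    struct ebs_c_sketch {
        unsigned char block_shift;  /* ilog2 of emulated block size, in sectors */
    };

    /* emulated-block number containing the given 512-byte sector */
    static inline sector_t __sector_to_block(struct ebs_c_sketch *ec, sector_t sector)
    {
        return sector >> ec->block_shift;
    }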
|
| dm-linear.c |
     82  static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)  in linear_map_sector() argument
     86  return lc->start + dm_target_offset(ti, bi_sector);  in linear_map_sector()
     94  bio->bi_iter.bi_sector = linear_map_sector(ti, bio->bi_iter.bi_sector);  in linear_map()
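dm-linear is the simplest remap of all, and the hits above show essentially the whole of it: rebase the sector to the target's origin with dm_target_offset(), then add the backing-device offset. A sketch of that pattern, with struct linear_c abbreviated to the one field used here:

    #include <linux/device-mapper.h>

    struct linear_c {
        sector_t start;  /* offset on the underlying device */
        /* ... */
    };

    static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
    {
        struct linear_c *lc = ti->private;

        /* dm_target_offset() subtracts ti->begin, the target's start in the table */
        return lc->start + dm_target_offset(ti, bi_sector);
    }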
|
| dm-stripe.c |
    255  stripe_map_range_sector(sc, bio->bi_iter.bi_sector,  in stripe_map_range()
    261  bio->bi_iter.bi_sector = begin +  in stripe_map_range()
    292  stripe_map_sector(sc, bio->bi_iter.bi_sector,  in stripe_map()
    293  &stripe, &bio->bi_iter.bi_sector);  in stripe_map()
    295  bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;  in stripe_map()
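stripe_map_sector() (line 292) carves the logical address into chunks and deals them round-robin across the stripes; line 295 then adds the chosen stripe's device offset. A hedged sketch of the arithmetic, ignoring dm-stripe's power-of-two fast path (the function name is hypothetical):

    static void stripe_map_sector_sketch(sector_t sector, u32 chunk_size,
                                         u32 stripes, u32 *stripe,
                                         sector_t *result)
    {
        sector_t chunk = sector;
        u32 offset = sector_div(chunk, chunk_size);  /* chunk /= chunk_size; returns remainder */

        *stripe = sector_div(chunk, stripes);        /* deal chunks round-robin */
        *result = chunk * chunk_size + offset;       /* sector within that stripe */
    }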
|
| dm-io-rewind.c |
     62  bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);  in dm_bio_integrity_rewind()
    113  iter->bi_sector -= bytes >> 9;  in dm_bio_rewind_iter()
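Both hits undo what bio_advance() did: to rewind an iterator by bytes already consumed, move the sector cursor back by bytes >> 9, and for integrity metadata move back by the matching number of protection intervals. A sketch of the data-path half (hypothetical name; the real function also rewinds the bvec index):

    static void bio_rewind_iter_sketch(struct bvec_iter *iter, unsigned int bytes)
    {
        iter->bi_sector -= bytes >> 9;  /* back up in 512-byte sectors */
        iter->bi_size += bytes;         /* those bytes are pending again */
        /* bi_idx/bi_bvec_done must be rewound too; omitted here */
    }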
|
| dm-stats.c |
    635  sector_t bi_sector, sector_t end_sector,  in __dm_stat_bio() argument
    642  if (end_sector <= s->start || bi_sector >= s->end)  in __dm_stat_bio()
    644  if (unlikely(bi_sector < s->start)) {  in __dm_stat_bio()
    648  rel_sector = bi_sector - s->start;  in __dm_stat_bio()
    649  todo = end_sector - bi_sector;  in __dm_stat_bio()
    673  sector_t bi_sector, unsigned int bi_sectors, bool end,  in dm_stats_account_io() argument
    686  end_sector = bi_sector + bi_sectors;  in dm_stats_account_io()
    695  (bi_sector == (READ_ONCE(last->last_sector) &&  in dm_stats_account_io()
    714  __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);  in dm_stats_account_io()
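Lines 642-649 clip the bio's sector range [bi_sector, end_sector) against a stats region [s->start, s->end) before accounting. A self-contained sketch of just that clamp (function name and out-parameters are assumptions made for illustration):

    static bool clamp_to_region_sketch(sector_t start, sector_t end,
                                       sector_t bi_sector, sector_t end_sector,
                                       sector_t *rel_sector, sector_t *todo)
    {
        if (end_sector <= start || bi_sector >= end)
            return false;                    /* bio misses this region entirely */
        if (bi_sector < start) {
            *rel_sector = 0;                 /* bio straddles the region start */
            *todo = end_sector - start;
        } else {
            *rel_sector = bi_sector - start; /* offset within the region */
            *todo = end_sector - bi_sector;
        }
        return true;
    }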
|
| dm-dust.c |
    230  bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);  in dust_map()
    233  r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);  in dust_map()
    235  r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);  in dust_map()
|
| dm-unstripe.c |
    120  sector_t sector = bio->bi_iter.bi_sector;  in map_to_core()
    140  bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start;  in unstripe_map()
|
| dm-writecache.c |
   1306  writecache_discard(wc, bio->bi_iter.bi_sector,  in writecache_flush_thread()
   1343  read_original_sector(wc, e) - bio->bi_iter.bi_sector;  in writecache_map_remap_origin()
   1356  e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);  in writecache_map_read()
   1357  if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {  in writecache_map_read()
   1367  bio->bi_iter.bi_sector = cache_sector(wc, e);  in writecache_map_read()
   1394  write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +  in writecache_bio_copy_ssd()
   1421  bio->bi_iter.bi_sector = start_cache_sec;  in writecache_bio_copy_ssd()
   1447  e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);  in writecache_map_write()
   1470  e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);  in writecache_map_write()
   1480  write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);  in writecache_map_write()
   [all …]
|
| dm-stats.h |
     33  sector_t bi_sector, unsigned int bi_sectors, bool end,
|
| dm-thin.c |
    675  sector_t block_nr = bio->bi_iter.bi_sector;  in get_bio_block()
    692  sector_t b = bio->bi_iter.bi_sector;  in get_bio_block_range()
    717  sector_t bi_sector = bio->bi_iter.bi_sector;  in remap() local
    721  bio->bi_iter.bi_sector =  in remap()
    723  (bi_sector & (pool->sectors_per_block - 1));  in remap()
    725  bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +  in remap()
    726  sector_div(bi_sector, pool->sectors_per_block);  in remap()
   1984  else if (bio->bi_iter.bi_sector < tc->origin_size) {  in process_cell()
   1986  bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;  in process_cell()
   2122  sector_t bi_sector = bio->bi_iter.bi_sector;  in __thin_bio_rb_add() local
   [all …]
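remap() (lines 717-726) shows both division strategies side by side: when the pool's block size is a power of two the in-block offset is a simple mask, otherwise sector_div() performs a full 64-bit division. A self-contained sketch of the same choice (hypothetical name, flattened to plain parameters):

    #include <linux/log2.h>

    static sector_t thin_remap_sketch(sector_t bi_sector, sector_t block,
                                      sector_t sectors_per_block)
    {
        if (is_power_of_2(sectors_per_block))
            return (block * sectors_per_block) |
                   (bi_sector & (sectors_per_block - 1));  /* mask out offset */

        /* sector_div() divides bi_sector in place and returns the remainder */
        return block * sectors_per_block +
               sector_div(bi_sector, sectors_per_block);
    }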
|
| dm-log-writes.c |
    223  bio->bi_iter.bi_sector = sector;  in write_metadata()
    276  bio->bi_iter.bi_sector = sector;  in write_inline_data()
    357  bio->bi_iter.bi_sector = sector;  in log_one_block()
    375  bio->bi_iter.bi_sector = sector;  in log_one_block()
    704  block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);  in log_writes_map()
|
| /linux/drivers/md/bcache/ |
| request.c |
    114  bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);  in bch_data_invalidate()
    123  bio->bi_iter.bi_sector += sectors;  in bch_data_invalidate()
    128  bio->bi_iter.bi_sector,  in bch_data_invalidate()
    220  SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);  in CLOSURE_CALLBACK()
    411  if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||  in check_should_bypass()
    430  hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)  in check_should_bypass()
    431  if (i->last == bio->bi_iter.bi_sector &&  in check_should_bypass()
    534  if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)  in cache_lookup_fn()
    538  KEY_START(k) > bio->bi_iter.bi_sector) {  in cache_lookup_fn()
    542  KEY_START(k) - bio->bi_iter.bi_sector)  in cache_lookup_fn()
    [all …]
|
| /linux/block/ |
| blk-lib.c |
     50  bio->bi_iter.bi_sector = *sector;  in blk_alloc_discard_bio()
    135  bio->bi_iter.bi_sector = sector;  in __blkdev_issue_write_zeroes()
    210  bio->bi_iter.bi_sector = sector;  in __blkdev_issue_zero_pages()
    356  bio->bi_iter.bi_sector = sector;  in blkdev_issue_secure_erase()
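Every helper in blk-lib.c follows the same shape: allocate a bio against the block device, point bi_iter.bi_sector at the start of the range, size it, submit. A minimal sketch for a single discard bio, under the assumption that the range fits in one bio; the real helpers loop, chain bios, and respect queue limits:

    #include <linux/bio.h>

    static void issue_one_discard_sketch(struct block_device *bdev,
                                         sector_t sector, sector_t nr_sects)
    {
        struct bio *bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, GFP_KERNEL);

        bio->bi_iter.bi_sector = sector;                  /* range start */
        bio->bi_iter.bi_size = nr_sects << SECTOR_SHIFT;  /* range length; real code caps this */
        submit_bio(bio);
    }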
|
| blk-zoned.c |
    330  bio->bi_iter.bi_sector = sector;  in blkdev_zone_mgmt()
   1094  sector_t sector = bio->bi_iter.bi_sector;  in blk_zone_reset_bio_endio()
   1151  sector_t sector = bio->bi_iter.bi_sector;  in blk_zone_finish_bio_endio()
   1243  bio->bi_iter.bi_sector, bio_sectors(bio));  in disk_zone_wplug_add_bio()
   1273  zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);  in blk_zone_write_plug_bio_merged()
   1321  if (bio->bi_iter.bi_sector != req_back_sector ||  in blk_zone_write_plug_init_request()
   1383  bio->bi_iter.bi_sector += zwplug->wp_offset;  in blk_zone_wplug_prepare_bio()
   1410  sector_t sector = bio->bi_iter.bi_sector;  in blk_zone_wplug_handle_write()
   1509  zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);  in blk_zone_wplug_handle_native_zone_append()
   1539  !bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) {  in blk_zone_wplug_handle_zone_mgmt()
   [all …]
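Most of these hits look up per-zone state keyed by the zone a bio starts in, and line 1383 retargets an emulated zone-append write to the zone's current write pointer. A hedged sketch of the lookup key, assuming the power-of-two zone size the block layer enforces (the helper name is an assumption; the real code uses internal disk helpers):

    #include <linux/log2.h>

    /* zone index containing the bio's start sector */
    static unsigned int bio_zone_no_sketch(struct bio *bio, sector_t zone_sectors)
    {
        return bio->bi_iter.bi_sector >> ilog2(zone_sectors);
    }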
|
| /linux/include/trace/events/ |
| bcache.h |
     28  __entry->sector = bio->bi_iter.bi_sector;
     29  __entry->orig_sector = bio->bi_iter.bi_sector - 16;
    103  __entry->sector = bio->bi_iter.bi_sector;
    138  __entry->sector = bio->bi_iter.bi_sector;
    169  __entry->sector = bio->bi_iter.bi_sector;
    238  __entry->sector = bio->bi_iter.bi_sector;
|
| block.h |
    323  __entry->sector = bio->bi_iter.bi_sector;
    351  __entry->sector = bio->bi_iter.bi_sector;
    504  __entry->sector = bio->bi_iter.bi_sector;
    543  __entry->sector = bio->bi_iter.bi_sector;
    626  __entry->sector = bio->bi_iter.bi_sector;
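Each of these TP_fast_assign() hits snapshots the start sector at the instant the event fires, because the bio may be remapped or completed before the trace buffer is read. A hedged sketch of the pattern with a hypothetical event name (real events also capture size, opcode, and command string):

    TRACE_EVENT(block_bio_sketch,  /* hypothetical event name */
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio),
        TP_STRUCT__entry(
            __field(dev_t,    dev)
            __field(sector_t, sector)
        ),
        TP_fast_assign(
            __entry->dev    = bio_dev(bio);
            __entry->sector = bio->bi_iter.bi_sector;  /* snapshot before remap */
        ),
        TP_printk("%d,%d sector %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long long)__entry->sector)
    );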
|
| /linux/fs/iomap/ |
| bio.c |
     63  bio->bi_iter.bi_sector = sector;  in iomap_bio_read_folio_range()
     85  bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);  in iomap_bio_read_folio_range_sync()
|
| ioend.c |
     28  ioend->io_sector = bio->bi_iter.bi_sector;  in iomap_init_ioend()
    108  bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);  in iomap_alloc_ioend()
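Both iomap files feed iomap_sector() straight into bi_sector: it converts a file position into a device sector via the current extent mapping. A sketch of its definition, quoted from memory of include/linux/iomap.h, so treat the exact form as an assumption:

    static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
    {
        /* byte offset within the mapped extent, rebased to the device address */
        return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
    }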
|
| /linux/mm/ |
| page_io.c |
     46  (unsigned long long)bio->bi_iter.bi_sector);  in __end_swap_bio_write()
     65  (unsigned long long)bio->bi_iter.bi_sector);  in __end_swap_bio_read()
    417  bio.bi_iter.bi_sector = swap_folio_sector(folio);  in swap_writepage_bdev_sync()
    436  bio->bi_iter.bi_sector = swap_folio_sector(folio);  in swap_writepage_bdev_async()
    579  bio.bi_iter.bi_sector = swap_folio_sector(folio);  in swap_read_folio_bdev_sync()
    600  bio->bi_iter.bi_sector = swap_folio_sector(folio);  in swap_read_folio_bdev_async()
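The _sync variants here build a bio on the stack, aim it at the folio's swap slot, and wait inline. A hedged sketch of the read side (hypothetical function name; error handling omitted):

    static void swap_read_sync_sketch(struct folio *folio,
                                      struct block_device *bdev)
    {
        struct bio_vec bv;
        struct bio bio;

        bio_init(&bio, bdev, &bv, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = swap_folio_sector(folio);  /* swap slot -> sector */
        bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
        submit_bio_wait(&bio);                             /* synchronous completion */
    }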
|
| /linux/fs/ext4/ |
| page-io.c |
    352  sector_t bi_sector = bio->bi_iter.bi_sector;  in ext4_end_bio() local
    356  (long long) bio->bi_iter.bi_sector,  in ext4_end_bio()
    372  bi_sector >> (inode->i_blkbits - 9));  in ext4_end_bio()
    428  bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);  in io_submit_init_bio()
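Lines 372 and 428 are the two directions of one conversion: shifting a 512-byte sector by (blkbits - 9) gives the filesystem block, and a buffer_head's block number times its sectors-per-block gives the starting sector. A sketch of both (hypothetical helper names):

    /* fs block containing a 512-byte sector (line 372's direction) */
    static sector_t sector_to_fsblock(sector_t sector, unsigned int blkbits)
    {
        return sector >> (blkbits - 9);  /* 9 == SECTOR_SHIFT */
    }

    /* starting sector of a buffer_head's block (line 428's direction) */
    static sector_t bh_to_sector(const struct buffer_head *bh)
    {
        return bh->b_blocknr * (bh->b_size >> 9);
    }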
|
| /linux/fs/crypto/ |
| bio.c |
     70  bio->bi_iter.bi_sector =  in fscrypt_zeroout_range_inline_crypt()
    163  bio->bi_iter.bi_sector = sector;  in fscrypt_zeroout_range()
|
| /linux/include/linux/ |
| bvec.h |
     78  sector_t bi_sector;  /* device address in 512 byte sectors */  member
    196  .bi_sector = 0, \
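For reference, this is the structure every hit in this list is poking at; an abridged copy of the bvec_iter definition from include/linux/bvec.h:

    struct bvec_iter {
        sector_t     bi_sector;    /* device address in 512 byte sectors */
        unsigned int bi_size;      /* residual I/O count, in bytes */
        unsigned int bi_idx;       /* current index into the bvec array */
        unsigned int bi_bvec_done; /* bytes completed in the current bvec */
    };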
|
| /linux/drivers/nvme/target/ |
| io-cmd-bdev.c |
    211  bip_set_seed(bip, bio->bi_iter.bi_sector >>  in nvmet_bdev_alloc_bip()
    288  bio->bi_iter.bi_sector = sector;  in nvmet_bdev_execute_rw()
    313  bio->bi_iter.bi_sector = sector;  in nvmet_bdev_execute_rw()
|
| /linux/fs/xfs/ |
| xfs_zone_gc.c |
    711  bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock);  in xfs_zone_gc_start_chunk()
    749  chunk->bio.bi_iter.bi_sector = chunk->new_daddr;  in xfs_zone_gc_submit_write()
    876  chunk->new_daddr = chunk->bio.bi_iter.bi_sector;  in xfs_zone_gc_finish_chunk()
    917  bio->bi_iter.bi_sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);  in xfs_zone_gc_prepare_reset()
    918  if (!bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) {  in xfs_zone_gc_prepare_reset()
|