Lines matching refs: sectors_per_block (references to the uint32_t sectors_per_block member of struct pool)
242 uint32_t sectors_per_block; member
376 (b * pool->sectors_per_block); in block_to_sectors()
680 (void) sector_div(block_nr, pool->sectors_per_block); in get_bio_block()
695 b += pool->sectors_per_block - 1ull; /* so we round up */ in get_bio_block_range()
701 (void) sector_div(b, pool->sectors_per_block); in get_bio_block_range()
702 (void) sector_div(e, pool->sectors_per_block); in get_bio_block_range()
723 (bi_sector & (pool->sectors_per_block - 1)); in remap()
725 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
726 sector_div(bi_sector, pool->sectors_per_block); in remap()
1239 (pool->sectors_per_block << SECTOR_SHIFT); in io_overlaps_block()
1346 from.sector = data_origin * pool->sectors_per_block; in schedule_copy()
1350 to.sector = data_dest * pool->sectors_per_block; in schedule_copy()
1359 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { in schedule_copy()
1362 data_dest * pool->sectors_per_block + len, in schedule_copy()
1363 (data_dest + 1) * pool->sectors_per_block); in schedule_copy()
1376 tc->pool->sectors_per_block); in schedule_internal_copy()
1402 ll_zero(tc, m, data_block * pool->sectors_per_block, in schedule_zero()
1403 (data_block + 1) * pool->sectors_per_block); in schedule_zero()
1414 sector_t virt_block_begin = virt_block * pool->sectors_per_block; in schedule_external_copy()
1415 sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; in schedule_external_copy()
1420 pool->sectors_per_block); in schedule_external_copy()
2854 else if (data_limits->max_discard_sectors < pool->sectors_per_block) in disable_discard_passdown_if_not_supported()
2962 pool->sectors_per_block = block_size; in pool_create()
3465 (void) sector_div(data_size, pool->sectors_per_block); in maybe_resize_data_dev()
4029 (unsigned long)pool->sectors_per_block, in pool_status()
4067 if (limits->max_sectors < pool->sectors_per_block) { in pool_io_hints()
4068 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) { in pool_io_hints()
4079 if (io_opt_sectors < pool->sectors_per_block || in pool_io_hints()
4080 !is_factor(io_opt_sectors, pool->sectors_per_block)) { in pool_io_hints()
4081 if (is_factor(pool->sectors_per_block, limits->max_sectors)) in pool_io_hints()
4084 limits->io_min = pool->sectors_per_block << SECTOR_SHIFT; in pool_io_hints()
4085 limits->io_opt = pool->sectors_per_block << SECTOR_SHIFT; in pool_io_hints()
4266 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); in thin_ctr()
4437 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); in thin_status()
4440 tc->pool->sectors_per_block) - 1); in thin_status()
4480 (void) sector_div(blocks, pool->sectors_per_block); in thin_iterate_devices()
4482 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); in thin_iterate_devices()
4493 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; in thin_io_hints()
4494 limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE; in thin_io_hints()
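
Most of the references above (block_to_sectors(), get_bio_block(), get_bio_block_range(), remap()) do the same job: converting between thin-pool block numbers and 512-byte sectors. The userspace C sketch below reproduces that arithmetic under stated assumptions: SECTOR_SHIFT is 9, sector_div() divides in place and returns the remainder (modelled here as a function taking a pointer, whereas the kernel version is a macro operating on the lvalue), and struct pool is cut down to the single field this listing tracks. The helper names and the example block size are illustrative, not the kernel's code.

/* Userspace sketch of the block <-> sector arithmetic referenced above. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9			/* 512-byte sectors */

typedef uint64_t sector_t;

/*
 * Stand-in for the kernel's sector_div(): divide *n in place by a
 * 32-bit divisor and return the remainder.
 */
static uint32_t sector_div(sector_t *n, uint32_t divisor)
{
	uint32_t rem = (uint32_t)(*n % divisor);

	*n /= divisor;
	return rem;
}

/* Only the field this listing tracks; the real struct pool has many more. */
struct pool {
	uint32_t sectors_per_block;
};

static int block_size_is_power_of_two(const struct pool *pool)
{
	return (pool->sectors_per_block & (pool->sectors_per_block - 1)) == 0;
}

/* Block number -> first sector of that block (cf. line 376). */
static sector_t block_to_sectors(const struct pool *pool, sector_t b)
{
	return b * pool->sectors_per_block;
}

/* Bio sector -> containing block, rounding down (cf. line 680). */
static sector_t get_bio_block(const struct pool *pool, sector_t bi_sector)
{
	sector_t block = bi_sector;

	(void) sector_div(&block, pool->sectors_per_block);
	return block;
}

/*
 * Sector range [begin, end) -> block range [*b, *e) of fully covered
 * blocks: round the start up, the end down (cf. lines 695-702).
 */
static void get_block_range(const struct pool *pool, sector_t begin,
			    sector_t end, sector_t *b, sector_t *e)
{
	*b = begin + pool->sectors_per_block - 1ull;	/* so we round up */
	*e = end;
	(void) sector_div(b, pool->sectors_per_block);
	(void) sector_div(e, pool->sectors_per_block);
}

/*
 * Map a sector of a virtual block onto the data device: block base plus
 * the offset within the block, using a mask when the block size is a
 * power of two and a division remainder otherwise (cf. lines 723-726).
 */
static sector_t remap_sector(const struct pool *pool, sector_t block,
			     sector_t bi_sector)
{
	if (block_size_is_power_of_two(pool))
		return block_to_sectors(pool, block) |
		       (bi_sector & (pool->sectors_per_block - 1));

	return block_to_sectors(pool, block) +
	       sector_div(&bi_sector, pool->sectors_per_block);
}

int main(void)
{
	struct pool pool = { .sectors_per_block = 128 };	/* 64 KiB blocks */
	sector_t b, e;

	printf("block 5 starts at sector %llu\n",
	       (unsigned long long)block_to_sectors(&pool, 5));
	printf("sector 700 lies in block %llu\n",
	       (unsigned long long)get_bio_block(&pool, 700));
	printf("sector 700 of virtual block 9 maps to data sector %llu\n",
	       (unsigned long long)remap_sector(&pool, 9, 700));

	get_block_range(&pool, 700, 1300, &b, &e);
	printf("sectors [700, 1300) fully cover blocks [%llu, %llu)\n",
	       (unsigned long long)b, (unsigned long long)e);
	return 0;
}

The round-up on line 695 ("so we round up") is what makes get_bio_block_range() return only blocks completely covered by the incoming range, which is what a discard path needs so that partially covered blocks are left untouched.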
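
The remaining references (disable_discard_passdown_if_not_supported(), pool_io_hints(), thin_io_hints()) use sectors_per_block to size queue limits. The second sketch below approximates that derivation under the same assumptions as above: struct queue_limits is trimmed to the fields the listing touches, BIO_PRISON_MAX_RANGE is given an illustrative value, and the max_sectors fallback inside pool_hints() paraphrases a branch that does not appear in the listing, so treat this as an approximation rather than the kernel's exact code.

/* Userspace sketch of deriving queue limits from sectors_per_block. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9			/* 512-byte sectors */
#define BIO_PRISON_MAX_RANGE 1024	/* illustrative; the kernel constant lives in the dm bio-prison code */

/* Trimmed stand-in holding only the fields the listing touches. */
struct queue_limits {
	uint32_t max_sectors;
	uint32_t max_discard_sectors;
	uint32_t max_hw_discard_sectors;
	uint32_t io_min;		/* bytes */
	uint32_t io_opt;		/* bytes */
	uint32_t discard_granularity;	/* bytes */
};

struct pool {
	uint32_t sectors_per_block;
};

/* True when n divides block_size with no remainder (cf. is_factor() on lines 4068/4080/4081). */
static bool is_factor(uint64_t block_size, uint32_t n)
{
	return block_size % n == 0;
}

/* Passdown only makes sense if the data device can discard a whole block (cf. line 2854). */
static bool discard_passdown_supported(const struct pool *pool,
				       const struct queue_limits *data_limits)
{
	return data_limits->max_discard_sectors >= pool->sectors_per_block;
}

/*
 * Mirrors the intent of pool_io_hints() (cf. lines 4067-4085): if the
 * stacked io_opt is not a whole number of thin blocks, re-advertise
 * io_min/io_opt in terms of the pool's block size.
 */
static void pool_hints(const struct pool *pool, struct queue_limits *limits)
{
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	if (io_opt_sectors < pool->sectors_per_block ||
	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
		if (is_factor(pool->sectors_per_block, limits->max_sectors))
			limits->io_min = limits->max_sectors << SECTOR_SHIFT;
		else
			limits->io_min = pool->sectors_per_block << SECTOR_SHIFT;
		limits->io_opt = pool->sectors_per_block << SECTOR_SHIFT;
	}
}

/* Mirrors thin_io_hints() (cf. lines 4493-4494): discards are block granular. */
static void thin_hints(const struct pool *pool, struct queue_limits *limits)
{
	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
	limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
}

int main(void)
{
	struct pool pool = { .sectors_per_block = 128 };	/* 64 KiB blocks */
	struct queue_limits limits = { .max_sectors = 256, .io_opt = 4096 };
	struct queue_limits data_limits = { .max_discard_sectors = 64 };

	pool_hints(&pool, &limits);
	thin_hints(&pool, &limits);
	printf("io_min=%u io_opt=%u discard_granularity=%u max_hw_discard_sectors=%u\n",
	       (unsigned)limits.io_min, (unsigned)limits.io_opt,
	       (unsigned)limits.discard_granularity,
	       (unsigned)limits.max_hw_discard_sectors);
	printf("discard passdown supported: %s\n",
	       discard_passdown_supported(&pool, &data_limits) ? "yes" : "no");
	return 0;
}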