Searched refs:chunk_sectors (Results 1 – 16 of 16) sorted by relevance
/linux/drivers/md/
raid0.c
      84  sector_div(sectors, mddev->chunk_sectors);  in create_strip_zones()
      85  rdev1->sectors = sectors * mddev->chunk_sectors;  in create_strip_zones()
     131  if ((mddev->chunk_sectors << 9) % blksize) {  in create_strip_zones()
     134  mddev->chunk_sectors << 9, blksize);  in create_strip_zones()
     277  sector_div(first_sector, mddev->chunk_sectors);  in create_strip_zones()
     327  unsigned int chunk_sects = mddev->chunk_sectors;  in map_sector()
     363  ~(sector_t)(mddev->chunk_sectors-1));  in raid0_size()
     383  lim.max_hw_sectors = mddev->chunk_sectors;  in raid0_set_limits()
     384  lim.max_write_zeroes_sectors = mddev->chunk_sectors;  in raid0_set_limits()
     385  lim.io_min = mddev->chunk_sectors << 9;  in raid0_set_limits()
    [all …]
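The raid0.c hits show two recurring md idioms: a member device's usable size is rounded down to a whole number of chunks (sector_div() is the kernel's 64-bit division helper for sector_t), and "chunk_sectors << 9" converts 512-byte sectors to bytes. A minimal user-space sketch of the same arithmetic, with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t dev_sectors   = 1000005; /* hypothetical member size, 512-byte sectors */
            uint32_t chunk_sectors = 128;     /* 64 KiB chunk */

            /* round down to a whole number of chunks, as create_strip_zones()
             * does with sector_div() followed by a multiply */
            uint64_t usable = (dev_sectors / chunk_sectors) * chunk_sectors;

            /* << 9 converts 512-byte sectors to bytes, as in "chunk_sectors << 9" */
            printf("usable = %llu sectors, chunk = %u bytes\n",
                   (unsigned long long)usable, chunk_sectors << 9);
            return 0;
    }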
md-linear.c
      75  lim.max_hw_sectors = mddev->chunk_sectors;  in linear_set_limits()
      76  lim.max_write_zeroes_sectors = mddev->chunk_sectors;  in linear_set_limits()
      77  lim.io_min = mddev->chunk_sectors << 9;  in linear_set_limits()
     125  if (mddev->chunk_sectors) {  in linear_conf()
     127  sector_div(sectors, mddev->chunk_sectors);  in linear_conf()
     128  rdev->sectors = sectors * mddev->chunk_sectors;  in linear_conf()
     305  seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);  in linear_status()
dm-zoned-target.c
     994  unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);  in dmz_io_hints() (local)
    1004  limits->max_hw_discard_sectors = chunk_sectors;  in dmz_io_hints()
    1005  limits->max_write_zeroes_sectors = chunk_sectors;  in dmz_io_hints()
    1008  limits->chunk_sectors = chunk_sectors;  in dmz_io_hints()
    1009  limits->max_sectors = chunk_sectors;  in dmz_io_hints()
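In dmz_io_hints() the zone size is copied into chunk_sectors and also caps max_sectors, max_hw_discard_sectors, and max_write_zeroes_sectors, so no single request can straddle a zone. A hedged sketch of the underlying clamp, with a hypothetical helper name and 512-byte sectors assumed:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical helper: trim a request so it ends at the next
     * chunk (here: zone) boundary, which is the effect chunk_sectors
     * has in the block layer's splitting code */
    static uint32_t clamp_to_chunk(uint64_t start, uint32_t len,
                                   uint32_t chunk_sectors)
    {
            uint64_t boundary = (start / chunk_sectors + 1) * chunk_sectors;

            if (start + len > boundary)
                    len = (uint32_t)(boundary - start);
            return len;
    }

    int main(void)
    {
            uint32_t zone_sectors = 524288; /* hypothetical 256 MiB zone */

            /* a request starting 1000 sectors into zone 0 is cut short of zone 1 */
            printf("%u sectors issued\n",
                   clamp_to_chunk(1000, 600000, zone_sectors));
            return 0;
    }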
dm-raid.c
     718  mddev->new_chunk_sectors = mddev->chunk_sectors;  in rs_set_cur()
     731  mddev->chunk_sectors = mddev->new_chunk_sectors;  in rs_set_new()
     780  * rs->md.chunk_sectors  in raid_set_alloc()
     985  if (region_size < rs->md.chunk_sectors) {  in validate_region_size()
    1166  rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;  in parse_raid_params()
    1496  if (rs->md.chunk_sectors)  in parse_raid_params()
    1497  max_io_len = rs->md.chunk_sectors;  in parse_raid_params()
    1546  uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;  in rs_set_raid456_stripe_cache()
    1912  rs->md.new_chunk_sectors != rs->md.chunk_sectors;  in rs_is_layout_change()
    2154  sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);  in super_sync()
    [all …]
dm-unstripe.c
     180  limits->chunk_sectors = uc->chunk_size;  in unstripe_io_hints()
raid10.c
    1801  dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;  in raid10_handle_discard()
    1803  dev_start = first_stripe_index * mddev->chunk_sectors;  in raid10_handle_discard()
    1808  dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;  in raid10_handle_discard()
    1810  dev_end = last_stripe_index * mddev->chunk_sectors;  in raid10_handle_discard()
    1913  seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);  in raid10_status()
    3131  window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;  in raid10_set_cluster_sync_high()
    3852  chunk = mddev->chunk_sectors;  in setup_geo()
    4019  lim.io_min = mddev->chunk_sectors << 9;  in raid10_set_queue_limits()
    4281  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid10_takeover_raid0()
    5118  mddev->chunk_sectors = 1 << conf->geo.chunk_shift;  in raid10_finish_reshape()
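The raid10_handle_discard() hits translate array-wide stripe indexes into per-member offsets: every whole stripe contributes exactly chunk_sectors to each member, so a member's portion of the range is stripe_index * chunk_sectors, with one extra chunk for members the range only partially covers. A deliberately simplified sketch of that striping arithmetic, assuming a near layout with no extra copies (hypothetical function name):

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical striped mapping, near layout, no replicas:
     * array chunk c lives on member (c % ndisks) at chunk (c / ndisks) */
    static void map_sector(uint64_t sector, uint32_t chunk_sectors,
                           unsigned ndisks, unsigned *disk, uint64_t *dev_sector)
    {
            uint64_t chunk  = sector / chunk_sectors;
            uint64_t offset = sector % chunk_sectors;

            *disk = (unsigned)(chunk % ndisks);
            /* stripe_index * chunk_sectors, the expression seen in
             * raid10_handle_discard() */
            *dev_sector = (chunk / ndisks) * chunk_sectors + offset;
    }

    int main(void)
    {
            unsigned disk;
            uint64_t dev_sector;

            map_sector(1000000, 128, 4, &disk, &dev_sector);
            printf("disk %u, device sector %llu\n",
                   disk, (unsigned long long)dev_sector);
            return 0;
    }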
raid1.c
    3376  if (mddev->chunk_sectors != mddev->new_chunk_sectors ||  in raid1_reshape()
    3379  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid1_reshape()
/linux/drivers/char/
ps3flash.c
      26  u64 chunk_sectors;  (member)
      38  start_sector, priv->chunk_sectors,  in ps3flash_read_write_sectors()
     118  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_read()
     151  sector += priv->chunk_sectors;  in ps3flash_read()
     187  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_write()
     226  sector += priv->chunk_sectors;  in ps3flash_write()
     376  priv->chunk_sectors = dev->bounce_size / dev->blk_size;  in ps3flash_probe()
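ps3flash sizes its chunk in probe as bounce_size / blk_size (the number of sectors one bounce buffer covers), converts a file position to a starting sector with pos / bounce_size * chunk_sectors, and then advances one chunk per iteration. The same arithmetic in a standalone sketch, with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bounce_size = 256 * 1024; /* hypothetical bounce buffer, bytes */
            uint64_t blk_size    = 512;        /* device block size, bytes */

            /* sectors covered by one bounce buffer, as in ps3flash_probe() */
            uint64_t chunk_sectors = bounce_size / blk_size;

            /* first sector of the chunk containing a file position,
             * as in ps3flash_read()/ps3flash_write() */
            uint64_t pos    = 1000000;
            uint64_t sector = pos / bounce_size * chunk_sectors;

            printf("chunk_sectors = %llu, start sector = %llu\n",
                   (unsigned long long)chunk_sectors,
                   (unsigned long long)sector);
            return 0;
    }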
/linux/block/
blk-settings.c
     103  min(lim->chunk_sectors, lim->max_hw_sectors));  in blk_validate_zoned_limits()
     217  if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))  in blk_validate_atomic_write_limits()
     750  if (b->chunk_sectors)  in blk_stack_limits()
     751  t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);  in blk_stack_limits()
     775  if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {  in blk_stack_limits()
     776  t->chunk_sectors = 0;  in blk_stack_limits()
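When limits are stacked (for example device-mapper on top of two members), blk_stack_limits() keeps a chunk_sectors that both sides honor by taking the gcd, then discards it entirely if the resulting boundary in bytes is not physical-block aligned. A hedged user-space rendition of those two steps; physical_block_size is assumed to be a power of two, as the mask test requires:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t gcd(uint32_t a, uint32_t b)
    {
            while (b) {
                    uint32_t r = a % b;
                    a = b;
                    b = r;
            }
            return a;
    }

    int main(void)
    {
            uint32_t top = 256, bottom = 192;    /* hypothetical chunk_sectors values */
            uint32_t physical_block_size = 4096; /* bytes, power of two */

            /* keep a boundary both devices agree on */
            uint32_t chunk = bottom ? gcd(top, bottom) : top;

            /* drop it if the boundary (in bytes) is not physical-block aligned */
            if (((uint64_t)chunk << 9) & (physical_block_size - 1))
                    chunk = 0;

            printf("stacked chunk_sectors = %u\n", chunk);
            return 0;
    }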
blk-zoned.c
     748  sector += disk->queue->limits.chunk_sectors) {  in blk_zone_wplug_handle_reset_all()
    1645  sector_t zone_sectors = disk->queue->limits.chunk_sectors;  in blk_revalidate_zone_cb()
    1720  sector_t zone_sectors = q->limits.chunk_sectors;  in blk_revalidate_disk_zones()
blk.h
     362  if (lim->chunk_sectors)  in bio_may_need_split()
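bio_may_need_split() only considers chunk-based splitting when chunk_sectors is set, and blk_boundary_sectors() (in blk-merge.c below) returns it as the boundary: a bio needs a cut whenever its first and last sector fall in different chunks. A minimal sketch of that predicate, with a hypothetical function name:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical predicate mirroring the chunk_sectors part of the
     * block layer's split decision */
    static bool crosses_chunk(uint64_t sector, uint32_t nr_sectors,
                              uint32_t chunk_sectors)
    {
            if (!chunk_sectors) /* no boundary configured */
                    return false;
            return sector / chunk_sectors !=
                   (sector + nr_sectors - 1) / chunk_sectors;
    }

    int main(void)
    {
            printf("%d %d\n",
                   crosses_chunk(100, 28, 128),  /* ends at the boundary: 0 */
                   crosses_chunk(100, 29, 128)); /* one sector over: 1 */
            return 0;
    }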
blk-sysfs.c
     111  QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)  in QUEUE_SYSFS_LIMIT_SHOW()
blk-merge.c
     184  return lim->chunk_sectors;  in blk_boundary_sectors()
/linux/include/uapi/linux/ |
ublk_cmd.h
     369  __u32 chunk_sectors;  (member)
/linux/include/linux/ |
blkdev.h
     367  unsigned int chunk_sectors;  (member)
     715  return sector >> ilog2(disk->queue->limits.chunk_sectors);  in disk_zone_no()
    1380  return q->limits.chunk_sectors;  in bdev_zone_sectors()
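disk_zone_no() relies on chunk_sectors (the zone size on zoned devices) being a power of two, so the division becomes a shift by ilog2(). A user-space sketch of the same trick, using the GCC/Clang __builtin_ctz() as a stand-in for the kernel's ilog2():

    #include <stdint.h>
    #include <stdio.h>

    /* for a power-of-two value, log2 equals the trailing-zero count */
    static unsigned ilog2_u32(uint32_t v) { return __builtin_ctz(v); }

    int main(void)
    {
            uint32_t chunk_sectors = 1u << 19; /* 256 MiB zone in 512-byte sectors */
            uint64_t sector = 1234567890;

            /* mirrors disk_zone_no(): zone number is just a shift when the
             * zone size is a power of two */
            printf("zone %llu\n",
                   (unsigned long long)(sector >> ilog2_u32(chunk_sectors)));
            return 0;
    }

ublk_get_nr_zones() in ublk_drv.c below uses the identical shift to derive the zone count from dev_sectors.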
/linux/drivers/block/ |
ublk_drv.c
     251  return p->dev_sectors >> ilog2(p->chunk_sectors);  in ublk_get_nr_zones()
     318  unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;  in ublk_report_zones()
     546  if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)  in ublk_validate_params()
    2256  .chunk_sectors = p->chunk_sectors,  in ublk_ctrl_start_dev()