Lines matching refs:sectors

1074 rdev->sectors = 0; in md_rdev_clear()
1435 rdev->sectors = rdev->sb_start; in super_90_load()
1440 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1) in super_90_load()
1441 rdev->sectors = (sector_t)(2ULL << 32) - 2; in super_90_load()
1443 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) in super_90_load()
1800 sector_t sectors; in super_1_load() local
1899 int sectors = le16_to_cpu(sb->bblog_size); in super_1_load() local
1900 if (sectors > (PAGE_SIZE / 512)) in super_1_load()
1906 if (!sync_page_io(rdev, bb_sector, sectors << 9, in super_1_load()
1911 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { in super_1_load()
1969 sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; in super_1_load()
1971 sectors = rdev->sb_start; in super_1_load()
1972 if (sectors < le64_to_cpu(sb->data_size)) in super_1_load()
1974 rdev->sectors = le64_to_cpu(sb->data_size); in super_1_load()
2225 sb->data_size = cpu_to_le64(rdev->sectors); in super_1_sync()
2559 rdev->sectors && in bind_rdev_to_array()
2560 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2569 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
3373 if (rdev->sectors && rdev->mddev->external) in offset_store()
3408 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
3445 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); in rdev_size_show()
3451 if (a->data_offset + a->sectors <= b->data_offset) in md_rdevs_overlap()
3453 if (b->data_offset + b->sectors <= a->data_offset) in md_rdevs_overlap()
3479 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) in strict_blocks_to_sectors() argument
3494 *sectors = new; in strict_blocks_to_sectors()
3502 sector_t oldsectors = rdev->sectors; in rdev_size_store()
3503 sector_t sectors; in rdev_size_store() local
3507 if (strict_blocks_to_sectors(buf, &sectors) < 0) in rdev_size_store()
3513 sectors = super_types[my_mddev->major_version]. in rdev_size_store()
3514 rdev_size_change(rdev, sectors); in rdev_size_store()
3515 if (!sectors) in rdev_size_store()
3517 } else if (!sectors) in rdev_size_store()
3518 sectors = bdev_nr_sectors(rdev->bdev) - in rdev_size_store()
3524 if (sectors < my_mddev->dev_sectors) in rdev_size_store()
3527 rdev->sectors = sectors; in rdev_size_store()
3534 if (sectors > oldsectors && my_mddev->external && in rdev_size_store()
3541 rdev->sectors = oldsectors; in rdev_size_store()
4922 sector_t sectors; in size_store() local
4923 int err = strict_blocks_to_sectors(buf, &sectors); in size_store()
4931 err = update_size(mddev, sectors); in size_store()
4936 mddev->dev_sectors > sectors) in size_store()
4937 mddev->dev_sectors = sectors; in size_store()
5032 static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors) in rdev_needs_recovery() argument
5038 rdev->recovery_offset < sectors; in rdev_needs_recovery()
5776 sector_t sectors; in array_size_store() local
5791 sectors = mddev->pers->size(mddev, 0, 0); in array_size_store()
5793 sectors = mddev->array_sectors; in array_size_store()
5797 if (strict_blocks_to_sectors(buf, &sectors) < 0) in array_size_store()
5799 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) in array_size_store()
5806 mddev->array_sectors = sectors; in array_size_store()
7495 rdev->sectors = rdev->sb_start; in md_add_new_disk()
7576 rdev->sectors = rdev->sb_start; in hot_add_disk()
7826 sector_t avail = rdev->sectors; in update_size()
8067 geo->sectors = 4; in md_getgeo()
8830 sector_t sectors; in md_seq_show() local
8866 sectors = 0; in md_seq_show()
8883 sectors += rdev->sectors; in md_seq_show()
8894 (unsigned long long)sectors / 2); in md_seq_show()
9031 sectors) - in is_rdev_holder_idle()
9032 part_stat_read_accum(rdev->bdev, sectors); in is_rdev_holder_idle()
9057 mddev->normal_io_events = part_stat_read_accum(disk->part0, sectors); in is_mddev_idle()
9195 &md_io_clone->sectors); in md_bitmap_start()
9197 fn(mddev, md_io_clone->offset, md_io_clone->sectors); in md_bitmap_start()
9206 fn(mddev, md_io_clone->offset, md_io_clone->sectors); in md_bitmap_end()
9244 md_io_clone->sectors = bio_sectors(*bio); in md_clone_bio()
9599 sector_t sectors; in md_do_sync() local
9641 sectors = mddev->bitmap_ops->skip_sync_blocks(mddev, j); in md_do_sync()
9642 if (sectors) in md_do_sync()
9646 sectors = mddev->pers->sync_request(mddev, j, max_sectors, in md_do_sync()
9648 if (sectors == 0) { in md_do_sync()
9654 io_sectors += sectors; in md_do_sync()
9655 atomic_add(sectors, &mddev->recovery_active); in md_do_sync()
9662 j += sectors; in md_do_sync()
10385 rdev->sectors += rdev->data_offset - rdev->new_data_offset; in md_finish_reshape()
10387 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; in md_finish_reshape()
10396 bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, in rdev_set_badblocks() argument
10416 if (!badblocks_set(&rdev->badblocks, s, sectors, 0)) in rdev_set_badblocks()
10430 void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, in rdev_clear_badblocks() argument
10438 if (!badblocks_clear(&rdev->badblocks, s, sectors)) in rdev_clear_badblocks()
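A pattern that recurs throughout the listing is the 1 KiB-block vs. 512-byte-sector conversion: values written through sysfs are parsed with strict_blocks_to_sectors() (hence the "* 2" on input), and sizes are shown back as rdev->sectors / 2 in rdev_size_show() and md_seq_show(). The standalone C sketch below illustrates that conversion only; it borrows the kernel helper's name for readability but is a user-space illustration under the assumption of 1 KiB display blocks, not the kernel code itself.

/*
 * Sketch: convert a count of 1 KiB blocks (as shown in sysfs/"md"
 * status output) into 512-byte sectors, guarding against overflow.
 * User-space illustration only; names mirror the kernel helper.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t sector_t;

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	char *end;
	unsigned long long blocks;

	errno = 0;
	blocks = strtoull(buf, &end, 10);
	if (errno || end == buf)
		return -EINVAL;

	/* refuse values whose doubling would not fit in 64 bits */
	if (blocks > UINT64_MAX / 2)
		return -EINVAL;

	*sectors = (sector_t)blocks * 2;	/* one 1 KiB block == two 512-byte sectors */
	return 0;
}

int main(void)
{
	sector_t sectors;

	if (strict_blocks_to_sectors("1048576", &sectors) == 0)
		/* 1048576 KiB (1 GiB) == 2097152 sectors; shown back as sectors / 2 KiB */
		printf("%llu sectors (%llu KiB)\n",
		       (unsigned long long)sectors,
		       (unsigned long long)sectors / 2);
	return 0;
}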