Lines Matching refs:sctx

109 	struct scrub_ctx *sctx;  member
245 stripe->sctx = NULL; in release_scrub_stripe()
289 static void scrub_put_ctx(struct scrub_ctx *sctx);
323 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) in scrub_free_ctx() argument
327 if (!sctx) in scrub_free_ctx()
331 release_scrub_stripe(&sctx->stripes[i]); in scrub_free_ctx()
333 kvfree(sctx); in scrub_free_ctx()
336 static void scrub_put_ctx(struct scrub_ctx *sctx) in scrub_put_ctx() argument
338 if (refcount_dec_and_test(&sctx->refs)) in scrub_put_ctx()
339 scrub_free_ctx(sctx); in scrub_put_ctx()
345 struct scrub_ctx *sctx; in scrub_setup_ctx() local
351 sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL); in scrub_setup_ctx()
352 if (!sctx) in scrub_setup_ctx()
354 refcount_set(&sctx->refs, 1); in scrub_setup_ctx()
355 sctx->is_dev_replace = is_dev_replace; in scrub_setup_ctx()
356 sctx->fs_info = fs_info; in scrub_setup_ctx()
357 sctx->extent_path.search_commit_root = 1; in scrub_setup_ctx()
358 sctx->extent_path.skip_locking = 1; in scrub_setup_ctx()
359 sctx->csum_path.search_commit_root = 1; in scrub_setup_ctx()
360 sctx->csum_path.skip_locking = 1; in scrub_setup_ctx()
364 ret = init_scrub_stripe(fs_info, &sctx->stripes[i]); in scrub_setup_ctx()
367 sctx->stripes[i].sctx = sctx; in scrub_setup_ctx()
369 sctx->first_free = 0; in scrub_setup_ctx()
370 atomic_set(&sctx->cancel_req, 0); in scrub_setup_ctx()
372 spin_lock_init(&sctx->stat_lock); in scrub_setup_ctx()
373 sctx->throttle_deadline = 0; in scrub_setup_ctx()
375 mutex_init(&sctx->wr_lock); in scrub_setup_ctx()
378 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; in scrub_setup_ctx()
381 return sctx; in scrub_setup_ctx()
384 scrub_free_ctx(sctx); in scrub_setup_ctx()
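
The scrub_setup_ctx() / scrub_put_ctx() / scrub_free_ctx() entries above trace the sctx lifecycle: the context is allocated with one reference, each stripe keeps a back-pointer to it (stripes[i].sctx = sctx), and the structure is freed only when the last reference is dropped; the setup error path falls back to scrub_free_ctx() directly. A minimal userspace sketch of that refcount pattern follows, assuming illustrative names (ctx_alloc, ctx_put) rather than the btrfs API.

/* Minimal userspace sketch of the refcounted-context pattern shown above.
 * The names (ctx_alloc/ctx_put) are illustrative, not the btrfs API.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
	atomic_int refs;
	/* per-scrub state (stripes, stats, paths) would live here */
};

static struct ctx *ctx_alloc(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	atomic_init(&c->refs, 1);	/* caller owns the initial reference */
	return c;
}

static void ctx_put(struct ctx *c)
{
	/* Free only when the last reference drops, as scrub_put_ctx() does. */
	if (c && atomic_fetch_sub(&c->refs, 1) == 1)
		free(c);
}

int main(void)
{
	struct ctx *c = ctx_alloc();

	ctx_put(c);	/* last reference: frees the context */
	return 0;
}
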
560 static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical) in fill_writer_pointer_gap() argument
565 if (!btrfs_is_zoned(sctx->fs_info)) in fill_writer_pointer_gap()
568 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) in fill_writer_pointer_gap()
571 if (sctx->write_pointer < physical) { in fill_writer_pointer_gap()
572 length = physical - sctx->write_pointer; in fill_writer_pointer_gap()
574 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, in fill_writer_pointer_gap()
575 sctx->write_pointer, length); in fill_writer_pointer_gap()
577 sctx->write_pointer = physical; in fill_writer_pointer_gap()
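
fill_writer_pointer_gap() only matters on a zoned dev-replace target: when the next write would land past the tracked write pointer, the gap is zeroed first and the pointer advanced so the zone stays sequential. A hedged sketch of the same bookkeeping, with issue_zeroout() as an illustrative stand-in for btrfs_zoned_issue_zeroout(); the sync_write_pointer_for_zoned() entries further down do the complementary catch-up at the end of a zone under wr_lock.

/* Sketch of the gap-fill bookkeeping above: zero the range between the
 * tracked write pointer and the next physical write, then advance the
 * pointer only if the zeroout succeeded. issue_zeroout() is an
 * illustrative stand-in, not a real API.
 */
#include <stdint.h>
#include <stdio.h>

static int issue_zeroout(uint64_t start, uint64_t len)
{
	printf("zeroout [%llu, +%llu)\n",
	       (unsigned long long)start, (unsigned long long)len);
	return 0;
}

static int fill_gap(uint64_t *write_pointer, uint64_t physical)
{
	int ret = 0;

	if (*write_pointer < physical) {
		ret = issue_zeroout(*write_pointer, physical - *write_pointer);
		if (!ret)
			*write_pointer = physical;
	}
	return ret;
}

int main(void)
{
	uint64_t wp = 4096;

	fill_gap(&wp, 65536);	/* zeroes [4096, 65536) and advances wp */
	return 0;
}
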
866 static void scrub_stripe_report_errors(struct scrub_ctx *sctx, in scrub_stripe_report_errors() argument
871 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe_report_errors()
977 spin_lock(&sctx->stat_lock); in scrub_stripe_report_errors()
978 sctx->stat.data_extents_scrubbed += stripe->nr_data_extents; in scrub_stripe_report_errors()
979 sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents; in scrub_stripe_report_errors()
980 sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits; in scrub_stripe_report_errors()
981 sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits; in scrub_stripe_report_errors()
982 sctx->stat.no_csum += nr_nodatacsum_sectors; in scrub_stripe_report_errors()
983 sctx->stat.read_errors += stripe->init_nr_io_errors; in scrub_stripe_report_errors()
984 sctx->stat.csum_errors += stripe->init_nr_csum_errors; in scrub_stripe_report_errors()
985 sctx->stat.verify_errors += stripe->init_nr_meta_errors; in scrub_stripe_report_errors()
986 sctx->stat.uncorrectable_errors += in scrub_stripe_report_errors()
988 sctx->stat.corrected_errors += nr_repaired_sectors; in scrub_stripe_report_errors()
989 spin_unlock(&sctx->stat_lock); in scrub_stripe_report_errors()
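
scrub_stripe_report_errors() folds each finished stripe's counters into the shared sctx->stat block under stat_lock. A small pthread sketch of that fold-under-lock pattern, with a trimmed-down, illustrative set of fields rather than the full btrfs_scrub_progress layout.

/* Sketch of folding per-stripe counters into shared stats under a lock,
 * mirroring the stat_lock section above. Field names are illustrative.
 */
#include <pthread.h>
#include <stdint.h>

struct scrub_stats {
	pthread_mutex_t lock;
	uint64_t data_bytes_scrubbed;
	uint64_t read_errors;
	uint64_t csum_errors;
};

struct stripe_result {
	uint64_t data_bytes;
	uint64_t io_errors;
	uint64_t csum_errors;
};

static void report_stripe(struct scrub_stats *st, const struct stripe_result *r)
{
	pthread_mutex_lock(&st->lock);
	st->data_bytes_scrubbed += r->data_bytes;
	st->read_errors += r->io_errors;
	st->csum_errors += r->csum_errors;
	pthread_mutex_unlock(&st->lock);
}

int main(void)
{
	struct scrub_stats st = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct stripe_result r = { .data_bytes = 1 << 16, .csum_errors = 1 };

	report_stripe(&st, &r);
	return 0;
}
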
992 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1011 struct scrub_ctx *sctx = stripe->sctx; in scrub_stripe_read_repair_worker() local
1012 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe_read_repair_worker()
1084 if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) { in scrub_stripe_read_repair_worker()
1086 btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start); in scrub_stripe_read_repair_worker()
1088 scrub_write_sectors(sctx, stripe, repaired, false); in scrub_stripe_read_repair_worker()
1093 scrub_stripe_report_errors(sctx, stripe); in scrub_stripe_read_repair_worker()
1152 static void scrub_submit_write_bio(struct scrub_ctx *sctx, in scrub_submit_write_bio() argument
1156 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_submit_write_bio()
1161 fill_writer_pointer_gap(sctx, stripe->physical + bio_off); in scrub_submit_write_bio()
1178 sctx->write_pointer += bio_len; in scrub_submit_write_bio()
1194 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe, in scrub_write_sectors() argument
1211 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); in scrub_write_sectors()
1225 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); in scrub_write_sectors()
1232 static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device, in scrub_throttle_dev_io() argument
1254 if (sctx->throttle_deadline == 0) { in scrub_throttle_dev_io()
1255 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); in scrub_throttle_dev_io()
1256 sctx->throttle_sent = 0; in scrub_throttle_dev_io()
1260 if (ktime_before(now, sctx->throttle_deadline)) { in scrub_throttle_dev_io()
1262 sctx->throttle_sent += bio_size; in scrub_throttle_dev_io()
1263 if (sctx->throttle_sent <= div_u64(bwlimit, div)) in scrub_throttle_dev_io()
1267 delta = ktime_ms_delta(sctx->throttle_deadline, now); in scrub_throttle_dev_io()
1281 sctx->throttle_deadline = 0; in scrub_throttle_dev_io()
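
The scrub_throttle_dev_io() references show a time-slice throttle: arm a deadline, count bytes submitted inside the slice, and once the per-slice quota (bwlimit / div) is spent, wait out the remainder of the slice (the delta computed at line 1267) and clear the deadline so the next call starts a fresh slice. A rough userspace sketch of that scheme; the slice length, quota, and helper names are assumptions for illustration, not the kernel's tunables.

/* Rough sketch of slice-based throttling: allow `quota` bytes per slice
 * of `slice_ms` milliseconds, sleep out the remainder of the slice once
 * the quota is spent, then disarm the deadline.
 */
#define _POSIX_C_SOURCE 200809L
#include <stdint.h>
#include <time.h>

struct throttle {
	struct timespec deadline;	/* end of current slice, 0 = not armed */
	uint64_t sent;			/* bytes submitted in current slice */
};

static int64_t ms_until(const struct timespec *t)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (int64_t)(t->tv_sec - now.tv_sec) * 1000 +
	       (t->tv_nsec - now.tv_nsec) / 1000000;
}

static void throttle_io(struct throttle *th, uint64_t bytes,
			uint64_t quota, int slice_ms)
{
	int64_t left = ms_until(&th->deadline);

	if (left <= 0) {
		/* No slice armed (or it expired): start a fresh one. */
		clock_gettime(CLOCK_MONOTONIC, &th->deadline);
		th->deadline.tv_nsec += (long)slice_ms * 1000000L;
		th->deadline.tv_sec += th->deadline.tv_nsec / 1000000000L;
		th->deadline.tv_nsec %= 1000000000L;
		th->sent = bytes;
		return;
	}
	th->sent += bytes;
	if (th->sent <= quota)
		return;			/* still inside the slice budget */

	/* Budget spent: sleep until the deadline, then disarm it
	 * (mirrors throttle_deadline = 0 above). */
	struct timespec d = { .tv_sec = left / 1000,
			      .tv_nsec = (left % 1000) * 1000000L };
	nanosleep(&d, NULL);
	th->deadline = (struct timespec){ 0 };
	th->sent = 0;
}

int main(void)
{
	struct throttle th = { 0 };

	for (int i = 0; i < 8; i++)
		throttle_io(&th, 64 * 1024, 128 * 1024, 100);
	return 0;
}
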
1458 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical, in sync_write_pointer_for_zoned() argument
1461 struct btrfs_fs_info *fs_info = sctx->fs_info; in sync_write_pointer_for_zoned()
1467 mutex_lock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
1468 if (sctx->write_pointer < physical_end) { in sync_write_pointer_for_zoned()
1469 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, in sync_write_pointer_for_zoned()
1471 sctx->write_pointer); in sync_write_pointer_for_zoned()
1476 mutex_unlock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
1477 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); in sync_write_pointer_for_zoned()
1746 static void scrub_submit_initial_read(struct scrub_ctx *sctx, in scrub_submit_initial_read() argument
1749 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_submit_initial_read()
1783 if (sctx->is_dev_replace && in scrub_submit_initial_read()
1813 static void submit_initial_group_read(struct scrub_ctx *sctx, in submit_initial_group_read() argument
1822 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev, in submit_initial_group_read()
1826 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i]; in submit_initial_group_read()
1830 scrub_submit_initial_read(sctx, stripe); in submit_initial_group_read()
1835 static int flush_scrub_stripes(struct scrub_ctx *sctx) in flush_scrub_stripes() argument
1837 struct btrfs_fs_info *fs_info = sctx->fs_info; in flush_scrub_stripes()
1839 const int nr_stripes = sctx->cur_stripe; in flush_scrub_stripes()
1845 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state)); in flush_scrub_stripes()
1851 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot); in flush_scrub_stripes()
1855 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1862 if (sctx->is_dev_replace) { in flush_scrub_stripes()
1868 if (stripe_has_metadata_error(&sctx->stripes[i])) { in flush_scrub_stripes()
1876 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1882 scrub_write_sectors(sctx, stripe, good, true); in flush_scrub_stripes()
1888 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1891 spin_lock(&sctx->stat_lock); in flush_scrub_stripes()
1892 sctx->stat.last_physical = stripe->physical + stripe_length(stripe); in flush_scrub_stripes()
1893 spin_unlock(&sctx->stat_lock); in flush_scrub_stripes()
1897 sctx->cur_stripe = 0; in flush_scrub_stripes()
1906 static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg, in queue_scrub_stripe() argument
1918 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES); in queue_scrub_stripe()
1923 stripe = &sctx->stripes[sctx->cur_stripe]; in queue_scrub_stripe()
1925 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path, in queue_scrub_stripe()
1926 &sctx->csum_path, dev, physical, in queue_scrub_stripe()
1932 sctx->cur_stripe++; in queue_scrub_stripe()
1935 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) { in queue_scrub_stripe()
1936 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP; in queue_scrub_stripe()
1938 submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP); in queue_scrub_stripe()
1942 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES) in queue_scrub_stripe()
1943 return flush_scrub_stripes(sctx); in queue_scrub_stripe()
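
queue_scrub_stripe() fills the next free slot in sctx->stripes[], kicks off a group read every SCRUB_STRIPES_PER_GROUP slots, and calls flush_scrub_stripes() once all SCRUB_TOTAL_STRIPES slots are in use; the flush also submits the partial tail group, writes good sectors to the target in the dev-replace case, and resets cur_stripe to 0. A compact sketch of that queue/flush batching shape; GROUP, TOTAL, and the callbacks are placeholders, not the kernel's SCRUB_* constants.

/* Sketch of the queue/flush batching shown above: fill slots one at a
 * time, submit a group read whenever a full group is queued, and flush
 * everything once every slot is in use.
 */
#include <stdio.h>

#define GROUP 4
#define TOTAL 16

struct batcher {
	int cur;				/* next free slot */
	void (*submit_group)(int first_slot, int nr);
	int (*flush_all)(struct batcher *b);
};

static int queue_slot(struct batcher *b)
{
	b->cur++;
	/* A full group of slots is queued: start their reads right away. */
	if (b->cur % GROUP == 0)
		b->submit_group(b->cur - GROUP, GROUP);
	/* Every slot used: wait for in-flight work and recycle the array. */
	if (b->cur == TOTAL)
		return b->flush_all(b);
	return 0;
}

static void submit_group(int first_slot, int nr)
{
	printf("submit slots %d..%d\n", first_slot, first_slot + nr - 1);
}

static int flush_all(struct batcher *b)
{
	printf("flush %d slots\n", b->cur);
	b->cur = 0;		/* mirrors cur_stripe = 0 after a flush */
	return 0;
}

int main(void)
{
	struct batcher b = { .cur = 0, .submit_group = submit_group,
			     .flush_all = flush_all };

	for (int i = 0; i < 2 * TOTAL; i++)
		queue_slot(&b);
	return 0;
}
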
1947 static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, in scrub_raid56_parity_stripe() argument
1954 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_parity_stripe()
1967 ASSERT(sctx->raid56_data_stripes); in scrub_raid56_parity_stripe()
1984 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2014 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2026 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2027 scrub_submit_initial_read(sctx, stripe); in scrub_raid56_parity_stripe()
2030 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2036 ASSERT(!btrfs_is_zoned(sctx->fs_info)); in scrub_raid56_parity_stripe()
2048 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2092 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2117 static int scrub_simple_mirror(struct scrub_ctx *sctx, in scrub_simple_mirror() argument
2123 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_simple_mirror()
2138 atomic_read(&sctx->cancel_req)) { in scrub_simple_mirror()
2156 ret = queue_scrub_stripe(sctx, bg, device, mirror_num, in scrub_simple_mirror()
2161 spin_lock(&sctx->stat_lock); in scrub_simple_mirror()
2162 sctx->stat.last_physical = physical + logical_length; in scrub_simple_mirror()
2163 spin_unlock(&sctx->stat_lock); in scrub_simple_mirror()
2217 static int scrub_simple_stripe(struct scrub_ctx *sctx, in scrub_simple_stripe() argument
2237 ret = scrub_simple_mirror(sctx, bg, cur_logical, in scrub_simple_stripe()
2250 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, in scrub_stripe() argument
2256 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe()
2273 ASSERT(sctx->extent_path.nodes[0] == NULL); in scrub_stripe()
2277 if (sctx->is_dev_replace && in scrub_stripe()
2278 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { in scrub_stripe()
2279 mutex_lock(&sctx->wr_lock); in scrub_stripe()
2280 sctx->write_pointer = physical; in scrub_stripe()
2281 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
2286 ASSERT(sctx->raid56_data_stripes == NULL); in scrub_stripe()
2288 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map), in scrub_stripe()
2291 if (!sctx->raid56_data_stripes) { in scrub_stripe()
2297 &sctx->raid56_data_stripes[i]); in scrub_stripe()
2300 sctx->raid56_data_stripes[i].bg = bg; in scrub_stripe()
2301 sctx->raid56_data_stripes[i].sctx = sctx; in scrub_stripe()
2321 ret = scrub_simple_mirror(sctx, bg, bg->start, bg->length, in scrub_stripe()
2328 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index); in scrub_stripe()
2357 ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg, in scrub_stripe()
2359 spin_lock(&sctx->stat_lock); in scrub_stripe()
2360 sctx->stat.last_physical = min(physical + BTRFS_STRIPE_LEN, in scrub_stripe()
2362 spin_unlock(&sctx->stat_lock); in scrub_stripe()
2376 ret = scrub_simple_mirror(sctx, bg, logical, BTRFS_STRIPE_LEN, in scrub_stripe()
2383 spin_lock(&sctx->stat_lock); in scrub_stripe()
2384 sctx->stat.last_physical = physical; in scrub_stripe()
2385 spin_unlock(&sctx->stat_lock); in scrub_stripe()
2388 ret2 = flush_scrub_stripes(sctx); in scrub_stripe()
2391 btrfs_release_path(&sctx->extent_path); in scrub_stripe()
2392 btrfs_release_path(&sctx->csum_path); in scrub_stripe()
2394 if (sctx->raid56_data_stripes) { in scrub_stripe()
2396 release_scrub_stripe(&sctx->raid56_data_stripes[i]); in scrub_stripe()
2397 kfree(sctx->raid56_data_stripes); in scrub_stripe()
2398 sctx->raid56_data_stripes = NULL; in scrub_stripe()
2401 if (sctx->is_dev_replace && ret >= 0) { in scrub_stripe()
2404 ret2 = sync_write_pointer_for_zoned(sctx, in scrub_stripe()
2415 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, in scrub_chunk() argument
2421 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_chunk()
2447 ret = scrub_stripe(sctx, bg, map, scrub_dev, i); in scrub_chunk()
2474 int scrub_enumerate_chunks(struct scrub_ctx *sctx, in scrub_enumerate_chunks() argument
2479 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_enumerate_chunks()
2585 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { in scrub_enumerate_chunks()
2649 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); in scrub_enumerate_chunks()
2650 if (!ret && sctx->is_dev_replace) { in scrub_enumerate_chunks()
2662 } else if (ret == -ENOSPC && !sctx->is_dev_replace && in scrub_enumerate_chunks()
2699 if (sctx->is_dev_replace) { in scrub_enumerate_chunks()
2711 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset, in scrub_enumerate_chunks()
2713 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
2750 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
2755 if (sctx->stat.malloc_errors > 0) { in scrub_enumerate_chunks()
2769 static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev, in scrub_one_super() argument
2772 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_one_super()
2804 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, in scrub_supers() argument
2812 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_supers()
2819 spin_lock(&sctx->stat_lock); in scrub_supers()
2820 sctx->stat.malloc_errors++; in scrub_supers()
2821 spin_unlock(&sctx->stat_lock); in scrub_supers()
2837 spin_lock(&sctx->stat_lock); in scrub_supers()
2838 sctx->stat.super_errors++; in scrub_supers()
2839 spin_unlock(&sctx->stat_lock); in scrub_supers()
2849 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen); in scrub_supers()
2851 spin_lock(&sctx->stat_lock); in scrub_supers()
2852 sctx->stat.super_errors++; in scrub_supers()
2853 spin_unlock(&sctx->stat_lock); in scrub_supers()
2914 struct scrub_ctx *sctx; in btrfs_scrub_dev() local
2935 sctx = scrub_setup_ctx(fs_info, is_dev_replace); in btrfs_scrub_dev()
2936 if (IS_ERR(sctx)) in btrfs_scrub_dev()
2937 return PTR_ERR(sctx); in btrfs_scrub_dev()
2983 sctx->readonly = readonly; in btrfs_scrub_dev()
2984 dev->scrub_ctx = sctx; in btrfs_scrub_dev()
3008 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
3009 old_super_errors = sctx->stat.super_errors; in btrfs_scrub_dev()
3010 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
3018 ret = scrub_supers(sctx, dev); in btrfs_scrub_dev()
3021 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
3027 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) in btrfs_scrub_dev()
3029 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
3033 ret = scrub_enumerate_chunks(sctx, dev, start, end); in btrfs_scrub_dev()
3040 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_dev()
3051 scrub_put_ctx(sctx); in btrfs_scrub_dev()
3076 scrub_free_ctx(sctx); in btrfs_scrub_dev()
3126 struct scrub_ctx *sctx; in btrfs_scrub_cancel_dev() local
3129 sctx = dev->scrub_ctx; in btrfs_scrub_cancel_dev()
3130 if (!sctx) { in btrfs_scrub_cancel_dev()
3134 atomic_inc(&sctx->cancel_req); in btrfs_scrub_cancel_dev()
3151 struct scrub_ctx *sctx = NULL; in btrfs_scrub_progress() local
3156 sctx = dev->scrub_ctx; in btrfs_scrub_progress()
3157 if (sctx) in btrfs_scrub_progress()
3158 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_progress()
3161 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; in btrfs_scrub_progress()