Lines Matching +full:scrubber +full:- +full:done
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2012 Fusion-io All rights reserved.
18 #include "disk-io.h"
21 #include "async-thread.h"
22 #include "file-item.h"
51 bioc->logical, bioc->full_stripe_logical, bioc->size, in dump_bioc()
52 bioc->map_type, bioc->mirror_num, bioc->replace_nr_stripes, in dump_bioc()
53 bioc->replace_stripe_src, bioc->num_stripes); in dump_bioc()
54 for (int i = 0; i < bioc->num_stripes; i++) { in dump_bioc()
56 i, bioc->stripes[i].dev->devid, in dump_bioc()
57 bioc->stripes[i].physical); in dump_bioc()
67 dump_bioc(fs_info, rbio->bioc); in btrfs_dump_rbio()
70 rbio->flags, rbio->nr_sectors, rbio->nr_data, in btrfs_dump_rbio()
71 rbio->real_stripes, rbio->stripe_nsectors, in btrfs_dump_rbio()
72 rbio->scrubp, rbio->dbitmap); in btrfs_dump_rbio()
78 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
79 (rbio)->bioc->fs_info : NULL; \
89 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
90 (rbio)->bioc->fs_info : NULL; \
101 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
102 (rbio)->bioc->fs_info : NULL; \
113 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
114 (rbio)->bioc->fs_info : NULL; \
160 bitmap_free(rbio->error_bitmap); in free_raid_bio_pointers()
161 kfree(rbio->stripe_pages); in free_raid_bio_pointers()
162 kfree(rbio->bio_sectors); in free_raid_bio_pointers()
163 kfree(rbio->stripe_sectors); in free_raid_bio_pointers()
164 kfree(rbio->finish_pointers); in free_raid_bio_pointers()
171 if (!refcount_dec_and_test(&rbio->refs)) in free_raid_bio()
174 WARN_ON(!list_empty(&rbio->stripe_cache)); in free_raid_bio()
175 WARN_ON(!list_empty(&rbio->hash_list)); in free_raid_bio()
176 WARN_ON(!bio_list_empty(&rbio->bio_list)); in free_raid_bio()
178 for (i = 0; i < rbio->nr_pages; i++) { in free_raid_bio()
179 if (rbio->stripe_pages[i]) { in free_raid_bio()
180 __free_page(rbio->stripe_pages[i]); in free_raid_bio()
181 rbio->stripe_pages[i] = NULL; in free_raid_bio()
185 btrfs_put_bioc(rbio->bioc); in free_raid_bio()
192 INIT_WORK(&rbio->work, work_func); in start_async_work()
193 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work); in start_async_work()
208 if (info->stripe_hash_table) in btrfs_alloc_stripe_hash_table()
220 return -ENOMEM; in btrfs_alloc_stripe_hash_table()
222 spin_lock_init(&table->cache_lock); in btrfs_alloc_stripe_hash_table()
223 INIT_LIST_HEAD(&table->stripe_cache); in btrfs_alloc_stripe_hash_table()
225 h = table->table; in btrfs_alloc_stripe_hash_table()
229 INIT_LIST_HEAD(&cur->hash_list); in btrfs_alloc_stripe_hash_table()
230 spin_lock_init(&cur->lock); in btrfs_alloc_stripe_hash_table()
233 x = cmpxchg(&info->stripe_hash_table, NULL, table); in btrfs_alloc_stripe_hash_table()
241 memcpy_page(phys_to_page(dst->paddr), offset_in_page(dst->paddr), in memcpy_sectors()
242 phys_to_page(src->paddr), offset_in_page(src->paddr), in memcpy_sectors()
252 * once the caching is done, we set the cache ready
264 for (i = 0; i < rbio->nr_sectors; i++) { in cache_rbio_pages()
266 if (!rbio->bio_sectors[i].has_paddr) { in cache_rbio_pages()
272 if (i < rbio->nr_data * rbio->stripe_nsectors) in cache_rbio_pages()
273 ASSERT(rbio->stripe_sectors[i].uptodate); in cache_rbio_pages()
277 memcpy_sectors(&rbio->stripe_sectors[i], &rbio->bio_sectors[i], in cache_rbio_pages()
278 rbio->bioc->fs_info->sectorsize); in cache_rbio_pages()
279 rbio->stripe_sectors[i].uptodate = 1; in cache_rbio_pages()
281 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
289 u64 num = rbio->bioc->full_stripe_logical; in rbio_bucket()
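
The rbio_bucket() fragment above hashes the full-stripe logical address to pick a bucket in the stripe-cache hash table. Below is a minimal, standalone sketch of the same idea; the constants and the multiplicative hash are illustrative assumptions, not the kernel's hash_64()/table size.

#include <stdint.h>
#include <stdio.h>

#define STRIPE_LEN_SHIFT 16	/* assume a 64K per-device stripe */
#define HASH_TABLE_BITS  12	/* assume a 4096-bucket table */

/* Fold the full-stripe logical address down to a bucket index. */
static unsigned int stripe_bucket(uint64_t full_stripe_logical)
{
	uint64_t h = (full_stripe_logical >> STRIPE_LEN_SHIFT) * 0x9e3779b97f4a7c15ULL;

	return (unsigned int)(h >> (64 - HASH_TABLE_BITS));
}

int main(void)
{
	/* Neighbouring full stripes normally map to different buckets. */
	printf("%u\n", stripe_bucket(1ULL << 30));
	printf("%u\n", stripe_bucket((1ULL << 30) + (64 * 1024)));
	return 0;
}
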
305 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in full_page_sectors_uptodate()
309 ASSERT(page_nr < rbio->nr_pages); in full_page_sectors_uptodate()
314 if (!rbio->stripe_sectors[i].uptodate) in full_page_sectors_uptodate()
327 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_stripe_sectors()
331 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) { in index_stripe_sectors()
334 ASSERT(page_index < rbio->nr_pages); in index_stripe_sectors()
335 if (!rbio->stripe_pages[page_index]) in index_stripe_sectors()
338 rbio->stripe_sectors[i].has_paddr = true; in index_stripe_sectors()
339 rbio->stripe_sectors[i].paddr = in index_stripe_sectors()
340 page_to_phys(rbio->stripe_pages[page_index]) + in index_stripe_sectors()
348 const u32 sectorsize = src->bioc->fs_info->sectorsize; in steal_rbio_page()
352 if (dest->stripe_pages[page_nr]) in steal_rbio_page()
353 __free_page(dest->stripe_pages[page_nr]); in steal_rbio_page()
354 dest->stripe_pages[page_nr] = src->stripe_pages[page_nr]; in steal_rbio_page()
355 src->stripe_pages[page_nr] = NULL; in steal_rbio_page()
357 /* Also update the sector->uptodate bits. */ in steal_rbio_page()
360 dest->stripe_sectors[i].uptodate = true; in steal_rbio_page()
366 rbio->bioc->fs_info->sectorsize_bits; in is_data_stripe_page()
375 return (sector_nr < rbio->nr_data * rbio->stripe_nsectors); in is_data_stripe_page()
389 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags)) in steal_rbio()
392 for (i = 0; i < dest->nr_pages; i++) { in steal_rbio()
393 struct page *p = src->stripe_pages[i]; in steal_rbio()
419 * must be called with dest->bio_list_lock held
424 bio_list_merge_init(&dest->bio_list, &victim->bio_list); in merge_rbio()
425 dest->bio_list_bytes += victim->bio_list_bytes; in merge_rbio()
427 bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap, in merge_rbio()
428 dest->stripe_nsectors); in merge_rbio()
445 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
448 table = rbio->bioc->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
449 h = table->table + bucket; in __remove_rbio_from_cache()
454 spin_lock(&h->lock); in __remove_rbio_from_cache()
460 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
462 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
463 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
464 table->cache_size -= 1; in __remove_rbio_from_cache()
476 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
477 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
478 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
479 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
480 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
485 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
486 spin_unlock(&h->lock); in __remove_rbio_from_cache()
499 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
502 table = rbio->bioc->fs_info->stripe_hash_table; in remove_rbio_from_cache()
504 spin_lock(&table->cache_lock); in remove_rbio_from_cache()
506 spin_unlock(&table->cache_lock); in remove_rbio_from_cache()
517 table = info->stripe_hash_table; in btrfs_clear_rbio_cache()
519 spin_lock(&table->cache_lock); in btrfs_clear_rbio_cache()
520 while (!list_empty(&table->stripe_cache)) { in btrfs_clear_rbio_cache()
521 rbio = list_first_entry(&table->stripe_cache, in btrfs_clear_rbio_cache()
525 spin_unlock(&table->cache_lock); in btrfs_clear_rbio_cache()
534 if (!info->stripe_hash_table) in btrfs_free_stripe_hash_table()
537 kvfree(info->stripe_hash_table); in btrfs_free_stripe_hash_table()
538 info->stripe_hash_table = NULL; in btrfs_free_stripe_hash_table()
556 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
559 table = rbio->bioc->fs_info->stripe_hash_table; in cache_rbio()
561 spin_lock(&table->cache_lock); in cache_rbio()
562 spin_lock(&rbio->bio_list_lock); in cache_rbio()
565 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
566 refcount_inc(&rbio->refs); in cache_rbio()
568 if (!list_empty(&rbio->stripe_cache)) { in cache_rbio()
569 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
571 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
572 table->cache_size += 1; in cache_rbio()
575 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
577 if (table->cache_size > RBIO_CACHE_SIZE) { in cache_rbio()
580 found = list_last_entry(&table->stripe_cache, in cache_rbio()
588 spin_unlock(&table->cache_lock); in cache_rbio()
606 src_cnt -= xor_src_cnt; in run_xor()
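
run_xor() accumulates XOR parity over several source buffers; the src_cnt bookkeeping above exists because the kernel feeds them to xor_blocks() in batches. A minimal sketch of the same accumulation, assuming the destination already holds the first contribution (as the RAID5 path further down sets up with a memcpy()); this is not the kernel implementation.

#include <stddef.h>
#include <stdint.h>

/* XOR src_cnt source buffers into dest, one byte at a time. */
static void xor_accumulate(uint8_t *dest, uint8_t * const *srcs, int src_cnt,
			   size_t len)
{
	for (int s = 0; s < src_cnt; s++)
		for (size_t i = 0; i < len; i++)
			dest[i] ^= srcs[s][i];
}
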
617 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
620 spin_lock(&rbio->bio_list_lock); in rbio_is_full()
621 if (size != rbio->nr_data * BTRFS_STRIPE_LEN) in rbio_is_full()
623 BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN); in rbio_is_full()
624 spin_unlock(&rbio->bio_list_lock); in rbio_is_full()
642 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) || in rbio_can_merge()
643 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) in rbio_can_merge()
653 if (test_bit(RBIO_CACHE_BIT, &last->flags) || in rbio_can_merge()
654 test_bit(RBIO_CACHE_BIT, &cur->flags)) in rbio_can_merge()
657 if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical) in rbio_can_merge()
661 if (last->operation != cur->operation) in rbio_can_merge()
671 if (last->operation == BTRFS_RBIO_PARITY_SCRUB) in rbio_can_merge()
674 if (last->operation == BTRFS_RBIO_READ_REBUILD) in rbio_can_merge()
684 ASSERT_RBIO_STRIPE(stripe_nr < rbio->real_stripes, rbio, stripe_nr); in rbio_stripe_sector_index()
685 ASSERT_RBIO_SECTOR(sector_nr < rbio->stripe_nsectors, rbio, sector_nr); in rbio_stripe_sector_index()
687 return stripe_nr * rbio->stripe_nsectors + sector_nr; in rbio_stripe_sector_index()
690 /* Return a sector from rbio->stripe_sectors, not from the bio list */
695 return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr, in rbio_stripe_sector()
703 return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr); in rbio_pstripe_sector()
710 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_sector()
712 return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr); in rbio_qstripe_sector()
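
These helpers flatten (stripe_nr, sector_nr) into an index into stripe_sectors[] and place the P and Q stripes right after the data stripes. A worked example with assumed geometry (nr_data = 2, real_stripes = 4, i.e. RAID6, and 16 sectors per 64K stripe):

#include <stdio.h>

int main(void)
{
	const int nr_data = 2, real_stripes = 4, stripe_nsectors = 16;
	const int pstripe = nr_data;	/* stripe 2 holds P */
	/* Stripe 3 holds Q; on RAID5 (nr_data + 1 == real_stripes) there is none. */
	const int qstripe = (nr_data + 1 == real_stripes) ? -1 : nr_data + 1;

	/* Sector 5 of the Q stripe lives at stripe_sectors[3 * 16 + 5] = [53]. */
	printf("P=%d Q=%d idx=%d\n", pstripe, qstripe, qstripe * stripe_nsectors + 5);
	return 0;
}
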
746 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio); in lock_stripe_add()
748 spin_lock(&h->lock); in lock_stripe_add()
749 list_for_each_entry(cur, &h->hash_list, hash_list) { in lock_stripe_add()
750 if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical) in lock_stripe_add()
753 spin_lock(&cur->bio_list_lock); in lock_stripe_add()
756 if (bio_list_empty(&cur->bio_list) && in lock_stripe_add()
757 list_empty(&cur->plug_list) && in lock_stripe_add()
758 test_bit(RBIO_CACHE_BIT, &cur->flags) && in lock_stripe_add()
759 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { in lock_stripe_add()
760 list_del_init(&cur->hash_list); in lock_stripe_add()
761 refcount_dec(&cur->refs); in lock_stripe_add()
765 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
773 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
785 list_for_each_entry(pending, &cur->plug_list, plug_list) { in lock_stripe_add()
788 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
799 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
800 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
805 refcount_inc(&rbio->refs); in lock_stripe_add()
806 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
808 spin_unlock(&h->lock); in lock_stripe_add()
829 h = rbio->bioc->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
831 if (list_empty(&rbio->plug_list)) in unlock_stripe()
834 spin_lock(&h->lock); in unlock_stripe()
835 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
837 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
843 if (list_empty(&rbio->plug_list) && in unlock_stripe()
844 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
846 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
847 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
848 goto done; in unlock_stripe()
851 list_del_init(&rbio->hash_list); in unlock_stripe()
852 refcount_dec(&rbio->refs); in unlock_stripe()
859 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
861 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
866 list_del_init(&rbio->plug_list); in unlock_stripe()
868 list_add(&next->hash_list, &h->hash_list); in unlock_stripe()
869 refcount_inc(&next->refs); in unlock_stripe()
870 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
871 spin_unlock(&h->lock); in unlock_stripe()
873 if (next->operation == BTRFS_RBIO_READ_REBUILD) { in unlock_stripe()
875 } else if (next->operation == BTRFS_RBIO_WRITE) { in unlock_stripe()
878 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) { in unlock_stripe()
886 done: in unlock_stripe()
887 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
888 spin_unlock(&h->lock); in unlock_stripe()
900 next = cur->bi_next; in rbio_endio_bio_list()
901 cur->bi_next = NULL; in rbio_endio_bio_list()
902 cur->bi_status = status; in rbio_endio_bio_list()
914 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
917 kfree(rbio->csum_buf); in rbio_orig_end_io()
918 bitmap_free(rbio->csum_bitmap); in rbio_orig_end_io()
919 rbio->csum_buf = NULL; in rbio_orig_end_io()
920 rbio->csum_bitmap = NULL; in rbio_orig_end_io()
927 bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors); in rbio_orig_end_io()
930 * At this moment, rbio->bio_list is empty, however since rbio does not in rbio_orig_end_io()
932 * hash list, rbio may be merged with others so that rbio->bio_list in rbio_orig_end_io()
933 * becomes non-empty. in rbio_orig_end_io()
934 * Once unlock_stripe() is done, rbio->bio_list will not be updated any in rbio_orig_end_io()
938 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
965 ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->real_stripes, in sector_in_rbio()
967 ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors, in sector_in_rbio()
970 index = stripe_nr * rbio->stripe_nsectors + sector_nr; in sector_in_rbio()
971 ASSERT(index >= 0 && index < rbio->nr_sectors); in sector_in_rbio()
973 spin_lock(&rbio->bio_list_lock); in sector_in_rbio()
974 sector = &rbio->bio_sectors[index]; in sector_in_rbio()
975 if (sector->has_paddr || bio_list_only) { in sector_in_rbio()
977 if (!sector->has_paddr) in sector_in_rbio()
979 spin_unlock(&rbio->bio_list_lock); in sector_in_rbio()
982 spin_unlock(&rbio->bio_list_lock); in sector_in_rbio()
984 return &rbio->stripe_sectors[index]; in sector_in_rbio()
989 * this does not allocate any pages for rbio->stripe_pages.
994 const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes; in alloc_rbio()
998 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; in alloc_rbio()
1003 ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize)); in alloc_rbio()
1019 return ERR_PTR(-ENOMEM); in alloc_rbio()
1020 rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *), in alloc_rbio()
1022 rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr), in alloc_rbio()
1024 rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr), in alloc_rbio()
1026 rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS); in alloc_rbio()
1027 rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS); in alloc_rbio()
1029 if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors || in alloc_rbio()
1030 !rbio->finish_pointers || !rbio->error_bitmap) { in alloc_rbio()
1033 return ERR_PTR(-ENOMEM); in alloc_rbio()
1036 bio_list_init(&rbio->bio_list); in alloc_rbio()
1037 init_waitqueue_head(&rbio->io_wait); in alloc_rbio()
1038 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
1039 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
1040 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
1041 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
1043 rbio->bioc = bioc; in alloc_rbio()
1044 rbio->nr_pages = num_pages; in alloc_rbio()
1045 rbio->nr_sectors = num_sectors; in alloc_rbio()
1046 rbio->real_stripes = real_stripes; in alloc_rbio()
1047 rbio->stripe_npages = stripe_npages; in alloc_rbio()
1048 rbio->stripe_nsectors = stripe_nsectors; in alloc_rbio()
1049 refcount_set(&rbio->refs, 1); in alloc_rbio()
1050 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
1052 ASSERT(btrfs_nr_parity_stripes(bioc->map_type)); in alloc_rbio()
1053 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type); in alloc_rbio()
1054 ASSERT(rbio->nr_data > 0); in alloc_rbio()
1064 ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, false); in alloc_rbio_pages()
1075 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_parity_pages()
1078 ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages, in alloc_rbio_parity_pages()
1079 rbio->stripe_pages + data_pages, false); in alloc_rbio_parity_pages()
1105 *faila = -1; in get_rbio_veritical_errors()
1106 *failb = -1; in get_rbio_veritical_errors()
1109 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in get_rbio_veritical_errors()
1110 int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr; in get_rbio_veritical_errors()
1112 if (test_bit(total_sector_nr, rbio->error_bitmap)) { in get_rbio_veritical_errors()
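
get_rbio_veritical_errors() scans one vertical position (the same sector_nr in every stripe), counts how many stripes have that sector flagged in error_bitmap, and remembers the first two as faila/failb. A simplified sketch with a plain bool array standing in for the kernel bitmap:

#include <stdbool.h>

static int count_vertical_errors(const bool *error_bitmap, int real_stripes,
				 int stripe_nsectors, int sector_nr,
				 int *faila, int *failb)
{
	int found = 0;

	*faila = -1;
	*failb = -1;
	for (int stripe_nr = 0; stripe_nr < real_stripes; stripe_nr++) {
		/* Same flattening as above: stripe_nr * stripe_nsectors + sector_nr. */
		if (error_bitmap[stripe_nr * stripe_nsectors + sector_nr]) {
			if (found == 0)
				*faila = stripe_nr;
			else if (found == 1)
				*failb = stripe_nr;
			found++;
		}
	}
	return found;
}
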
1139 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in rbio_add_io_sector()
1140 struct bio *last = bio_list->tail; in rbio_add_io_sector()
1148 * thus it can be larger than rbio->real_stripes. in rbio_add_io_sector()
1149 * So here we check against bioc->num_stripes, not rbio->real_stripes. in rbio_add_io_sector()
1151 ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes, in rbio_add_io_sector()
1153 ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors, in rbio_add_io_sector()
1155 ASSERT(sector->has_paddr); in rbio_add_io_sector()
1157 stripe = &rbio->bioc->stripes[stripe_nr]; in rbio_add_io_sector()
1158 disk_start = stripe->physical + sector_nr * sectorsize; in rbio_add_io_sector()
1161 if (!stripe->dev->bdev) { in rbio_add_io_sector()
1164 set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr, in rbio_add_io_sector()
1165 rbio->error_bitmap); in rbio_add_io_sector()
1170 if (unlikely(found_errors > rbio->bioc->max_errors)) in rbio_add_io_sector()
1171 return -EIO; in rbio_add_io_sector()
1177 u64 last_end = last->bi_iter.bi_sector << SECTOR_SHIFT; in rbio_add_io_sector()
1178 last_end += last->bi_iter.bi_size; in rbio_add_io_sector()
1184 if (last_end == disk_start && !last->bi_status && in rbio_add_io_sector()
1185 last->bi_bdev == stripe->dev->bdev) { in rbio_add_io_sector()
1186 ret = bio_add_page(last, phys_to_page(sector->paddr), in rbio_add_io_sector()
1187 sectorsize, offset_in_page(sector->paddr)); in rbio_add_io_sector()
1194 bio = bio_alloc(stripe->dev->bdev, in rbio_add_io_sector()
1197 bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT; in rbio_add_io_sector()
1198 bio->bi_private = rbio; in rbio_add_io_sector()
1200 __bio_add_page(bio, phys_to_page(sector->paddr), sectorsize, in rbio_add_io_sector()
1201 offset_in_page(sector->paddr)); in rbio_add_io_sector()
1208 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_one_bio()
1209 const u32 sectorsize_bits = rbio->bioc->fs_info->sectorsize_bits; in index_one_bio()
1210 struct bvec_iter iter = bio->bi_iter; in index_one_bio()
1212 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in index_one_bio()
1213 rbio->bioc->full_stripe_logical; in index_one_bio()
1217 struct sector_ptr *sector = &rbio->bio_sectors[index]; in index_one_bio()
1219 sector->has_paddr = true; in index_one_bio()
1220 sector->paddr = paddr; in index_one_bio()
1237 spin_lock(&rbio->bio_list_lock); in index_rbio_pages()
1238 bio_list_for_each(bio, &rbio->bio_list) in index_rbio_pages()
1241 spin_unlock(&rbio->bio_list_lock); in index_rbio_pages()
1247 const struct btrfs_io_context *bioc = rbio->bioc; in bio_get_trace_info()
1252 /* We rely on bio->bi_bdev to find the stripe number. */ in bio_get_trace_info()
1253 if (!bio->bi_bdev) in bio_get_trace_info()
1256 for (i = 0; i < bioc->num_stripes; i++) { in bio_get_trace_info()
1257 if (bio->bi_bdev != bioc->stripes[i].dev->bdev) in bio_get_trace_info()
1259 trace_info->stripe_nr = i; in bio_get_trace_info()
1260 trace_info->devid = bioc->stripes[i].dev->devid; in bio_get_trace_info()
1261 trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in bio_get_trace_info()
1262 bioc->stripes[i].physical; in bio_get_trace_info()
1267 trace_info->devid = -1; in bio_get_trace_info()
1268 trace_info->offset = -1; in bio_get_trace_info()
1269 trace_info->stripe_nr = -1; in bio_get_trace_info()
1289 ASSERT_RBIO(rbio->real_stripes >= 2, rbio); in assert_rbio()
1290 ASSERT_RBIO(rbio->nr_data > 0, rbio); in assert_rbio()
1296 ASSERT_RBIO(rbio->nr_data < rbio->real_stripes, rbio); in assert_rbio()
1302 ASSERT(sector->has_paddr); in kmap_local_sector()
1304 return kmap_local_page(phys_to_page(sector->paddr)) + in kmap_local_sector()
1305 offset_in_page(sector->paddr); in kmap_local_sector()
1311 void **pointers = rbio->finish_pointers; in generate_pq_vertical()
1312 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in generate_pq_vertical()
1315 const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6; in generate_pq_vertical()
1318 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in generate_pq_vertical()
1325 sector->uptodate = 1; in generate_pq_vertical()
1334 sector->uptodate = 1; in generate_pq_vertical()
1338 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in generate_pq_vertical()
1342 memcpy(pointers[rbio->nr_data], pointers[0], sectorsize); in generate_pq_vertical()
1343 run_xor(pointers + 1, rbio->nr_data - 1, sectorsize); in generate_pq_vertical()
1345 for (stripe = stripe - 1; stripe >= 0; stripe--) in generate_pq_vertical()
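
generate_pq_vertical() either calls raid6_call.gen_syndrome() (RAID6) or, for RAID5, copies the first data sector into P and XORs in the rest. A slow reference version of what the syndrome computes, assuming the standard RAID6 definition: P is the XOR of all data sectors and Q is the GF(2^8) weighted sum of g^i * D_i with generator g = 2.

#include <stddef.h>
#include <stdint.h>

/* Multiply by 2 in GF(2^8) with the RAID6 polynomial 0x11d. */
static uint8_t gf2_mul2(uint8_t v)
{
	return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

/* Byte-at-a-time P/Q for one vertical slice; gen_syndrome() does this with SIMD. */
static void gen_pq_reference(uint8_t * const *data, int nr_data, size_t len,
			     uint8_t *p, uint8_t *q)
{
	for (size_t i = 0; i < len; i++) {
		uint8_t pv = 0, qv = 0;

		/* Horner's rule, walking the data stripes from last to first. */
		for (int d = nr_data - 1; d >= 0; d--) {
			qv = gf2_mul2(qv) ^ data[d][i];
			pv ^= data[d][i];
		}
		p[i] = pv;
		q[i] = qv;
	}
}
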
1361 ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors)); in rmw_assemble_write_bios()
1367 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in rmw_assemble_write_bios()
1373 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1377 stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_assemble_write_bios()
1378 sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_assemble_write_bios()
1381 if (!test_bit(sectornr, &rbio->dbitmap)) in rmw_assemble_write_bios()
1384 if (stripe < rbio->nr_data) { in rmw_assemble_write_bios()
1398 if (likely(!rbio->bioc->replace_nr_stripes)) in rmw_assemble_write_bios()
1406 ASSERT(rbio->bioc->replace_stripe_src >= 0); in rmw_assemble_write_bios()
1408 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1412 stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_assemble_write_bios()
1413 sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_assemble_write_bios()
1420 if (stripe != rbio->bioc->replace_stripe_src) { in rmw_assemble_write_bios()
1426 total_sector_nr += rbio->stripe_nsectors - 1; in rmw_assemble_write_bios()
1431 if (!test_bit(sectornr, &rbio->dbitmap)) in rmw_assemble_write_bios()
1434 if (stripe < rbio->nr_data) { in rmw_assemble_write_bios()
1443 rbio->real_stripes, in rmw_assemble_write_bios()
1452 return -EIO; in rmw_assemble_write_bios()
1457 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in set_rbio_range_error()
1458 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in set_rbio_range_error()
1459 rbio->bioc->full_stripe_logical; in set_rbio_range_error()
1460 int total_nr_sector = offset >> fs_info->sectorsize_bits; in set_rbio_range_error()
1462 ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors); in set_rbio_range_error()
1464 bitmap_set(rbio->error_bitmap, total_nr_sector, in set_rbio_range_error()
1465 bio->bi_iter.bi_size >> fs_info->sectorsize_bits); in set_rbio_range_error()
1473 if (bio->bi_iter.bi_size == 0) { in set_rbio_range_error()
1477 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in set_rbio_range_error()
1478 if (!rbio->bioc->stripes[stripe_nr].dev->bdev) { in set_rbio_range_error()
1480 bitmap_set(rbio->error_bitmap, in set_rbio_range_error()
1481 stripe_nr * rbio->stripe_nsectors, in set_rbio_range_error()
1482 rbio->stripe_nsectors); in set_rbio_range_error()
1490 * For subpage case, we can no longer set page Up-to-date directly for
1498 for (i = 0; i < rbio->nr_sectors; i++) { in find_stripe_sector()
1499 struct sector_ptr *sector = &rbio->stripe_sectors[i]; in find_stripe_sector()
1501 if (sector->has_paddr && sector->paddr == paddr) in find_stripe_sector()
1513 const u32 blocksize = rbio->bioc->fs_info->sectorsize; in set_bio_pages_uptodate()
1523 sector->uptodate = 1; in set_bio_pages_uptodate()
1532 for (i = 0; i < rbio->nr_sectors; i++) { in get_bio_sector_nr()
1533 if (rbio->stripe_sectors[i].paddr == bvec_paddr) in get_bio_sector_nr()
1535 if (rbio->bio_sectors[i].has_paddr && in get_bio_sector_nr()
1536 rbio->bio_sectors[i].paddr == bvec_paddr) in get_bio_sector_nr()
1539 ASSERT(i < rbio->nr_sectors); in get_bio_sector_nr()
1551 bio_size += bvec->bv_len; in rbio_update_error_bitmap()
1560 (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++) in rbio_update_error_bitmap()
1561 set_bit(i, rbio->error_bitmap); in rbio_update_error_bitmap()
1568 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in verify_bio_data_sectors()
1573 if (!rbio->csum_bitmap || !rbio->csum_buf) in verify_bio_data_sectors()
1577 if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors) in verify_bio_data_sectors()
1580 btrfs_bio_for_each_block_all(paddr, bio, fs_info->sectorsize) { in verify_bio_data_sectors()
1582 u8 *expected_csum = rbio->csum_buf + total_sector_nr * fs_info->csum_size; in verify_bio_data_sectors()
1586 if (!test_bit(total_sector_nr, rbio->csum_bitmap)) in verify_bio_data_sectors()
1592 set_bit(total_sector_nr, rbio->error_bitmap); in verify_bio_data_sectors()
1599 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_wait_read_end_io()
1601 if (bio->bi_status) { in raid_wait_read_end_io()
1609 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid_wait_read_end_io()
1610 wake_up(&rbio->io_wait); in raid_wait_read_end_io()
1618 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); in submit_read_wait_bio_list()
1620 bio->bi_end_io = raid_wait_read_end_io; in submit_read_wait_bio_list()
1631 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in submit_read_wait_bio_list()
1636 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_data_pages()
1639 ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, false); in alloc_rbio_data_pages()
1670 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; in plug_cmp()
1671 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; in plug_cmp()
1674 return -1; in plug_cmp()
1686 list_sort(NULL, &plug->rbio_list, plug_cmp); in raid_unplug()
1688 while (!list_empty(&plug->rbio_list)) { in raid_unplug()
1689 cur = list_first_entry(&plug->rbio_list, in raid_unplug()
1691 list_del_init(&cur->plug_list); in raid_unplug()
1713 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1716 const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in rbio_add_bio()
1717 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT; in rbio_add_bio()
1718 const u64 full_stripe_start = rbio->bioc->full_stripe_logical; in rbio_add_bio()
1719 const u32 orig_len = orig_bio->bi_iter.bi_size; in rbio_add_bio()
1720 const u32 sectorsize = fs_info->sectorsize; in rbio_add_bio()
1725 rbio->nr_data * BTRFS_STRIPE_LEN, in rbio_add_bio()
1728 bio_list_add(&rbio->bio_list, orig_bio); in rbio_add_bio()
1729 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; in rbio_add_bio()
1734 int bit = ((u32)(cur_logical - full_stripe_start) >> in rbio_add_bio()
1735 fs_info->sectorsize_bits) % rbio->stripe_nsectors; in rbio_add_bio()
1737 set_bit(bit, &rbio->dbitmap); in rbio_add_bio()
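
rbio_add_bio() records in dbitmap which sector columns of the full stripe the queued bio touches: bit = ((logical - full_stripe_start) >> sectorsize_bits) % stripe_nsectors. A worked example with assumed geometry (4K sectors, 64K per-device stripes, so stripe_nsectors = 16):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t sectorsize_bits = 12;	/* 4K sectors */
	const uint32_t stripe_nsectors = 16;	/* 64K / 4K */
	const uint64_t full_stripe_start = 1ULL << 30;

	/* An 8K write starting 72K into the full stripe sets bits 2 and 3: (72K >> 12) % 16 = 2. */
	uint64_t logical = full_stripe_start + 72 * 1024;
	uint32_t len = 8 * 1024;

	for (uint64_t cur = logical; cur < logical + len; cur += 1 << sectorsize_bits) {
		uint32_t bit = (uint32_t)((cur - full_stripe_start) >> sectorsize_bits) %
			       stripe_nsectors;
		printf("set bit %u\n", bit);
	}
	return 0;
}
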
1746 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_write()
1753 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_write()
1757 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1768 if (!plug->info) { in raid56_parity_write()
1769 plug->info = fs_info; in raid56_parity_write()
1770 INIT_LIST_HEAD(&plug->rbio_list); in raid56_parity_write()
1772 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1787 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in verify_one_sector()
1793 if (!rbio->csum_bitmap || !rbio->csum_buf) in verify_one_sector()
1797 if (stripe_nr >= rbio->nr_data) in verify_one_sector()
1803 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in verify_one_sector()
1809 csum_expected = rbio->csum_buf + in verify_one_sector()
1810 (stripe_nr * rbio->stripe_nsectors + sector_nr) * in verify_one_sector()
1811 fs_info->csum_size; in verify_one_sector()
1812 ret = btrfs_check_block_csum(fs_info, sector->paddr, csum_buf, csum_expected); in verify_one_sector()
1818 * @*pointers are the pre-allocated pointers by the caller, so we don't
1824 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in recover_vertical()
1826 const u32 sectorsize = fs_info->sectorsize; in recover_vertical()
1837 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in recover_vertical()
1838 !test_bit(sector_nr, &rbio->dbitmap)) in recover_vertical()
1850 if (unlikely(found_errors > rbio->bioc->max_errors)) in recover_vertical()
1851 return -EIO; in recover_vertical()
1859 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in recover_vertical()
1864 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in recover_vertical()
1874 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) { in recover_vertical()
1877 if (faila == rbio->nr_data) in recover_vertical()
1899 if (failb == rbio->real_stripes - 1) { in recover_vertical()
1900 if (faila == rbio->real_stripes - 2) in recover_vertical()
1914 if (failb == rbio->real_stripes - 2) { in recover_vertical()
1915 raid6_datap_recov(rbio->real_stripes, sectorsize, in recover_vertical()
1918 raid6_2data_recov(rbio->real_stripes, sectorsize, in recover_vertical()
1925 ASSERT(failb == -1); in recover_vertical()
1928 memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize); in recover_vertical()
1932 for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1; in recover_vertical()
1935 pointers[rbio->nr_data - 1] = p; in recover_vertical()
1938 run_xor(pointers, rbio->nr_data - 1, sectorsize); in recover_vertical()
1958 sector->uptodate = 1; in recover_vertical()
1966 sector->uptodate = 1; in recover_vertical()
1970 for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--) in recover_vertical()
1988 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_sectors()
1989 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_sectors()
1991 ret = -ENOMEM; in recover_sectors()
1995 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in recover_sectors()
1996 spin_lock(&rbio->bio_list_lock); in recover_sectors()
1997 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in recover_sectors()
1998 spin_unlock(&rbio->bio_list_lock); in recover_sectors()
2003 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in recover_sectors()
2025 ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors)); in recover_rbio()
2040 * So here we always re-read everything in recovery path. in recover_rbio()
2042 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in recover_rbio()
2044 int stripe = total_sector_nr / rbio->stripe_nsectors; in recover_rbio()
2045 int sectornr = total_sector_nr % rbio->stripe_nsectors; in recover_rbio()
2053 if (!rbio->bioc->stripes[stripe].dev->bdev || in recover_rbio()
2054 test_bit(total_sector_nr, rbio->error_bitmap)) { in recover_rbio()
2059 set_bit(total_sector_nr, rbio->error_bitmap); in recover_rbio()
2104 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in set_rbio_raid6_extra_error()
2123 failb = rbio->real_stripes - (mirror_num - 1); in set_rbio_raid6_extra_error()
2125 failb--; in set_rbio_raid6_extra_error()
2129 set_bit(failb * rbio->stripe_nsectors + sector_nr, in set_rbio_raid6_extra_error()
2130 rbio->error_bitmap); in set_rbio_raid6_extra_error()
2146 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_recover()
2151 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_recover()
2156 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2174 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in fill_data_csums()
2176 rbio->bioc->full_stripe_logical); in fill_data_csums()
2177 const u64 start = rbio->bioc->full_stripe_logical; in fill_data_csums()
2178 const u32 len = (rbio->nr_data * rbio->stripe_nsectors) << in fill_data_csums()
2179 fs_info->sectorsize_bits; in fill_data_csums()
2183 ASSERT(!rbio->csum_buf && !rbio->csum_bitmap); in fill_data_csums()
2188 * - The rbio doesn't belong to data block groups in fill_data_csums()
2191 * - The rbio belongs to mixed block groups in fill_data_csums()
2196 if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) || in fill_data_csums()
2197 rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA) in fill_data_csums()
2200 rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors * in fill_data_csums()
2201 fs_info->csum_size, GFP_NOFS); in fill_data_csums()
2202 rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors, in fill_data_csums()
2204 if (!rbio->csum_buf || !rbio->csum_bitmap) { in fill_data_csums()
2205 ret = -ENOMEM; in fill_data_csums()
2209 ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1, in fill_data_csums()
2210 rbio->csum_buf, rbio->csum_bitmap); in fill_data_csums()
2213 if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits)) in fill_data_csums()
2221 * longer safe for this particular sub-stripe write. in fill_data_csums()
2224 "sub-stripe write for full stripe %llu is not safe, failed to get csum: %d", in fill_data_csums()
2225 rbio->bioc->full_stripe_logical, ret); in fill_data_csums()
2227 kfree(rbio->csum_buf); in fill_data_csums()
2228 bitmap_free(rbio->csum_bitmap); in fill_data_csums()
2229 rbio->csum_buf = NULL; in fill_data_csums()
2230 rbio->csum_bitmap = NULL; in fill_data_csums()
2251 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_read_wait_recover()
2254 int stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_read_wait_recover()
2255 int sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_read_wait_recover()
2276 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_wait_write_end_io()
2278 if (bio->bi_status) in raid_wait_write_end_io()
2281 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid_wait_write_end_io()
2282 wake_up(&rbio->io_wait); in raid_wait_write_end_io()
2290 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); in submit_write_bios()
2292 bio->bi_end_io = raid_wait_write_end_io; in submit_write_bios()
2312 for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) { in need_read_stripe_sectors()
2313 struct sector_ptr *sector = &rbio->stripe_sectors[i]; in need_read_stripe_sectors()
2320 if (!sector->has_paddr || !sector->uptodate) in need_read_stripe_sectors()
2334 * needed for both full-stripe and sub-stripe writes. in rmw_rbio()
2346 * Now we're doing sub-stripe write, also need all data stripes in rmw_rbio()
2365 spin_lock(&rbio->bio_list_lock); in rmw_rbio()
2366 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in rmw_rbio()
2367 spin_unlock(&rbio->bio_list_lock); in rmw_rbio()
2369 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in rmw_rbio()
2382 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in rmw_rbio()
2384 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) in rmw_rbio()
2395 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in rmw_rbio()
2398 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in rmw_rbio()
2402 if (unlikely(found_errors > rbio->bioc->max_errors)) { in rmw_rbio()
2403 ret = -EIO; in rmw_rbio()
2440 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_alloc_scrub_rbio()
2447 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2452 ASSERT(!bio->bi_iter.bi_size); in raid56_parity_alloc_scrub_rbio()
2453 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2460 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2461 if (bioc->stripes[i].dev == scrub_dev) { in raid56_parity_alloc_scrub_rbio()
2462 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2466 ASSERT_RBIO_STRIPE(i < rbio->real_stripes, rbio, i); in raid56_parity_alloc_scrub_rbio()
2468 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2478 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in alloc_rbio_essential_pages()
2481 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in alloc_rbio_essential_pages()
2484 int sectornr = total_sector_nr % rbio->stripe_nsectors; in alloc_rbio_essential_pages()
2487 if (!test_bit(sectornr, &rbio->dbitmap)) in alloc_rbio_essential_pages()
2489 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2493 return -ENOMEM; in alloc_rbio_essential_pages()
2494 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2502 struct btrfs_io_context *bioc = rbio->bioc; in finish_parity_scrub()
2503 const u32 sectorsize = bioc->fs_info->sectorsize; in finish_parity_scrub()
2504 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2505 unsigned long *pbitmap = &rbio->finish_pbitmap; in finish_parity_scrub()
2506 int nr_data = rbio->nr_data; in finish_parity_scrub()
2519 if (rbio->real_stripes - rbio->nr_data == 1) in finish_parity_scrub()
2521 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_parity_scrub()
2530 if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) { in finish_parity_scrub()
2532 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors); in finish_parity_scrub()
2536 * Because the higher layers (scrubber) are unlikely to in finish_parity_scrub()
2540 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2544 return -ENOMEM; in finish_parity_scrub()
2556 return -ENOMEM; in finish_parity_scrub()
2562 pointers[rbio->real_stripes - 1] = kmap_local_sector(&q_sector); in finish_parity_scrub()
2565 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in finish_parity_scrub()
2570 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2583 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in finish_parity_scrub()
2588 run_xor(pointers + 1, nr_data - 1, sectorsize); in finish_parity_scrub()
2592 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2594 if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0) in finish_parity_scrub()
2595 memcpy(parity, pointers[rbio->scrubp], sectorsize); in finish_parity_scrub()
2598 bitmap_clear(&rbio->dbitmap, sectornr, 1); in finish_parity_scrub()
2601 for (stripe = nr_data - 1; stripe >= 0; stripe--) in finish_parity_scrub()
2618 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2621 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2622 ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp, in finish_parity_scrub()
2635 ASSERT_RBIO(rbio->bioc->replace_stripe_src >= 0, rbio); in finish_parity_scrub()
2636 for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2639 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2641 rbio->real_stripes, in finish_parity_scrub()
2658 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2676 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_scrub_rbio()
2677 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_scrub_rbio()
2679 ret = -ENOMEM; in recover_scrub_rbio()
2683 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in recover_scrub_rbio()
2684 int dfail = 0, failp = -1; in recover_scrub_rbio()
2691 if (unlikely(found_errors > rbio->bioc->max_errors)) { in recover_scrub_rbio()
2692 ret = -EIO; in recover_scrub_rbio()
2715 if (unlikely(dfail > rbio->bioc->max_errors - 1)) { in recover_scrub_rbio()
2716 ret = -EIO; in recover_scrub_rbio()
2732 if (unlikely(failp != rbio->scrubp)) { in recover_scrub_rbio()
2733 ret = -EIO; in recover_scrub_rbio()
2754 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in scrub_assemble_read_bios()
2756 int sectornr = total_sector_nr % rbio->stripe_nsectors; in scrub_assemble_read_bios()
2757 int stripe = total_sector_nr / rbio->stripe_nsectors; in scrub_assemble_read_bios()
2761 if (!test_bit(sectornr, &rbio->dbitmap)) in scrub_assemble_read_bios()
2778 if (sector->uptodate) in scrub_assemble_read_bios()
2802 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in scrub_rbio()
2818 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in scrub_rbio()
2819 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in scrub_rbio()
2823 if (unlikely(found_errors > rbio->bioc->max_errors)) { in scrub_rbio()
2824 ret = -EIO; in scrub_rbio()
2853 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in raid56_parity_cache_data_folios()
2854 const u64 offset_in_full_stripe = data_logical - in raid56_parity_cache_data_folios()
2855 rbio->bioc->full_stripe_logical; in raid56_parity_cache_data_folios()
2861 ASSERT(fs_info->sectorsize <= PAGE_SIZE); in raid56_parity_cache_data_folios()
2877 ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT)); in raid56_parity_cache_data_folios()
2885 kaddr = kmap_local_page(rbio->stripe_pages[pindex]); in raid56_parity_cache_data_folios()
2896 for (unsigned int sector_nr = offset_in_full_stripe >> fs_info->sectorsize_bits; in raid56_parity_cache_data_folios()
2897 sector_nr < (offset_in_full_stripe + BTRFS_STRIPE_LEN) >> fs_info->sectorsize_bits; in raid56_parity_cache_data_folios()
2899 rbio->stripe_sectors[sector_nr].uptodate = true; in raid56_parity_cache_data_folios()
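
The final fragment marks every sector of the cached data stripe uptodate, from offset_in_full_stripe up to one stripe length further. A worked example of that range with assumed geometry (64K per-device stripe, 4K sectors): caching the second data stripe, which starts 64K into the full stripe, covers stripe_sectors[16..31].

#include <stdio.h>

int main(void)
{
	const unsigned int stripe_len_shift = 16;	/* 64K stripe */
	const unsigned int sectorsize_bits = 12;	/* 4K sectors */

	/* Second data stripe starts 64K into the full stripe. */
	unsigned long long offset_in_full_stripe = 1ULL << stripe_len_shift;

	unsigned int first = offset_in_full_stripe >> sectorsize_bits;
	unsigned int last = ((offset_in_full_stripe + (1ULL << stripe_len_shift)) >>
			     sectorsize_bits) - 1;

	printf("sectors %u..%u marked uptodate\n", first, last);
	return 0;
}
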