1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2011, 2012 STRATO. All rights reserved. 4 */ 5 6 #include <linux/blkdev.h> 7 #include <linux/ratelimit.h> 8 #include <linux/sched/mm.h> 9 #include <crypto/hash.h> 10 #include "ctree.h" 11 #include "discard.h" 12 #include "volumes.h" 13 #include "disk-io.h" 14 #include "ordered-data.h" 15 #include "transaction.h" 16 #include "backref.h" 17 #include "extent_io.h" 18 #include "dev-replace.h" 19 #include "check-integrity.h" 20 #include "rcu-string.h" 21 #include "raid56.h" 22 #include "block-group.h" 23 #include "zoned.h" 24 25 /* 26 * This is only the first step towards a full-features scrub. It reads all 27 * extent and super block and verifies the checksums. In case a bad checksum 28 * is found or the extent cannot be read, good data will be written back if 29 * any can be found. 30 * 31 * Future enhancements: 32 * - In case an unrepairable extent is encountered, track which files are 33 * affected and report them 34 * - track and record media errors, throw out bad devices 35 * - add a mode to also read unallocated space 36 */ 37 38 struct scrub_block; 39 struct scrub_ctx; 40 41 /* 42 * The following three values only influence the performance. 43 * 44 * The last one configures the number of parallel and outstanding I/O 45 * operations. The first one configures an upper limit for the number 46 * of (dynamically allocated) pages that are added to a bio. 47 */ 48 #define SCRUB_SECTORS_PER_BIO 32 /* 128KiB per bio for 4KiB pages */ 49 #define SCRUB_BIOS_PER_SCTX 64 /* 8MiB per device in flight for 4KiB pages */ 50 51 /* 52 * The following value times PAGE_SIZE needs to be large enough to match the 53 * largest node/leaf/sector size that shall be supported. 54 */ 55 #define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K) 56 57 #define SCRUB_MAX_PAGES (DIV_ROUND_UP(BTRFS_MAX_METADATA_BLOCKSIZE, PAGE_SIZE)) 58 59 struct scrub_recover { 60 refcount_t refs; 61 struct btrfs_io_context *bioc; 62 u64 map_length; 63 }; 64 65 struct scrub_sector { 66 struct scrub_block *sblock; 67 struct list_head list; 68 u64 flags; /* extent flags */ 69 u64 generation; 70 /* Offset in bytes to @sblock. */ 71 u32 offset; 72 atomic_t refs; 73 unsigned int have_csum:1; 74 unsigned int io_error:1; 75 u8 csum[BTRFS_CSUM_SIZE]; 76 77 struct scrub_recover *recover; 78 }; 79 80 struct scrub_bio { 81 int index; 82 struct scrub_ctx *sctx; 83 struct btrfs_device *dev; 84 struct bio *bio; 85 blk_status_t status; 86 u64 logical; 87 u64 physical; 88 struct scrub_sector *sectors[SCRUB_SECTORS_PER_BIO]; 89 int sector_count; 90 int next_free; 91 struct work_struct work; 92 }; 93 94 struct scrub_block { 95 /* 96 * Each page will have its page::private used to record the logical 97 * bytenr. 98 */ 99 struct page *pages[SCRUB_MAX_PAGES]; 100 struct scrub_sector *sectors[SCRUB_MAX_SECTORS_PER_BLOCK]; 101 struct btrfs_device *dev; 102 /* Logical bytenr of the sblock */ 103 u64 logical; 104 u64 physical; 105 u64 physical_for_dev_replace; 106 /* Length of sblock in bytes */ 107 u32 len; 108 int sector_count; 109 int mirror_num; 110 111 atomic_t outstanding_sectors; 112 refcount_t refs; /* free mem on transition to zero */ 113 struct scrub_ctx *sctx; 114 struct scrub_parity *sparity; 115 struct { 116 unsigned int header_error:1; 117 unsigned int checksum_error:1; 118 unsigned int no_io_error_seen:1; 119 unsigned int generation_error:1; /* also sets header_error */ 120 121 /* The following is for the data used to check parity */ 122 /* It is for the data with checksum */ 123 unsigned int data_corrected:1; 124 }; 125 struct work_struct work; 126 }; 127 128 /* Used for the chunks with parity stripe such RAID5/6 */ 129 struct scrub_parity { 130 struct scrub_ctx *sctx; 131 132 struct btrfs_device *scrub_dev; 133 134 u64 logic_start; 135 136 u64 logic_end; 137 138 int nsectors; 139 140 u32 stripe_len; 141 142 refcount_t refs; 143 144 struct list_head sectors_list; 145 146 /* Work of parity check and repair */ 147 struct work_struct work; 148 149 /* Mark the parity blocks which have data */ 150 unsigned long dbitmap; 151 152 /* 153 * Mark the parity blocks which have data, but errors happen when 154 * read data or check data 155 */ 156 unsigned long ebitmap; 157 }; 158 159 struct scrub_ctx { 160 struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX]; 161 struct btrfs_fs_info *fs_info; 162 int first_free; 163 int curr; 164 atomic_t bios_in_flight; 165 atomic_t workers_pending; 166 spinlock_t list_lock; 167 wait_queue_head_t list_wait; 168 struct list_head csum_list; 169 atomic_t cancel_req; 170 int readonly; 171 int sectors_per_bio; 172 173 /* State of IO submission throttling affecting the associated device */ 174 ktime_t throttle_deadline; 175 u64 throttle_sent; 176 177 int is_dev_replace; 178 u64 write_pointer; 179 180 struct scrub_bio *wr_curr_bio; 181 struct mutex wr_lock; 182 struct btrfs_device *wr_tgtdev; 183 bool flush_all_writes; 184 185 /* 186 * statistics 187 */ 188 struct btrfs_scrub_progress stat; 189 spinlock_t stat_lock; 190 191 /* 192 * Use a ref counter to avoid use-after-free issues. Scrub workers 193 * decrement bios_in_flight and workers_pending and then do a wakeup 194 * on the list_wait wait queue. We must ensure the main scrub task 195 * doesn't free the scrub context before or while the workers are 196 * doing the wakeup() call. 197 */ 198 refcount_t refs; 199 }; 200 201 struct scrub_warning { 202 struct btrfs_path *path; 203 u64 extent_item_size; 204 const char *errstr; 205 u64 physical; 206 u64 logical; 207 struct btrfs_device *dev; 208 }; 209 210 struct full_stripe_lock { 211 struct rb_node node; 212 u64 logical; 213 u64 refs; 214 struct mutex mutex; 215 }; 216 217 #ifndef CONFIG_64BIT 218 /* This structure is for archtectures whose (void *) is smaller than u64 */ 219 struct scrub_page_private { 220 u64 logical; 221 }; 222 #endif 223 224 static int attach_scrub_page_private(struct page *page, u64 logical) 225 { 226 #ifdef CONFIG_64BIT 227 attach_page_private(page, (void *)logical); 228 return 0; 229 #else 230 struct scrub_page_private *spp; 231 232 spp = kmalloc(sizeof(*spp), GFP_KERNEL); 233 if (!spp) 234 return -ENOMEM; 235 spp->logical = logical; 236 attach_page_private(page, (void *)spp); 237 return 0; 238 #endif 239 } 240 241 static void detach_scrub_page_private(struct page *page) 242 { 243 #ifdef CONFIG_64BIT 244 detach_page_private(page); 245 return; 246 #else 247 struct scrub_page_private *spp; 248 249 spp = detach_page_private(page); 250 kfree(spp); 251 return; 252 #endif 253 } 254 255 static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx, 256 struct btrfs_device *dev, 257 u64 logical, u64 physical, 258 u64 physical_for_dev_replace, 259 int mirror_num) 260 { 261 struct scrub_block *sblock; 262 263 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); 264 if (!sblock) 265 return NULL; 266 refcount_set(&sblock->refs, 1); 267 sblock->sctx = sctx; 268 sblock->logical = logical; 269 sblock->physical = physical; 270 sblock->physical_for_dev_replace = physical_for_dev_replace; 271 sblock->dev = dev; 272 sblock->mirror_num = mirror_num; 273 sblock->no_io_error_seen = 1; 274 /* 275 * Scrub_block::pages will be allocated at alloc_scrub_sector() when 276 * the corresponding page is not allocated. 277 */ 278 return sblock; 279 } 280 281 /* 282 * Allocate a new scrub sector and attach it to @sblock. 283 * 284 * Will also allocate new pages for @sblock if needed. 285 */ 286 static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock, 287 u64 logical, gfp_t gfp) 288 { 289 const pgoff_t page_index = (logical - sblock->logical) >> PAGE_SHIFT; 290 struct scrub_sector *ssector; 291 292 /* We must never have scrub_block exceed U32_MAX in size. */ 293 ASSERT(logical - sblock->logical < U32_MAX); 294 295 ssector = kzalloc(sizeof(*ssector), gfp); 296 if (!ssector) 297 return NULL; 298 299 /* Allocate a new page if the slot is not allocated */ 300 if (!sblock->pages[page_index]) { 301 int ret; 302 303 sblock->pages[page_index] = alloc_page(gfp); 304 if (!sblock->pages[page_index]) { 305 kfree(ssector); 306 return NULL; 307 } 308 ret = attach_scrub_page_private(sblock->pages[page_index], 309 sblock->logical + (page_index << PAGE_SHIFT)); 310 if (ret < 0) { 311 kfree(ssector); 312 __free_page(sblock->pages[page_index]); 313 sblock->pages[page_index] = NULL; 314 return NULL; 315 } 316 } 317 318 atomic_set(&ssector->refs, 1); 319 ssector->sblock = sblock; 320 /* The sector to be added should not be used */ 321 ASSERT(sblock->sectors[sblock->sector_count] == NULL); 322 ssector->offset = logical - sblock->logical; 323 324 /* The sector count must be smaller than the limit */ 325 ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK); 326 327 sblock->sectors[sblock->sector_count] = ssector; 328 sblock->sector_count++; 329 sblock->len += sblock->sctx->fs_info->sectorsize; 330 331 return ssector; 332 } 333 334 static struct page *scrub_sector_get_page(struct scrub_sector *ssector) 335 { 336 struct scrub_block *sblock = ssector->sblock; 337 pgoff_t index; 338 /* 339 * When calling this function, ssector must be alreaday attached to the 340 * parent sblock. 341 */ 342 ASSERT(sblock); 343 344 /* The range should be inside the sblock range */ 345 ASSERT(ssector->offset < sblock->len); 346 347 index = ssector->offset >> PAGE_SHIFT; 348 ASSERT(index < SCRUB_MAX_PAGES); 349 ASSERT(sblock->pages[index]); 350 ASSERT(PagePrivate(sblock->pages[index])); 351 return sblock->pages[index]; 352 } 353 354 static unsigned int scrub_sector_get_page_offset(struct scrub_sector *ssector) 355 { 356 struct scrub_block *sblock = ssector->sblock; 357 358 /* 359 * When calling this function, ssector must be already attached to the 360 * parent sblock. 361 */ 362 ASSERT(sblock); 363 364 /* The range should be inside the sblock range */ 365 ASSERT(ssector->offset < sblock->len); 366 367 return offset_in_page(ssector->offset); 368 } 369 370 static char *scrub_sector_get_kaddr(struct scrub_sector *ssector) 371 { 372 return page_address(scrub_sector_get_page(ssector)) + 373 scrub_sector_get_page_offset(ssector); 374 } 375 376 static int bio_add_scrub_sector(struct bio *bio, struct scrub_sector *ssector, 377 unsigned int len) 378 { 379 return bio_add_page(bio, scrub_sector_get_page(ssector), len, 380 scrub_sector_get_page_offset(ssector)); 381 } 382 383 static int scrub_setup_recheck_block(struct scrub_block *original_sblock, 384 struct scrub_block *sblocks_for_recheck[]); 385 static void scrub_recheck_block(struct btrfs_fs_info *fs_info, 386 struct scrub_block *sblock, 387 int retry_failed_mirror); 388 static void scrub_recheck_block_checksum(struct scrub_block *sblock); 389 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 390 struct scrub_block *sblock_good); 391 static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad, 392 struct scrub_block *sblock_good, 393 int sector_num, int force_write); 394 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock); 395 static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, 396 int sector_num); 397 static int scrub_checksum_data(struct scrub_block *sblock); 398 static int scrub_checksum_tree_block(struct scrub_block *sblock); 399 static int scrub_checksum_super(struct scrub_block *sblock); 400 static void scrub_block_put(struct scrub_block *sblock); 401 static void scrub_sector_get(struct scrub_sector *sector); 402 static void scrub_sector_put(struct scrub_sector *sector); 403 static void scrub_parity_get(struct scrub_parity *sparity); 404 static void scrub_parity_put(struct scrub_parity *sparity); 405 static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len, 406 u64 physical, struct btrfs_device *dev, u64 flags, 407 u64 gen, int mirror_num, u8 *csum, 408 u64 physical_for_dev_replace); 409 static void scrub_bio_end_io(struct bio *bio); 410 static void scrub_bio_end_io_worker(struct work_struct *work); 411 static void scrub_block_complete(struct scrub_block *sblock); 412 static void scrub_find_good_copy(struct btrfs_fs_info *fs_info, 413 u64 extent_logical, u32 extent_len, 414 u64 *extent_physical, 415 struct btrfs_device **extent_dev, 416 int *extent_mirror_num); 417 static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx, 418 struct scrub_sector *sector); 419 static void scrub_wr_submit(struct scrub_ctx *sctx); 420 static void scrub_wr_bio_end_io(struct bio *bio); 421 static void scrub_wr_bio_end_io_worker(struct work_struct *work); 422 static void scrub_put_ctx(struct scrub_ctx *sctx); 423 424 static inline int scrub_is_page_on_raid56(struct scrub_sector *sector) 425 { 426 return sector->recover && 427 (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); 428 } 429 430 static void scrub_pending_bio_inc(struct scrub_ctx *sctx) 431 { 432 refcount_inc(&sctx->refs); 433 atomic_inc(&sctx->bios_in_flight); 434 } 435 436 static void scrub_pending_bio_dec(struct scrub_ctx *sctx) 437 { 438 atomic_dec(&sctx->bios_in_flight); 439 wake_up(&sctx->list_wait); 440 scrub_put_ctx(sctx); 441 } 442 443 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) 444 { 445 while (atomic_read(&fs_info->scrub_pause_req)) { 446 mutex_unlock(&fs_info->scrub_lock); 447 wait_event(fs_info->scrub_pause_wait, 448 atomic_read(&fs_info->scrub_pause_req) == 0); 449 mutex_lock(&fs_info->scrub_lock); 450 } 451 } 452 453 static void scrub_pause_on(struct btrfs_fs_info *fs_info) 454 { 455 atomic_inc(&fs_info->scrubs_paused); 456 wake_up(&fs_info->scrub_pause_wait); 457 } 458 459 static void scrub_pause_off(struct btrfs_fs_info *fs_info) 460 { 461 mutex_lock(&fs_info->scrub_lock); 462 __scrub_blocked_if_needed(fs_info); 463 atomic_dec(&fs_info->scrubs_paused); 464 mutex_unlock(&fs_info->scrub_lock); 465 466 wake_up(&fs_info->scrub_pause_wait); 467 } 468 469 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) 470 { 471 scrub_pause_on(fs_info); 472 scrub_pause_off(fs_info); 473 } 474 475 /* 476 * Insert new full stripe lock into full stripe locks tree 477 * 478 * Return pointer to existing or newly inserted full_stripe_lock structure if 479 * everything works well. 480 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory 481 * 482 * NOTE: caller must hold full_stripe_locks_root->lock before calling this 483 * function 484 */ 485 static struct full_stripe_lock *insert_full_stripe_lock( 486 struct btrfs_full_stripe_locks_tree *locks_root, 487 u64 fstripe_logical) 488 { 489 struct rb_node **p; 490 struct rb_node *parent = NULL; 491 struct full_stripe_lock *entry; 492 struct full_stripe_lock *ret; 493 494 lockdep_assert_held(&locks_root->lock); 495 496 p = &locks_root->root.rb_node; 497 while (*p) { 498 parent = *p; 499 entry = rb_entry(parent, struct full_stripe_lock, node); 500 if (fstripe_logical < entry->logical) { 501 p = &(*p)->rb_left; 502 } else if (fstripe_logical > entry->logical) { 503 p = &(*p)->rb_right; 504 } else { 505 entry->refs++; 506 return entry; 507 } 508 } 509 510 /* 511 * Insert new lock. 512 */ 513 ret = kmalloc(sizeof(*ret), GFP_KERNEL); 514 if (!ret) 515 return ERR_PTR(-ENOMEM); 516 ret->logical = fstripe_logical; 517 ret->refs = 1; 518 mutex_init(&ret->mutex); 519 520 rb_link_node(&ret->node, parent, p); 521 rb_insert_color(&ret->node, &locks_root->root); 522 return ret; 523 } 524 525 /* 526 * Search for a full stripe lock of a block group 527 * 528 * Return pointer to existing full stripe lock if found 529 * Return NULL if not found 530 */ 531 static struct full_stripe_lock *search_full_stripe_lock( 532 struct btrfs_full_stripe_locks_tree *locks_root, 533 u64 fstripe_logical) 534 { 535 struct rb_node *node; 536 struct full_stripe_lock *entry; 537 538 lockdep_assert_held(&locks_root->lock); 539 540 node = locks_root->root.rb_node; 541 while (node) { 542 entry = rb_entry(node, struct full_stripe_lock, node); 543 if (fstripe_logical < entry->logical) 544 node = node->rb_left; 545 else if (fstripe_logical > entry->logical) 546 node = node->rb_right; 547 else 548 return entry; 549 } 550 return NULL; 551 } 552 553 /* 554 * Helper to get full stripe logical from a normal bytenr. 555 * 556 * Caller must ensure @cache is a RAID56 block group. 557 */ 558 static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr) 559 { 560 u64 ret; 561 562 /* 563 * Due to chunk item size limit, full stripe length should not be 564 * larger than U32_MAX. Just a sanity check here. 565 */ 566 WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX); 567 568 /* 569 * round_down() can only handle power of 2, while RAID56 full 570 * stripe length can be 64KiB * n, so we need to manually round down. 571 */ 572 ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) * 573 cache->full_stripe_len + cache->start; 574 return ret; 575 } 576 577 /* 578 * Lock a full stripe to avoid concurrency of recovery and read 579 * 580 * It's only used for profiles with parities (RAID5/6), for other profiles it 581 * does nothing. 582 * 583 * Return 0 if we locked full stripe covering @bytenr, with a mutex held. 584 * So caller must call unlock_full_stripe() at the same context. 585 * 586 * Return <0 if encounters error. 587 */ 588 static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, 589 bool *locked_ret) 590 { 591 struct btrfs_block_group *bg_cache; 592 struct btrfs_full_stripe_locks_tree *locks_root; 593 struct full_stripe_lock *existing; 594 u64 fstripe_start; 595 int ret = 0; 596 597 *locked_ret = false; 598 bg_cache = btrfs_lookup_block_group(fs_info, bytenr); 599 if (!bg_cache) { 600 ASSERT(0); 601 return -ENOENT; 602 } 603 604 /* Profiles not based on parity don't need full stripe lock */ 605 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) 606 goto out; 607 locks_root = &bg_cache->full_stripe_locks_root; 608 609 fstripe_start = get_full_stripe_logical(bg_cache, bytenr); 610 611 /* Now insert the full stripe lock */ 612 mutex_lock(&locks_root->lock); 613 existing = insert_full_stripe_lock(locks_root, fstripe_start); 614 mutex_unlock(&locks_root->lock); 615 if (IS_ERR(existing)) { 616 ret = PTR_ERR(existing); 617 goto out; 618 } 619 mutex_lock(&existing->mutex); 620 *locked_ret = true; 621 out: 622 btrfs_put_block_group(bg_cache); 623 return ret; 624 } 625 626 /* 627 * Unlock a full stripe. 628 * 629 * NOTE: Caller must ensure it's the same context calling corresponding 630 * lock_full_stripe(). 631 * 632 * Return 0 if we unlock full stripe without problem. 633 * Return <0 for error 634 */ 635 static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, 636 bool locked) 637 { 638 struct btrfs_block_group *bg_cache; 639 struct btrfs_full_stripe_locks_tree *locks_root; 640 struct full_stripe_lock *fstripe_lock; 641 u64 fstripe_start; 642 bool freeit = false; 643 int ret = 0; 644 645 /* If we didn't acquire full stripe lock, no need to continue */ 646 if (!locked) 647 return 0; 648 649 bg_cache = btrfs_lookup_block_group(fs_info, bytenr); 650 if (!bg_cache) { 651 ASSERT(0); 652 return -ENOENT; 653 } 654 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) 655 goto out; 656 657 locks_root = &bg_cache->full_stripe_locks_root; 658 fstripe_start = get_full_stripe_logical(bg_cache, bytenr); 659 660 mutex_lock(&locks_root->lock); 661 fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start); 662 /* Unpaired unlock_full_stripe() detected */ 663 if (!fstripe_lock) { 664 WARN_ON(1); 665 ret = -ENOENT; 666 mutex_unlock(&locks_root->lock); 667 goto out; 668 } 669 670 if (fstripe_lock->refs == 0) { 671 WARN_ON(1); 672 btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow", 673 fstripe_lock->logical); 674 } else { 675 fstripe_lock->refs--; 676 } 677 678 if (fstripe_lock->refs == 0) { 679 rb_erase(&fstripe_lock->node, &locks_root->root); 680 freeit = true; 681 } 682 mutex_unlock(&locks_root->lock); 683 684 mutex_unlock(&fstripe_lock->mutex); 685 if (freeit) 686 kfree(fstripe_lock); 687 out: 688 btrfs_put_block_group(bg_cache); 689 return ret; 690 } 691 692 static void scrub_free_csums(struct scrub_ctx *sctx) 693 { 694 while (!list_empty(&sctx->csum_list)) { 695 struct btrfs_ordered_sum *sum; 696 sum = list_first_entry(&sctx->csum_list, 697 struct btrfs_ordered_sum, list); 698 list_del(&sum->list); 699 kfree(sum); 700 } 701 } 702 703 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) 704 { 705 int i; 706 707 if (!sctx) 708 return; 709 710 /* this can happen when scrub is cancelled */ 711 if (sctx->curr != -1) { 712 struct scrub_bio *sbio = sctx->bios[sctx->curr]; 713 714 for (i = 0; i < sbio->sector_count; i++) 715 scrub_block_put(sbio->sectors[i]->sblock); 716 bio_put(sbio->bio); 717 } 718 719 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { 720 struct scrub_bio *sbio = sctx->bios[i]; 721 722 if (!sbio) 723 break; 724 kfree(sbio); 725 } 726 727 kfree(sctx->wr_curr_bio); 728 scrub_free_csums(sctx); 729 kfree(sctx); 730 } 731 732 static void scrub_put_ctx(struct scrub_ctx *sctx) 733 { 734 if (refcount_dec_and_test(&sctx->refs)) 735 scrub_free_ctx(sctx); 736 } 737 738 static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( 739 struct btrfs_fs_info *fs_info, int is_dev_replace) 740 { 741 struct scrub_ctx *sctx; 742 int i; 743 744 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); 745 if (!sctx) 746 goto nomem; 747 refcount_set(&sctx->refs, 1); 748 sctx->is_dev_replace = is_dev_replace; 749 sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO; 750 sctx->curr = -1; 751 sctx->fs_info = fs_info; 752 INIT_LIST_HEAD(&sctx->csum_list); 753 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { 754 struct scrub_bio *sbio; 755 756 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL); 757 if (!sbio) 758 goto nomem; 759 sctx->bios[i] = sbio; 760 761 sbio->index = i; 762 sbio->sctx = sctx; 763 sbio->sector_count = 0; 764 INIT_WORK(&sbio->work, scrub_bio_end_io_worker); 765 766 if (i != SCRUB_BIOS_PER_SCTX - 1) 767 sctx->bios[i]->next_free = i + 1; 768 else 769 sctx->bios[i]->next_free = -1; 770 } 771 sctx->first_free = 0; 772 atomic_set(&sctx->bios_in_flight, 0); 773 atomic_set(&sctx->workers_pending, 0); 774 atomic_set(&sctx->cancel_req, 0); 775 776 spin_lock_init(&sctx->list_lock); 777 spin_lock_init(&sctx->stat_lock); 778 init_waitqueue_head(&sctx->list_wait); 779 sctx->throttle_deadline = 0; 780 781 WARN_ON(sctx->wr_curr_bio != NULL); 782 mutex_init(&sctx->wr_lock); 783 sctx->wr_curr_bio = NULL; 784 if (is_dev_replace) { 785 WARN_ON(!fs_info->dev_replace.tgtdev); 786 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; 787 sctx->flush_all_writes = false; 788 } 789 790 return sctx; 791 792 nomem: 793 scrub_free_ctx(sctx); 794 return ERR_PTR(-ENOMEM); 795 } 796 797 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, 798 void *warn_ctx) 799 { 800 u32 nlink; 801 int ret; 802 int i; 803 unsigned nofs_flag; 804 struct extent_buffer *eb; 805 struct btrfs_inode_item *inode_item; 806 struct scrub_warning *swarn = warn_ctx; 807 struct btrfs_fs_info *fs_info = swarn->dev->fs_info; 808 struct inode_fs_paths *ipath = NULL; 809 struct btrfs_root *local_root; 810 struct btrfs_key key; 811 812 local_root = btrfs_get_fs_root(fs_info, root, true); 813 if (IS_ERR(local_root)) { 814 ret = PTR_ERR(local_root); 815 goto err; 816 } 817 818 /* 819 * this makes the path point to (inum INODE_ITEM ioff) 820 */ 821 key.objectid = inum; 822 key.type = BTRFS_INODE_ITEM_KEY; 823 key.offset = 0; 824 825 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); 826 if (ret) { 827 btrfs_put_root(local_root); 828 btrfs_release_path(swarn->path); 829 goto err; 830 } 831 832 eb = swarn->path->nodes[0]; 833 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], 834 struct btrfs_inode_item); 835 nlink = btrfs_inode_nlink(eb, inode_item); 836 btrfs_release_path(swarn->path); 837 838 /* 839 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub 840 * uses GFP_NOFS in this context, so we keep it consistent but it does 841 * not seem to be strictly necessary. 842 */ 843 nofs_flag = memalloc_nofs_save(); 844 ipath = init_ipath(4096, local_root, swarn->path); 845 memalloc_nofs_restore(nofs_flag); 846 if (IS_ERR(ipath)) { 847 btrfs_put_root(local_root); 848 ret = PTR_ERR(ipath); 849 ipath = NULL; 850 goto err; 851 } 852 ret = paths_from_inode(inum, ipath); 853 854 if (ret < 0) 855 goto err; 856 857 /* 858 * we deliberately ignore the bit ipath might have been too small to 859 * hold all of the paths here 860 */ 861 for (i = 0; i < ipath->fspath->elem_cnt; ++i) 862 btrfs_warn_in_rcu(fs_info, 863 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)", 864 swarn->errstr, swarn->logical, 865 rcu_str_deref(swarn->dev->name), 866 swarn->physical, 867 root, inum, offset, 868 fs_info->sectorsize, nlink, 869 (char *)(unsigned long)ipath->fspath->val[i]); 870 871 btrfs_put_root(local_root); 872 free_ipath(ipath); 873 return 0; 874 875 err: 876 btrfs_warn_in_rcu(fs_info, 877 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d", 878 swarn->errstr, swarn->logical, 879 rcu_str_deref(swarn->dev->name), 880 swarn->physical, 881 root, inum, offset, ret); 882 883 free_ipath(ipath); 884 return 0; 885 } 886 887 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) 888 { 889 struct btrfs_device *dev; 890 struct btrfs_fs_info *fs_info; 891 struct btrfs_path *path; 892 struct btrfs_key found_key; 893 struct extent_buffer *eb; 894 struct btrfs_extent_item *ei; 895 struct scrub_warning swarn; 896 unsigned long ptr = 0; 897 u64 extent_item_pos; 898 u64 flags = 0; 899 u64 ref_root; 900 u32 item_size; 901 u8 ref_level = 0; 902 int ret; 903 904 WARN_ON(sblock->sector_count < 1); 905 dev = sblock->dev; 906 fs_info = sblock->sctx->fs_info; 907 908 /* Super block error, no need to search extent tree. */ 909 if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { 910 btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu", 911 errstr, rcu_str_deref(dev->name), 912 sblock->physical); 913 return; 914 } 915 path = btrfs_alloc_path(); 916 if (!path) 917 return; 918 919 swarn.physical = sblock->physical; 920 swarn.logical = sblock->logical; 921 swarn.errstr = errstr; 922 swarn.dev = NULL; 923 924 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, 925 &flags); 926 if (ret < 0) 927 goto out; 928 929 extent_item_pos = swarn.logical - found_key.objectid; 930 swarn.extent_item_size = found_key.offset; 931 932 eb = path->nodes[0]; 933 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); 934 item_size = btrfs_item_size(eb, path->slots[0]); 935 936 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 937 do { 938 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, 939 item_size, &ref_root, 940 &ref_level); 941 btrfs_warn_in_rcu(fs_info, 942 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu", 943 errstr, swarn.logical, 944 rcu_str_deref(dev->name), 945 swarn.physical, 946 ref_level ? "node" : "leaf", 947 ret < 0 ? -1 : ref_level, 948 ret < 0 ? -1 : ref_root); 949 } while (ret != 1); 950 btrfs_release_path(path); 951 } else { 952 btrfs_release_path(path); 953 swarn.path = path; 954 swarn.dev = dev; 955 iterate_extent_inodes(fs_info, found_key.objectid, 956 extent_item_pos, 1, 957 scrub_print_warning_inode, &swarn, false); 958 } 959 960 out: 961 btrfs_free_path(path); 962 } 963 964 static inline void scrub_get_recover(struct scrub_recover *recover) 965 { 966 refcount_inc(&recover->refs); 967 } 968 969 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info, 970 struct scrub_recover *recover) 971 { 972 if (refcount_dec_and_test(&recover->refs)) { 973 btrfs_bio_counter_dec(fs_info); 974 btrfs_put_bioc(recover->bioc); 975 kfree(recover); 976 } 977 } 978 979 /* 980 * scrub_handle_errored_block gets called when either verification of the 981 * sectors failed or the bio failed to read, e.g. with EIO. In the latter 982 * case, this function handles all sectors in the bio, even though only one 983 * may be bad. 984 * The goal of this function is to repair the errored block by using the 985 * contents of one of the mirrors. 986 */ 987 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) 988 { 989 struct scrub_ctx *sctx = sblock_to_check->sctx; 990 struct btrfs_device *dev = sblock_to_check->dev; 991 struct btrfs_fs_info *fs_info; 992 u64 logical; 993 unsigned int failed_mirror_index; 994 unsigned int is_metadata; 995 unsigned int have_csum; 996 /* One scrub_block for each mirror */ 997 struct scrub_block *sblocks_for_recheck[BTRFS_MAX_MIRRORS] = { 0 }; 998 struct scrub_block *sblock_bad; 999 int ret; 1000 int mirror_index; 1001 int sector_num; 1002 int success; 1003 bool full_stripe_locked; 1004 unsigned int nofs_flag; 1005 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, 1006 DEFAULT_RATELIMIT_BURST); 1007 1008 BUG_ON(sblock_to_check->sector_count < 1); 1009 fs_info = sctx->fs_info; 1010 if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { 1011 /* 1012 * If we find an error in a super block, we just report it. 1013 * They will get written with the next transaction commit 1014 * anyway 1015 */ 1016 scrub_print_warning("super block error", sblock_to_check); 1017 spin_lock(&sctx->stat_lock); 1018 ++sctx->stat.super_errors; 1019 spin_unlock(&sctx->stat_lock); 1020 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); 1021 return 0; 1022 } 1023 logical = sblock_to_check->logical; 1024 ASSERT(sblock_to_check->mirror_num); 1025 failed_mirror_index = sblock_to_check->mirror_num - 1; 1026 is_metadata = !(sblock_to_check->sectors[0]->flags & 1027 BTRFS_EXTENT_FLAG_DATA); 1028 have_csum = sblock_to_check->sectors[0]->have_csum; 1029 1030 if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical)) 1031 return 0; 1032 1033 /* 1034 * We must use GFP_NOFS because the scrub task might be waiting for a 1035 * worker task executing this function and in turn a transaction commit 1036 * might be waiting the scrub task to pause (which needs to wait for all 1037 * the worker tasks to complete before pausing). 1038 * We do allocations in the workers through insert_full_stripe_lock() 1039 * and scrub_add_sector_to_wr_bio(), which happens down the call chain of 1040 * this function. 1041 */ 1042 nofs_flag = memalloc_nofs_save(); 1043 /* 1044 * For RAID5/6, race can happen for a different device scrub thread. 1045 * For data corruption, Parity and Data threads will both try 1046 * to recovery the data. 1047 * Race can lead to doubly added csum error, or even unrecoverable 1048 * error. 1049 */ 1050 ret = lock_full_stripe(fs_info, logical, &full_stripe_locked); 1051 if (ret < 0) { 1052 memalloc_nofs_restore(nofs_flag); 1053 spin_lock(&sctx->stat_lock); 1054 if (ret == -ENOMEM) 1055 sctx->stat.malloc_errors++; 1056 sctx->stat.read_errors++; 1057 sctx->stat.uncorrectable_errors++; 1058 spin_unlock(&sctx->stat_lock); 1059 return ret; 1060 } 1061 1062 /* 1063 * read all mirrors one after the other. This includes to 1064 * re-read the extent or metadata block that failed (that was 1065 * the cause that this fixup code is called) another time, 1066 * sector by sector this time in order to know which sectors 1067 * caused I/O errors and which ones are good (for all mirrors). 1068 * It is the goal to handle the situation when more than one 1069 * mirror contains I/O errors, but the errors do not 1070 * overlap, i.e. the data can be repaired by selecting the 1071 * sectors from those mirrors without I/O error on the 1072 * particular sectors. One example (with blocks >= 2 * sectorsize) 1073 * would be that mirror #1 has an I/O error on the first sector, 1074 * the second sector is good, and mirror #2 has an I/O error on 1075 * the second sector, but the first sector is good. 1076 * Then the first sector of the first mirror can be repaired by 1077 * taking the first sector of the second mirror, and the 1078 * second sector of the second mirror can be repaired by 1079 * copying the contents of the 2nd sector of the 1st mirror. 1080 * One more note: if the sectors of one mirror contain I/O 1081 * errors, the checksum cannot be verified. In order to get 1082 * the best data for repairing, the first attempt is to find 1083 * a mirror without I/O errors and with a validated checksum. 1084 * Only if this is not possible, the sectors are picked from 1085 * mirrors with I/O errors without considering the checksum. 1086 * If the latter is the case, at the end, the checksum of the 1087 * repaired area is verified in order to correctly maintain 1088 * the statistics. 1089 */ 1090 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) { 1091 /* 1092 * Note: the two members refs and outstanding_sectors are not 1093 * used in the blocks that are used for the recheck procedure. 1094 * 1095 * But alloc_scrub_block() will initialize sblock::ref anyway, 1096 * so we can use scrub_block_put() to clean them up. 1097 * 1098 * And here we don't setup the physical/dev for the sblock yet, 1099 * they will be correctly initialized in scrub_setup_recheck_block(). 1100 */ 1101 sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx, NULL, 1102 logical, 0, 0, mirror_index); 1103 if (!sblocks_for_recheck[mirror_index]) { 1104 spin_lock(&sctx->stat_lock); 1105 sctx->stat.malloc_errors++; 1106 sctx->stat.read_errors++; 1107 sctx->stat.uncorrectable_errors++; 1108 spin_unlock(&sctx->stat_lock); 1109 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); 1110 goto out; 1111 } 1112 } 1113 1114 /* Setup the context, map the logical blocks and alloc the sectors */ 1115 ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck); 1116 if (ret) { 1117 spin_lock(&sctx->stat_lock); 1118 sctx->stat.read_errors++; 1119 sctx->stat.uncorrectable_errors++; 1120 spin_unlock(&sctx->stat_lock); 1121 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); 1122 goto out; 1123 } 1124 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS); 1125 sblock_bad = sblocks_for_recheck[failed_mirror_index]; 1126 1127 /* build and submit the bios for the failed mirror, check checksums */ 1128 scrub_recheck_block(fs_info, sblock_bad, 1); 1129 1130 if (!sblock_bad->header_error && !sblock_bad->checksum_error && 1131 sblock_bad->no_io_error_seen) { 1132 /* 1133 * The error disappeared after reading sector by sector, or 1134 * the area was part of a huge bio and other parts of the 1135 * bio caused I/O errors, or the block layer merged several 1136 * read requests into one and the error is caused by a 1137 * different bio (usually one of the two latter cases is 1138 * the cause) 1139 */ 1140 spin_lock(&sctx->stat_lock); 1141 sctx->stat.unverified_errors++; 1142 sblock_to_check->data_corrected = 1; 1143 spin_unlock(&sctx->stat_lock); 1144 1145 if (sctx->is_dev_replace) 1146 scrub_write_block_to_dev_replace(sblock_bad); 1147 goto out; 1148 } 1149 1150 if (!sblock_bad->no_io_error_seen) { 1151 spin_lock(&sctx->stat_lock); 1152 sctx->stat.read_errors++; 1153 spin_unlock(&sctx->stat_lock); 1154 if (__ratelimit(&rs)) 1155 scrub_print_warning("i/o error", sblock_to_check); 1156 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); 1157 } else if (sblock_bad->checksum_error) { 1158 spin_lock(&sctx->stat_lock); 1159 sctx->stat.csum_errors++; 1160 spin_unlock(&sctx->stat_lock); 1161 if (__ratelimit(&rs)) 1162 scrub_print_warning("checksum error", sblock_to_check); 1163 btrfs_dev_stat_inc_and_print(dev, 1164 BTRFS_DEV_STAT_CORRUPTION_ERRS); 1165 } else if (sblock_bad->header_error) { 1166 spin_lock(&sctx->stat_lock); 1167 sctx->stat.verify_errors++; 1168 spin_unlock(&sctx->stat_lock); 1169 if (__ratelimit(&rs)) 1170 scrub_print_warning("checksum/header error", 1171 sblock_to_check); 1172 if (sblock_bad->generation_error) 1173 btrfs_dev_stat_inc_and_print(dev, 1174 BTRFS_DEV_STAT_GENERATION_ERRS); 1175 else 1176 btrfs_dev_stat_inc_and_print(dev, 1177 BTRFS_DEV_STAT_CORRUPTION_ERRS); 1178 } 1179 1180 if (sctx->readonly) { 1181 ASSERT(!sctx->is_dev_replace); 1182 goto out; 1183 } 1184 1185 /* 1186 * now build and submit the bios for the other mirrors, check 1187 * checksums. 1188 * First try to pick the mirror which is completely without I/O 1189 * errors and also does not have a checksum error. 1190 * If one is found, and if a checksum is present, the full block 1191 * that is known to contain an error is rewritten. Afterwards 1192 * the block is known to be corrected. 1193 * If a mirror is found which is completely correct, and no 1194 * checksum is present, only those sectors are rewritten that had 1195 * an I/O error in the block to be repaired, since it cannot be 1196 * determined, which copy of the other sectors is better (and it 1197 * could happen otherwise that a correct sector would be 1198 * overwritten by a bad one). 1199 */ 1200 for (mirror_index = 0; ;mirror_index++) { 1201 struct scrub_block *sblock_other; 1202 1203 if (mirror_index == failed_mirror_index) 1204 continue; 1205 1206 /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */ 1207 if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) { 1208 if (mirror_index >= BTRFS_MAX_MIRRORS) 1209 break; 1210 if (!sblocks_for_recheck[mirror_index]->sector_count) 1211 break; 1212 1213 sblock_other = sblocks_for_recheck[mirror_index]; 1214 } else { 1215 struct scrub_recover *r = sblock_bad->sectors[0]->recover; 1216 int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs; 1217 1218 if (mirror_index >= max_allowed) 1219 break; 1220 if (!sblocks_for_recheck[1]->sector_count) 1221 break; 1222 1223 ASSERT(failed_mirror_index == 0); 1224 sblock_other = sblocks_for_recheck[1]; 1225 sblock_other->mirror_num = 1 + mirror_index; 1226 } 1227 1228 /* build and submit the bios, check checksums */ 1229 scrub_recheck_block(fs_info, sblock_other, 0); 1230 1231 if (!sblock_other->header_error && 1232 !sblock_other->checksum_error && 1233 sblock_other->no_io_error_seen) { 1234 if (sctx->is_dev_replace) { 1235 scrub_write_block_to_dev_replace(sblock_other); 1236 goto corrected_error; 1237 } else { 1238 ret = scrub_repair_block_from_good_copy( 1239 sblock_bad, sblock_other); 1240 if (!ret) 1241 goto corrected_error; 1242 } 1243 } 1244 } 1245 1246 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) 1247 goto did_not_correct_error; 1248 1249 /* 1250 * In case of I/O errors in the area that is supposed to be 1251 * repaired, continue by picking good copies of those sectors. 1252 * Select the good sectors from mirrors to rewrite bad sectors from 1253 * the area to fix. Afterwards verify the checksum of the block 1254 * that is supposed to be repaired. This verification step is 1255 * only done for the purpose of statistic counting and for the 1256 * final scrub report, whether errors remain. 1257 * A perfect algorithm could make use of the checksum and try 1258 * all possible combinations of sectors from the different mirrors 1259 * until the checksum verification succeeds. For example, when 1260 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector 1261 * of mirror #2 is readable but the final checksum test fails, 1262 * then the 2nd sector of mirror #3 could be tried, whether now 1263 * the final checksum succeeds. But this would be a rare 1264 * exception and is therefore not implemented. At least it is 1265 * avoided that the good copy is overwritten. 1266 * A more useful improvement would be to pick the sectors 1267 * without I/O error based on sector sizes (512 bytes on legacy 1268 * disks) instead of on sectorsize. Then maybe 512 byte of one 1269 * mirror could be repaired by taking 512 byte of a different 1270 * mirror, even if other 512 byte sectors in the same sectorsize 1271 * area are unreadable. 1272 */ 1273 success = 1; 1274 for (sector_num = 0; sector_num < sblock_bad->sector_count; 1275 sector_num++) { 1276 struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num]; 1277 struct scrub_block *sblock_other = NULL; 1278 1279 /* Skip no-io-error sectors in scrub */ 1280 if (!sector_bad->io_error && !sctx->is_dev_replace) 1281 continue; 1282 1283 if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) { 1284 /* 1285 * In case of dev replace, if raid56 rebuild process 1286 * didn't work out correct data, then copy the content 1287 * in sblock_bad to make sure target device is identical 1288 * to source device, instead of writing garbage data in 1289 * sblock_for_recheck array to target device. 1290 */ 1291 sblock_other = NULL; 1292 } else if (sector_bad->io_error) { 1293 /* Try to find no-io-error sector in mirrors */ 1294 for (mirror_index = 0; 1295 mirror_index < BTRFS_MAX_MIRRORS && 1296 sblocks_for_recheck[mirror_index]->sector_count > 0; 1297 mirror_index++) { 1298 if (!sblocks_for_recheck[mirror_index]-> 1299 sectors[sector_num]->io_error) { 1300 sblock_other = sblocks_for_recheck[mirror_index]; 1301 break; 1302 } 1303 } 1304 if (!sblock_other) 1305 success = 0; 1306 } 1307 1308 if (sctx->is_dev_replace) { 1309 /* 1310 * Did not find a mirror to fetch the sector from. 1311 * scrub_write_sector_to_dev_replace() handles this 1312 * case (sector->io_error), by filling the block with 1313 * zeros before submitting the write request 1314 */ 1315 if (!sblock_other) 1316 sblock_other = sblock_bad; 1317 1318 if (scrub_write_sector_to_dev_replace(sblock_other, 1319 sector_num) != 0) { 1320 atomic64_inc( 1321 &fs_info->dev_replace.num_write_errors); 1322 success = 0; 1323 } 1324 } else if (sblock_other) { 1325 ret = scrub_repair_sector_from_good_copy(sblock_bad, 1326 sblock_other, 1327 sector_num, 0); 1328 if (0 == ret) 1329 sector_bad->io_error = 0; 1330 else 1331 success = 0; 1332 } 1333 } 1334 1335 if (success && !sctx->is_dev_replace) { 1336 if (is_metadata || have_csum) { 1337 /* 1338 * need to verify the checksum now that all 1339 * sectors on disk are repaired (the write 1340 * request for data to be repaired is on its way). 1341 * Just be lazy and use scrub_recheck_block() 1342 * which re-reads the data before the checksum 1343 * is verified, but most likely the data comes out 1344 * of the page cache. 1345 */ 1346 scrub_recheck_block(fs_info, sblock_bad, 1); 1347 if (!sblock_bad->header_error && 1348 !sblock_bad->checksum_error && 1349 sblock_bad->no_io_error_seen) 1350 goto corrected_error; 1351 else 1352 goto did_not_correct_error; 1353 } else { 1354 corrected_error: 1355 spin_lock(&sctx->stat_lock); 1356 sctx->stat.corrected_errors++; 1357 sblock_to_check->data_corrected = 1; 1358 spin_unlock(&sctx->stat_lock); 1359 btrfs_err_rl_in_rcu(fs_info, 1360 "fixed up error at logical %llu on dev %s", 1361 logical, rcu_str_deref(dev->name)); 1362 } 1363 } else { 1364 did_not_correct_error: 1365 spin_lock(&sctx->stat_lock); 1366 sctx->stat.uncorrectable_errors++; 1367 spin_unlock(&sctx->stat_lock); 1368 btrfs_err_rl_in_rcu(fs_info, 1369 "unable to fixup (regular) error at logical %llu on dev %s", 1370 logical, rcu_str_deref(dev->name)); 1371 } 1372 1373 out: 1374 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) { 1375 struct scrub_block *sblock = sblocks_for_recheck[mirror_index]; 1376 struct scrub_recover *recover; 1377 int sector_index; 1378 1379 /* Not allocated, continue checking the next mirror */ 1380 if (!sblock) 1381 continue; 1382 1383 for (sector_index = 0; sector_index < sblock->sector_count; 1384 sector_index++) { 1385 /* 1386 * Here we just cleanup the recover, each sector will be 1387 * properly cleaned up by later scrub_block_put() 1388 */ 1389 recover = sblock->sectors[sector_index]->recover; 1390 if (recover) { 1391 scrub_put_recover(fs_info, recover); 1392 sblock->sectors[sector_index]->recover = NULL; 1393 } 1394 } 1395 scrub_block_put(sblock); 1396 } 1397 1398 ret = unlock_full_stripe(fs_info, logical, full_stripe_locked); 1399 memalloc_nofs_restore(nofs_flag); 1400 if (ret < 0) 1401 return ret; 1402 return 0; 1403 } 1404 1405 static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc) 1406 { 1407 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5) 1408 return 2; 1409 else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) 1410 return 3; 1411 else 1412 return (int)bioc->num_stripes; 1413 } 1414 1415 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, 1416 u64 *raid_map, 1417 int nstripes, int mirror, 1418 int *stripe_index, 1419 u64 *stripe_offset) 1420 { 1421 int i; 1422 1423 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { 1424 /* RAID5/6 */ 1425 for (i = 0; i < nstripes; i++) { 1426 if (raid_map[i] == RAID6_Q_STRIPE || 1427 raid_map[i] == RAID5_P_STRIPE) 1428 continue; 1429 1430 if (logical >= raid_map[i] && 1431 logical < raid_map[i] + BTRFS_STRIPE_LEN) 1432 break; 1433 } 1434 1435 *stripe_index = i; 1436 *stripe_offset = logical - raid_map[i]; 1437 } else { 1438 /* The other RAID type */ 1439 *stripe_index = mirror; 1440 *stripe_offset = 0; 1441 } 1442 } 1443 1444 static int scrub_setup_recheck_block(struct scrub_block *original_sblock, 1445 struct scrub_block *sblocks_for_recheck[]) 1446 { 1447 struct scrub_ctx *sctx = original_sblock->sctx; 1448 struct btrfs_fs_info *fs_info = sctx->fs_info; 1449 u64 logical = original_sblock->logical; 1450 u64 length = original_sblock->sector_count << fs_info->sectorsize_bits; 1451 u64 generation = original_sblock->sectors[0]->generation; 1452 u64 flags = original_sblock->sectors[0]->flags; 1453 u64 have_csum = original_sblock->sectors[0]->have_csum; 1454 struct scrub_recover *recover; 1455 struct btrfs_io_context *bioc; 1456 u64 sublen; 1457 u64 mapped_length; 1458 u64 stripe_offset; 1459 int stripe_index; 1460 int sector_index = 0; 1461 int mirror_index; 1462 int nmirrors; 1463 int ret; 1464 1465 while (length > 0) { 1466 sublen = min_t(u64, length, fs_info->sectorsize); 1467 mapped_length = sublen; 1468 bioc = NULL; 1469 1470 /* 1471 * With a length of sectorsize, each returned stripe represents 1472 * one mirror 1473 */ 1474 btrfs_bio_counter_inc_blocked(fs_info); 1475 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 1476 logical, &mapped_length, &bioc); 1477 if (ret || !bioc || mapped_length < sublen) { 1478 btrfs_put_bioc(bioc); 1479 btrfs_bio_counter_dec(fs_info); 1480 return -EIO; 1481 } 1482 1483 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); 1484 if (!recover) { 1485 btrfs_put_bioc(bioc); 1486 btrfs_bio_counter_dec(fs_info); 1487 return -ENOMEM; 1488 } 1489 1490 refcount_set(&recover->refs, 1); 1491 recover->bioc = bioc; 1492 recover->map_length = mapped_length; 1493 1494 ASSERT(sector_index < SCRUB_MAX_SECTORS_PER_BLOCK); 1495 1496 nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS); 1497 1498 for (mirror_index = 0; mirror_index < nmirrors; 1499 mirror_index++) { 1500 struct scrub_block *sblock; 1501 struct scrub_sector *sector; 1502 1503 sblock = sblocks_for_recheck[mirror_index]; 1504 sblock->sctx = sctx; 1505 1506 sector = alloc_scrub_sector(sblock, logical, GFP_NOFS); 1507 if (!sector) { 1508 spin_lock(&sctx->stat_lock); 1509 sctx->stat.malloc_errors++; 1510 spin_unlock(&sctx->stat_lock); 1511 scrub_put_recover(fs_info, recover); 1512 return -ENOMEM; 1513 } 1514 sector->flags = flags; 1515 sector->generation = generation; 1516 sector->have_csum = have_csum; 1517 if (have_csum) 1518 memcpy(sector->csum, 1519 original_sblock->sectors[0]->csum, 1520 sctx->fs_info->csum_size); 1521 1522 scrub_stripe_index_and_offset(logical, 1523 bioc->map_type, 1524 bioc->raid_map, 1525 bioc->num_stripes - 1526 bioc->num_tgtdevs, 1527 mirror_index, 1528 &stripe_index, 1529 &stripe_offset); 1530 /* 1531 * We're at the first sector, also populate @sblock 1532 * physical and dev. 1533 */ 1534 if (sector_index == 0) { 1535 sblock->physical = 1536 bioc->stripes[stripe_index].physical + 1537 stripe_offset; 1538 sblock->dev = bioc->stripes[stripe_index].dev; 1539 sblock->physical_for_dev_replace = 1540 original_sblock->physical_for_dev_replace; 1541 } 1542 1543 BUG_ON(sector_index >= original_sblock->sector_count); 1544 scrub_get_recover(recover); 1545 sector->recover = recover; 1546 } 1547 scrub_put_recover(fs_info, recover); 1548 length -= sublen; 1549 logical += sublen; 1550 sector_index++; 1551 } 1552 1553 return 0; 1554 } 1555 1556 static void scrub_bio_wait_endio(struct bio *bio) 1557 { 1558 complete(bio->bi_private); 1559 } 1560 1561 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, 1562 struct bio *bio, 1563 struct scrub_sector *sector) 1564 { 1565 DECLARE_COMPLETION_ONSTACK(done); 1566 1567 bio->bi_iter.bi_sector = (sector->offset + sector->sblock->logical) >> 1568 SECTOR_SHIFT; 1569 bio->bi_private = &done; 1570 bio->bi_end_io = scrub_bio_wait_endio; 1571 raid56_parity_recover(bio, sector->recover->bioc, sector->sblock->mirror_num); 1572 1573 wait_for_completion_io(&done); 1574 return blk_status_to_errno(bio->bi_status); 1575 } 1576 1577 static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info, 1578 struct scrub_block *sblock) 1579 { 1580 struct scrub_sector *first_sector = sblock->sectors[0]; 1581 struct bio *bio; 1582 int i; 1583 1584 /* All sectors in sblock belong to the same stripe on the same device. */ 1585 ASSERT(sblock->dev); 1586 if (!sblock->dev->bdev) 1587 goto out; 1588 1589 bio = bio_alloc(sblock->dev->bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS); 1590 1591 for (i = 0; i < sblock->sector_count; i++) { 1592 struct scrub_sector *sector = sblock->sectors[i]; 1593 1594 bio_add_scrub_sector(bio, sector, fs_info->sectorsize); 1595 } 1596 1597 if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) { 1598 bio_put(bio); 1599 goto out; 1600 } 1601 1602 bio_put(bio); 1603 1604 scrub_recheck_block_checksum(sblock); 1605 1606 return; 1607 out: 1608 for (i = 0; i < sblock->sector_count; i++) 1609 sblock->sectors[i]->io_error = 1; 1610 1611 sblock->no_io_error_seen = 0; 1612 } 1613 1614 /* 1615 * This function will check the on disk data for checksum errors, header errors 1616 * and read I/O errors. If any I/O errors happen, the exact sectors which are 1617 * errored are marked as being bad. The goal is to enable scrub to take those 1618 * sectors that are not errored from all the mirrors so that the sectors that 1619 * are errored in the just handled mirror can be repaired. 1620 */ 1621 static void scrub_recheck_block(struct btrfs_fs_info *fs_info, 1622 struct scrub_block *sblock, 1623 int retry_failed_mirror) 1624 { 1625 int i; 1626 1627 sblock->no_io_error_seen = 1; 1628 1629 /* short cut for raid56 */ 1630 if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0])) 1631 return scrub_recheck_block_on_raid56(fs_info, sblock); 1632 1633 for (i = 0; i < sblock->sector_count; i++) { 1634 struct scrub_sector *sector = sblock->sectors[i]; 1635 struct bio bio; 1636 struct bio_vec bvec; 1637 1638 if (sblock->dev->bdev == NULL) { 1639 sector->io_error = 1; 1640 sblock->no_io_error_seen = 0; 1641 continue; 1642 } 1643 1644 bio_init(&bio, sblock->dev->bdev, &bvec, 1, REQ_OP_READ); 1645 bio_add_scrub_sector(&bio, sector, fs_info->sectorsize); 1646 bio.bi_iter.bi_sector = (sblock->physical + sector->offset) >> 1647 SECTOR_SHIFT; 1648 1649 btrfsic_check_bio(&bio); 1650 if (submit_bio_wait(&bio)) { 1651 sector->io_error = 1; 1652 sblock->no_io_error_seen = 0; 1653 } 1654 1655 bio_uninit(&bio); 1656 } 1657 1658 if (sblock->no_io_error_seen) 1659 scrub_recheck_block_checksum(sblock); 1660 } 1661 1662 static inline int scrub_check_fsid(u8 fsid[], struct scrub_sector *sector) 1663 { 1664 struct btrfs_fs_devices *fs_devices = sector->sblock->dev->fs_devices; 1665 int ret; 1666 1667 ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE); 1668 return !ret; 1669 } 1670 1671 static void scrub_recheck_block_checksum(struct scrub_block *sblock) 1672 { 1673 sblock->header_error = 0; 1674 sblock->checksum_error = 0; 1675 sblock->generation_error = 0; 1676 1677 if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA) 1678 scrub_checksum_data(sblock); 1679 else 1680 scrub_checksum_tree_block(sblock); 1681 } 1682 1683 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, 1684 struct scrub_block *sblock_good) 1685 { 1686 int i; 1687 int ret = 0; 1688 1689 for (i = 0; i < sblock_bad->sector_count; i++) { 1690 int ret_sub; 1691 1692 ret_sub = scrub_repair_sector_from_good_copy(sblock_bad, 1693 sblock_good, i, 1); 1694 if (ret_sub) 1695 ret = ret_sub; 1696 } 1697 1698 return ret; 1699 } 1700 1701 static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad, 1702 struct scrub_block *sblock_good, 1703 int sector_num, int force_write) 1704 { 1705 struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num]; 1706 struct scrub_sector *sector_good = sblock_good->sectors[sector_num]; 1707 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; 1708 const u32 sectorsize = fs_info->sectorsize; 1709 1710 if (force_write || sblock_bad->header_error || 1711 sblock_bad->checksum_error || sector_bad->io_error) { 1712 struct bio bio; 1713 struct bio_vec bvec; 1714 int ret; 1715 1716 if (!sblock_bad->dev->bdev) { 1717 btrfs_warn_rl(fs_info, 1718 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected"); 1719 return -EIO; 1720 } 1721 1722 bio_init(&bio, sblock_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE); 1723 bio.bi_iter.bi_sector = (sblock_bad->physical + 1724 sector_bad->offset) >> SECTOR_SHIFT; 1725 ret = bio_add_scrub_sector(&bio, sector_good, sectorsize); 1726 1727 btrfsic_check_bio(&bio); 1728 ret = submit_bio_wait(&bio); 1729 bio_uninit(&bio); 1730 1731 if (ret) { 1732 btrfs_dev_stat_inc_and_print(sblock_bad->dev, 1733 BTRFS_DEV_STAT_WRITE_ERRS); 1734 atomic64_inc(&fs_info->dev_replace.num_write_errors); 1735 return -EIO; 1736 } 1737 } 1738 1739 return 0; 1740 } 1741 1742 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock) 1743 { 1744 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; 1745 int i; 1746 1747 /* 1748 * This block is used for the check of the parity on the source device, 1749 * so the data needn't be written into the destination device. 1750 */ 1751 if (sblock->sparity) 1752 return; 1753 1754 for (i = 0; i < sblock->sector_count; i++) { 1755 int ret; 1756 1757 ret = scrub_write_sector_to_dev_replace(sblock, i); 1758 if (ret) 1759 atomic64_inc(&fs_info->dev_replace.num_write_errors); 1760 } 1761 } 1762 1763 static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num) 1764 { 1765 const u32 sectorsize = sblock->sctx->fs_info->sectorsize; 1766 struct scrub_sector *sector = sblock->sectors[sector_num]; 1767 1768 if (sector->io_error) 1769 memset(scrub_sector_get_kaddr(sector), 0, sectorsize); 1770 1771 return scrub_add_sector_to_wr_bio(sblock->sctx, sector); 1772 } 1773 1774 static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical) 1775 { 1776 int ret = 0; 1777 u64 length; 1778 1779 if (!btrfs_is_zoned(sctx->fs_info)) 1780 return 0; 1781 1782 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) 1783 return 0; 1784 1785 if (sctx->write_pointer < physical) { 1786 length = physical - sctx->write_pointer; 1787 1788 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, 1789 sctx->write_pointer, length); 1790 if (!ret) 1791 sctx->write_pointer = physical; 1792 } 1793 return ret; 1794 } 1795 1796 static void scrub_block_get(struct scrub_block *sblock) 1797 { 1798 refcount_inc(&sblock->refs); 1799 } 1800 1801 static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx, 1802 struct scrub_sector *sector) 1803 { 1804 struct scrub_block *sblock = sector->sblock; 1805 struct scrub_bio *sbio; 1806 int ret; 1807 const u32 sectorsize = sctx->fs_info->sectorsize; 1808 1809 mutex_lock(&sctx->wr_lock); 1810 again: 1811 if (!sctx->wr_curr_bio) { 1812 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), 1813 GFP_KERNEL); 1814 if (!sctx->wr_curr_bio) { 1815 mutex_unlock(&sctx->wr_lock); 1816 return -ENOMEM; 1817 } 1818 sctx->wr_curr_bio->sctx = sctx; 1819 sctx->wr_curr_bio->sector_count = 0; 1820 } 1821 sbio = sctx->wr_curr_bio; 1822 if (sbio->sector_count == 0) { 1823 ret = fill_writer_pointer_gap(sctx, sector->offset + 1824 sblock->physical_for_dev_replace); 1825 if (ret) { 1826 mutex_unlock(&sctx->wr_lock); 1827 return ret; 1828 } 1829 1830 sbio->physical = sblock->physical_for_dev_replace + sector->offset; 1831 sbio->logical = sblock->logical + sector->offset; 1832 sbio->dev = sctx->wr_tgtdev; 1833 if (!sbio->bio) { 1834 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio, 1835 REQ_OP_WRITE, GFP_NOFS); 1836 } 1837 sbio->bio->bi_private = sbio; 1838 sbio->bio->bi_end_io = scrub_wr_bio_end_io; 1839 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9; 1840 sbio->status = 0; 1841 } else if (sbio->physical + sbio->sector_count * sectorsize != 1842 sblock->physical_for_dev_replace + sector->offset || 1843 sbio->logical + sbio->sector_count * sectorsize != 1844 sblock->logical + sector->offset) { 1845 scrub_wr_submit(sctx); 1846 goto again; 1847 } 1848 1849 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize); 1850 if (ret != sectorsize) { 1851 if (sbio->sector_count < 1) { 1852 bio_put(sbio->bio); 1853 sbio->bio = NULL; 1854 mutex_unlock(&sctx->wr_lock); 1855 return -EIO; 1856 } 1857 scrub_wr_submit(sctx); 1858 goto again; 1859 } 1860 1861 sbio->sectors[sbio->sector_count] = sector; 1862 scrub_sector_get(sector); 1863 /* 1864 * Since ssector no longer holds a page, but uses sblock::pages, we 1865 * have to ensure the sblock had not been freed before our write bio 1866 * finished. 1867 */ 1868 scrub_block_get(sector->sblock); 1869 1870 sbio->sector_count++; 1871 if (sbio->sector_count == sctx->sectors_per_bio) 1872 scrub_wr_submit(sctx); 1873 mutex_unlock(&sctx->wr_lock); 1874 1875 return 0; 1876 } 1877 1878 static void scrub_wr_submit(struct scrub_ctx *sctx) 1879 { 1880 struct scrub_bio *sbio; 1881 1882 if (!sctx->wr_curr_bio) 1883 return; 1884 1885 sbio = sctx->wr_curr_bio; 1886 sctx->wr_curr_bio = NULL; 1887 scrub_pending_bio_inc(sctx); 1888 /* process all writes in a single worker thread. Then the block layer 1889 * orders the requests before sending them to the driver which 1890 * doubled the write performance on spinning disks when measured 1891 * with Linux 3.5 */ 1892 btrfsic_check_bio(sbio->bio); 1893 submit_bio(sbio->bio); 1894 1895 if (btrfs_is_zoned(sctx->fs_info)) 1896 sctx->write_pointer = sbio->physical + sbio->sector_count * 1897 sctx->fs_info->sectorsize; 1898 } 1899 1900 static void scrub_wr_bio_end_io(struct bio *bio) 1901 { 1902 struct scrub_bio *sbio = bio->bi_private; 1903 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; 1904 1905 sbio->status = bio->bi_status; 1906 sbio->bio = bio; 1907 1908 INIT_WORK(&sbio->work, scrub_wr_bio_end_io_worker); 1909 queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); 1910 } 1911 1912 static void scrub_wr_bio_end_io_worker(struct work_struct *work) 1913 { 1914 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); 1915 struct scrub_ctx *sctx = sbio->sctx; 1916 int i; 1917 1918 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO); 1919 if (sbio->status) { 1920 struct btrfs_dev_replace *dev_replace = 1921 &sbio->sctx->fs_info->dev_replace; 1922 1923 for (i = 0; i < sbio->sector_count; i++) { 1924 struct scrub_sector *sector = sbio->sectors[i]; 1925 1926 sector->io_error = 1; 1927 atomic64_inc(&dev_replace->num_write_errors); 1928 } 1929 } 1930 1931 /* 1932 * In scrub_add_sector_to_wr_bio() we grab extra ref for sblock, now in 1933 * endio we should put the sblock. 1934 */ 1935 for (i = 0; i < sbio->sector_count; i++) { 1936 scrub_block_put(sbio->sectors[i]->sblock); 1937 scrub_sector_put(sbio->sectors[i]); 1938 } 1939 1940 bio_put(sbio->bio); 1941 kfree(sbio); 1942 scrub_pending_bio_dec(sctx); 1943 } 1944 1945 static int scrub_checksum(struct scrub_block *sblock) 1946 { 1947 u64 flags; 1948 int ret; 1949 1950 /* 1951 * No need to initialize these stats currently, 1952 * because this function only use return value 1953 * instead of these stats value. 1954 * 1955 * Todo: 1956 * always use stats 1957 */ 1958 sblock->header_error = 0; 1959 sblock->generation_error = 0; 1960 sblock->checksum_error = 0; 1961 1962 WARN_ON(sblock->sector_count < 1); 1963 flags = sblock->sectors[0]->flags; 1964 ret = 0; 1965 if (flags & BTRFS_EXTENT_FLAG_DATA) 1966 ret = scrub_checksum_data(sblock); 1967 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) 1968 ret = scrub_checksum_tree_block(sblock); 1969 else if (flags & BTRFS_EXTENT_FLAG_SUPER) 1970 ret = scrub_checksum_super(sblock); 1971 else 1972 WARN_ON(1); 1973 if (ret) 1974 scrub_handle_errored_block(sblock); 1975 1976 return ret; 1977 } 1978 1979 static int scrub_checksum_data(struct scrub_block *sblock) 1980 { 1981 struct scrub_ctx *sctx = sblock->sctx; 1982 struct btrfs_fs_info *fs_info = sctx->fs_info; 1983 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 1984 u8 csum[BTRFS_CSUM_SIZE]; 1985 struct scrub_sector *sector; 1986 char *kaddr; 1987 1988 BUG_ON(sblock->sector_count < 1); 1989 sector = sblock->sectors[0]; 1990 if (!sector->have_csum) 1991 return 0; 1992 1993 kaddr = scrub_sector_get_kaddr(sector); 1994 1995 shash->tfm = fs_info->csum_shash; 1996 crypto_shash_init(shash); 1997 1998 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); 1999 2000 if (memcmp(csum, sector->csum, fs_info->csum_size)) 2001 sblock->checksum_error = 1; 2002 return sblock->checksum_error; 2003 } 2004 2005 static int scrub_checksum_tree_block(struct scrub_block *sblock) 2006 { 2007 struct scrub_ctx *sctx = sblock->sctx; 2008 struct btrfs_header *h; 2009 struct btrfs_fs_info *fs_info = sctx->fs_info; 2010 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 2011 u8 calculated_csum[BTRFS_CSUM_SIZE]; 2012 u8 on_disk_csum[BTRFS_CSUM_SIZE]; 2013 /* 2014 * This is done in sectorsize steps even for metadata as there's a 2015 * constraint for nodesize to be aligned to sectorsize. This will need 2016 * to change so we don't misuse data and metadata units like that. 2017 */ 2018 const u32 sectorsize = sctx->fs_info->sectorsize; 2019 const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits; 2020 int i; 2021 struct scrub_sector *sector; 2022 char *kaddr; 2023 2024 BUG_ON(sblock->sector_count < 1); 2025 2026 /* Each member in sectors is just one sector */ 2027 ASSERT(sblock->sector_count == num_sectors); 2028 2029 sector = sblock->sectors[0]; 2030 kaddr = scrub_sector_get_kaddr(sector); 2031 h = (struct btrfs_header *)kaddr; 2032 memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size); 2033 2034 /* 2035 * we don't use the getter functions here, as we 2036 * a) don't have an extent buffer and 2037 * b) the page is already kmapped 2038 */ 2039 if (sblock->logical != btrfs_stack_header_bytenr(h)) 2040 sblock->header_error = 1; 2041 2042 if (sector->generation != btrfs_stack_header_generation(h)) { 2043 sblock->header_error = 1; 2044 sblock->generation_error = 1; 2045 } 2046 2047 if (!scrub_check_fsid(h->fsid, sector)) 2048 sblock->header_error = 1; 2049 2050 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, 2051 BTRFS_UUID_SIZE)) 2052 sblock->header_error = 1; 2053 2054 shash->tfm = fs_info->csum_shash; 2055 crypto_shash_init(shash); 2056 crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE, 2057 sectorsize - BTRFS_CSUM_SIZE); 2058 2059 for (i = 1; i < num_sectors; i++) { 2060 kaddr = scrub_sector_get_kaddr(sblock->sectors[i]); 2061 crypto_shash_update(shash, kaddr, sectorsize); 2062 } 2063 2064 crypto_shash_final(shash, calculated_csum); 2065 if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size)) 2066 sblock->checksum_error = 1; 2067 2068 return sblock->header_error || sblock->checksum_error; 2069 } 2070 2071 static int scrub_checksum_super(struct scrub_block *sblock) 2072 { 2073 struct btrfs_super_block *s; 2074 struct scrub_ctx *sctx = sblock->sctx; 2075 struct btrfs_fs_info *fs_info = sctx->fs_info; 2076 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 2077 u8 calculated_csum[BTRFS_CSUM_SIZE]; 2078 struct scrub_sector *sector; 2079 char *kaddr; 2080 int fail_gen = 0; 2081 int fail_cor = 0; 2082 2083 BUG_ON(sblock->sector_count < 1); 2084 sector = sblock->sectors[0]; 2085 kaddr = scrub_sector_get_kaddr(sector); 2086 s = (struct btrfs_super_block *)kaddr; 2087 2088 if (sblock->logical != btrfs_super_bytenr(s)) 2089 ++fail_cor; 2090 2091 if (sector->generation != btrfs_super_generation(s)) 2092 ++fail_gen; 2093 2094 if (!scrub_check_fsid(s->fsid, sector)) 2095 ++fail_cor; 2096 2097 shash->tfm = fs_info->csum_shash; 2098 crypto_shash_init(shash); 2099 crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE, 2100 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum); 2101 2102 if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size)) 2103 ++fail_cor; 2104 2105 return fail_cor + fail_gen; 2106 } 2107 2108 static void scrub_block_put(struct scrub_block *sblock) 2109 { 2110 if (refcount_dec_and_test(&sblock->refs)) { 2111 int i; 2112 2113 if (sblock->sparity) 2114 scrub_parity_put(sblock->sparity); 2115 2116 for (i = 0; i < sblock->sector_count; i++) 2117 scrub_sector_put(sblock->sectors[i]); 2118 for (i = 0; i < DIV_ROUND_UP(sblock->len, PAGE_SIZE); i++) { 2119 if (sblock->pages[i]) { 2120 detach_scrub_page_private(sblock->pages[i]); 2121 __free_page(sblock->pages[i]); 2122 } 2123 } 2124 kfree(sblock); 2125 } 2126 } 2127 2128 static void scrub_sector_get(struct scrub_sector *sector) 2129 { 2130 atomic_inc(§or->refs); 2131 } 2132 2133 static void scrub_sector_put(struct scrub_sector *sector) 2134 { 2135 if (atomic_dec_and_test(§or->refs)) 2136 kfree(sector); 2137 } 2138 2139 /* 2140 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1 2141 * second. Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max. 2142 */ 2143 static void scrub_throttle(struct scrub_ctx *sctx) 2144 { 2145 const int time_slice = 1000; 2146 struct scrub_bio *sbio; 2147 struct btrfs_device *device; 2148 s64 delta; 2149 ktime_t now; 2150 u32 div; 2151 u64 bwlimit; 2152 2153 sbio = sctx->bios[sctx->curr]; 2154 device = sbio->dev; 2155 bwlimit = READ_ONCE(device->scrub_speed_max); 2156 if (bwlimit == 0) 2157 return; 2158 2159 /* 2160 * Slice is divided into intervals when the IO is submitted, adjust by 2161 * bwlimit and maximum of 64 intervals. 2162 */ 2163 div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024))); 2164 div = min_t(u32, 64, div); 2165 2166 /* Start new epoch, set deadline */ 2167 now = ktime_get(); 2168 if (sctx->throttle_deadline == 0) { 2169 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); 2170 sctx->throttle_sent = 0; 2171 } 2172 2173 /* Still in the time to send? */ 2174 if (ktime_before(now, sctx->throttle_deadline)) { 2175 /* If current bio is within the limit, send it */ 2176 sctx->throttle_sent += sbio->bio->bi_iter.bi_size; 2177 if (sctx->throttle_sent <= div_u64(bwlimit, div)) 2178 return; 2179 2180 /* We're over the limit, sleep until the rest of the slice */ 2181 delta = ktime_ms_delta(sctx->throttle_deadline, now); 2182 } else { 2183 /* New request after deadline, start new epoch */ 2184 delta = 0; 2185 } 2186 2187 if (delta) { 2188 long timeout; 2189 2190 timeout = div_u64(delta * HZ, 1000); 2191 schedule_timeout_interruptible(timeout); 2192 } 2193 2194 /* Next call will start the deadline period */ 2195 sctx->throttle_deadline = 0; 2196 } 2197 2198 static void scrub_submit(struct scrub_ctx *sctx) 2199 { 2200 struct scrub_bio *sbio; 2201 2202 if (sctx->curr == -1) 2203 return; 2204 2205 scrub_throttle(sctx); 2206 2207 sbio = sctx->bios[sctx->curr]; 2208 sctx->curr = -1; 2209 scrub_pending_bio_inc(sctx); 2210 btrfsic_check_bio(sbio->bio); 2211 submit_bio(sbio->bio); 2212 } 2213 2214 static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx, 2215 struct scrub_sector *sector) 2216 { 2217 struct scrub_block *sblock = sector->sblock; 2218 struct scrub_bio *sbio; 2219 const u32 sectorsize = sctx->fs_info->sectorsize; 2220 int ret; 2221 2222 again: 2223 /* 2224 * grab a fresh bio or wait for one to become available 2225 */ 2226 while (sctx->curr == -1) { 2227 spin_lock(&sctx->list_lock); 2228 sctx->curr = sctx->first_free; 2229 if (sctx->curr != -1) { 2230 sctx->first_free = sctx->bios[sctx->curr]->next_free; 2231 sctx->bios[sctx->curr]->next_free = -1; 2232 sctx->bios[sctx->curr]->sector_count = 0; 2233 spin_unlock(&sctx->list_lock); 2234 } else { 2235 spin_unlock(&sctx->list_lock); 2236 wait_event(sctx->list_wait, sctx->first_free != -1); 2237 } 2238 } 2239 sbio = sctx->bios[sctx->curr]; 2240 if (sbio->sector_count == 0) { 2241 sbio->physical = sblock->physical + sector->offset; 2242 sbio->logical = sblock->logical + sector->offset; 2243 sbio->dev = sblock->dev; 2244 if (!sbio->bio) { 2245 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio, 2246 REQ_OP_READ, GFP_NOFS); 2247 } 2248 sbio->bio->bi_private = sbio; 2249 sbio->bio->bi_end_io = scrub_bio_end_io; 2250 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9; 2251 sbio->status = 0; 2252 } else if (sbio->physical + sbio->sector_count * sectorsize != 2253 sblock->physical + sector->offset || 2254 sbio->logical + sbio->sector_count * sectorsize != 2255 sblock->logical + sector->offset || 2256 sbio->dev != sblock->dev) { 2257 scrub_submit(sctx); 2258 goto again; 2259 } 2260 2261 sbio->sectors[sbio->sector_count] = sector; 2262 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize); 2263 if (ret != sectorsize) { 2264 if (sbio->sector_count < 1) { 2265 bio_put(sbio->bio); 2266 sbio->bio = NULL; 2267 return -EIO; 2268 } 2269 scrub_submit(sctx); 2270 goto again; 2271 } 2272 2273 scrub_block_get(sblock); /* one for the page added to the bio */ 2274 atomic_inc(&sblock->outstanding_sectors); 2275 sbio->sector_count++; 2276 if (sbio->sector_count == sctx->sectors_per_bio) 2277 scrub_submit(sctx); 2278 2279 return 0; 2280 } 2281 2282 static void scrub_missing_raid56_end_io(struct bio *bio) 2283 { 2284 struct scrub_block *sblock = bio->bi_private; 2285 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; 2286 2287 btrfs_bio_counter_dec(fs_info); 2288 if (bio->bi_status) 2289 sblock->no_io_error_seen = 0; 2290 2291 bio_put(bio); 2292 2293 queue_work(fs_info->scrub_workers, &sblock->work); 2294 } 2295 2296 static void scrub_missing_raid56_worker(struct work_struct *work) 2297 { 2298 struct scrub_block *sblock = container_of(work, struct scrub_block, work); 2299 struct scrub_ctx *sctx = sblock->sctx; 2300 struct btrfs_fs_info *fs_info = sctx->fs_info; 2301 u64 logical; 2302 struct btrfs_device *dev; 2303 2304 logical = sblock->logical; 2305 dev = sblock->dev; 2306 2307 if (sblock->no_io_error_seen) 2308 scrub_recheck_block_checksum(sblock); 2309 2310 if (!sblock->no_io_error_seen) { 2311 spin_lock(&sctx->stat_lock); 2312 sctx->stat.read_errors++; 2313 spin_unlock(&sctx->stat_lock); 2314 btrfs_err_rl_in_rcu(fs_info, 2315 "IO error rebuilding logical %llu for dev %s", 2316 logical, rcu_str_deref(dev->name)); 2317 } else if (sblock->header_error || sblock->checksum_error) { 2318 spin_lock(&sctx->stat_lock); 2319 sctx->stat.uncorrectable_errors++; 2320 spin_unlock(&sctx->stat_lock); 2321 btrfs_err_rl_in_rcu(fs_info, 2322 "failed to rebuild valid logical %llu for dev %s", 2323 logical, rcu_str_deref(dev->name)); 2324 } else { 2325 scrub_write_block_to_dev_replace(sblock); 2326 } 2327 2328 if (sctx->is_dev_replace && sctx->flush_all_writes) { 2329 mutex_lock(&sctx->wr_lock); 2330 scrub_wr_submit(sctx); 2331 mutex_unlock(&sctx->wr_lock); 2332 } 2333 2334 scrub_block_put(sblock); 2335 scrub_pending_bio_dec(sctx); 2336 } 2337 2338 static void scrub_missing_raid56_pages(struct scrub_block *sblock) 2339 { 2340 struct scrub_ctx *sctx = sblock->sctx; 2341 struct btrfs_fs_info *fs_info = sctx->fs_info; 2342 u64 length = sblock->sector_count << fs_info->sectorsize_bits; 2343 u64 logical = sblock->logical; 2344 struct btrfs_io_context *bioc = NULL; 2345 struct bio *bio; 2346 struct btrfs_raid_bio *rbio; 2347 int ret; 2348 int i; 2349 2350 btrfs_bio_counter_inc_blocked(fs_info); 2351 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, 2352 &length, &bioc); 2353 if (ret || !bioc || !bioc->raid_map) 2354 goto bioc_out; 2355 2356 if (WARN_ON(!sctx->is_dev_replace || 2357 !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) { 2358 /* 2359 * We shouldn't be scrubbing a missing device. Even for dev 2360 * replace, we should only get here for RAID 5/6. We either 2361 * managed to mount something with no mirrors remaining or 2362 * there's a bug in scrub_find_good_copy()/btrfs_map_block(). 2363 */ 2364 goto bioc_out; 2365 } 2366 2367 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS); 2368 bio->bi_iter.bi_sector = logical >> 9; 2369 bio->bi_private = sblock; 2370 bio->bi_end_io = scrub_missing_raid56_end_io; 2371 2372 rbio = raid56_alloc_missing_rbio(bio, bioc); 2373 if (!rbio) 2374 goto rbio_out; 2375 2376 for (i = 0; i < sblock->sector_count; i++) { 2377 struct scrub_sector *sector = sblock->sectors[i]; 2378 2379 raid56_add_scrub_pages(rbio, scrub_sector_get_page(sector), 2380 scrub_sector_get_page_offset(sector), 2381 sector->offset + sector->sblock->logical); 2382 } 2383 2384 INIT_WORK(&sblock->work, scrub_missing_raid56_worker); 2385 scrub_block_get(sblock); 2386 scrub_pending_bio_inc(sctx); 2387 raid56_submit_missing_rbio(rbio); 2388 btrfs_put_bioc(bioc); 2389 return; 2390 2391 rbio_out: 2392 bio_put(bio); 2393 bioc_out: 2394 btrfs_bio_counter_dec(fs_info); 2395 btrfs_put_bioc(bioc); 2396 spin_lock(&sctx->stat_lock); 2397 sctx->stat.malloc_errors++; 2398 spin_unlock(&sctx->stat_lock); 2399 } 2400 2401 static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len, 2402 u64 physical, struct btrfs_device *dev, u64 flags, 2403 u64 gen, int mirror_num, u8 *csum, 2404 u64 physical_for_dev_replace) 2405 { 2406 struct scrub_block *sblock; 2407 const u32 sectorsize = sctx->fs_info->sectorsize; 2408 int index; 2409 2410 sblock = alloc_scrub_block(sctx, dev, logical, physical, 2411 physical_for_dev_replace, mirror_num); 2412 if (!sblock) { 2413 spin_lock(&sctx->stat_lock); 2414 sctx->stat.malloc_errors++; 2415 spin_unlock(&sctx->stat_lock); 2416 return -ENOMEM; 2417 } 2418 2419 for (index = 0; len > 0; index++) { 2420 struct scrub_sector *sector; 2421 /* 2422 * Here we will allocate one page for one sector to scrub. 2423 * This is fine if PAGE_SIZE == sectorsize, but will cost 2424 * more memory for PAGE_SIZE > sectorsize case. 2425 */ 2426 u32 l = min(sectorsize, len); 2427 2428 sector = alloc_scrub_sector(sblock, logical, GFP_KERNEL); 2429 if (!sector) { 2430 spin_lock(&sctx->stat_lock); 2431 sctx->stat.malloc_errors++; 2432 spin_unlock(&sctx->stat_lock); 2433 scrub_block_put(sblock); 2434 return -ENOMEM; 2435 } 2436 sector->flags = flags; 2437 sector->generation = gen; 2438 if (csum) { 2439 sector->have_csum = 1; 2440 memcpy(sector->csum, csum, sctx->fs_info->csum_size); 2441 } else { 2442 sector->have_csum = 0; 2443 } 2444 len -= l; 2445 logical += l; 2446 physical += l; 2447 physical_for_dev_replace += l; 2448 } 2449 2450 WARN_ON(sblock->sector_count == 0); 2451 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { 2452 /* 2453 * This case should only be hit for RAID 5/6 device replace. See 2454 * the comment in scrub_missing_raid56_pages() for details. 2455 */ 2456 scrub_missing_raid56_pages(sblock); 2457 } else { 2458 for (index = 0; index < sblock->sector_count; index++) { 2459 struct scrub_sector *sector = sblock->sectors[index]; 2460 int ret; 2461 2462 ret = scrub_add_sector_to_rd_bio(sctx, sector); 2463 if (ret) { 2464 scrub_block_put(sblock); 2465 return ret; 2466 } 2467 } 2468 2469 if (flags & BTRFS_EXTENT_FLAG_SUPER) 2470 scrub_submit(sctx); 2471 } 2472 2473 /* last one frees, either here or in bio completion for last page */ 2474 scrub_block_put(sblock); 2475 return 0; 2476 } 2477 2478 static void scrub_bio_end_io(struct bio *bio) 2479 { 2480 struct scrub_bio *sbio = bio->bi_private; 2481 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; 2482 2483 sbio->status = bio->bi_status; 2484 sbio->bio = bio; 2485 2486 queue_work(fs_info->scrub_workers, &sbio->work); 2487 } 2488 2489 static void scrub_bio_end_io_worker(struct work_struct *work) 2490 { 2491 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); 2492 struct scrub_ctx *sctx = sbio->sctx; 2493 int i; 2494 2495 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO); 2496 if (sbio->status) { 2497 for (i = 0; i < sbio->sector_count; i++) { 2498 struct scrub_sector *sector = sbio->sectors[i]; 2499 2500 sector->io_error = 1; 2501 sector->sblock->no_io_error_seen = 0; 2502 } 2503 } 2504 2505 /* Now complete the scrub_block items that have all pages completed */ 2506 for (i = 0; i < sbio->sector_count; i++) { 2507 struct scrub_sector *sector = sbio->sectors[i]; 2508 struct scrub_block *sblock = sector->sblock; 2509 2510 if (atomic_dec_and_test(&sblock->outstanding_sectors)) 2511 scrub_block_complete(sblock); 2512 scrub_block_put(sblock); 2513 } 2514 2515 bio_put(sbio->bio); 2516 sbio->bio = NULL; 2517 spin_lock(&sctx->list_lock); 2518 sbio->next_free = sctx->first_free; 2519 sctx->first_free = sbio->index; 2520 spin_unlock(&sctx->list_lock); 2521 2522 if (sctx->is_dev_replace && sctx->flush_all_writes) { 2523 mutex_lock(&sctx->wr_lock); 2524 scrub_wr_submit(sctx); 2525 mutex_unlock(&sctx->wr_lock); 2526 } 2527 2528 scrub_pending_bio_dec(sctx); 2529 } 2530 2531 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity, 2532 unsigned long *bitmap, 2533 u64 start, u32 len) 2534 { 2535 u64 offset; 2536 u32 nsectors; 2537 u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits; 2538 2539 if (len >= sparity->stripe_len) { 2540 bitmap_set(bitmap, 0, sparity->nsectors); 2541 return; 2542 } 2543 2544 start -= sparity->logic_start; 2545 start = div64_u64_rem(start, sparity->stripe_len, &offset); 2546 offset = offset >> sectorsize_bits; 2547 nsectors = len >> sectorsize_bits; 2548 2549 if (offset + nsectors <= sparity->nsectors) { 2550 bitmap_set(bitmap, offset, nsectors); 2551 return; 2552 } 2553 2554 bitmap_set(bitmap, offset, sparity->nsectors - offset); 2555 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); 2556 } 2557 2558 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity, 2559 u64 start, u32 len) 2560 { 2561 __scrub_mark_bitmap(sparity, &sparity->ebitmap, start, len); 2562 } 2563 2564 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity, 2565 u64 start, u32 len) 2566 { 2567 __scrub_mark_bitmap(sparity, &sparity->dbitmap, start, len); 2568 } 2569 2570 static void scrub_block_complete(struct scrub_block *sblock) 2571 { 2572 int corrupted = 0; 2573 2574 if (!sblock->no_io_error_seen) { 2575 corrupted = 1; 2576 scrub_handle_errored_block(sblock); 2577 } else { 2578 /* 2579 * if has checksum error, write via repair mechanism in 2580 * dev replace case, otherwise write here in dev replace 2581 * case. 2582 */ 2583 corrupted = scrub_checksum(sblock); 2584 if (!corrupted && sblock->sctx->is_dev_replace) 2585 scrub_write_block_to_dev_replace(sblock); 2586 } 2587 2588 if (sblock->sparity && corrupted && !sblock->data_corrected) { 2589 u64 start = sblock->logical; 2590 u64 end = sblock->logical + 2591 sblock->sectors[sblock->sector_count - 1]->offset + 2592 sblock->sctx->fs_info->sectorsize; 2593 2594 ASSERT(end - start <= U32_MAX); 2595 scrub_parity_mark_sectors_error(sblock->sparity, 2596 start, end - start); 2597 } 2598 } 2599 2600 static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum) 2601 { 2602 sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits; 2603 list_del(&sum->list); 2604 kfree(sum); 2605 } 2606 2607 /* 2608 * Find the desired csum for range [logical, logical + sectorsize), and store 2609 * the csum into @csum. 2610 * 2611 * The search source is sctx->csum_list, which is a pre-populated list 2612 * storing bytenr ordered csum ranges. We're responsible to cleanup any range 2613 * that is before @logical. 2614 * 2615 * Return 0 if there is no csum for the range. 2616 * Return 1 if there is csum for the range and copied to @csum. 2617 */ 2618 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) 2619 { 2620 bool found = false; 2621 2622 while (!list_empty(&sctx->csum_list)) { 2623 struct btrfs_ordered_sum *sum = NULL; 2624 unsigned long index; 2625 unsigned long num_sectors; 2626 2627 sum = list_first_entry(&sctx->csum_list, 2628 struct btrfs_ordered_sum, list); 2629 /* The current csum range is beyond our range, no csum found */ 2630 if (sum->bytenr > logical) 2631 break; 2632 2633 /* 2634 * The current sum is before our bytenr, since scrub is always 2635 * done in bytenr order, the csum will never be used anymore, 2636 * clean it up so that later calls won't bother with the range, 2637 * and continue search the next range. 2638 */ 2639 if (sum->bytenr + sum->len <= logical) { 2640 drop_csum_range(sctx, sum); 2641 continue; 2642 } 2643 2644 /* Now the csum range covers our bytenr, copy the csum */ 2645 found = true; 2646 index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits; 2647 num_sectors = sum->len >> sctx->fs_info->sectorsize_bits; 2648 2649 memcpy(csum, sum->sums + index * sctx->fs_info->csum_size, 2650 sctx->fs_info->csum_size); 2651 2652 /* Cleanup the range if we're at the end of the csum range */ 2653 if (index == num_sectors - 1) 2654 drop_csum_range(sctx, sum); 2655 break; 2656 } 2657 if (!found) 2658 return 0; 2659 return 1; 2660 } 2661 2662 /* scrub extent tries to collect up to 64 kB for each bio */ 2663 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, 2664 u64 logical, u32 len, 2665 u64 physical, struct btrfs_device *dev, u64 flags, 2666 u64 gen, int mirror_num) 2667 { 2668 struct btrfs_device *src_dev = dev; 2669 u64 src_physical = physical; 2670 int src_mirror = mirror_num; 2671 int ret; 2672 u8 csum[BTRFS_CSUM_SIZE]; 2673 u32 blocksize; 2674 2675 /* 2676 * Block size determines how many scrub_block will be allocated. Here 2677 * we use BTRFS_STRIPE_LEN (64KiB) as default limit, so we won't 2678 * allocate too many scrub_block, while still won't cause too large 2679 * bios for large extents. 2680 */ 2681 if (flags & BTRFS_EXTENT_FLAG_DATA) { 2682 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2683 blocksize = map->stripe_len; 2684 else 2685 blocksize = BTRFS_STRIPE_LEN; 2686 spin_lock(&sctx->stat_lock); 2687 sctx->stat.data_extents_scrubbed++; 2688 sctx->stat.data_bytes_scrubbed += len; 2689 spin_unlock(&sctx->stat_lock); 2690 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 2691 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2692 blocksize = map->stripe_len; 2693 else 2694 blocksize = sctx->fs_info->nodesize; 2695 spin_lock(&sctx->stat_lock); 2696 sctx->stat.tree_extents_scrubbed++; 2697 sctx->stat.tree_bytes_scrubbed += len; 2698 spin_unlock(&sctx->stat_lock); 2699 } else { 2700 blocksize = sctx->fs_info->sectorsize; 2701 WARN_ON(1); 2702 } 2703 2704 /* 2705 * For dev-replace case, we can have @dev being a missing device. 2706 * Regular scrub will avoid its execution on missing device at all, 2707 * as that would trigger tons of read error. 2708 * 2709 * Reading from missing device will cause read error counts to 2710 * increase unnecessarily. 2711 * So here we change the read source to a good mirror. 2712 */ 2713 if (sctx->is_dev_replace && !dev->bdev) 2714 scrub_find_good_copy(sctx->fs_info, logical, len, &src_physical, 2715 &src_dev, &src_mirror); 2716 while (len) { 2717 u32 l = min(len, blocksize); 2718 int have_csum = 0; 2719 2720 if (flags & BTRFS_EXTENT_FLAG_DATA) { 2721 /* push csums to sbio */ 2722 have_csum = scrub_find_csum(sctx, logical, csum); 2723 if (have_csum == 0) 2724 ++sctx->stat.no_csum; 2725 } 2726 ret = scrub_sectors(sctx, logical, l, src_physical, src_dev, 2727 flags, gen, src_mirror, 2728 have_csum ? csum : NULL, physical); 2729 if (ret) 2730 return ret; 2731 len -= l; 2732 logical += l; 2733 physical += l; 2734 src_physical += l; 2735 } 2736 return 0; 2737 } 2738 2739 static int scrub_sectors_for_parity(struct scrub_parity *sparity, 2740 u64 logical, u32 len, 2741 u64 physical, struct btrfs_device *dev, 2742 u64 flags, u64 gen, int mirror_num, u8 *csum) 2743 { 2744 struct scrub_ctx *sctx = sparity->sctx; 2745 struct scrub_block *sblock; 2746 const u32 sectorsize = sctx->fs_info->sectorsize; 2747 int index; 2748 2749 ASSERT(IS_ALIGNED(len, sectorsize)); 2750 2751 sblock = alloc_scrub_block(sctx, dev, logical, physical, physical, mirror_num); 2752 if (!sblock) { 2753 spin_lock(&sctx->stat_lock); 2754 sctx->stat.malloc_errors++; 2755 spin_unlock(&sctx->stat_lock); 2756 return -ENOMEM; 2757 } 2758 2759 sblock->sparity = sparity; 2760 scrub_parity_get(sparity); 2761 2762 for (index = 0; len > 0; index++) { 2763 struct scrub_sector *sector; 2764 2765 sector = alloc_scrub_sector(sblock, logical, GFP_KERNEL); 2766 if (!sector) { 2767 spin_lock(&sctx->stat_lock); 2768 sctx->stat.malloc_errors++; 2769 spin_unlock(&sctx->stat_lock); 2770 scrub_block_put(sblock); 2771 return -ENOMEM; 2772 } 2773 sblock->sectors[index] = sector; 2774 /* For scrub parity */ 2775 scrub_sector_get(sector); 2776 list_add_tail(§or->list, &sparity->sectors_list); 2777 sector->flags = flags; 2778 sector->generation = gen; 2779 if (csum) { 2780 sector->have_csum = 1; 2781 memcpy(sector->csum, csum, sctx->fs_info->csum_size); 2782 } else { 2783 sector->have_csum = 0; 2784 } 2785 2786 /* Iterate over the stripe range in sectorsize steps */ 2787 len -= sectorsize; 2788 logical += sectorsize; 2789 physical += sectorsize; 2790 } 2791 2792 WARN_ON(sblock->sector_count == 0); 2793 for (index = 0; index < sblock->sector_count; index++) { 2794 struct scrub_sector *sector = sblock->sectors[index]; 2795 int ret; 2796 2797 ret = scrub_add_sector_to_rd_bio(sctx, sector); 2798 if (ret) { 2799 scrub_block_put(sblock); 2800 return ret; 2801 } 2802 } 2803 2804 /* Last one frees, either here or in bio completion for last sector */ 2805 scrub_block_put(sblock); 2806 return 0; 2807 } 2808 2809 static int scrub_extent_for_parity(struct scrub_parity *sparity, 2810 u64 logical, u32 len, 2811 u64 physical, struct btrfs_device *dev, 2812 u64 flags, u64 gen, int mirror_num) 2813 { 2814 struct scrub_ctx *sctx = sparity->sctx; 2815 int ret; 2816 u8 csum[BTRFS_CSUM_SIZE]; 2817 u32 blocksize; 2818 2819 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { 2820 scrub_parity_mark_sectors_error(sparity, logical, len); 2821 return 0; 2822 } 2823 2824 if (flags & BTRFS_EXTENT_FLAG_DATA) { 2825 blocksize = sparity->stripe_len; 2826 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 2827 blocksize = sparity->stripe_len; 2828 } else { 2829 blocksize = sctx->fs_info->sectorsize; 2830 WARN_ON(1); 2831 } 2832 2833 while (len) { 2834 u32 l = min(len, blocksize); 2835 int have_csum = 0; 2836 2837 if (flags & BTRFS_EXTENT_FLAG_DATA) { 2838 /* push csums to sbio */ 2839 have_csum = scrub_find_csum(sctx, logical, csum); 2840 if (have_csum == 0) 2841 goto skip; 2842 } 2843 ret = scrub_sectors_for_parity(sparity, logical, l, physical, dev, 2844 flags, gen, mirror_num, 2845 have_csum ? csum : NULL); 2846 if (ret) 2847 return ret; 2848 skip: 2849 len -= l; 2850 logical += l; 2851 physical += l; 2852 } 2853 return 0; 2854 } 2855 2856 /* 2857 * Given a physical address, this will calculate it's 2858 * logical offset. if this is a parity stripe, it will return 2859 * the most left data stripe's logical offset. 2860 * 2861 * return 0 if it is a data stripe, 1 means parity stripe. 2862 */ 2863 static int get_raid56_logic_offset(u64 physical, int num, 2864 struct map_lookup *map, u64 *offset, 2865 u64 *stripe_start) 2866 { 2867 int i; 2868 int j = 0; 2869 u64 stripe_nr; 2870 u64 last_offset; 2871 u32 stripe_index; 2872 u32 rot; 2873 const int data_stripes = nr_data_stripes(map); 2874 2875 last_offset = (physical - map->stripes[num].physical) * data_stripes; 2876 if (stripe_start) 2877 *stripe_start = last_offset; 2878 2879 *offset = last_offset; 2880 for (i = 0; i < data_stripes; i++) { 2881 *offset = last_offset + i * map->stripe_len; 2882 2883 stripe_nr = div64_u64(*offset, map->stripe_len); 2884 stripe_nr = div_u64(stripe_nr, data_stripes); 2885 2886 /* Work out the disk rotation on this stripe-set */ 2887 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); 2888 /* calculate which stripe this data locates */ 2889 rot += i; 2890 stripe_index = rot % map->num_stripes; 2891 if (stripe_index == num) 2892 return 0; 2893 if (stripe_index < num) 2894 j++; 2895 } 2896 *offset = last_offset + j * map->stripe_len; 2897 return 1; 2898 } 2899 2900 static void scrub_free_parity(struct scrub_parity *sparity) 2901 { 2902 struct scrub_ctx *sctx = sparity->sctx; 2903 struct scrub_sector *curr, *next; 2904 int nbits; 2905 2906 nbits = bitmap_weight(&sparity->ebitmap, sparity->nsectors); 2907 if (nbits) { 2908 spin_lock(&sctx->stat_lock); 2909 sctx->stat.read_errors += nbits; 2910 sctx->stat.uncorrectable_errors += nbits; 2911 spin_unlock(&sctx->stat_lock); 2912 } 2913 2914 list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) { 2915 list_del_init(&curr->list); 2916 scrub_sector_put(curr); 2917 } 2918 2919 kfree(sparity); 2920 } 2921 2922 static void scrub_parity_bio_endio_worker(struct work_struct *work) 2923 { 2924 struct scrub_parity *sparity = container_of(work, struct scrub_parity, 2925 work); 2926 struct scrub_ctx *sctx = sparity->sctx; 2927 2928 btrfs_bio_counter_dec(sctx->fs_info); 2929 scrub_free_parity(sparity); 2930 scrub_pending_bio_dec(sctx); 2931 } 2932 2933 static void scrub_parity_bio_endio(struct bio *bio) 2934 { 2935 struct scrub_parity *sparity = bio->bi_private; 2936 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; 2937 2938 if (bio->bi_status) 2939 bitmap_or(&sparity->ebitmap, &sparity->ebitmap, 2940 &sparity->dbitmap, sparity->nsectors); 2941 2942 bio_put(bio); 2943 2944 INIT_WORK(&sparity->work, scrub_parity_bio_endio_worker); 2945 queue_work(fs_info->scrub_parity_workers, &sparity->work); 2946 } 2947 2948 static void scrub_parity_check_and_repair(struct scrub_parity *sparity) 2949 { 2950 struct scrub_ctx *sctx = sparity->sctx; 2951 struct btrfs_fs_info *fs_info = sctx->fs_info; 2952 struct bio *bio; 2953 struct btrfs_raid_bio *rbio; 2954 struct btrfs_io_context *bioc = NULL; 2955 u64 length; 2956 int ret; 2957 2958 if (!bitmap_andnot(&sparity->dbitmap, &sparity->dbitmap, 2959 &sparity->ebitmap, sparity->nsectors)) 2960 goto out; 2961 2962 length = sparity->logic_end - sparity->logic_start; 2963 2964 btrfs_bio_counter_inc_blocked(fs_info); 2965 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, 2966 &length, &bioc); 2967 if (ret || !bioc || !bioc->raid_map) 2968 goto bioc_out; 2969 2970 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS); 2971 bio->bi_iter.bi_sector = sparity->logic_start >> 9; 2972 bio->bi_private = sparity; 2973 bio->bi_end_io = scrub_parity_bio_endio; 2974 2975 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, 2976 sparity->scrub_dev, 2977 &sparity->dbitmap, 2978 sparity->nsectors); 2979 btrfs_put_bioc(bioc); 2980 if (!rbio) 2981 goto rbio_out; 2982 2983 scrub_pending_bio_inc(sctx); 2984 raid56_parity_submit_scrub_rbio(rbio); 2985 return; 2986 2987 rbio_out: 2988 bio_put(bio); 2989 bioc_out: 2990 btrfs_bio_counter_dec(fs_info); 2991 bitmap_or(&sparity->ebitmap, &sparity->ebitmap, &sparity->dbitmap, 2992 sparity->nsectors); 2993 spin_lock(&sctx->stat_lock); 2994 sctx->stat.malloc_errors++; 2995 spin_unlock(&sctx->stat_lock); 2996 out: 2997 scrub_free_parity(sparity); 2998 } 2999 3000 static void scrub_parity_get(struct scrub_parity *sparity) 3001 { 3002 refcount_inc(&sparity->refs); 3003 } 3004 3005 static void scrub_parity_put(struct scrub_parity *sparity) 3006 { 3007 if (!refcount_dec_and_test(&sparity->refs)) 3008 return; 3009 3010 scrub_parity_check_and_repair(sparity); 3011 } 3012 3013 /* 3014 * Return 0 if the extent item range covers any byte of the range. 3015 * Return <0 if the extent item is before @search_start. 3016 * Return >0 if the extent item is after @start_start + @search_len. 3017 */ 3018 static int compare_extent_item_range(struct btrfs_path *path, 3019 u64 search_start, u64 search_len) 3020 { 3021 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info; 3022 u64 len; 3023 struct btrfs_key key; 3024 3025 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3026 ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY || 3027 key.type == BTRFS_METADATA_ITEM_KEY); 3028 if (key.type == BTRFS_METADATA_ITEM_KEY) 3029 len = fs_info->nodesize; 3030 else 3031 len = key.offset; 3032 3033 if (key.objectid + len <= search_start) 3034 return -1; 3035 if (key.objectid >= search_start + search_len) 3036 return 1; 3037 return 0; 3038 } 3039 3040 /* 3041 * Locate one extent item which covers any byte in range 3042 * [@search_start, @search_start + @search_length) 3043 * 3044 * If the path is not initialized, we will initialize the search by doing 3045 * a btrfs_search_slot(). 3046 * If the path is already initialized, we will use the path as the initial 3047 * slot, to avoid duplicated btrfs_search_slot() calls. 3048 * 3049 * NOTE: If an extent item starts before @search_start, we will still 3050 * return the extent item. This is for data extent crossing stripe boundary. 3051 * 3052 * Return 0 if we found such extent item, and @path will point to the extent item. 3053 * Return >0 if no such extent item can be found, and @path will be released. 3054 * Return <0 if hit fatal error, and @path will be released. 3055 */ 3056 static int find_first_extent_item(struct btrfs_root *extent_root, 3057 struct btrfs_path *path, 3058 u64 search_start, u64 search_len) 3059 { 3060 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3061 struct btrfs_key key; 3062 int ret; 3063 3064 /* Continue using the existing path */ 3065 if (path->nodes[0]) 3066 goto search_forward; 3067 3068 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) 3069 key.type = BTRFS_METADATA_ITEM_KEY; 3070 else 3071 key.type = BTRFS_EXTENT_ITEM_KEY; 3072 key.objectid = search_start; 3073 key.offset = (u64)-1; 3074 3075 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 3076 if (ret < 0) 3077 return ret; 3078 3079 ASSERT(ret > 0); 3080 /* 3081 * Here we intentionally pass 0 as @min_objectid, as there could be 3082 * an extent item starting before @search_start. 3083 */ 3084 ret = btrfs_previous_extent_item(extent_root, path, 0); 3085 if (ret < 0) 3086 return ret; 3087 /* 3088 * No matter whether we have found an extent item, the next loop will 3089 * properly do every check on the key. 3090 */ 3091 search_forward: 3092 while (true) { 3093 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3094 if (key.objectid >= search_start + search_len) 3095 break; 3096 if (key.type != BTRFS_METADATA_ITEM_KEY && 3097 key.type != BTRFS_EXTENT_ITEM_KEY) 3098 goto next; 3099 3100 ret = compare_extent_item_range(path, search_start, search_len); 3101 if (ret == 0) 3102 return ret; 3103 if (ret > 0) 3104 break; 3105 next: 3106 path->slots[0]++; 3107 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 3108 ret = btrfs_next_leaf(extent_root, path); 3109 if (ret) { 3110 /* Either no more item or fatal error */ 3111 btrfs_release_path(path); 3112 return ret; 3113 } 3114 } 3115 } 3116 btrfs_release_path(path); 3117 return 1; 3118 } 3119 3120 static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret, 3121 u64 *size_ret, u64 *flags_ret, u64 *generation_ret) 3122 { 3123 struct btrfs_key key; 3124 struct btrfs_extent_item *ei; 3125 3126 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3127 ASSERT(key.type == BTRFS_METADATA_ITEM_KEY || 3128 key.type == BTRFS_EXTENT_ITEM_KEY); 3129 *extent_start_ret = key.objectid; 3130 if (key.type == BTRFS_METADATA_ITEM_KEY) 3131 *size_ret = path->nodes[0]->fs_info->nodesize; 3132 else 3133 *size_ret = key.offset; 3134 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); 3135 *flags_ret = btrfs_extent_flags(path->nodes[0], ei); 3136 *generation_ret = btrfs_extent_generation(path->nodes[0], ei); 3137 } 3138 3139 static bool does_range_cross_boundary(u64 extent_start, u64 extent_len, 3140 u64 boundary_start, u64 boudary_len) 3141 { 3142 return (extent_start < boundary_start && 3143 extent_start + extent_len > boundary_start) || 3144 (extent_start < boundary_start + boudary_len && 3145 extent_start + extent_len > boundary_start + boudary_len); 3146 } 3147 3148 static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx, 3149 struct scrub_parity *sparity, 3150 struct map_lookup *map, 3151 struct btrfs_device *sdev, 3152 struct btrfs_path *path, 3153 u64 logical) 3154 { 3155 struct btrfs_fs_info *fs_info = sctx->fs_info; 3156 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical); 3157 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, logical); 3158 u64 cur_logical = logical; 3159 int ret; 3160 3161 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); 3162 3163 /* Path must not be populated */ 3164 ASSERT(!path->nodes[0]); 3165 3166 while (cur_logical < logical + map->stripe_len) { 3167 struct btrfs_io_context *bioc = NULL; 3168 struct btrfs_device *extent_dev; 3169 u64 extent_start; 3170 u64 extent_size; 3171 u64 mapped_length; 3172 u64 extent_flags; 3173 u64 extent_gen; 3174 u64 extent_physical; 3175 u64 extent_mirror_num; 3176 3177 ret = find_first_extent_item(extent_root, path, cur_logical, 3178 logical + map->stripe_len - cur_logical); 3179 /* No more extent item in this data stripe */ 3180 if (ret > 0) { 3181 ret = 0; 3182 break; 3183 } 3184 if (ret < 0) 3185 break; 3186 get_extent_info(path, &extent_start, &extent_size, &extent_flags, 3187 &extent_gen); 3188 3189 /* Metadata should not cross stripe boundaries */ 3190 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && 3191 does_range_cross_boundary(extent_start, extent_size, 3192 logical, map->stripe_len)) { 3193 btrfs_err(fs_info, 3194 "scrub: tree block %llu spanning stripes, ignored. logical=%llu", 3195 extent_start, logical); 3196 spin_lock(&sctx->stat_lock); 3197 sctx->stat.uncorrectable_errors++; 3198 spin_unlock(&sctx->stat_lock); 3199 cur_logical += extent_size; 3200 continue; 3201 } 3202 3203 /* Skip hole range which doesn't have any extent */ 3204 cur_logical = max(extent_start, cur_logical); 3205 3206 /* Truncate the range inside this data stripe */ 3207 extent_size = min(extent_start + extent_size, 3208 logical + map->stripe_len) - cur_logical; 3209 extent_start = cur_logical; 3210 ASSERT(extent_size <= U32_MAX); 3211 3212 scrub_parity_mark_sectors_data(sparity, extent_start, extent_size); 3213 3214 mapped_length = extent_size; 3215 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_start, 3216 &mapped_length, &bioc, 0); 3217 if (!ret && (!bioc || mapped_length < extent_size)) 3218 ret = -EIO; 3219 if (ret) { 3220 btrfs_put_bioc(bioc); 3221 scrub_parity_mark_sectors_error(sparity, extent_start, 3222 extent_size); 3223 break; 3224 } 3225 extent_physical = bioc->stripes[0].physical; 3226 extent_mirror_num = bioc->mirror_num; 3227 extent_dev = bioc->stripes[0].dev; 3228 btrfs_put_bioc(bioc); 3229 3230 ret = btrfs_lookup_csums_range(csum_root, extent_start, 3231 extent_start + extent_size - 1, 3232 &sctx->csum_list, 1, false); 3233 if (ret) { 3234 scrub_parity_mark_sectors_error(sparity, extent_start, 3235 extent_size); 3236 break; 3237 } 3238 3239 ret = scrub_extent_for_parity(sparity, extent_start, 3240 extent_size, extent_physical, 3241 extent_dev, extent_flags, 3242 extent_gen, extent_mirror_num); 3243 scrub_free_csums(sctx); 3244 3245 if (ret) { 3246 scrub_parity_mark_sectors_error(sparity, extent_start, 3247 extent_size); 3248 break; 3249 } 3250 3251 cond_resched(); 3252 cur_logical += extent_size; 3253 } 3254 btrfs_release_path(path); 3255 return ret; 3256 } 3257 3258 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, 3259 struct map_lookup *map, 3260 struct btrfs_device *sdev, 3261 u64 logic_start, 3262 u64 logic_end) 3263 { 3264 struct btrfs_fs_info *fs_info = sctx->fs_info; 3265 struct btrfs_path *path; 3266 u64 cur_logical; 3267 int ret; 3268 struct scrub_parity *sparity; 3269 int nsectors; 3270 3271 path = btrfs_alloc_path(); 3272 if (!path) { 3273 spin_lock(&sctx->stat_lock); 3274 sctx->stat.malloc_errors++; 3275 spin_unlock(&sctx->stat_lock); 3276 return -ENOMEM; 3277 } 3278 path->search_commit_root = 1; 3279 path->skip_locking = 1; 3280 3281 ASSERT(map->stripe_len <= U32_MAX); 3282 nsectors = map->stripe_len >> fs_info->sectorsize_bits; 3283 ASSERT(nsectors <= BITS_PER_LONG); 3284 sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS); 3285 if (!sparity) { 3286 spin_lock(&sctx->stat_lock); 3287 sctx->stat.malloc_errors++; 3288 spin_unlock(&sctx->stat_lock); 3289 btrfs_free_path(path); 3290 return -ENOMEM; 3291 } 3292 3293 ASSERT(map->stripe_len <= U32_MAX); 3294 sparity->stripe_len = map->stripe_len; 3295 sparity->nsectors = nsectors; 3296 sparity->sctx = sctx; 3297 sparity->scrub_dev = sdev; 3298 sparity->logic_start = logic_start; 3299 sparity->logic_end = logic_end; 3300 refcount_set(&sparity->refs, 1); 3301 INIT_LIST_HEAD(&sparity->sectors_list); 3302 3303 ret = 0; 3304 for (cur_logical = logic_start; cur_logical < logic_end; 3305 cur_logical += map->stripe_len) { 3306 ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map, 3307 sdev, path, cur_logical); 3308 if (ret < 0) 3309 break; 3310 } 3311 3312 scrub_parity_put(sparity); 3313 scrub_submit(sctx); 3314 mutex_lock(&sctx->wr_lock); 3315 scrub_wr_submit(sctx); 3316 mutex_unlock(&sctx->wr_lock); 3317 3318 btrfs_free_path(path); 3319 return ret < 0 ? ret : 0; 3320 } 3321 3322 static void sync_replace_for_zoned(struct scrub_ctx *sctx) 3323 { 3324 if (!btrfs_is_zoned(sctx->fs_info)) 3325 return; 3326 3327 sctx->flush_all_writes = true; 3328 scrub_submit(sctx); 3329 mutex_lock(&sctx->wr_lock); 3330 scrub_wr_submit(sctx); 3331 mutex_unlock(&sctx->wr_lock); 3332 3333 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); 3334 } 3335 3336 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical, 3337 u64 physical, u64 physical_end) 3338 { 3339 struct btrfs_fs_info *fs_info = sctx->fs_info; 3340 int ret = 0; 3341 3342 if (!btrfs_is_zoned(fs_info)) 3343 return 0; 3344 3345 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); 3346 3347 mutex_lock(&sctx->wr_lock); 3348 if (sctx->write_pointer < physical_end) { 3349 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, 3350 physical, 3351 sctx->write_pointer); 3352 if (ret) 3353 btrfs_err(fs_info, 3354 "zoned: failed to recover write pointer"); 3355 } 3356 mutex_unlock(&sctx->wr_lock); 3357 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); 3358 3359 return ret; 3360 } 3361 3362 /* 3363 * Scrub one range which can only has simple mirror based profile. 3364 * (Including all range in SINGLE/DUP/RAID1/RAID1C*, and each stripe in 3365 * RAID0/RAID10). 3366 * 3367 * Since we may need to handle a subset of block group, we need @logical_start 3368 * and @logical_length parameter. 3369 */ 3370 static int scrub_simple_mirror(struct scrub_ctx *sctx, 3371 struct btrfs_root *extent_root, 3372 struct btrfs_root *csum_root, 3373 struct btrfs_block_group *bg, 3374 struct map_lookup *map, 3375 u64 logical_start, u64 logical_length, 3376 struct btrfs_device *device, 3377 u64 physical, int mirror_num) 3378 { 3379 struct btrfs_fs_info *fs_info = sctx->fs_info; 3380 const u64 logical_end = logical_start + logical_length; 3381 /* An artificial limit, inherit from old scrub behavior */ 3382 const u32 max_length = SZ_64K; 3383 struct btrfs_path path = { 0 }; 3384 u64 cur_logical = logical_start; 3385 int ret; 3386 3387 /* The range must be inside the bg */ 3388 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); 3389 3390 path.search_commit_root = 1; 3391 path.skip_locking = 1; 3392 /* Go through each extent items inside the logical range */ 3393 while (cur_logical < logical_end) { 3394 u64 extent_start; 3395 u64 extent_len; 3396 u64 extent_flags; 3397 u64 extent_gen; 3398 u64 scrub_len; 3399 3400 /* Canceled? */ 3401 if (atomic_read(&fs_info->scrub_cancel_req) || 3402 atomic_read(&sctx->cancel_req)) { 3403 ret = -ECANCELED; 3404 break; 3405 } 3406 /* Paused? */ 3407 if (atomic_read(&fs_info->scrub_pause_req)) { 3408 /* Push queued extents */ 3409 sctx->flush_all_writes = true; 3410 scrub_submit(sctx); 3411 mutex_lock(&sctx->wr_lock); 3412 scrub_wr_submit(sctx); 3413 mutex_unlock(&sctx->wr_lock); 3414 wait_event(sctx->list_wait, 3415 atomic_read(&sctx->bios_in_flight) == 0); 3416 sctx->flush_all_writes = false; 3417 scrub_blocked_if_needed(fs_info); 3418 } 3419 /* Block group removed? */ 3420 spin_lock(&bg->lock); 3421 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) { 3422 spin_unlock(&bg->lock); 3423 ret = 0; 3424 break; 3425 } 3426 spin_unlock(&bg->lock); 3427 3428 ret = find_first_extent_item(extent_root, &path, cur_logical, 3429 logical_end - cur_logical); 3430 if (ret > 0) { 3431 /* No more extent, just update the accounting */ 3432 sctx->stat.last_physical = physical + logical_length; 3433 ret = 0; 3434 break; 3435 } 3436 if (ret < 0) 3437 break; 3438 get_extent_info(&path, &extent_start, &extent_len, 3439 &extent_flags, &extent_gen); 3440 /* Skip hole range which doesn't have any extent */ 3441 cur_logical = max(extent_start, cur_logical); 3442 3443 /* 3444 * Scrub len has three limits: 3445 * - Extent size limit 3446 * - Scrub range limit 3447 * This is especially imporatant for RAID0/RAID10 to reuse 3448 * this function 3449 * - Max scrub size limit 3450 */ 3451 scrub_len = min(min(extent_start + extent_len, 3452 logical_end), cur_logical + max_length) - 3453 cur_logical; 3454 3455 if (extent_flags & BTRFS_EXTENT_FLAG_DATA) { 3456 ret = btrfs_lookup_csums_range(csum_root, cur_logical, 3457 cur_logical + scrub_len - 1, 3458 &sctx->csum_list, 1, false); 3459 if (ret) 3460 break; 3461 } 3462 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && 3463 does_range_cross_boundary(extent_start, extent_len, 3464 logical_start, logical_length)) { 3465 btrfs_err(fs_info, 3466 "scrub: tree block %llu spanning boundaries, ignored. boundary=[%llu, %llu)", 3467 extent_start, logical_start, logical_end); 3468 spin_lock(&sctx->stat_lock); 3469 sctx->stat.uncorrectable_errors++; 3470 spin_unlock(&sctx->stat_lock); 3471 cur_logical += scrub_len; 3472 continue; 3473 } 3474 ret = scrub_extent(sctx, map, cur_logical, scrub_len, 3475 cur_logical - logical_start + physical, 3476 device, extent_flags, extent_gen, 3477 mirror_num); 3478 scrub_free_csums(sctx); 3479 if (ret) 3480 break; 3481 if (sctx->is_dev_replace) 3482 sync_replace_for_zoned(sctx); 3483 cur_logical += scrub_len; 3484 /* Don't hold CPU for too long time */ 3485 cond_resched(); 3486 } 3487 btrfs_release_path(&path); 3488 return ret; 3489 } 3490 3491 /* Calculate the full stripe length for simple stripe based profiles */ 3492 static u64 simple_stripe_full_stripe_len(const struct map_lookup *map) 3493 { 3494 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | 3495 BTRFS_BLOCK_GROUP_RAID10)); 3496 3497 return map->num_stripes / map->sub_stripes * map->stripe_len; 3498 } 3499 3500 /* Get the logical bytenr for the stripe */ 3501 static u64 simple_stripe_get_logical(struct map_lookup *map, 3502 struct btrfs_block_group *bg, 3503 int stripe_index) 3504 { 3505 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | 3506 BTRFS_BLOCK_GROUP_RAID10)); 3507 ASSERT(stripe_index < map->num_stripes); 3508 3509 /* 3510 * (stripe_index / sub_stripes) gives how many data stripes we need to 3511 * skip. 3512 */ 3513 return (stripe_index / map->sub_stripes) * map->stripe_len + bg->start; 3514 } 3515 3516 /* Get the mirror number for the stripe */ 3517 static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index) 3518 { 3519 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | 3520 BTRFS_BLOCK_GROUP_RAID10)); 3521 ASSERT(stripe_index < map->num_stripes); 3522 3523 /* For RAID0, it's fixed to 1, for RAID10 it's 0,1,0,1... */ 3524 return stripe_index % map->sub_stripes + 1; 3525 } 3526 3527 static int scrub_simple_stripe(struct scrub_ctx *sctx, 3528 struct btrfs_root *extent_root, 3529 struct btrfs_root *csum_root, 3530 struct btrfs_block_group *bg, 3531 struct map_lookup *map, 3532 struct btrfs_device *device, 3533 int stripe_index) 3534 { 3535 const u64 logical_increment = simple_stripe_full_stripe_len(map); 3536 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index); 3537 const u64 orig_physical = map->stripes[stripe_index].physical; 3538 const int mirror_num = simple_stripe_mirror_num(map, stripe_index); 3539 u64 cur_logical = orig_logical; 3540 u64 cur_physical = orig_physical; 3541 int ret = 0; 3542 3543 while (cur_logical < bg->start + bg->length) { 3544 /* 3545 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is 3546 * just RAID1, so we can reuse scrub_simple_mirror() to scrub 3547 * this stripe. 3548 */ 3549 ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map, 3550 cur_logical, map->stripe_len, device, 3551 cur_physical, mirror_num); 3552 if (ret) 3553 return ret; 3554 /* Skip to next stripe which belongs to the target device */ 3555 cur_logical += logical_increment; 3556 /* For physical offset, we just go to next stripe */ 3557 cur_physical += map->stripe_len; 3558 } 3559 return ret; 3560 } 3561 3562 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, 3563 struct btrfs_block_group *bg, 3564 struct extent_map *em, 3565 struct btrfs_device *scrub_dev, 3566 int stripe_index) 3567 { 3568 struct btrfs_path *path; 3569 struct btrfs_fs_info *fs_info = sctx->fs_info; 3570 struct btrfs_root *root; 3571 struct btrfs_root *csum_root; 3572 struct blk_plug plug; 3573 struct map_lookup *map = em->map_lookup; 3574 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; 3575 const u64 chunk_logical = bg->start; 3576 int ret; 3577 u64 physical = map->stripes[stripe_index].physical; 3578 const u64 dev_stripe_len = btrfs_calc_stripe_length(em); 3579 const u64 physical_end = physical + dev_stripe_len; 3580 u64 logical; 3581 u64 logic_end; 3582 /* The logical increment after finishing one stripe */ 3583 u64 increment; 3584 /* Offset inside the chunk */ 3585 u64 offset; 3586 u64 stripe_logical; 3587 u64 stripe_end; 3588 int stop_loop = 0; 3589 3590 path = btrfs_alloc_path(); 3591 if (!path) 3592 return -ENOMEM; 3593 3594 /* 3595 * work on commit root. The related disk blocks are static as 3596 * long as COW is applied. This means, it is save to rewrite 3597 * them to repair disk errors without any race conditions 3598 */ 3599 path->search_commit_root = 1; 3600 path->skip_locking = 1; 3601 path->reada = READA_FORWARD; 3602 3603 wait_event(sctx->list_wait, 3604 atomic_read(&sctx->bios_in_flight) == 0); 3605 scrub_blocked_if_needed(fs_info); 3606 3607 root = btrfs_extent_root(fs_info, bg->start); 3608 csum_root = btrfs_csum_root(fs_info, bg->start); 3609 3610 /* 3611 * collect all data csums for the stripe to avoid seeking during 3612 * the scrub. This might currently (crc32) end up to be about 1MB 3613 */ 3614 blk_start_plug(&plug); 3615 3616 if (sctx->is_dev_replace && 3617 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { 3618 mutex_lock(&sctx->wr_lock); 3619 sctx->write_pointer = physical; 3620 mutex_unlock(&sctx->wr_lock); 3621 sctx->flush_all_writes = true; 3622 } 3623 3624 /* 3625 * There used to be a big double loop to handle all profiles using the 3626 * same routine, which grows larger and more gross over time. 3627 * 3628 * So here we handle each profile differently, so simpler profiles 3629 * have simpler scrubbing function. 3630 */ 3631 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 | 3632 BTRFS_BLOCK_GROUP_RAID56_MASK))) { 3633 /* 3634 * Above check rules out all complex profile, the remaining 3635 * profiles are SINGLE|DUP|RAID1|RAID1C*, which is simple 3636 * mirrored duplication without stripe. 3637 * 3638 * Only @physical and @mirror_num needs to calculated using 3639 * @stripe_index. 3640 */ 3641 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map, 3642 bg->start, bg->length, scrub_dev, 3643 map->stripes[stripe_index].physical, 3644 stripe_index + 1); 3645 offset = 0; 3646 goto out; 3647 } 3648 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { 3649 ret = scrub_simple_stripe(sctx, root, csum_root, bg, map, 3650 scrub_dev, stripe_index); 3651 offset = map->stripe_len * (stripe_index / map->sub_stripes); 3652 goto out; 3653 } 3654 3655 /* Only RAID56 goes through the old code */ 3656 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); 3657 ret = 0; 3658 3659 /* Calculate the logical end of the stripe */ 3660 get_raid56_logic_offset(physical_end, stripe_index, 3661 map, &logic_end, NULL); 3662 logic_end += chunk_logical; 3663 3664 /* Initialize @offset in case we need to go to out: label */ 3665 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL); 3666 increment = map->stripe_len * nr_data_stripes(map); 3667 3668 /* 3669 * Due to the rotation, for RAID56 it's better to iterate each stripe 3670 * using their physical offset. 3671 */ 3672 while (physical < physical_end) { 3673 ret = get_raid56_logic_offset(physical, stripe_index, map, 3674 &logical, &stripe_logical); 3675 logical += chunk_logical; 3676 if (ret) { 3677 /* it is parity strip */ 3678 stripe_logical += chunk_logical; 3679 stripe_end = stripe_logical + increment; 3680 ret = scrub_raid56_parity(sctx, map, scrub_dev, 3681 stripe_logical, 3682 stripe_end); 3683 if (ret) 3684 goto out; 3685 goto next; 3686 } 3687 3688 /* 3689 * Now we're at a data stripe, scrub each extents in the range. 3690 * 3691 * At this stage, if we ignore the repair part, inside each data 3692 * stripe it is no different than SINGLE profile. 3693 * We can reuse scrub_simple_mirror() here, as the repair part 3694 * is still based on @mirror_num. 3695 */ 3696 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map, 3697 logical, map->stripe_len, 3698 scrub_dev, physical, 1); 3699 if (ret < 0) 3700 goto out; 3701 next: 3702 logical += increment; 3703 physical += map->stripe_len; 3704 spin_lock(&sctx->stat_lock); 3705 if (stop_loop) 3706 sctx->stat.last_physical = 3707 map->stripes[stripe_index].physical + dev_stripe_len; 3708 else 3709 sctx->stat.last_physical = physical; 3710 spin_unlock(&sctx->stat_lock); 3711 if (stop_loop) 3712 break; 3713 } 3714 out: 3715 /* push queued extents */ 3716 scrub_submit(sctx); 3717 mutex_lock(&sctx->wr_lock); 3718 scrub_wr_submit(sctx); 3719 mutex_unlock(&sctx->wr_lock); 3720 3721 blk_finish_plug(&plug); 3722 btrfs_free_path(path); 3723 3724 if (sctx->is_dev_replace && ret >= 0) { 3725 int ret2; 3726 3727 ret2 = sync_write_pointer_for_zoned(sctx, 3728 chunk_logical + offset, 3729 map->stripes[stripe_index].physical, 3730 physical_end); 3731 if (ret2) 3732 ret = ret2; 3733 } 3734 3735 return ret < 0 ? ret : 0; 3736 } 3737 3738 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, 3739 struct btrfs_block_group *bg, 3740 struct btrfs_device *scrub_dev, 3741 u64 dev_offset, 3742 u64 dev_extent_len) 3743 { 3744 struct btrfs_fs_info *fs_info = sctx->fs_info; 3745 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 3746 struct map_lookup *map; 3747 struct extent_map *em; 3748 int i; 3749 int ret = 0; 3750 3751 read_lock(&map_tree->lock); 3752 em = lookup_extent_mapping(map_tree, bg->start, bg->length); 3753 read_unlock(&map_tree->lock); 3754 3755 if (!em) { 3756 /* 3757 * Might have been an unused block group deleted by the cleaner 3758 * kthread or relocation. 3759 */ 3760 spin_lock(&bg->lock); 3761 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) 3762 ret = -EINVAL; 3763 spin_unlock(&bg->lock); 3764 3765 return ret; 3766 } 3767 if (em->start != bg->start) 3768 goto out; 3769 if (em->len < dev_extent_len) 3770 goto out; 3771 3772 map = em->map_lookup; 3773 for (i = 0; i < map->num_stripes; ++i) { 3774 if (map->stripes[i].dev->bdev == scrub_dev->bdev && 3775 map->stripes[i].physical == dev_offset) { 3776 ret = scrub_stripe(sctx, bg, em, scrub_dev, i); 3777 if (ret) 3778 goto out; 3779 } 3780 } 3781 out: 3782 free_extent_map(em); 3783 3784 return ret; 3785 } 3786 3787 static int finish_extent_writes_for_zoned(struct btrfs_root *root, 3788 struct btrfs_block_group *cache) 3789 { 3790 struct btrfs_fs_info *fs_info = cache->fs_info; 3791 struct btrfs_trans_handle *trans; 3792 3793 if (!btrfs_is_zoned(fs_info)) 3794 return 0; 3795 3796 btrfs_wait_block_group_reservations(cache); 3797 btrfs_wait_nocow_writers(cache); 3798 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length); 3799 3800 trans = btrfs_join_transaction(root); 3801 if (IS_ERR(trans)) 3802 return PTR_ERR(trans); 3803 return btrfs_commit_transaction(trans); 3804 } 3805 3806 static noinline_for_stack 3807 int scrub_enumerate_chunks(struct scrub_ctx *sctx, 3808 struct btrfs_device *scrub_dev, u64 start, u64 end) 3809 { 3810 struct btrfs_dev_extent *dev_extent = NULL; 3811 struct btrfs_path *path; 3812 struct btrfs_fs_info *fs_info = sctx->fs_info; 3813 struct btrfs_root *root = fs_info->dev_root; 3814 u64 chunk_offset; 3815 int ret = 0; 3816 int ro_set; 3817 int slot; 3818 struct extent_buffer *l; 3819 struct btrfs_key key; 3820 struct btrfs_key found_key; 3821 struct btrfs_block_group *cache; 3822 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 3823 3824 path = btrfs_alloc_path(); 3825 if (!path) 3826 return -ENOMEM; 3827 3828 path->reada = READA_FORWARD; 3829 path->search_commit_root = 1; 3830 path->skip_locking = 1; 3831 3832 key.objectid = scrub_dev->devid; 3833 key.offset = 0ull; 3834 key.type = BTRFS_DEV_EXTENT_KEY; 3835 3836 while (1) { 3837 u64 dev_extent_len; 3838 3839 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3840 if (ret < 0) 3841 break; 3842 if (ret > 0) { 3843 if (path->slots[0] >= 3844 btrfs_header_nritems(path->nodes[0])) { 3845 ret = btrfs_next_leaf(root, path); 3846 if (ret < 0) 3847 break; 3848 if (ret > 0) { 3849 ret = 0; 3850 break; 3851 } 3852 } else { 3853 ret = 0; 3854 } 3855 } 3856 3857 l = path->nodes[0]; 3858 slot = path->slots[0]; 3859 3860 btrfs_item_key_to_cpu(l, &found_key, slot); 3861 3862 if (found_key.objectid != scrub_dev->devid) 3863 break; 3864 3865 if (found_key.type != BTRFS_DEV_EXTENT_KEY) 3866 break; 3867 3868 if (found_key.offset >= end) 3869 break; 3870 3871 if (found_key.offset < key.offset) 3872 break; 3873 3874 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 3875 dev_extent_len = btrfs_dev_extent_length(l, dev_extent); 3876 3877 if (found_key.offset + dev_extent_len <= start) 3878 goto skip; 3879 3880 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 3881 3882 /* 3883 * get a reference on the corresponding block group to prevent 3884 * the chunk from going away while we scrub it 3885 */ 3886 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 3887 3888 /* some chunks are removed but not committed to disk yet, 3889 * continue scrubbing */ 3890 if (!cache) 3891 goto skip; 3892 3893 ASSERT(cache->start <= chunk_offset); 3894 /* 3895 * We are using the commit root to search for device extents, so 3896 * that means we could have found a device extent item from a 3897 * block group that was deleted in the current transaction. The 3898 * logical start offset of the deleted block group, stored at 3899 * @chunk_offset, might be part of the logical address range of 3900 * a new block group (which uses different physical extents). 3901 * In this case btrfs_lookup_block_group() has returned the new 3902 * block group, and its start address is less than @chunk_offset. 3903 * 3904 * We skip such new block groups, because it's pointless to 3905 * process them, as we won't find their extents because we search 3906 * for them using the commit root of the extent tree. For a device 3907 * replace it's also fine to skip it, we won't miss copying them 3908 * to the target device because we have the write duplication 3909 * setup through the regular write path (by btrfs_map_block()), 3910 * and we have committed a transaction when we started the device 3911 * replace, right after setting up the device replace state. 3912 */ 3913 if (cache->start < chunk_offset) { 3914 btrfs_put_block_group(cache); 3915 goto skip; 3916 } 3917 3918 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { 3919 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) { 3920 spin_unlock(&cache->lock); 3921 btrfs_put_block_group(cache); 3922 goto skip; 3923 } 3924 } 3925 3926 /* 3927 * Make sure that while we are scrubbing the corresponding block 3928 * group doesn't get its logical address and its device extents 3929 * reused for another block group, which can possibly be of a 3930 * different type and different profile. We do this to prevent 3931 * false error detections and crashes due to bogus attempts to 3932 * repair extents. 3933 */ 3934 spin_lock(&cache->lock); 3935 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) { 3936 spin_unlock(&cache->lock); 3937 btrfs_put_block_group(cache); 3938 goto skip; 3939 } 3940 btrfs_freeze_block_group(cache); 3941 spin_unlock(&cache->lock); 3942 3943 /* 3944 * we need call btrfs_inc_block_group_ro() with scrubs_paused, 3945 * to avoid deadlock caused by: 3946 * btrfs_inc_block_group_ro() 3947 * -> btrfs_wait_for_commit() 3948 * -> btrfs_commit_transaction() 3949 * -> btrfs_scrub_pause() 3950 */ 3951 scrub_pause_on(fs_info); 3952 3953 /* 3954 * Don't do chunk preallocation for scrub. 3955 * 3956 * This is especially important for SYSTEM bgs, or we can hit 3957 * -EFBIG from btrfs_finish_chunk_alloc() like: 3958 * 1. The only SYSTEM bg is marked RO. 3959 * Since SYSTEM bg is small, that's pretty common. 3960 * 2. New SYSTEM bg will be allocated 3961 * Due to regular version will allocate new chunk. 3962 * 3. New SYSTEM bg is empty and will get cleaned up 3963 * Before cleanup really happens, it's marked RO again. 3964 * 4. Empty SYSTEM bg get scrubbed 3965 * We go back to 2. 3966 * 3967 * This can easily boost the amount of SYSTEM chunks if cleaner 3968 * thread can't be triggered fast enough, and use up all space 3969 * of btrfs_super_block::sys_chunk_array 3970 * 3971 * While for dev replace, we need to try our best to mark block 3972 * group RO, to prevent race between: 3973 * - Write duplication 3974 * Contains latest data 3975 * - Scrub copy 3976 * Contains data from commit tree 3977 * 3978 * If target block group is not marked RO, nocow writes can 3979 * be overwritten by scrub copy, causing data corruption. 3980 * So for dev-replace, it's not allowed to continue if a block 3981 * group is not RO. 3982 */ 3983 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); 3984 if (!ret && sctx->is_dev_replace) { 3985 ret = finish_extent_writes_for_zoned(root, cache); 3986 if (ret) { 3987 btrfs_dec_block_group_ro(cache); 3988 scrub_pause_off(fs_info); 3989 btrfs_put_block_group(cache); 3990 break; 3991 } 3992 } 3993 3994 if (ret == 0) { 3995 ro_set = 1; 3996 } else if (ret == -ENOSPC && !sctx->is_dev_replace) { 3997 /* 3998 * btrfs_inc_block_group_ro return -ENOSPC when it 3999 * failed in creating new chunk for metadata. 4000 * It is not a problem for scrub, because 4001 * metadata are always cowed, and our scrub paused 4002 * commit_transactions. 4003 */ 4004 ro_set = 0; 4005 } else if (ret == -ETXTBSY) { 4006 btrfs_warn(fs_info, 4007 "skipping scrub of block group %llu due to active swapfile", 4008 cache->start); 4009 scrub_pause_off(fs_info); 4010 ret = 0; 4011 goto skip_unfreeze; 4012 } else { 4013 btrfs_warn(fs_info, 4014 "failed setting block group ro: %d", ret); 4015 btrfs_unfreeze_block_group(cache); 4016 btrfs_put_block_group(cache); 4017 scrub_pause_off(fs_info); 4018 break; 4019 } 4020 4021 /* 4022 * Now the target block is marked RO, wait for nocow writes to 4023 * finish before dev-replace. 4024 * COW is fine, as COW never overwrites extents in commit tree. 4025 */ 4026 if (sctx->is_dev_replace) { 4027 btrfs_wait_nocow_writers(cache); 4028 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, 4029 cache->length); 4030 } 4031 4032 scrub_pause_off(fs_info); 4033 down_write(&dev_replace->rwsem); 4034 dev_replace->cursor_right = found_key.offset + dev_extent_len; 4035 dev_replace->cursor_left = found_key.offset; 4036 dev_replace->item_needs_writeback = 1; 4037 up_write(&dev_replace->rwsem); 4038 4039 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset, 4040 dev_extent_len); 4041 4042 /* 4043 * flush, submit all pending read and write bios, afterwards 4044 * wait for them. 4045 * Note that in the dev replace case, a read request causes 4046 * write requests that are submitted in the read completion 4047 * worker. Therefore in the current situation, it is required 4048 * that all write requests are flushed, so that all read and 4049 * write requests are really completed when bios_in_flight 4050 * changes to 0. 4051 */ 4052 sctx->flush_all_writes = true; 4053 scrub_submit(sctx); 4054 mutex_lock(&sctx->wr_lock); 4055 scrub_wr_submit(sctx); 4056 mutex_unlock(&sctx->wr_lock); 4057 4058 wait_event(sctx->list_wait, 4059 atomic_read(&sctx->bios_in_flight) == 0); 4060 4061 scrub_pause_on(fs_info); 4062 4063 /* 4064 * must be called before we decrease @scrub_paused. 4065 * make sure we don't block transaction commit while 4066 * we are waiting pending workers finished. 4067 */ 4068 wait_event(sctx->list_wait, 4069 atomic_read(&sctx->workers_pending) == 0); 4070 sctx->flush_all_writes = false; 4071 4072 scrub_pause_off(fs_info); 4073 4074 if (sctx->is_dev_replace && 4075 !btrfs_finish_block_group_to_copy(dev_replace->srcdev, 4076 cache, found_key.offset)) 4077 ro_set = 0; 4078 4079 down_write(&dev_replace->rwsem); 4080 dev_replace->cursor_left = dev_replace->cursor_right; 4081 dev_replace->item_needs_writeback = 1; 4082 up_write(&dev_replace->rwsem); 4083 4084 if (ro_set) 4085 btrfs_dec_block_group_ro(cache); 4086 4087 /* 4088 * We might have prevented the cleaner kthread from deleting 4089 * this block group if it was already unused because we raced 4090 * and set it to RO mode first. So add it back to the unused 4091 * list, otherwise it might not ever be deleted unless a manual 4092 * balance is triggered or it becomes used and unused again. 4093 */ 4094 spin_lock(&cache->lock); 4095 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) && 4096 !cache->ro && cache->reserved == 0 && cache->used == 0) { 4097 spin_unlock(&cache->lock); 4098 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) 4099 btrfs_discard_queue_work(&fs_info->discard_ctl, 4100 cache); 4101 else 4102 btrfs_mark_bg_unused(cache); 4103 } else { 4104 spin_unlock(&cache->lock); 4105 } 4106 skip_unfreeze: 4107 btrfs_unfreeze_block_group(cache); 4108 btrfs_put_block_group(cache); 4109 if (ret) 4110 break; 4111 if (sctx->is_dev_replace && 4112 atomic64_read(&dev_replace->num_write_errors) > 0) { 4113 ret = -EIO; 4114 break; 4115 } 4116 if (sctx->stat.malloc_errors > 0) { 4117 ret = -ENOMEM; 4118 break; 4119 } 4120 skip: 4121 key.offset = found_key.offset + dev_extent_len; 4122 btrfs_release_path(path); 4123 } 4124 4125 btrfs_free_path(path); 4126 4127 return ret; 4128 } 4129 4130 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, 4131 struct btrfs_device *scrub_dev) 4132 { 4133 int i; 4134 u64 bytenr; 4135 u64 gen; 4136 int ret; 4137 struct btrfs_fs_info *fs_info = sctx->fs_info; 4138 4139 if (BTRFS_FS_ERROR(fs_info)) 4140 return -EROFS; 4141 4142 /* Seed devices of a new filesystem has their own generation. */ 4143 if (scrub_dev->fs_devices != fs_info->fs_devices) 4144 gen = scrub_dev->generation; 4145 else 4146 gen = fs_info->last_trans_committed; 4147 4148 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 4149 bytenr = btrfs_sb_offset(i); 4150 if (bytenr + BTRFS_SUPER_INFO_SIZE > 4151 scrub_dev->commit_total_bytes) 4152 break; 4153 if (!btrfs_check_super_location(scrub_dev, bytenr)) 4154 continue; 4155 4156 ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, 4157 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i, 4158 NULL, bytenr); 4159 if (ret) 4160 return ret; 4161 } 4162 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); 4163 4164 return 0; 4165 } 4166 4167 static void scrub_workers_put(struct btrfs_fs_info *fs_info) 4168 { 4169 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, 4170 &fs_info->scrub_lock)) { 4171 struct workqueue_struct *scrub_workers = fs_info->scrub_workers; 4172 struct workqueue_struct *scrub_wr_comp = 4173 fs_info->scrub_wr_completion_workers; 4174 struct workqueue_struct *scrub_parity = 4175 fs_info->scrub_parity_workers; 4176 4177 fs_info->scrub_workers = NULL; 4178 fs_info->scrub_wr_completion_workers = NULL; 4179 fs_info->scrub_parity_workers = NULL; 4180 mutex_unlock(&fs_info->scrub_lock); 4181 4182 if (scrub_workers) 4183 destroy_workqueue(scrub_workers); 4184 if (scrub_wr_comp) 4185 destroy_workqueue(scrub_wr_comp); 4186 if (scrub_parity) 4187 destroy_workqueue(scrub_parity); 4188 } 4189 } 4190 4191 /* 4192 * get a reference count on fs_info->scrub_workers. start worker if necessary 4193 */ 4194 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, 4195 int is_dev_replace) 4196 { 4197 struct workqueue_struct *scrub_workers = NULL; 4198 struct workqueue_struct *scrub_wr_comp = NULL; 4199 struct workqueue_struct *scrub_parity = NULL; 4200 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; 4201 int max_active = fs_info->thread_pool_size; 4202 int ret = -ENOMEM; 4203 4204 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) 4205 return 0; 4206 4207 scrub_workers = alloc_workqueue("btrfs-scrub", flags, 4208 is_dev_replace ? 1 : max_active); 4209 if (!scrub_workers) 4210 goto fail_scrub_workers; 4211 4212 scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active); 4213 if (!scrub_wr_comp) 4214 goto fail_scrub_wr_completion_workers; 4215 4216 scrub_parity = alloc_workqueue("btrfs-scrubparity", flags, max_active); 4217 if (!scrub_parity) 4218 goto fail_scrub_parity_workers; 4219 4220 mutex_lock(&fs_info->scrub_lock); 4221 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { 4222 ASSERT(fs_info->scrub_workers == NULL && 4223 fs_info->scrub_wr_completion_workers == NULL && 4224 fs_info->scrub_parity_workers == NULL); 4225 fs_info->scrub_workers = scrub_workers; 4226 fs_info->scrub_wr_completion_workers = scrub_wr_comp; 4227 fs_info->scrub_parity_workers = scrub_parity; 4228 refcount_set(&fs_info->scrub_workers_refcnt, 1); 4229 mutex_unlock(&fs_info->scrub_lock); 4230 return 0; 4231 } 4232 /* Other thread raced in and created the workers for us */ 4233 refcount_inc(&fs_info->scrub_workers_refcnt); 4234 mutex_unlock(&fs_info->scrub_lock); 4235 4236 ret = 0; 4237 destroy_workqueue(scrub_parity); 4238 fail_scrub_parity_workers: 4239 destroy_workqueue(scrub_wr_comp); 4240 fail_scrub_wr_completion_workers: 4241 destroy_workqueue(scrub_workers); 4242 fail_scrub_workers: 4243 return ret; 4244 } 4245 4246 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, 4247 u64 end, struct btrfs_scrub_progress *progress, 4248 int readonly, int is_dev_replace) 4249 { 4250 struct btrfs_dev_lookup_args args = { .devid = devid }; 4251 struct scrub_ctx *sctx; 4252 int ret; 4253 struct btrfs_device *dev; 4254 unsigned int nofs_flag; 4255 bool need_commit = false; 4256 4257 if (btrfs_fs_closing(fs_info)) 4258 return -EAGAIN; 4259 4260 /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */ 4261 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN); 4262 4263 /* 4264 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible 4265 * value (max nodesize / min sectorsize), thus nodesize should always 4266 * be fine. 4267 */ 4268 ASSERT(fs_info->nodesize <= 4269 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits); 4270 4271 /* Allocate outside of device_list_mutex */ 4272 sctx = scrub_setup_ctx(fs_info, is_dev_replace); 4273 if (IS_ERR(sctx)) 4274 return PTR_ERR(sctx); 4275 4276 ret = scrub_workers_get(fs_info, is_dev_replace); 4277 if (ret) 4278 goto out_free_ctx; 4279 4280 mutex_lock(&fs_info->fs_devices->device_list_mutex); 4281 dev = btrfs_find_device(fs_info->fs_devices, &args); 4282 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && 4283 !is_dev_replace)) { 4284 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 4285 ret = -ENODEV; 4286 goto out; 4287 } 4288 4289 if (!is_dev_replace && !readonly && 4290 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { 4291 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 4292 btrfs_err_in_rcu(fs_info, 4293 "scrub on devid %llu: filesystem on %s is not writable", 4294 devid, rcu_str_deref(dev->name)); 4295 ret = -EROFS; 4296 goto out; 4297 } 4298 4299 mutex_lock(&fs_info->scrub_lock); 4300 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || 4301 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { 4302 mutex_unlock(&fs_info->scrub_lock); 4303 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 4304 ret = -EIO; 4305 goto out; 4306 } 4307 4308 down_read(&fs_info->dev_replace.rwsem); 4309 if (dev->scrub_ctx || 4310 (!is_dev_replace && 4311 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { 4312 up_read(&fs_info->dev_replace.rwsem); 4313 mutex_unlock(&fs_info->scrub_lock); 4314 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 4315 ret = -EINPROGRESS; 4316 goto out; 4317 } 4318 up_read(&fs_info->dev_replace.rwsem); 4319 4320 sctx->readonly = readonly; 4321 dev->scrub_ctx = sctx; 4322 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 4323 4324 /* 4325 * checking @scrub_pause_req here, we can avoid 4326 * race between committing transaction and scrubbing. 4327 */ 4328 __scrub_blocked_if_needed(fs_info); 4329 atomic_inc(&fs_info->scrubs_running); 4330 mutex_unlock(&fs_info->scrub_lock); 4331 4332 /* 4333 * In order to avoid deadlock with reclaim when there is a transaction 4334 * trying to pause scrub, make sure we use GFP_NOFS for all the 4335 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity() 4336 * invoked by our callees. The pausing request is done when the 4337 * transaction commit starts, and it blocks the transaction until scrub 4338 * is paused (done at specific points at scrub_stripe() or right above 4339 * before incrementing fs_info->scrubs_running). 4340 */ 4341 nofs_flag = memalloc_nofs_save(); 4342 if (!is_dev_replace) { 4343 u64 old_super_errors; 4344 4345 spin_lock(&sctx->stat_lock); 4346 old_super_errors = sctx->stat.super_errors; 4347 spin_unlock(&sctx->stat_lock); 4348 4349 btrfs_info(fs_info, "scrub: started on devid %llu", devid); 4350 /* 4351 * by holding device list mutex, we can 4352 * kick off writing super in log tree sync. 4353 */ 4354 mutex_lock(&fs_info->fs_devices->device_list_mutex); 4355 ret = scrub_supers(sctx, dev); 4356 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 4357 4358 spin_lock(&sctx->stat_lock); 4359 /* 4360 * Super block errors found, but we can not commit transaction 4361 * at current context, since btrfs_commit_transaction() needs 4362 * to pause the current running scrub (hold by ourselves). 4363 */ 4364 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) 4365 need_commit = true; 4366 spin_unlock(&sctx->stat_lock); 4367 } 4368 4369 if (!ret) 4370 ret = scrub_enumerate_chunks(sctx, dev, start, end); 4371 memalloc_nofs_restore(nofs_flag); 4372 4373 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); 4374 atomic_dec(&fs_info->scrubs_running); 4375 wake_up(&fs_info->scrub_pause_wait); 4376 4377 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); 4378 4379 if (progress) 4380 memcpy(progress, &sctx->stat, sizeof(*progress)); 4381 4382 if (!is_dev_replace) 4383 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d", 4384 ret ? "not finished" : "finished", devid, ret); 4385 4386 mutex_lock(&fs_info->scrub_lock); 4387 dev->scrub_ctx = NULL; 4388 mutex_unlock(&fs_info->scrub_lock); 4389 4390 scrub_workers_put(fs_info); 4391 scrub_put_ctx(sctx); 4392 4393 /* 4394 * We found some super block errors before, now try to force a 4395 * transaction commit, as scrub has finished. 4396 */ 4397 if (need_commit) { 4398 struct btrfs_trans_handle *trans; 4399 4400 trans = btrfs_start_transaction(fs_info->tree_root, 0); 4401 if (IS_ERR(trans)) { 4402 ret = PTR_ERR(trans); 4403 btrfs_err(fs_info, 4404 "scrub: failed to start transaction to fix super block errors: %d", ret); 4405 return ret; 4406 } 4407 ret = btrfs_commit_transaction(trans); 4408 if (ret < 0) 4409 btrfs_err(fs_info, 4410 "scrub: failed to commit transaction to fix super block errors: %d", ret); 4411 } 4412 return ret; 4413 out: 4414 scrub_workers_put(fs_info); 4415 out_free_ctx: 4416 scrub_free_ctx(sctx); 4417 4418 return ret; 4419 } 4420 4421 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) 4422 { 4423 mutex_lock(&fs_info->scrub_lock); 4424 atomic_inc(&fs_info->scrub_pause_req); 4425 while (atomic_read(&fs_info->scrubs_paused) != 4426 atomic_read(&fs_info->scrubs_running)) { 4427 mutex_unlock(&fs_info->scrub_lock); 4428 wait_event(fs_info->scrub_pause_wait, 4429 atomic_read(&fs_info->scrubs_paused) == 4430 atomic_read(&fs_info->scrubs_running)); 4431 mutex_lock(&fs_info->scrub_lock); 4432 } 4433 mutex_unlock(&fs_info->scrub_lock); 4434 } 4435 4436 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) 4437 { 4438 atomic_dec(&fs_info->scrub_pause_req); 4439 wake_up(&fs_info->scrub_pause_wait); 4440 } 4441 4442 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) 4443 { 4444 mutex_lock(&fs_info->scrub_lock); 4445 if (!atomic_read(&fs_info->scrubs_running)) { 4446 mutex_unlock(&fs_info->scrub_lock); 4447 return -ENOTCONN; 4448 } 4449 4450 atomic_inc(&fs_info->scrub_cancel_req); 4451 while (atomic_read(&fs_info->scrubs_running)) { 4452 mutex_unlock(&fs_info->scrub_lock); 4453 wait_event(fs_info->scrub_pause_wait, 4454 atomic_read(&fs_info->scrubs_running) == 0); 4455 mutex_lock(&fs_info->scrub_lock); 4456 } 4457 atomic_dec(&fs_info->scrub_cancel_req); 4458 mutex_unlock(&fs_info->scrub_lock); 4459 4460 return 0; 4461 } 4462 4463 int btrfs_scrub_cancel_dev(struct btrfs_device *dev) 4464 { 4465 struct btrfs_fs_info *fs_info = dev->fs_info; 4466 struct scrub_ctx *sctx; 4467 4468 mutex_lock(&fs_info->scrub_lock); 4469 sctx = dev->scrub_ctx; 4470 if (!sctx) { 4471 mutex_unlock(&fs_info->scrub_lock); 4472 return -ENOTCONN; 4473 } 4474 atomic_inc(&sctx->cancel_req); 4475 while (dev->scrub_ctx) { 4476 mutex_unlock(&fs_info->scrub_lock); 4477 wait_event(fs_info->scrub_pause_wait, 4478 dev->scrub_ctx == NULL); 4479 mutex_lock(&fs_info->scrub_lock); 4480 } 4481 mutex_unlock(&fs_info->scrub_lock); 4482 4483 return 0; 4484 } 4485 4486 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, 4487 struct btrfs_scrub_progress *progress) 4488 { 4489 struct btrfs_dev_lookup_args args = { .devid = devid }; 4490 struct btrfs_device *dev; 4491 struct scrub_ctx *sctx = NULL; 4492 4493 mutex_lock(&fs_info->fs_devices->device_list_mutex); 4494 dev = btrfs_find_device(fs_info->fs_devices, &args); 4495 if (dev) 4496 sctx = dev->scrub_ctx; 4497 if (sctx) 4498 memcpy(progress, &sctx->stat, sizeof(*progress)); 4499 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 4500 4501 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; 4502 } 4503 4504 static void scrub_find_good_copy(struct btrfs_fs_info *fs_info, 4505 u64 extent_logical, u32 extent_len, 4506 u64 *extent_physical, 4507 struct btrfs_device **extent_dev, 4508 int *extent_mirror_num) 4509 { 4510 u64 mapped_length; 4511 struct btrfs_io_context *bioc = NULL; 4512 int ret; 4513 4514 mapped_length = extent_len; 4515 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, 4516 &mapped_length, &bioc, 0); 4517 if (ret || !bioc || mapped_length < extent_len || 4518 !bioc->stripes[0].dev->bdev) { 4519 btrfs_put_bioc(bioc); 4520 return; 4521 } 4522 4523 *extent_physical = bioc->stripes[0].physical; 4524 *extent_mirror_num = bioc->mirror_num; 4525 *extent_dev = bioc->stripes[0].dev; 4526 btrfs_put_bioc(bioc); 4527 } 4528