// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "scrub.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following two values only influence the performance.
 *
 * The second one configures the number of parallel and outstanding I/O
 * operations. The first one configures an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_SECTORS_PER_BIO	32	/* 128KiB per bio for 4KiB pages */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for 4KiB pages */
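/*
 * Illustrative arithmetic for the two limits above (assuming 4KiB sectors):
 *
 *	SCRUB_SECTORS_PER_BIO * 4KiB = 32 * 4KiB   = 128KiB per bio
 *	SCRUB_BIOS_PER_SCTX * 128KiB = 64 * 128KiB = 8MiB that one scrub
 *	context can keep in flight against a single device
 */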
/*
 * The following value times the minimum sectorsize (4KiB) needs to be large
 * enough to match the largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)

#define SCRUB_MAX_PAGES			(DIV_ROUND_UP(BTRFS_MAX_METADATA_BLOCKSIZE, PAGE_SIZE))

/*
 * Maximum number of mirrors that can be available for all profiles counting
 * the target device of dev-replace as one. During an active device replace
 * procedure, the target device of the copy operation is a mirror for the
 * filesystem data as well that can be used to read data in order to repair
 * read errors on other disks.
 *
 * Current value is derived from RAID1C4 with 4 copies.
 */
#define BTRFS_MAX_MIRRORS (4 + 1)

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_io_context	*bioc;
	u64			map_length;
};

struct scrub_sector {
	struct scrub_block	*sblock;
	struct list_head	list;
	u64			flags;	/* extent flags */
	u64			generation;
	/* Offset in bytes to @sblock. */
	u32			offset;
	atomic_t		refs;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
	struct scrub_sector	*sectors[SCRUB_SECTORS_PER_BIO];
	int			sector_count;
	int			next_free;
	struct work_struct	work;
};

struct scrub_block {
	/*
	 * Each page will have its page::private used to record the logical
	 * bytenr.
	 */
	struct page		*pages[SCRUB_MAX_PAGES];
	struct scrub_sector	*sectors[SCRUB_MAX_SECTORS_PER_BLOCK];
	struct btrfs_device	*dev;
	/* Logical bytenr of the sblock */
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	/* Length of sblock in bytes */
	u32			len;
	int			sector_count;
	int			mirror_num;

	atomic_t		outstanding_sectors;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct work_struct	work;
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u32			stripe_len;

	refcount_t		refs;

	struct list_head	sectors_list;

	/* Work of parity check and repair */
	struct work_struct	work;

	/* Mark the parity blocks which have data */
	unsigned long		dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happen when
	 * reading or checking that data
	 */
	unsigned long		ebitmap;
};
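/*
 * Example of the bitmap semantics above (illustrative, assuming a 64KiB
 * stripe_len and 4KiB sectorsize, i.e. nsectors == 16): bit i of dbitmap and
 * ebitmap refers to the i-th sector of the parity stripe. A set bit in
 * dbitmap means sector i is covered by data; the same bit in ebitmap means
 * reading or verifying that sector failed and parity must repair it.
 */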
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			sectors_per_bio;

	/* State of IO submission throttling affecting the associated device */
	ktime_t			throttle_deadline;
	u64			throttle_sent;

	int			is_dev_replace;
	u64			write_pointer;

	struct scrub_bio	*wr_curr_bio;
	struct mutex		wr_lock;
	struct btrfs_device	*wr_tgtdev;
	bool			flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

#ifndef CONFIG_64BIT
/* This structure is for architectures whose (void *) is smaller than u64 */
struct scrub_page_private {
	u64 logical;
};
#endif

static int attach_scrub_page_private(struct page *page, u64 logical)
{
#ifdef CONFIG_64BIT
	attach_page_private(page, (void *)logical);
	return 0;
#else
	struct scrub_page_private *spp;

	spp = kmalloc(sizeof(*spp), GFP_KERNEL);
	if (!spp)
		return -ENOMEM;
	spp->logical = logical;
	attach_page_private(page, (void *)spp);
	return 0;
#endif
}

static void detach_scrub_page_private(struct page *page)
{
#ifdef CONFIG_64BIT
	detach_page_private(page);
	return;
#else
	struct scrub_page_private *spp;

	spp = detach_page_private(page);
	kfree(spp);
	return;
#endif
}
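/*
 * Reading the logical bytenr back from a page set up by
 * attach_scrub_page_private() would look roughly like this (a sketch for
 * clarity, no such helper exists in this file):
 *
 *	static u64 scrub_page_get_logical(struct page *page)
 *	{
 *	#ifdef CONFIG_64BIT
 *		return (u64)page_private(page);
 *	#else
 *		return ((struct scrub_page_private *)page_private(page))->logical;
 *	#endif
 *	}
 */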
static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx,
					     struct btrfs_device *dev,
					     u64 logical, u64 physical,
					     u64 physical_for_dev_replace,
					     int mirror_num)
{
	struct scrub_block *sblock;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock)
		return NULL;
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->logical = logical;
	sblock->physical = physical;
	sblock->physical_for_dev_replace = physical_for_dev_replace;
	sblock->dev = dev;
	sblock->mirror_num = mirror_num;
	sblock->no_io_error_seen = 1;
	/*
	 * Scrub_block::pages will be allocated at alloc_scrub_sector() when
	 * the corresponding page is not allocated.
	 */
	return sblock;
}

/*
 * Allocate a new scrub sector and attach it to @sblock.
 *
 * Will also allocate new pages for @sblock if needed.
 */
static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock,
					       u64 logical)
{
	const pgoff_t page_index = (logical - sblock->logical) >> PAGE_SHIFT;
	struct scrub_sector *ssector;

	/* We must never have scrub_block exceed U32_MAX in size. */
	ASSERT(logical - sblock->logical < U32_MAX);

	ssector = kzalloc(sizeof(*ssector), GFP_KERNEL);
	if (!ssector)
		return NULL;

	/* Allocate a new page if the slot is not allocated */
	if (!sblock->pages[page_index]) {
		int ret;

		sblock->pages[page_index] = alloc_page(GFP_KERNEL);
		if (!sblock->pages[page_index]) {
			kfree(ssector);
			return NULL;
		}
		ret = attach_scrub_page_private(sblock->pages[page_index],
				sblock->logical + (page_index << PAGE_SHIFT));
		if (ret < 0) {
			kfree(ssector);
			__free_page(sblock->pages[page_index]);
			sblock->pages[page_index] = NULL;
			return NULL;
		}
	}

	atomic_set(&ssector->refs, 1);
	ssector->sblock = sblock;
	/* The sector to be added should not be used */
	ASSERT(sblock->sectors[sblock->sector_count] == NULL);
	ssector->offset = logical - sblock->logical;

	/* The sector count must be smaller than the limit */
	ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK);

	sblock->sectors[sblock->sector_count] = ssector;
	sblock->sector_count++;
	sblock->len += sblock->sctx->fs_info->sectorsize;

	return ssector;
}
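/*
 * Worked example for the page slot math above (illustrative): for a 16KiB
 * metadata block at sblock->logical, the sector at sblock->logical + 12KiB
 * has offset 12KiB. With 4KiB pages (PAGE_SHIFT == 12) that maps to
 * page_index 3, so the block spans pages[0..3]; with 64KiB pages it maps to
 * page_index 0 and the whole block shares pages[0].
 */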
static struct page *scrub_sector_get_page(struct scrub_sector *ssector)
{
	struct scrub_block *sblock = ssector->sblock;
	pgoff_t index;
	/*
	 * When calling this function, ssector must be already attached to the
	 * parent sblock.
	 */
	ASSERT(sblock);

	/* The range should be inside the sblock range */
	ASSERT(ssector->offset < sblock->len);

	index = ssector->offset >> PAGE_SHIFT;
	ASSERT(index < SCRUB_MAX_PAGES);
	ASSERT(sblock->pages[index]);
	ASSERT(PagePrivate(sblock->pages[index]));
	return sblock->pages[index];
}

static unsigned int scrub_sector_get_page_offset(struct scrub_sector *ssector)
{
	struct scrub_block *sblock = ssector->sblock;

	/*
	 * When calling this function, ssector must be already attached to the
	 * parent sblock.
	 */
	ASSERT(sblock);

	/* The range should be inside the sblock range */
	ASSERT(ssector->offset < sblock->len);

	return offset_in_page(ssector->offset);
}

static char *scrub_sector_get_kaddr(struct scrub_sector *ssector)
{
	return page_address(scrub_sector_get_page(ssector)) +
	       scrub_sector_get_page_offset(ssector);
}

static int bio_add_scrub_sector(struct bio *bio, struct scrub_sector *ssector,
				unsigned int len)
{
	return bio_add_page(bio, scrub_sector_get_page(ssector), len,
			    scrub_sector_get_page_offset(ssector));
}

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck[]);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					      struct scrub_block *sblock_good,
					      int sector_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
					     int sector_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_sector_get(struct scrub_sector *sector);
static void scrub_sector_put(struct scrub_sector *sector);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
			 u64 physical, struct btrfs_device *dev, u64 flags,
			 u64 gen, int mirror_num, u8 *csum,
			 u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct work_struct *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
				 u64 extent_logical, u32 extent_len,
				 u64 *extent_physical,
				 struct btrfs_device **extent_dev,
				 int *extent_mirror_num);
static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct work_struct *work);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_sector *sector)
{
	return sector->recover &&
	       (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
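/*
 * How the pause handshake above is expected to work (a sketch; the
 * counterpart lives outside this section): a transaction commit bumps
 * fs_info->scrub_pause_req and waits until every running scrub has called
 * scrub_pause_on(), i.e. scrubs_paused has caught up with the running
 * scrubs. scrub_pause_off() then blocks in __scrub_blocked_if_needed()
 * until scrub_pause_req drops back to zero before the scrub continues.
 */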
/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/*
	 * Insert new lock.
	 */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
			cache->full_stripe_len + cache->start;
	return ret;
}
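/*
 * Worked example for the round down above (illustrative): a RAID5 chunk with
 * 3 data stripes has full_stripe_len = 3 * 64KiB = 192KiB, which is not a
 * power of 2. For bytenr = cache->start + 500KiB:
 *
 *	div64_u64(500KiB, 192KiB) = 2
 *	result = cache->start + 2 * 192KiB = cache->start + 384KiB
 *
 * i.e. the start of the full stripe containing @bytenr.
 */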
/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
 * So caller must call unlock_full_stripe() at the same context.
 *
 * Return <0 if encounters error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlock full stripe without problem.
 * Return <0 for error
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			   fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
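/*
 * Typical pairing of the two helpers above (a sketch matching the use in
 * scrub_handle_errored_block() below):
 *
 *	bool full_stripe_locked;
 *	int ret;
 *
 *	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
 *	if (ret < 0)
 *		return ret;
 *	// ... read/repair the full stripe ...
 *	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
 */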
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->sector_count; i++)
			scrub_block_put(sbio->sectors[i]->sblock);
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->sector_count = 0;
		INIT_WORK(&sbio->work, scrub_bio_end_io_worker);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);
	sctx->throttle_deadline = 0;

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
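/*
 * The bios array above forms an index-linked free list. Illustrative state
 * right after scrub_setup_ctx() returns:
 *
 *	sctx->first_free = 0
 *	sctx->bios[i]->next_free = i + 1	for i < SCRUB_BIOS_PER_SCTX - 1
 *	sctx->bios[63]->next_free = -1		(end of the list)
 *	sctx->curr = -1				(no bio currently being filled)
 */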
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
				     u64 root, void *warn_ctx)
{
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_ipath might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here.
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  btrfs_dev_name(swarn->dev),
				  swarn->physical,
				  root, inum, offset,
				  fs_info->sectorsize, nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  btrfs_dev_name(swarn->dev),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->sector_count < 1);
	dev = sblock->dev;
	fs_info = sblock->sctx->fs_info;

	/* Super block error, no need to search extent tree. */
	if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
			errstr, btrfs_dev_name(dev), sblock->physical);
		return;
	}
	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->physical;
	swarn.logical = sblock->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				btrfs_dev_name(dev),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };

		btrfs_release_path(path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = swarn.logical - found_key.objectid;
		ctx.fs_info = fs_info;

		swarn.path = path;
		swarn.dev = dev;

		iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bioc(recover->bioc);
		kfree(recover);
	}
}
/*
 * scrub_handle_errored_block gets called when either verification of the
 * sectors failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all sectors in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev = sblock_to_check->dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	/* One scrub_block for each mirror */
	struct scrub_block *sblocks_for_recheck[BTRFS_MAX_MIRRORS] = { 0 };
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int sector_num;
	int success;
	bool full_stripe_locked;
	unsigned int nofs_flag;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->sector_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * If we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway.
		 */
		scrub_print_warning("super block error", sblock_to_check);
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
		return 0;
	}
	logical = sblock_to_check->logical;
	ASSERT(sblock_to_check->mirror_num);
	failed_mirror_index = sblock_to_check->mirror_num - 1;
	is_metadata = !(sblock_to_check->sectors[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->sectors[0]->have_csum;

	if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical))
		return 0;

	/*
	 * We must use GFP_NOFS because the scrub task might be waiting for a
	 * worker task executing this function and in turn a transaction commit
	 * might be waiting for the scrub task to pause (which needs to wait for
	 * all the worker tasks to complete before pausing).
	 * We do allocations in the workers through insert_full_stripe_lock()
	 * and scrub_add_sector_to_wr_bio(), which happens down the call chain of
	 * this function.
	 */
	nofs_flag = memalloc_nofs_save();
	/*
	 * For RAID5/6, a race can happen between different device scrub
	 * threads. On data corruption, the parity and data threads will both
	 * try to recover the data. The race can lead to a doubly counted csum
	 * error, or even to an unrecoverable error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		memalloc_nofs_restore(nofs_flag);
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * Read all mirrors one after the other. This includes re-reading the
	 * extent or metadata block that failed (that was the reason this
	 * fixup code got called), sector by sector this time, in order to
	 * know which sectors caused I/O errors and which ones are good (for
	 * all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the data
	 * can be repaired by selecting the sectors from those mirrors without
	 * I/O error on the particular sectors. One example (with blocks >=
	 * 2 * sectorsize) would be that mirror #1 has an I/O error on the
	 * first sector, the second sector is good, and mirror #2 has an I/O
	 * error on the second sector, but the first sector is good.
	 * Then the first sector of the first mirror can be repaired by
	 * taking the first sector of the second mirror, and the
	 * second sector of the second mirror can be repaired by
	 * copying the contents of the 2nd sector of the 1st mirror.
	 * One more note: if the sectors of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the sectors are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */
	for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
		/*
		 * Note: the two members refs and outstanding_sectors are not
		 * used in the blocks that are used for the recheck procedure.
		 *
		 * But alloc_scrub_block() will initialize sblock::ref anyway,
		 * so we can use scrub_block_put() to clean them up.
		 *
		 * And here we don't setup the physical/dev for the sblock yet,
		 * they will be correctly initialized in scrub_setup_recheck_block().
		 */
		sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx, NULL,
							logical, 0, 0, mirror_index);
		if (!sblocks_for_recheck[mirror_index]) {
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			sctx->stat.read_errors++;
			sctx->stat.uncorrectable_errors++;
			spin_unlock(&sctx->stat_lock);
			btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
			goto out;
		}
	}

	/* Setup the context, map the logical blocks and alloc the sectors */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck[failed_mirror_index];

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * The error disappeared after reading sector by sector, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause).
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * Now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those sectors are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other sectors is better (and it
	 * could happen otherwise that a correct sector would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ; mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index]->sector_count)
				break;

			sblock_other = sblocks_for_recheck[mirror_index];
		} else {
			struct scrub_recover *r = sblock_bad->sectors[0]->recover;
			int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1]->sector_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck[1];
			sblock_other->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those sectors.
	 * Select the good sectors from mirrors to rewrite bad sectors from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistics counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of sectors from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd sector of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on sectorsize. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same sectorsize
	 * area are unreadable.
	 */
	success = 1;
	for (sector_num = 0; sector_num < sblock_bad->sector_count;
	     sector_num++) {
		struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
		struct scrub_block *sblock_other = NULL;

		/* Skip no-io-error sectors in scrub */
		if (!sector_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			/*
			 * In case of dev replace, if raid56 rebuild process
			 * didn't work out correct data, then copy the content
			 * in sblock_bad to make sure target device is identical
			 * to source device, instead of writing garbage data in
			 * sblock_for_recheck array to target device.
			 */
			sblock_other = NULL;
		} else if (sector_bad->io_error) {
			/* Try to find no-io-error sector in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index]->sector_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index]->
				    sectors[sector_num]->io_error) {
					sblock_other = sblocks_for_recheck[mirror_index];
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * Did not find a mirror to fetch the sector from.
			 * scrub_write_sector_to_dev_replace() handles this
			 * case (sector->io_error), by filling the block with
			 * zeros before submitting the write request.
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_sector_to_dev_replace(sblock_other,
							      sector_num) != 0) {
				atomic64_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_sector_from_good_copy(sblock_bad,
								 sblock_other,
								 sector_num, 0);
			if (ret == 0)
				sector_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * Need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, btrfs_dev_name(dev));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, btrfs_dev_name(dev));
	}

out:
	for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
		struct scrub_block *sblock = sblocks_for_recheck[mirror_index];
		struct scrub_recover *recover;
		int sector_index;

		/* Not allocated, continue checking the next mirror */
		if (!sblock)
			continue;

		for (sector_index = 0; sector_index < sblock->sector_count;
		     sector_index++) {
			/*
			 * Here we just cleanup the recover, each sector will be
			 * properly cleaned up by later scrub_block_put().
			 */
			recover = sblock->sectors[sector_index]->recover;
			if (recover) {
				scrub_put_recover(fs_info, recover);
				sblock->sectors[sector_index]->recover = NULL;
			}
		}
		scrub_block_put(sblock);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		return ret;
	return 0;
}
static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
{
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bioc->num_stripes;
}
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + BTRFS_STRIPE_LEN)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
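/*
 * Worked example for the RAID56 branch above (illustrative): a RAID5 full
 * stripe with two data stripes has raid_map = { L, L + 64KiB, RAID5_P_STRIPE },
 * where L is the logical address covered by the first data stripe and
 * BTRFS_STRIPE_LEN is 64KiB. For logical = L + 80KiB the loop stops at i = 1,
 * so *stripe_index = 1 and *stripe_offset = 16KiB. This pairs with
 * scrub_nr_raid_mirrors() above: the "mirrors" of RAID5/6 are the plain read
 * plus one (RAID5) or two (RAID6) reconstructions from parity. For the
 * mirrored profiles the mirror number is used as the stripe index directly.
 */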
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck[])
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical = original_sblock->logical;
	u64 length = original_sblock->sector_count << fs_info->sectorsize_bits;
	u64 generation = original_sblock->sectors[0]->generation;
	u64 flags = original_sblock->sectors[0]->flags;
	u64 have_csum = original_sblock->sectors[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_io_context *bioc;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int sector_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	while (length > 0) {
		sublen = min_t(u64, length, fs_info->sectorsize);
		mapped_length = sublen;
		bioc = NULL;

		/*
		 * With a length of sectorsize, each returned stripe represents
		 * one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				       logical, &mapped_length, &bioc);
		if (ret || !bioc || mapped_length < sublen) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_KERNEL);
		if (!recover) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bioc = bioc;
		recover->map_length = mapped_length;

		ASSERT(sector_index < SCRUB_MAX_SECTORS_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_sector *sector;

			sblock = sblocks_for_recheck[mirror_index];
			sblock->sctx = sctx;

			sector = alloc_scrub_sector(sblock, logical);
			if (!sector) {
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			sector->flags = flags;
			sector->generation = generation;
			sector->have_csum = have_csum;
			if (have_csum)
				memcpy(sector->csum,
				       original_sblock->sectors[0]->csum,
				       sctx->fs_info->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bioc->map_type,
						      bioc->raid_map,
						      bioc->num_stripes -
						      bioc->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			/*
			 * We're at the first sector, also populate @sblock
			 * physical and dev.
			 */
			if (sector_index == 0) {
				sblock->physical =
					bioc->stripes[stripe_index].physical +
					stripe_offset;
				sblock->dev = bioc->stripes[stripe_index].dev;
				sblock->physical_for_dev_replace =
					original_sblock->physical_for_dev_replace;
			}

			BUG_ON(sector_index >= original_sblock->sector_count);
			scrub_get_recover(recover);
			sector->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		sector_index++;
	}

	return 0;
}

static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_sector *sector)
{
	DECLARE_COMPLETION_ONSTACK(done);

	bio->bi_iter.bi_sector = (sector->offset + sector->sblock->logical) >>
				 SECTOR_SHIFT;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;
	raid56_parity_recover(bio, sector->recover->bioc, sector->sblock->mirror_num);

	wait_for_completion_io(&done);
	return blk_status_to_errno(bio->bi_status);
}
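/*
 * The helper above is the usual pattern for doing I/O synchronously when the
 * submission path does not go through submit_bio_wait(): an on-stack
 * completion is hooked up as bi_private, the end_io callback just calls
 * complete(), and the submitter sleeps in wait_for_completion_io() until the
 * repaired data has landed in the bio's pages. It is needed here because
 * raid56_parity_recover() submits the bio itself.
 */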
static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
					  struct scrub_block *sblock)
{
	struct scrub_sector *first_sector = sblock->sectors[0];
	struct bio *bio;
	int i;

	/* All sectors in sblock belong to the same stripe on the same device. */
	ASSERT(sblock->dev);
	if (!sblock->dev->bdev)
		goto out;

	bio = bio_alloc(sblock->dev->bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];

		bio_add_scrub_sector(bio, sector, fs_info->sectorsize);
	}

	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
		bio_put(bio);
		goto out;
	}

	bio_put(bio);

	scrub_recheck_block_checksum(sblock);

	return;
out:
	for (i = 0; i < sblock->sector_count; i++)
		sblock->sectors[i]->io_error = 1;

	sblock->no_io_error_seen = 0;
}

/*
 * This function will check the on disk data for checksum errors, header errors
 * and read I/O errors. If any I/O errors happen, the exact sectors which are
 * errored are marked as being bad. The goal is to enable scrub to take those
 * sectors that are not errored from all the mirrors so that the sectors that
 * are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int i;

	sblock->no_io_error_seen = 1;

	/* short cut for raid56 */
	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0]))
		return scrub_recheck_block_on_raid56(fs_info, sblock);

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];
		struct bio bio;
		struct bio_vec bvec;

		if (sblock->dev->bdev == NULL) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		bio_init(&bio, sblock->dev->bdev, &bvec, 1, REQ_OP_READ);
		bio_add_scrub_sector(&bio, sector, fs_info->sectorsize);
		bio.bi_iter.bi_sector = (sblock->physical + sector->offset) >>
					SECTOR_SHIFT;

		btrfsic_check_bio(&bio);
		if (submit_bio_wait(&bio)) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
		}

		bio_uninit(&bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

static inline int scrub_check_fsid(u8 fsid[], struct scrub_sector *sector)
{
	struct btrfs_fs_devices *fs_devices = sector->sblock->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int i;
	int ret = 0;

	for (i = 0; i < sblock_bad->sector_count; i++) {
		int ret_sub;

		ret_sub = scrub_repair_sector_from_good_copy(sblock_bad,
							     sblock_good, i, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}
static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					      struct scrub_block *sblock_good,
					      int sector_num, int force_write)
{
	struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
	struct scrub_sector *sector_good = sblock_good->sectors[sector_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
	const u32 sectorsize = fs_info->sectorsize;

	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || sector_bad->io_error) {
		struct bio bio;
		struct bio_vec bvec;
		int ret;

		if (!sblock_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_sector_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio_init(&bio, sblock_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE);
		bio.bi_iter.bi_sector = (sblock_bad->physical +
					 sector_bad->offset) >> SECTOR_SHIFT;
		ret = bio_add_scrub_sector(&bio, sector_good, sectorsize);

		btrfsic_check_bio(&bio);
		ret = submit_bio_wait(&bio);
		bio_uninit(&bio);

		if (ret) {
			btrfs_dev_stat_inc_and_print(sblock_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
			return -EIO;
		}
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int i;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (i = 0; i < sblock->sector_count; i++) {
		int ret;

		ret = scrub_write_sector_to_dev_replace(sblock, i);
		if (ret)
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
	}
}

static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
{
	const u32 sectorsize = sblock->sctx->fs_info->sectorsize;
	struct scrub_sector *sector = sblock->sectors[sector_num];

	if (sector->io_error)
		memset(scrub_sector_get_kaddr(sector), 0, sectorsize);

	return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
}

static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
	int ret = 0;
	u64 length;

	if (!btrfs_is_zoned(sctx->fs_info))
		return 0;

	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
		return 0;

	if (sctx->write_pointer < physical) {
		length = physical - sctx->write_pointer;

		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
						sctx->write_pointer, length);
		if (!ret)
			sctx->write_pointer = physical;
	}
	return ret;
}
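/*
 * Example of the gap filling above (illustrative): on a zoned target device
 * writes must be sequential. If the zone's write pointer sits at physical
 * 100MiB and the next sector to copy belongs at 100MiB + 128KiB, the 128KiB
 * in between is zeroed out first so that the subsequent write lands exactly
 * at the write pointer.
 */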
static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector)
{
	struct scrub_block *sblock = sector->sblock;
	struct scrub_bio *sbio;
	int ret;
	const u32 sectorsize = sctx->fs_info->sectorsize;

	mutex_lock(&sctx->wr_lock);
again:
	if (!sctx->wr_curr_bio) {
		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
					    GFP_KERNEL);
		if (!sctx->wr_curr_bio) {
			mutex_unlock(&sctx->wr_lock);
			return -ENOMEM;
		}
		sctx->wr_curr_bio->sctx = sctx;
		sctx->wr_curr_bio->sector_count = 0;
	}
	sbio = sctx->wr_curr_bio;
	if (sbio->sector_count == 0) {
		ret = fill_writer_pointer_gap(sctx, sector->offset +
					      sblock->physical_for_dev_replace);
		if (ret) {
			mutex_unlock(&sctx->wr_lock);
			return ret;
		}

		sbio->physical = sblock->physical_for_dev_replace + sector->offset;
		sbio->logical = sblock->logical + sector->offset;
		sbio->dev = sctx->wr_tgtdev;
		if (!sbio->bio) {
			sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
					      REQ_OP_WRITE, GFP_NOFS);
		}
		sbio->bio->bi_private = sbio;
		sbio->bio->bi_end_io = scrub_wr_bio_end_io;
		sbio->bio->bi_iter.bi_sector = sbio->physical >> SECTOR_SHIFT;
		sbio->status = 0;
	} else if (sbio->physical + sbio->sector_count * sectorsize !=
		   sblock->physical_for_dev_replace + sector->offset ||
		   sbio->logical + sbio->sector_count * sectorsize !=
		   sblock->logical + sector->offset) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
	if (ret != sectorsize) {
		if (sbio->sector_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&sctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->sectors[sbio->sector_count] = sector;
	scrub_sector_get(sector);
	/*
	 * Since ssector no longer holds a page, but uses sblock::pages, we
	 * have to ensure the sblock had not been freed before our write bio
	 * finished.
	 */
	scrub_block_get(sector->sblock);

	sbio->sector_count++;
	if (sbio->sector_count == sctx->sectors_per_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	return 0;
}
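/*
 * To summarize the batching above (illustrative, with 4KiB sectors and
 * SCRUB_SECTORS_PER_BIO == 32): sectors are appended to the current write bio
 * as long as they are both physically and logically contiguous with what is
 * already queued; the bio is flushed early via scrub_wr_submit() when a
 * discontiguous sector arrives, and flushed normally once 32 sectors
 * (128KiB) have been collected.
 */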
1951 */ 1952 for (i = 0; i < sbio->sector_count; i++) { 1953 scrub_block_put(sbio->sectors[i]->sblock); 1954 scrub_sector_put(sbio->sectors[i]); 1955 } 1956 1957 bio_put(sbio->bio); 1958 kfree(sbio); 1959 scrub_pending_bio_dec(sctx); 1960 } 1961 1962 static int scrub_checksum(struct scrub_block *sblock) 1963 { 1964 u64 flags; 1965 int ret; 1966 1967 /* 1968 * No need to initialize these stats currently, 1969 * because this function only use return value 1970 * instead of these stats value. 1971 * 1972 * Todo: 1973 * always use stats 1974 */ 1975 sblock->header_error = 0; 1976 sblock->generation_error = 0; 1977 sblock->checksum_error = 0; 1978 1979 WARN_ON(sblock->sector_count < 1); 1980 flags = sblock->sectors[0]->flags; 1981 ret = 0; 1982 if (flags & BTRFS_EXTENT_FLAG_DATA) 1983 ret = scrub_checksum_data(sblock); 1984 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) 1985 ret = scrub_checksum_tree_block(sblock); 1986 else if (flags & BTRFS_EXTENT_FLAG_SUPER) 1987 ret = scrub_checksum_super(sblock); 1988 else 1989 WARN_ON(1); 1990 if (ret) 1991 scrub_handle_errored_block(sblock); 1992 1993 return ret; 1994 } 1995 1996 static int scrub_checksum_data(struct scrub_block *sblock) 1997 { 1998 struct scrub_ctx *sctx = sblock->sctx; 1999 struct btrfs_fs_info *fs_info = sctx->fs_info; 2000 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 2001 u8 csum[BTRFS_CSUM_SIZE]; 2002 struct scrub_sector *sector; 2003 char *kaddr; 2004 2005 BUG_ON(sblock->sector_count < 1); 2006 sector = sblock->sectors[0]; 2007 if (!sector->have_csum) 2008 return 0; 2009 2010 kaddr = scrub_sector_get_kaddr(sector); 2011 2012 shash->tfm = fs_info->csum_shash; 2013 crypto_shash_init(shash); 2014 2015 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); 2016 2017 if (memcmp(csum, sector->csum, fs_info->csum_size)) 2018 sblock->checksum_error = 1; 2019 return sblock->checksum_error; 2020 } 2021 2022 static int scrub_checksum_tree_block(struct scrub_block *sblock) 2023 { 2024 struct scrub_ctx *sctx = sblock->sctx; 2025 struct btrfs_header *h; 2026 struct btrfs_fs_info *fs_info = sctx->fs_info; 2027 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 2028 u8 calculated_csum[BTRFS_CSUM_SIZE]; 2029 u8 on_disk_csum[BTRFS_CSUM_SIZE]; 2030 /* 2031 * This is done in sectorsize steps even for metadata as there's a 2032 * constraint for nodesize to be aligned to sectorsize. This will need 2033 * to change so we don't misuse data and metadata units like that. 
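 *
 * For example, with a 16KiB nodesize and 4KiB sectorsize, the csum is
 * fed to the hash in four 4KiB steps below: the first step skips the
 * BTRFS_CSUM_SIZE bytes at the start of the header, the remaining
 * three steps each cover one full sector.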
 */
	const u32 sectorsize = sctx->fs_info->sectorsize;
	const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
	int i;
	struct scrub_sector *sector;
	char *kaddr;

	BUG_ON(sblock->sector_count < 1);

	/* Each member in sectors is just one sector */
	ASSERT(sblock->sector_count == num_sectors);

	sector = sblock->sectors[0];
	kaddr = scrub_sector_get_kaddr(sector);
	h = (struct btrfs_header *)kaddr;
	memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sblock->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sector->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sector))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    sectorsize - BTRFS_CSUM_SIZE);

	for (i = 1; i < num_sectors; i++) {
		kaddr = scrub_sector_get_kaddr(sblock->sectors[i]);
		crypto_shash_update(shash, kaddr, sectorsize);
	}

	crypto_shash_final(shash, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	struct scrub_sector *sector;
	char *kaddr;
	int fail_gen = 0;
	int fail_cor = 0;

	BUG_ON(sblock->sector_count < 1);
	sector = sblock->sectors[0];
	kaddr = scrub_sector_get_kaddr(sector);
	s = (struct btrfs_super_block *)kaddr;

	if (sblock->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sector->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sector))
		++fail_cor;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
			    calculated_csum);

	if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
		++fail_cor;

	return fail_cor + fail_gen;
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->sector_count; i++)
			scrub_sector_put(sblock->sectors[i]);
		for (i = 0; i < DIV_ROUND_UP(sblock->len, PAGE_SIZE); i++) {
			if (sblock->pages[i]) {
				detach_scrub_page_private(sblock->pages[i]);
				__free_page(sblock->pages[i]);
			}
		}
		kfree(sblock);
	}
}

static void scrub_sector_get(struct scrub_sector *sector)
{
	atomic_inc(&sector->refs);
}

static void scrub_sector_put(struct scrub_sector *sector)
{
	if (atomic_dec_and_test(&sector->refs))
		kfree(sector);
}

/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second. Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max.
 */
static void scrub_throttle(struct scrub_ctx *sctx)
{
	const int time_slice = 1000;
	struct scrub_bio *sbio;
	struct btrfs_device *device;
	s64 delta;
	ktime_t now;
	u32 div;
	u64 bwlimit;

	sbio = sctx->bios[sctx->curr];
	device = sbio->dev;
	bwlimit = READ_ONCE(device->scrub_speed_max);
	if (bwlimit == 0)
		return;

	/*
	 * Slice is divided into intervals when the IO is submitted, adjust by
	 * bwlimit and maximum of 64 intervals.
	 */
	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
	div = min_t(u32, 64, div);

	/* Start new epoch, set deadline */
	now = ktime_get();
	if (sctx->throttle_deadline == 0) {
		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
		sctx->throttle_sent = 0;
	}

	/* Still in the time to send? */
	if (ktime_before(now, sctx->throttle_deadline)) {
		/* If current bio is within the limit, send it */
		sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
		if (sctx->throttle_sent <= div_u64(bwlimit, div))
			return;

		/* We're over the limit, sleep until the rest of the slice */
		delta = ktime_ms_delta(sctx->throttle_deadline, now);
	} else {
		/* New request after deadline, start new epoch */
		delta = 0;
	}

	if (delta) {
		long timeout;

		timeout = div_u64(delta * HZ, 1000);
		schedule_timeout_interruptible(timeout);
	}

	/* Next call will start the deadline period */
	sctx->throttle_deadline = 0;
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	scrub_throttle(sctx);

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_check_bio(sbio->bio);
	submit_bio(sbio->bio);
}

static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector)
{
	struct scrub_block *sblock = sector->sblock;
	struct scrub_bio *sbio;
	const u32 sectorsize = sctx->fs_info->sectorsize;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->sector_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->sector_count == 0) {
		sbio->physical = sblock->physical + sector->offset;
		sbio->logical = sblock->logical + sector->offset;
		sbio->dev = sblock->dev;
		if (!sbio->bio) {
			sbio->bio = bio_alloc(sbio->dev->bdev,
					      sctx->sectors_per_bio,
					      REQ_OP_READ, GFP_NOFS);
		}
		sbio->bio->bi_private = sbio;
		sbio->bio->bi_end_io = scrub_bio_end_io;
		sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->status = 0;
	} else if (sbio->physical + sbio->sector_count * sectorsize !=
		   sblock->physical + sector->offset ||
2271 sbio->logical + sbio->sector_count * sectorsize != 2272 sblock->logical + sector->offset || 2273 sbio->dev != sblock->dev) { 2274 scrub_submit(sctx); 2275 goto again; 2276 } 2277 2278 sbio->sectors[sbio->sector_count] = sector; 2279 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize); 2280 if (ret != sectorsize) { 2281 if (sbio->sector_count < 1) { 2282 bio_put(sbio->bio); 2283 sbio->bio = NULL; 2284 return -EIO; 2285 } 2286 scrub_submit(sctx); 2287 goto again; 2288 } 2289 2290 scrub_block_get(sblock); /* one for the page added to the bio */ 2291 atomic_inc(&sblock->outstanding_sectors); 2292 sbio->sector_count++; 2293 if (sbio->sector_count == sctx->sectors_per_bio) 2294 scrub_submit(sctx); 2295 2296 return 0; 2297 } 2298 2299 static void scrub_missing_raid56_end_io(struct bio *bio) 2300 { 2301 struct scrub_block *sblock = bio->bi_private; 2302 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; 2303 2304 btrfs_bio_counter_dec(fs_info); 2305 if (bio->bi_status) 2306 sblock->no_io_error_seen = 0; 2307 2308 bio_put(bio); 2309 2310 queue_work(fs_info->scrub_workers, &sblock->work); 2311 } 2312 2313 static void scrub_missing_raid56_worker(struct work_struct *work) 2314 { 2315 struct scrub_block *sblock = container_of(work, struct scrub_block, work); 2316 struct scrub_ctx *sctx = sblock->sctx; 2317 struct btrfs_fs_info *fs_info = sctx->fs_info; 2318 u64 logical; 2319 struct btrfs_device *dev; 2320 2321 logical = sblock->logical; 2322 dev = sblock->dev; 2323 2324 if (sblock->no_io_error_seen) 2325 scrub_recheck_block_checksum(sblock); 2326 2327 if (!sblock->no_io_error_seen) { 2328 spin_lock(&sctx->stat_lock); 2329 sctx->stat.read_errors++; 2330 spin_unlock(&sctx->stat_lock); 2331 btrfs_err_rl_in_rcu(fs_info, 2332 "IO error rebuilding logical %llu for dev %s", 2333 logical, btrfs_dev_name(dev)); 2334 } else if (sblock->header_error || sblock->checksum_error) { 2335 spin_lock(&sctx->stat_lock); 2336 sctx->stat.uncorrectable_errors++; 2337 spin_unlock(&sctx->stat_lock); 2338 btrfs_err_rl_in_rcu(fs_info, 2339 "failed to rebuild valid logical %llu for dev %s", 2340 logical, btrfs_dev_name(dev)); 2341 } else { 2342 scrub_write_block_to_dev_replace(sblock); 2343 } 2344 2345 if (sctx->is_dev_replace && sctx->flush_all_writes) { 2346 mutex_lock(&sctx->wr_lock); 2347 scrub_wr_submit(sctx); 2348 mutex_unlock(&sctx->wr_lock); 2349 } 2350 2351 scrub_block_put(sblock); 2352 scrub_pending_bio_dec(sctx); 2353 } 2354 2355 static void scrub_missing_raid56_pages(struct scrub_block *sblock) 2356 { 2357 struct scrub_ctx *sctx = sblock->sctx; 2358 struct btrfs_fs_info *fs_info = sctx->fs_info; 2359 u64 length = sblock->sector_count << fs_info->sectorsize_bits; 2360 u64 logical = sblock->logical; 2361 struct btrfs_io_context *bioc = NULL; 2362 struct bio *bio; 2363 struct btrfs_raid_bio *rbio; 2364 int ret; 2365 int i; 2366 2367 btrfs_bio_counter_inc_blocked(fs_info); 2368 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, 2369 &length, &bioc); 2370 if (ret || !bioc || !bioc->raid_map) 2371 goto bioc_out; 2372 2373 if (WARN_ON(!sctx->is_dev_replace || 2374 !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) { 2375 /* 2376 * We shouldn't be scrubbing a missing device. Even for dev 2377 * replace, we should only get here for RAID 5/6. We either 2378 * managed to mount something with no mirrors remaining or 2379 * there's a bug in scrub_find_good_copy()/btrfs_map_block(). 
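	 *
	 * In the supported case the block is not read from the missing
	 * device at all; raid56_alloc_missing_rbio() below rebuilds its
	 * contents from the remaining data and parity stripes.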
2380 */ 2381 goto bioc_out; 2382 } 2383 2384 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS); 2385 bio->bi_iter.bi_sector = logical >> 9; 2386 bio->bi_private = sblock; 2387 bio->bi_end_io = scrub_missing_raid56_end_io; 2388 2389 rbio = raid56_alloc_missing_rbio(bio, bioc); 2390 if (!rbio) 2391 goto rbio_out; 2392 2393 for (i = 0; i < sblock->sector_count; i++) { 2394 struct scrub_sector *sector = sblock->sectors[i]; 2395 2396 raid56_add_scrub_pages(rbio, scrub_sector_get_page(sector), 2397 scrub_sector_get_page_offset(sector), 2398 sector->offset + sector->sblock->logical); 2399 } 2400 2401 INIT_WORK(&sblock->work, scrub_missing_raid56_worker); 2402 scrub_block_get(sblock); 2403 scrub_pending_bio_inc(sctx); 2404 raid56_submit_missing_rbio(rbio); 2405 btrfs_put_bioc(bioc); 2406 return; 2407 2408 rbio_out: 2409 bio_put(bio); 2410 bioc_out: 2411 btrfs_bio_counter_dec(fs_info); 2412 btrfs_put_bioc(bioc); 2413 spin_lock(&sctx->stat_lock); 2414 sctx->stat.malloc_errors++; 2415 spin_unlock(&sctx->stat_lock); 2416 } 2417 2418 static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len, 2419 u64 physical, struct btrfs_device *dev, u64 flags, 2420 u64 gen, int mirror_num, u8 *csum, 2421 u64 physical_for_dev_replace) 2422 { 2423 struct scrub_block *sblock; 2424 const u32 sectorsize = sctx->fs_info->sectorsize; 2425 int index; 2426 2427 sblock = alloc_scrub_block(sctx, dev, logical, physical, 2428 physical_for_dev_replace, mirror_num); 2429 if (!sblock) { 2430 spin_lock(&sctx->stat_lock); 2431 sctx->stat.malloc_errors++; 2432 spin_unlock(&sctx->stat_lock); 2433 return -ENOMEM; 2434 } 2435 2436 for (index = 0; len > 0; index++) { 2437 struct scrub_sector *sector; 2438 /* 2439 * Here we will allocate one page for one sector to scrub. 2440 * This is fine if PAGE_SIZE == sectorsize, but will cost 2441 * more memory for PAGE_SIZE > sectorsize case. 2442 */ 2443 u32 l = min(sectorsize, len); 2444 2445 sector = alloc_scrub_sector(sblock, logical); 2446 if (!sector) { 2447 spin_lock(&sctx->stat_lock); 2448 sctx->stat.malloc_errors++; 2449 spin_unlock(&sctx->stat_lock); 2450 scrub_block_put(sblock); 2451 return -ENOMEM; 2452 } 2453 sector->flags = flags; 2454 sector->generation = gen; 2455 if (csum) { 2456 sector->have_csum = 1; 2457 memcpy(sector->csum, csum, sctx->fs_info->csum_size); 2458 } else { 2459 sector->have_csum = 0; 2460 } 2461 len -= l; 2462 logical += l; 2463 physical += l; 2464 physical_for_dev_replace += l; 2465 } 2466 2467 WARN_ON(sblock->sector_count == 0); 2468 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { 2469 /* 2470 * This case should only be hit for RAID 5/6 device replace. See 2471 * the comment in scrub_missing_raid56_pages() for details. 
2472 */ 2473 scrub_missing_raid56_pages(sblock); 2474 } else { 2475 for (index = 0; index < sblock->sector_count; index++) { 2476 struct scrub_sector *sector = sblock->sectors[index]; 2477 int ret; 2478 2479 ret = scrub_add_sector_to_rd_bio(sctx, sector); 2480 if (ret) { 2481 scrub_block_put(sblock); 2482 return ret; 2483 } 2484 } 2485 2486 if (flags & BTRFS_EXTENT_FLAG_SUPER) 2487 scrub_submit(sctx); 2488 } 2489 2490 /* last one frees, either here or in bio completion for last page */ 2491 scrub_block_put(sblock); 2492 return 0; 2493 } 2494 2495 static void scrub_bio_end_io(struct bio *bio) 2496 { 2497 struct scrub_bio *sbio = bio->bi_private; 2498 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; 2499 2500 sbio->status = bio->bi_status; 2501 sbio->bio = bio; 2502 2503 queue_work(fs_info->scrub_workers, &sbio->work); 2504 } 2505 2506 static void scrub_bio_end_io_worker(struct work_struct *work) 2507 { 2508 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); 2509 struct scrub_ctx *sctx = sbio->sctx; 2510 int i; 2511 2512 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO); 2513 if (sbio->status) { 2514 for (i = 0; i < sbio->sector_count; i++) { 2515 struct scrub_sector *sector = sbio->sectors[i]; 2516 2517 sector->io_error = 1; 2518 sector->sblock->no_io_error_seen = 0; 2519 } 2520 } 2521 2522 /* Now complete the scrub_block items that have all pages completed */ 2523 for (i = 0; i < sbio->sector_count; i++) { 2524 struct scrub_sector *sector = sbio->sectors[i]; 2525 struct scrub_block *sblock = sector->sblock; 2526 2527 if (atomic_dec_and_test(&sblock->outstanding_sectors)) 2528 scrub_block_complete(sblock); 2529 scrub_block_put(sblock); 2530 } 2531 2532 bio_put(sbio->bio); 2533 sbio->bio = NULL; 2534 spin_lock(&sctx->list_lock); 2535 sbio->next_free = sctx->first_free; 2536 sctx->first_free = sbio->index; 2537 spin_unlock(&sctx->list_lock); 2538 2539 if (sctx->is_dev_replace && sctx->flush_all_writes) { 2540 mutex_lock(&sctx->wr_lock); 2541 scrub_wr_submit(sctx); 2542 mutex_unlock(&sctx->wr_lock); 2543 } 2544 2545 scrub_pending_bio_dec(sctx); 2546 } 2547 2548 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity, 2549 unsigned long *bitmap, 2550 u64 start, u32 len) 2551 { 2552 u64 offset; 2553 u32 nsectors; 2554 u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits; 2555 2556 if (len >= sparity->stripe_len) { 2557 bitmap_set(bitmap, 0, sparity->nsectors); 2558 return; 2559 } 2560 2561 start -= sparity->logic_start; 2562 start = div64_u64_rem(start, sparity->stripe_len, &offset); 2563 offset = offset >> sectorsize_bits; 2564 nsectors = len >> sectorsize_bits; 2565 2566 if (offset + nsectors <= sparity->nsectors) { 2567 bitmap_set(bitmap, offset, nsectors); 2568 return; 2569 } 2570 2571 bitmap_set(bitmap, offset, sparity->nsectors - offset); 2572 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); 2573 } 2574 2575 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity, 2576 u64 start, u32 len) 2577 { 2578 __scrub_mark_bitmap(sparity, &sparity->ebitmap, start, len); 2579 } 2580 2581 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity, 2582 u64 start, u32 len) 2583 { 2584 __scrub_mark_bitmap(sparity, &sparity->dbitmap, start, len); 2585 } 2586 2587 static void scrub_block_complete(struct scrub_block *sblock) 2588 { 2589 int corrupted = 0; 2590 2591 if (!sblock->no_io_error_seen) { 2592 corrupted = 1; 2593 scrub_handle_errored_block(sblock); 2594 } else { 2595 /* 2596 * if has checksum 
error, write via repair mechanism in 2597 * dev replace case, otherwise write here in dev replace 2598 * case. 2599 */ 2600 corrupted = scrub_checksum(sblock); 2601 if (!corrupted && sblock->sctx->is_dev_replace) 2602 scrub_write_block_to_dev_replace(sblock); 2603 } 2604 2605 if (sblock->sparity && corrupted && !sblock->data_corrected) { 2606 u64 start = sblock->logical; 2607 u64 end = sblock->logical + 2608 sblock->sectors[sblock->sector_count - 1]->offset + 2609 sblock->sctx->fs_info->sectorsize; 2610 2611 ASSERT(end - start <= U32_MAX); 2612 scrub_parity_mark_sectors_error(sblock->sparity, 2613 start, end - start); 2614 } 2615 } 2616 2617 static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum) 2618 { 2619 sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits; 2620 list_del(&sum->list); 2621 kfree(sum); 2622 } 2623 2624 /* 2625 * Find the desired csum for range [logical, logical + sectorsize), and store 2626 * the csum into @csum. 2627 * 2628 * The search source is sctx->csum_list, which is a pre-populated list 2629 * storing bytenr ordered csum ranges. We're responsible to cleanup any range 2630 * that is before @logical. 2631 * 2632 * Return 0 if there is no csum for the range. 2633 * Return 1 if there is csum for the range and copied to @csum. 2634 */ 2635 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) 2636 { 2637 bool found = false; 2638 2639 while (!list_empty(&sctx->csum_list)) { 2640 struct btrfs_ordered_sum *sum = NULL; 2641 unsigned long index; 2642 unsigned long num_sectors; 2643 2644 sum = list_first_entry(&sctx->csum_list, 2645 struct btrfs_ordered_sum, list); 2646 /* The current csum range is beyond our range, no csum found */ 2647 if (sum->bytenr > logical) 2648 break; 2649 2650 /* 2651 * The current sum is before our bytenr, since scrub is always 2652 * done in bytenr order, the csum will never be used anymore, 2653 * clean it up so that later calls won't bother with the range, 2654 * and continue search the next range. 
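 *
 * For example, a csum range covering [1MiB, 1MiB + 64KiB) is dropped
 * here once @logical has advanced to 1MiB + 64KiB or beyond.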
2655 */ 2656 if (sum->bytenr + sum->len <= logical) { 2657 drop_csum_range(sctx, sum); 2658 continue; 2659 } 2660 2661 /* Now the csum range covers our bytenr, copy the csum */ 2662 found = true; 2663 index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits; 2664 num_sectors = sum->len >> sctx->fs_info->sectorsize_bits; 2665 2666 memcpy(csum, sum->sums + index * sctx->fs_info->csum_size, 2667 sctx->fs_info->csum_size); 2668 2669 /* Cleanup the range if we're at the end of the csum range */ 2670 if (index == num_sectors - 1) 2671 drop_csum_range(sctx, sum); 2672 break; 2673 } 2674 if (!found) 2675 return 0; 2676 return 1; 2677 } 2678 2679 /* scrub extent tries to collect up to 64 kB for each bio */ 2680 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, 2681 u64 logical, u32 len, 2682 u64 physical, struct btrfs_device *dev, u64 flags, 2683 u64 gen, int mirror_num) 2684 { 2685 struct btrfs_device *src_dev = dev; 2686 u64 src_physical = physical; 2687 int src_mirror = mirror_num; 2688 int ret; 2689 u8 csum[BTRFS_CSUM_SIZE]; 2690 u32 blocksize; 2691 2692 if (flags & BTRFS_EXTENT_FLAG_DATA) { 2693 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2694 blocksize = map->stripe_len; 2695 else 2696 blocksize = sctx->fs_info->sectorsize; 2697 spin_lock(&sctx->stat_lock); 2698 sctx->stat.data_extents_scrubbed++; 2699 sctx->stat.data_bytes_scrubbed += len; 2700 spin_unlock(&sctx->stat_lock); 2701 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 2702 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2703 blocksize = map->stripe_len; 2704 else 2705 blocksize = sctx->fs_info->nodesize; 2706 spin_lock(&sctx->stat_lock); 2707 sctx->stat.tree_extents_scrubbed++; 2708 sctx->stat.tree_bytes_scrubbed += len; 2709 spin_unlock(&sctx->stat_lock); 2710 } else { 2711 blocksize = sctx->fs_info->sectorsize; 2712 WARN_ON(1); 2713 } 2714 2715 /* 2716 * For dev-replace case, we can have @dev being a missing device. 2717 * Regular scrub will avoid its execution on missing device at all, 2718 * as that would trigger tons of read error. 2719 * 2720 * Reading from missing device will cause read error counts to 2721 * increase unnecessarily. 2722 * So here we change the read source to a good mirror. 2723 */ 2724 if (sctx->is_dev_replace && !dev->bdev) 2725 scrub_find_good_copy(sctx->fs_info, logical, len, &src_physical, 2726 &src_dev, &src_mirror); 2727 while (len) { 2728 u32 l = min(len, blocksize); 2729 int have_csum = 0; 2730 2731 if (flags & BTRFS_EXTENT_FLAG_DATA) { 2732 /* push csums to sbio */ 2733 have_csum = scrub_find_csum(sctx, logical, csum); 2734 if (have_csum == 0) 2735 ++sctx->stat.no_csum; 2736 } 2737 ret = scrub_sectors(sctx, logical, l, src_physical, src_dev, 2738 flags, gen, src_mirror, 2739 have_csum ? 
csum : NULL, physical);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		src_physical += l;
	}
	return 0;
}

static int scrub_sectors_for_parity(struct scrub_parity *sparity,
				    u64 logical, u32 len,
				    u64 physical, struct btrfs_device *dev,
				    u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	const u32 sectorsize = sctx->fs_info->sectorsize;
	int index;

	ASSERT(IS_ALIGNED(len, sectorsize));

	sblock = alloc_scrub_block(sctx, dev, logical, physical, physical, mirror_num);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_sector *sector;

		sector = alloc_scrub_sector(sblock, logical);
		if (!sector) {
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		sblock->sectors[index] = sector;
		/* For scrub parity */
		scrub_sector_get(sector);
		list_add_tail(&sector->list, &sparity->sectors_list);
		sector->flags = flags;
		sector->generation = gen;
		if (csum) {
			sector->have_csum = 1;
			memcpy(sector->csum, csum, sctx->fs_info->csum_size);
		} else {
			sector->have_csum = 0;
		}

		/* Iterate over the stripe range in sectorsize steps */
		len -= sectorsize;
		logical += sectorsize;
		physical += sectorsize;
	}

	WARN_ON(sblock->sector_count == 0);
	for (index = 0; index < sblock->sector_count; index++) {
		struct scrub_sector *sector = sblock->sectors[index];
		int ret;

		ret = scrub_add_sector_to_rd_bio(sctx, sector);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* Last one frees, either here or in bio completion for last sector */
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u32 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sparity->stripe_len;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sparity->stripe_len;
	} else {
		blocksize = sctx->fs_info->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u32 l = min(len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_sectors_for_parity(sparity, logical, l, physical, dev,
					       flags, gen, mirror_num,
					       have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its
 * logical offset.
if this is a parity stripe, it will return 2870 * the most left data stripe's logical offset. 2871 * 2872 * return 0 if it is a data stripe, 1 means parity stripe. 2873 */ 2874 static int get_raid56_logic_offset(u64 physical, int num, 2875 struct map_lookup *map, u64 *offset, 2876 u64 *stripe_start) 2877 { 2878 int i; 2879 int j = 0; 2880 u64 stripe_nr; 2881 u64 last_offset; 2882 u32 stripe_index; 2883 u32 rot; 2884 const int data_stripes = nr_data_stripes(map); 2885 2886 last_offset = (physical - map->stripes[num].physical) * data_stripes; 2887 if (stripe_start) 2888 *stripe_start = last_offset; 2889 2890 *offset = last_offset; 2891 for (i = 0; i < data_stripes; i++) { 2892 *offset = last_offset + i * map->stripe_len; 2893 2894 stripe_nr = div64_u64(*offset, map->stripe_len); 2895 stripe_nr = div_u64(stripe_nr, data_stripes); 2896 2897 /* Work out the disk rotation on this stripe-set */ 2898 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); 2899 /* calculate which stripe this data locates */ 2900 rot += i; 2901 stripe_index = rot % map->num_stripes; 2902 if (stripe_index == num) 2903 return 0; 2904 if (stripe_index < num) 2905 j++; 2906 } 2907 *offset = last_offset + j * map->stripe_len; 2908 return 1; 2909 } 2910 2911 static void scrub_free_parity(struct scrub_parity *sparity) 2912 { 2913 struct scrub_ctx *sctx = sparity->sctx; 2914 struct scrub_sector *curr, *next; 2915 int nbits; 2916 2917 nbits = bitmap_weight(&sparity->ebitmap, sparity->nsectors); 2918 if (nbits) { 2919 spin_lock(&sctx->stat_lock); 2920 sctx->stat.read_errors += nbits; 2921 sctx->stat.uncorrectable_errors += nbits; 2922 spin_unlock(&sctx->stat_lock); 2923 } 2924 2925 list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) { 2926 list_del_init(&curr->list); 2927 scrub_sector_put(curr); 2928 } 2929 2930 kfree(sparity); 2931 } 2932 2933 static void scrub_parity_bio_endio_worker(struct work_struct *work) 2934 { 2935 struct scrub_parity *sparity = container_of(work, struct scrub_parity, 2936 work); 2937 struct scrub_ctx *sctx = sparity->sctx; 2938 2939 btrfs_bio_counter_dec(sctx->fs_info); 2940 scrub_free_parity(sparity); 2941 scrub_pending_bio_dec(sctx); 2942 } 2943 2944 static void scrub_parity_bio_endio(struct bio *bio) 2945 { 2946 struct scrub_parity *sparity = bio->bi_private; 2947 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; 2948 2949 if (bio->bi_status) 2950 bitmap_or(&sparity->ebitmap, &sparity->ebitmap, 2951 &sparity->dbitmap, sparity->nsectors); 2952 2953 bio_put(bio); 2954 2955 INIT_WORK(&sparity->work, scrub_parity_bio_endio_worker); 2956 queue_work(fs_info->scrub_parity_workers, &sparity->work); 2957 } 2958 2959 static void scrub_parity_check_and_repair(struct scrub_parity *sparity) 2960 { 2961 struct scrub_ctx *sctx = sparity->sctx; 2962 struct btrfs_fs_info *fs_info = sctx->fs_info; 2963 struct bio *bio; 2964 struct btrfs_raid_bio *rbio; 2965 struct btrfs_io_context *bioc = NULL; 2966 u64 length; 2967 int ret; 2968 2969 if (!bitmap_andnot(&sparity->dbitmap, &sparity->dbitmap, 2970 &sparity->ebitmap, sparity->nsectors)) 2971 goto out; 2972 2973 length = sparity->logic_end - sparity->logic_start; 2974 2975 btrfs_bio_counter_inc_blocked(fs_info); 2976 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, 2977 &length, &bioc); 2978 if (ret || !bioc || !bioc->raid_map) 2979 goto bioc_out; 2980 2981 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS); 2982 bio->bi_iter.bi_sector = sparity->logic_start >> 9; 2983 bio->bi_private = sparity; 2984 bio->bi_end_io 
= scrub_parity_bio_endio; 2985 2986 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, 2987 sparity->scrub_dev, 2988 &sparity->dbitmap, 2989 sparity->nsectors); 2990 btrfs_put_bioc(bioc); 2991 if (!rbio) 2992 goto rbio_out; 2993 2994 scrub_pending_bio_inc(sctx); 2995 raid56_parity_submit_scrub_rbio(rbio); 2996 return; 2997 2998 rbio_out: 2999 bio_put(bio); 3000 bioc_out: 3001 btrfs_bio_counter_dec(fs_info); 3002 bitmap_or(&sparity->ebitmap, &sparity->ebitmap, &sparity->dbitmap, 3003 sparity->nsectors); 3004 spin_lock(&sctx->stat_lock); 3005 sctx->stat.malloc_errors++; 3006 spin_unlock(&sctx->stat_lock); 3007 out: 3008 scrub_free_parity(sparity); 3009 } 3010 3011 static void scrub_parity_get(struct scrub_parity *sparity) 3012 { 3013 refcount_inc(&sparity->refs); 3014 } 3015 3016 static void scrub_parity_put(struct scrub_parity *sparity) 3017 { 3018 if (!refcount_dec_and_test(&sparity->refs)) 3019 return; 3020 3021 scrub_parity_check_and_repair(sparity); 3022 } 3023 3024 /* 3025 * Return 0 if the extent item range covers any byte of the range. 3026 * Return <0 if the extent item is before @search_start. 3027 * Return >0 if the extent item is after @start_start + @search_len. 3028 */ 3029 static int compare_extent_item_range(struct btrfs_path *path, 3030 u64 search_start, u64 search_len) 3031 { 3032 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info; 3033 u64 len; 3034 struct btrfs_key key; 3035 3036 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3037 ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY || 3038 key.type == BTRFS_METADATA_ITEM_KEY); 3039 if (key.type == BTRFS_METADATA_ITEM_KEY) 3040 len = fs_info->nodesize; 3041 else 3042 len = key.offset; 3043 3044 if (key.objectid + len <= search_start) 3045 return -1; 3046 if (key.objectid >= search_start + search_len) 3047 return 1; 3048 return 0; 3049 } 3050 3051 /* 3052 * Locate one extent item which covers any byte in range 3053 * [@search_start, @search_start + @search_length) 3054 * 3055 * If the path is not initialized, we will initialize the search by doing 3056 * a btrfs_search_slot(). 3057 * If the path is already initialized, we will use the path as the initial 3058 * slot, to avoid duplicated btrfs_search_slot() calls. 3059 * 3060 * NOTE: If an extent item starts before @search_start, we will still 3061 * return the extent item. This is for data extent crossing stripe boundary. 3062 * 3063 * Return 0 if we found such extent item, and @path will point to the extent item. 3064 * Return >0 if no such extent item can be found, and @path will be released. 3065 * Return <0 if hit fatal error, and @path will be released. 3066 */ 3067 static int find_first_extent_item(struct btrfs_root *extent_root, 3068 struct btrfs_path *path, 3069 u64 search_start, u64 search_len) 3070 { 3071 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3072 struct btrfs_key key; 3073 int ret; 3074 3075 /* Continue using the existing path */ 3076 if (path->nodes[0]) 3077 goto search_forward; 3078 3079 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) 3080 key.type = BTRFS_METADATA_ITEM_KEY; 3081 else 3082 key.type = BTRFS_EXTENT_ITEM_KEY; 3083 key.objectid = search_start; 3084 key.offset = (u64)-1; 3085 3086 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 3087 if (ret < 0) 3088 return ret; 3089 3090 ASSERT(ret > 0); 3091 /* 3092 * Here we intentionally pass 0 as @min_objectid, as there could be 3093 * an extent item starting before @search_start. 
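 *
 * For example, a data extent covering
 * [@search_start - 4KiB, @search_start + 8KiB) starts before
 * @search_start but still overlaps the search range; stepping back one
 * extent item lets the forward search below return it.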
3094 */ 3095 ret = btrfs_previous_extent_item(extent_root, path, 0); 3096 if (ret < 0) 3097 return ret; 3098 /* 3099 * No matter whether we have found an extent item, the next loop will 3100 * properly do every check on the key. 3101 */ 3102 search_forward: 3103 while (true) { 3104 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3105 if (key.objectid >= search_start + search_len) 3106 break; 3107 if (key.type != BTRFS_METADATA_ITEM_KEY && 3108 key.type != BTRFS_EXTENT_ITEM_KEY) 3109 goto next; 3110 3111 ret = compare_extent_item_range(path, search_start, search_len); 3112 if (ret == 0) 3113 return ret; 3114 if (ret > 0) 3115 break; 3116 next: 3117 path->slots[0]++; 3118 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 3119 ret = btrfs_next_leaf(extent_root, path); 3120 if (ret) { 3121 /* Either no more item or fatal error */ 3122 btrfs_release_path(path); 3123 return ret; 3124 } 3125 } 3126 } 3127 btrfs_release_path(path); 3128 return 1; 3129 } 3130 3131 static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret, 3132 u64 *size_ret, u64 *flags_ret, u64 *generation_ret) 3133 { 3134 struct btrfs_key key; 3135 struct btrfs_extent_item *ei; 3136 3137 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3138 ASSERT(key.type == BTRFS_METADATA_ITEM_KEY || 3139 key.type == BTRFS_EXTENT_ITEM_KEY); 3140 *extent_start_ret = key.objectid; 3141 if (key.type == BTRFS_METADATA_ITEM_KEY) 3142 *size_ret = path->nodes[0]->fs_info->nodesize; 3143 else 3144 *size_ret = key.offset; 3145 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); 3146 *flags_ret = btrfs_extent_flags(path->nodes[0], ei); 3147 *generation_ret = btrfs_extent_generation(path->nodes[0], ei); 3148 } 3149 3150 static bool does_range_cross_boundary(u64 extent_start, u64 extent_len, 3151 u64 boundary_start, u64 boudary_len) 3152 { 3153 return (extent_start < boundary_start && 3154 extent_start + extent_len > boundary_start) || 3155 (extent_start < boundary_start + boudary_len && 3156 extent_start + extent_len > boundary_start + boudary_len); 3157 } 3158 3159 static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx, 3160 struct scrub_parity *sparity, 3161 struct map_lookup *map, 3162 struct btrfs_device *sdev, 3163 struct btrfs_path *path, 3164 u64 logical) 3165 { 3166 struct btrfs_fs_info *fs_info = sctx->fs_info; 3167 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical); 3168 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, logical); 3169 u64 cur_logical = logical; 3170 int ret; 3171 3172 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); 3173 3174 /* Path must not be populated */ 3175 ASSERT(!path->nodes[0]); 3176 3177 while (cur_logical < logical + map->stripe_len) { 3178 struct btrfs_io_context *bioc = NULL; 3179 struct btrfs_device *extent_dev; 3180 u64 extent_start; 3181 u64 extent_size; 3182 u64 mapped_length; 3183 u64 extent_flags; 3184 u64 extent_gen; 3185 u64 extent_physical; 3186 u64 extent_mirror_num; 3187 3188 ret = find_first_extent_item(extent_root, path, cur_logical, 3189 logical + map->stripe_len - cur_logical); 3190 /* No more extent item in this data stripe */ 3191 if (ret > 0) { 3192 ret = 0; 3193 break; 3194 } 3195 if (ret < 0) 3196 break; 3197 get_extent_info(path, &extent_start, &extent_size, &extent_flags, 3198 &extent_gen); 3199 3200 /* Metadata should not cross stripe boundaries */ 3201 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && 3202 does_range_cross_boundary(extent_start, extent_size, 3203 
logical, map->stripe_len)) { 3204 btrfs_err(fs_info, 3205 "scrub: tree block %llu spanning stripes, ignored. logical=%llu", 3206 extent_start, logical); 3207 spin_lock(&sctx->stat_lock); 3208 sctx->stat.uncorrectable_errors++; 3209 spin_unlock(&sctx->stat_lock); 3210 cur_logical += extent_size; 3211 continue; 3212 } 3213 3214 /* Skip hole range which doesn't have any extent */ 3215 cur_logical = max(extent_start, cur_logical); 3216 3217 /* Truncate the range inside this data stripe */ 3218 extent_size = min(extent_start + extent_size, 3219 logical + map->stripe_len) - cur_logical; 3220 extent_start = cur_logical; 3221 ASSERT(extent_size <= U32_MAX); 3222 3223 scrub_parity_mark_sectors_data(sparity, extent_start, extent_size); 3224 3225 mapped_length = extent_size; 3226 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_start, 3227 &mapped_length, &bioc, 0); 3228 if (!ret && (!bioc || mapped_length < extent_size)) 3229 ret = -EIO; 3230 if (ret) { 3231 btrfs_put_bioc(bioc); 3232 scrub_parity_mark_sectors_error(sparity, extent_start, 3233 extent_size); 3234 break; 3235 } 3236 extent_physical = bioc->stripes[0].physical; 3237 extent_mirror_num = bioc->mirror_num; 3238 extent_dev = bioc->stripes[0].dev; 3239 btrfs_put_bioc(bioc); 3240 3241 ret = btrfs_lookup_csums_list(csum_root, extent_start, 3242 extent_start + extent_size - 1, 3243 &sctx->csum_list, 1, false); 3244 if (ret) { 3245 scrub_parity_mark_sectors_error(sparity, extent_start, 3246 extent_size); 3247 break; 3248 } 3249 3250 ret = scrub_extent_for_parity(sparity, extent_start, 3251 extent_size, extent_physical, 3252 extent_dev, extent_flags, 3253 extent_gen, extent_mirror_num); 3254 scrub_free_csums(sctx); 3255 3256 if (ret) { 3257 scrub_parity_mark_sectors_error(sparity, extent_start, 3258 extent_size); 3259 break; 3260 } 3261 3262 cond_resched(); 3263 cur_logical += extent_size; 3264 } 3265 btrfs_release_path(path); 3266 return ret; 3267 } 3268 3269 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, 3270 struct map_lookup *map, 3271 struct btrfs_device *sdev, 3272 u64 logic_start, 3273 u64 logic_end) 3274 { 3275 struct btrfs_fs_info *fs_info = sctx->fs_info; 3276 struct btrfs_path *path; 3277 u64 cur_logical; 3278 int ret; 3279 struct scrub_parity *sparity; 3280 int nsectors; 3281 3282 path = btrfs_alloc_path(); 3283 if (!path) { 3284 spin_lock(&sctx->stat_lock); 3285 sctx->stat.malloc_errors++; 3286 spin_unlock(&sctx->stat_lock); 3287 return -ENOMEM; 3288 } 3289 path->search_commit_root = 1; 3290 path->skip_locking = 1; 3291 3292 ASSERT(map->stripe_len <= U32_MAX); 3293 nsectors = map->stripe_len >> fs_info->sectorsize_bits; 3294 ASSERT(nsectors <= BITS_PER_LONG); 3295 sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS); 3296 if (!sparity) { 3297 spin_lock(&sctx->stat_lock); 3298 sctx->stat.malloc_errors++; 3299 spin_unlock(&sctx->stat_lock); 3300 btrfs_free_path(path); 3301 return -ENOMEM; 3302 } 3303 3304 ASSERT(map->stripe_len <= U32_MAX); 3305 sparity->stripe_len = map->stripe_len; 3306 sparity->nsectors = nsectors; 3307 sparity->sctx = sctx; 3308 sparity->scrub_dev = sdev; 3309 sparity->logic_start = logic_start; 3310 sparity->logic_end = logic_end; 3311 refcount_set(&sparity->refs, 1); 3312 INIT_LIST_HEAD(&sparity->sectors_list); 3313 3314 ret = 0; 3315 for (cur_logical = logic_start; cur_logical < logic_end; 3316 cur_logical += map->stripe_len) { 3317 ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map, 3318 sdev, path, cur_logical); 3319 if (ret < 0) 3320 break; 3321 } 3322 3323 
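	/*
	 * Drop the initial reference taken at allocation time. The last
	 * holder (possibly a read bio completion) triggers the actual
	 * parity check and repair via scrub_parity_put().
	 */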
scrub_parity_put(sparity); 3324 scrub_submit(sctx); 3325 mutex_lock(&sctx->wr_lock); 3326 scrub_wr_submit(sctx); 3327 mutex_unlock(&sctx->wr_lock); 3328 3329 btrfs_free_path(path); 3330 return ret < 0 ? ret : 0; 3331 } 3332 3333 static void sync_replace_for_zoned(struct scrub_ctx *sctx) 3334 { 3335 if (!btrfs_is_zoned(sctx->fs_info)) 3336 return; 3337 3338 sctx->flush_all_writes = true; 3339 scrub_submit(sctx); 3340 mutex_lock(&sctx->wr_lock); 3341 scrub_wr_submit(sctx); 3342 mutex_unlock(&sctx->wr_lock); 3343 3344 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); 3345 } 3346 3347 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical, 3348 u64 physical, u64 physical_end) 3349 { 3350 struct btrfs_fs_info *fs_info = sctx->fs_info; 3351 int ret = 0; 3352 3353 if (!btrfs_is_zoned(fs_info)) 3354 return 0; 3355 3356 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); 3357 3358 mutex_lock(&sctx->wr_lock); 3359 if (sctx->write_pointer < physical_end) { 3360 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, 3361 physical, 3362 sctx->write_pointer); 3363 if (ret) 3364 btrfs_err(fs_info, 3365 "zoned: failed to recover write pointer"); 3366 } 3367 mutex_unlock(&sctx->wr_lock); 3368 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); 3369 3370 return ret; 3371 } 3372 3373 /* 3374 * Scrub one range which can only has simple mirror based profile. 3375 * (Including all range in SINGLE/DUP/RAID1/RAID1C*, and each stripe in 3376 * RAID0/RAID10). 3377 * 3378 * Since we may need to handle a subset of block group, we need @logical_start 3379 * and @logical_length parameter. 3380 */ 3381 static int scrub_simple_mirror(struct scrub_ctx *sctx, 3382 struct btrfs_root *extent_root, 3383 struct btrfs_root *csum_root, 3384 struct btrfs_block_group *bg, 3385 struct map_lookup *map, 3386 u64 logical_start, u64 logical_length, 3387 struct btrfs_device *device, 3388 u64 physical, int mirror_num) 3389 { 3390 struct btrfs_fs_info *fs_info = sctx->fs_info; 3391 const u64 logical_end = logical_start + logical_length; 3392 /* An artificial limit, inherit from old scrub behavior */ 3393 const u32 max_length = SZ_64K; 3394 struct btrfs_path path = { 0 }; 3395 u64 cur_logical = logical_start; 3396 int ret; 3397 3398 /* The range must be inside the bg */ 3399 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); 3400 3401 path.search_commit_root = 1; 3402 path.skip_locking = 1; 3403 /* Go through each extent items inside the logical range */ 3404 while (cur_logical < logical_end) { 3405 u64 extent_start; 3406 u64 extent_len; 3407 u64 extent_flags; 3408 u64 extent_gen; 3409 u64 scrub_len; 3410 3411 /* Canceled? */ 3412 if (atomic_read(&fs_info->scrub_cancel_req) || 3413 atomic_read(&sctx->cancel_req)) { 3414 ret = -ECANCELED; 3415 break; 3416 } 3417 /* Paused? */ 3418 if (atomic_read(&fs_info->scrub_pause_req)) { 3419 /* Push queued extents */ 3420 sctx->flush_all_writes = true; 3421 scrub_submit(sctx); 3422 mutex_lock(&sctx->wr_lock); 3423 scrub_wr_submit(sctx); 3424 mutex_unlock(&sctx->wr_lock); 3425 wait_event(sctx->list_wait, 3426 atomic_read(&sctx->bios_in_flight) == 0); 3427 sctx->flush_all_writes = false; 3428 scrub_blocked_if_needed(fs_info); 3429 } 3430 /* Block group removed? 
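	 * If so, there is nothing left to scrub in it, so bail out with
	 * success.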
*/ 3431 spin_lock(&bg->lock); 3432 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) { 3433 spin_unlock(&bg->lock); 3434 ret = 0; 3435 break; 3436 } 3437 spin_unlock(&bg->lock); 3438 3439 ret = find_first_extent_item(extent_root, &path, cur_logical, 3440 logical_end - cur_logical); 3441 if (ret > 0) { 3442 /* No more extent, just update the accounting */ 3443 sctx->stat.last_physical = physical + logical_length; 3444 ret = 0; 3445 break; 3446 } 3447 if (ret < 0) 3448 break; 3449 get_extent_info(&path, &extent_start, &extent_len, 3450 &extent_flags, &extent_gen); 3451 /* Skip hole range which doesn't have any extent */ 3452 cur_logical = max(extent_start, cur_logical); 3453 3454 /* 3455 * Scrub len has three limits: 3456 * - Extent size limit 3457 * - Scrub range limit 3458 * This is especially imporatant for RAID0/RAID10 to reuse 3459 * this function 3460 * - Max scrub size limit 3461 */ 3462 scrub_len = min(min(extent_start + extent_len, 3463 logical_end), cur_logical + max_length) - 3464 cur_logical; 3465 3466 if (extent_flags & BTRFS_EXTENT_FLAG_DATA) { 3467 ret = btrfs_lookup_csums_list(csum_root, cur_logical, 3468 cur_logical + scrub_len - 1, 3469 &sctx->csum_list, 1, false); 3470 if (ret) 3471 break; 3472 } 3473 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && 3474 does_range_cross_boundary(extent_start, extent_len, 3475 logical_start, logical_length)) { 3476 btrfs_err(fs_info, 3477 "scrub: tree block %llu spanning boundaries, ignored. boundary=[%llu, %llu)", 3478 extent_start, logical_start, logical_end); 3479 spin_lock(&sctx->stat_lock); 3480 sctx->stat.uncorrectable_errors++; 3481 spin_unlock(&sctx->stat_lock); 3482 cur_logical += scrub_len; 3483 continue; 3484 } 3485 ret = scrub_extent(sctx, map, cur_logical, scrub_len, 3486 cur_logical - logical_start + physical, 3487 device, extent_flags, extent_gen, 3488 mirror_num); 3489 scrub_free_csums(sctx); 3490 if (ret) 3491 break; 3492 if (sctx->is_dev_replace) 3493 sync_replace_for_zoned(sctx); 3494 cur_logical += scrub_len; 3495 /* Don't hold CPU for too long time */ 3496 cond_resched(); 3497 } 3498 btrfs_release_path(&path); 3499 return ret; 3500 } 3501 3502 /* Calculate the full stripe length for simple stripe based profiles */ 3503 static u64 simple_stripe_full_stripe_len(const struct map_lookup *map) 3504 { 3505 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | 3506 BTRFS_BLOCK_GROUP_RAID10)); 3507 3508 return map->num_stripes / map->sub_stripes * map->stripe_len; 3509 } 3510 3511 /* Get the logical bytenr for the stripe */ 3512 static u64 simple_stripe_get_logical(struct map_lookup *map, 3513 struct btrfs_block_group *bg, 3514 int stripe_index) 3515 { 3516 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | 3517 BTRFS_BLOCK_GROUP_RAID10)); 3518 ASSERT(stripe_index < map->num_stripes); 3519 3520 /* 3521 * (stripe_index / sub_stripes) gives how many data stripes we need to 3522 * skip. 3523 */ 3524 return (stripe_index / map->sub_stripes) * map->stripe_len + bg->start; 3525 } 3526 3527 /* Get the mirror number for the stripe */ 3528 static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index) 3529 { 3530 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | 3531 BTRFS_BLOCK_GROUP_RAID10)); 3532 ASSERT(stripe_index < map->num_stripes); 3533 3534 /* For RAID0, it's fixed to 1, for RAID10 it's 0,1,0,1... 
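 *
 * E.g. for RAID10 with num_stripes = 4 and sub_stripes = 2, stripe
 * indexes 0-3 map to mirror numbers 1, 2, 1, 2 respectively.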
*/ 3535 return stripe_index % map->sub_stripes + 1; 3536 } 3537 3538 static int scrub_simple_stripe(struct scrub_ctx *sctx, 3539 struct btrfs_root *extent_root, 3540 struct btrfs_root *csum_root, 3541 struct btrfs_block_group *bg, 3542 struct map_lookup *map, 3543 struct btrfs_device *device, 3544 int stripe_index) 3545 { 3546 const u64 logical_increment = simple_stripe_full_stripe_len(map); 3547 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index); 3548 const u64 orig_physical = map->stripes[stripe_index].physical; 3549 const int mirror_num = simple_stripe_mirror_num(map, stripe_index); 3550 u64 cur_logical = orig_logical; 3551 u64 cur_physical = orig_physical; 3552 int ret = 0; 3553 3554 while (cur_logical < bg->start + bg->length) { 3555 /* 3556 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is 3557 * just RAID1, so we can reuse scrub_simple_mirror() to scrub 3558 * this stripe. 3559 */ 3560 ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map, 3561 cur_logical, map->stripe_len, device, 3562 cur_physical, mirror_num); 3563 if (ret) 3564 return ret; 3565 /* Skip to next stripe which belongs to the target device */ 3566 cur_logical += logical_increment; 3567 /* For physical offset, we just go to next stripe */ 3568 cur_physical += map->stripe_len; 3569 } 3570 return ret; 3571 } 3572 3573 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, 3574 struct btrfs_block_group *bg, 3575 struct extent_map *em, 3576 struct btrfs_device *scrub_dev, 3577 int stripe_index) 3578 { 3579 struct btrfs_path *path; 3580 struct btrfs_fs_info *fs_info = sctx->fs_info; 3581 struct btrfs_root *root; 3582 struct btrfs_root *csum_root; 3583 struct blk_plug plug; 3584 struct map_lookup *map = em->map_lookup; 3585 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; 3586 const u64 chunk_logical = bg->start; 3587 int ret; 3588 u64 physical = map->stripes[stripe_index].physical; 3589 const u64 dev_stripe_len = btrfs_calc_stripe_length(em); 3590 const u64 physical_end = physical + dev_stripe_len; 3591 u64 logical; 3592 u64 logic_end; 3593 /* The logical increment after finishing one stripe */ 3594 u64 increment; 3595 /* Offset inside the chunk */ 3596 u64 offset; 3597 u64 stripe_logical; 3598 u64 stripe_end; 3599 int stop_loop = 0; 3600 3601 path = btrfs_alloc_path(); 3602 if (!path) 3603 return -ENOMEM; 3604 3605 /* 3606 * work on commit root. The related disk blocks are static as 3607 * long as COW is applied. This means, it is save to rewrite 3608 * them to repair disk errors without any race conditions 3609 */ 3610 path->search_commit_root = 1; 3611 path->skip_locking = 1; 3612 path->reada = READA_FORWARD; 3613 3614 wait_event(sctx->list_wait, 3615 atomic_read(&sctx->bios_in_flight) == 0); 3616 scrub_blocked_if_needed(fs_info); 3617 3618 root = btrfs_extent_root(fs_info, bg->start); 3619 csum_root = btrfs_csum_root(fs_info, bg->start); 3620 3621 /* 3622 * collect all data csums for the stripe to avoid seeking during 3623 * the scrub. This might currently (crc32) end up to be about 1MB 3624 */ 3625 blk_start_plug(&plug); 3626 3627 if (sctx->is_dev_replace && 3628 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { 3629 mutex_lock(&sctx->wr_lock); 3630 sctx->write_pointer = physical; 3631 mutex_unlock(&sctx->wr_lock); 3632 sctx->flush_all_writes = true; 3633 } 3634 3635 /* 3636 * There used to be a big double loop to handle all profiles using the 3637 * same routine, which grows larger and more gross over time. 
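 *
 * Roughly: SINGLE/DUP/RAID1/RAID1C* are handled by
 * scrub_simple_mirror(), RAID0/RAID10 by scrub_simple_stripe(), and
 * only RAID5/6 still goes through the stripe-by-stripe loop below.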
 *
 * So here we handle each profile differently, so simpler profiles
 * have simpler scrubbing functions.
 */
	if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * The above check rules out all complex profiles; the
		 * remaining ones are SINGLE|DUP|RAID1|RAID1C*, i.e. simple
		 * mirrored duplication without striping.
		 *
		 * Only @physical and @mirror_num need to be calculated from
		 * @stripe_index.
		 */
		ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
					  bg->start, bg->length, scrub_dev,
					  map->stripes[stripe_index].physical,
					  stripe_index + 1);
		offset = 0;
		goto out;
	}
	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
		ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
					  scrub_dev, stripe_index);
		offset = map->stripe_len * (stripe_index / map->sub_stripes);
		goto out;
	}

	/* Only RAID56 goes through the old code */
	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
	ret = 0;

	/* Calculate the logical end of the stripe */
	get_raid56_logic_offset(physical_end, stripe_index,
				map, &logic_end, NULL);
	logic_end += chunk_logical;

	/* Initialize @offset in case we need to go to out: label */
	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
	increment = map->stripe_len * nr_data_stripes(map);

	/*
	 * Due to the rotation, for RAID56 it's better to iterate the stripes
	 * using their physical offsets.
	 */
	while (physical < physical_end) {
		ret = get_raid56_logic_offset(physical, stripe_index, map,
					      &logical, &stripe_logical);
		logical += chunk_logical;
		if (ret) {
			/* It is a parity stripe */
			stripe_logical += chunk_logical;
			stripe_end = stripe_logical + increment;
			ret = scrub_raid56_parity(sctx, map, scrub_dev,
						  stripe_logical,
						  stripe_end);
			if (ret)
				goto out;
			goto next;
		}

		/*
		 * Now we're at a data stripe, scrub each extent in the range.
		 *
		 * At this stage, if we ignore the repair part, inside each
		 * data stripe it is no different from the SINGLE profile.
		 * We can reuse scrub_simple_mirror() here, as the repair part
		 * is still based on @mirror_num.
		 */
		ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
					  logical, map->stripe_len,
					  scrub_dev, physical, 1);
		if (ret < 0)
			goto out;
next:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical =
				map->stripes[stripe_index].physical + dev_stripe_len;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);

	if (sctx->is_dev_replace && ret >= 0) {
		int ret2;

		ret2 = sync_write_pointer_for_zoned(sctx,
						    chunk_logical + offset,
						    map->stripes[stripe_index].physical,
						    physical_end);
		if (ret2)
			ret = ret2;
	}

	return ret < 0 ?
ret : 0; 3747 } 3748 3749 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, 3750 struct btrfs_block_group *bg, 3751 struct btrfs_device *scrub_dev, 3752 u64 dev_offset, 3753 u64 dev_extent_len) 3754 { 3755 struct btrfs_fs_info *fs_info = sctx->fs_info; 3756 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 3757 struct map_lookup *map; 3758 struct extent_map *em; 3759 int i; 3760 int ret = 0; 3761 3762 read_lock(&map_tree->lock); 3763 em = lookup_extent_mapping(map_tree, bg->start, bg->length); 3764 read_unlock(&map_tree->lock); 3765 3766 if (!em) { 3767 /* 3768 * Might have been an unused block group deleted by the cleaner 3769 * kthread or relocation. 3770 */ 3771 spin_lock(&bg->lock); 3772 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) 3773 ret = -EINVAL; 3774 spin_unlock(&bg->lock); 3775 3776 return ret; 3777 } 3778 if (em->start != bg->start) 3779 goto out; 3780 if (em->len < dev_extent_len) 3781 goto out; 3782 3783 map = em->map_lookup; 3784 for (i = 0; i < map->num_stripes; ++i) { 3785 if (map->stripes[i].dev->bdev == scrub_dev->bdev && 3786 map->stripes[i].physical == dev_offset) { 3787 ret = scrub_stripe(sctx, bg, em, scrub_dev, i); 3788 if (ret) 3789 goto out; 3790 } 3791 } 3792 out: 3793 free_extent_map(em); 3794 3795 return ret; 3796 } 3797 3798 static int finish_extent_writes_for_zoned(struct btrfs_root *root, 3799 struct btrfs_block_group *cache) 3800 { 3801 struct btrfs_fs_info *fs_info = cache->fs_info; 3802 struct btrfs_trans_handle *trans; 3803 3804 if (!btrfs_is_zoned(fs_info)) 3805 return 0; 3806 3807 btrfs_wait_block_group_reservations(cache); 3808 btrfs_wait_nocow_writers(cache); 3809 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length); 3810 3811 trans = btrfs_join_transaction(root); 3812 if (IS_ERR(trans)) 3813 return PTR_ERR(trans); 3814 return btrfs_commit_transaction(trans); 3815 } 3816 3817 static noinline_for_stack 3818 int scrub_enumerate_chunks(struct scrub_ctx *sctx, 3819 struct btrfs_device *scrub_dev, u64 start, u64 end) 3820 { 3821 struct btrfs_dev_extent *dev_extent = NULL; 3822 struct btrfs_path *path; 3823 struct btrfs_fs_info *fs_info = sctx->fs_info; 3824 struct btrfs_root *root = fs_info->dev_root; 3825 u64 chunk_offset; 3826 int ret = 0; 3827 int ro_set; 3828 int slot; 3829 struct extent_buffer *l; 3830 struct btrfs_key key; 3831 struct btrfs_key found_key; 3832 struct btrfs_block_group *cache; 3833 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 3834 3835 path = btrfs_alloc_path(); 3836 if (!path) 3837 return -ENOMEM; 3838 3839 path->reada = READA_FORWARD; 3840 path->search_commit_root = 1; 3841 path->skip_locking = 1; 3842 3843 key.objectid = scrub_dev->devid; 3844 key.offset = 0ull; 3845 key.type = BTRFS_DEV_EXTENT_KEY; 3846 3847 while (1) { 3848 u64 dev_extent_len; 3849 3850 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3851 if (ret < 0) 3852 break; 3853 if (ret > 0) { 3854 if (path->slots[0] >= 3855 btrfs_header_nritems(path->nodes[0])) { 3856 ret = btrfs_next_leaf(root, path); 3857 if (ret < 0) 3858 break; 3859 if (ret > 0) { 3860 ret = 0; 3861 break; 3862 } 3863 } else { 3864 ret = 0; 3865 } 3866 } 3867 3868 l = path->nodes[0]; 3869 slot = path->slots[0]; 3870 3871 btrfs_item_key_to_cpu(l, &found_key, slot); 3872 3873 if (found_key.objectid != scrub_dev->devid) 3874 break; 3875 3876 if (found_key.type != BTRFS_DEV_EXTENT_KEY) 3877 break; 3878 3879 if (found_key.offset >= end) 3880 break; 3881 3882 if (found_key.offset < key.offset) 3883 break; 3884 3885 
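		/*
		 * The item belongs to the scrubbed device and starts before
		 * @end; decode its length and chunk offset to check whether
		 * it overlaps [start, end).
		 */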
static int finish_extent_writes_for_zoned(struct btrfs_root *root,
					  struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_trans_handle *trans;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	btrfs_wait_block_group_reservations(cache);
	btrfs_wait_nocow_writers(cache);
	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}

static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		u64 dev_extent_len;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + dev_extent_len <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * Get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it.
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		ASSERT(cache->start <= chunk_offset);
		/*
		 * We are using the commit root to search for device extents, so
		 * that means we could have found a device extent item from a
		 * block group that was deleted in the current transaction. The
		 * logical start offset of the deleted block group, stored at
		 * @chunk_offset, might be part of the logical address range of
		 * a new block group (which uses different physical extents).
		 * In this case btrfs_lookup_block_group() has returned the new
		 * block group, and its start address is less than @chunk_offset.
		 *
		 * We skip such new block groups, because it's pointless to
		 * process them, as we won't find their extents because we search
		 * for them using the commit root of the extent tree. For a device
		 * replace it's also fine to skip it, we won't miss copying them
		 * to the target device because we have the write duplication
		 * setup through the regular write path (by btrfs_map_block()),
		 * and we have committed a transaction when we started the device
		 * replace, right after setting up the device replace state.
		 */
		if (cache->start < chunk_offset) {
			btrfs_put_block_group(cache);
			goto skip;
		}

		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
				btrfs_put_block_group(cache);
				goto skip;
			}
		}

		/*
		 * Make sure that while we are scrubbing the corresponding block
		 * group doesn't get its logical address and its device extents
		 * reused for another block group, which can possibly be of a
		 * different type and different profile. We do this to prevent
		 * false error detections and crashes due to bogus attempts to
		 * repair extents.
		 */
		spin_lock(&cache->lock);
		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
			spin_unlock(&cache->lock);
			btrfs_put_block_group(cache);
			goto skip;
		}
		btrfs_freeze_block_group(cache);
		spin_unlock(&cache->lock);

		/*
		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);

		/*
		 * Don't do chunk preallocation for scrub.
		 *
		 * This is especially important for SYSTEM bgs, or we can hit
		 * -EFBIG from btrfs_finish_chunk_alloc() like:
		 * 1. The only SYSTEM bg is marked RO.
		 *    Since SYSTEM bgs are small, that's pretty common.
		 * 2. A new SYSTEM bg will be allocated,
		 *    because the regular path would allocate a new chunk.
		 * 3. The new SYSTEM bg is empty and will get cleaned up.
		 *    Before cleanup really happens, it's marked RO again.
		 * 4. The empty SYSTEM bg gets scrubbed.
		 *    We go back to 2.
		 *
		 * This can easily boost the number of SYSTEM chunks if the
		 * cleaner thread can't be triggered fast enough, and use up
		 * all the space of btrfs_super_block::sys_chunk_array.
		 *
		 * While for dev replace, we need to try our best to mark the
		 * block group RO, to prevent a race between:
		 * - Write duplication
		 *   Contains the latest data.
		 * - Scrub copy
		 *   Contains data from the commit tree.
		 *
		 * If the target block group is not marked RO, nocow writes can
		 * be overwritten by the scrub copy, causing data corruption.
		 * So for dev-replace, it's not allowed to continue if a block
		 * group is not RO.
		 */
		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
		if (!ret && sctx->is_dev_replace) {
			ret = finish_extent_writes_for_zoned(root, cache);
			if (ret) {
				btrfs_dec_block_group_ro(cache);
				scrub_pause_off(fs_info);
				btrfs_put_block_group(cache);
				break;
			}
		}

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * failed to create a new chunk for metadata.
			 * It is not a problem for scrub, because metadata is
			 * always COWed, and our scrub pauses transaction
			 * commits.
			 */
			ro_set = 0;
		} else if (ret == -ETXTBSY) {
			btrfs_warn(fs_info,
		   "skipping scrub of block group %llu due to active swapfile",
				   cache->start);
			scrub_pause_off(fs_info);
			ret = 0;
			goto skip_unfreeze;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
			scrub_pause_off(fs_info);
			break;
		}

		/*
		 * Now the target block group is marked RO, wait for nocow
		 * writes to finish before dev-replace.
		 * COW is fine, as COW never overwrites extents in the commit
		 * tree.
		 */
		if (sctx->is_dev_replace) {
			btrfs_wait_nocow_writers(cache);
			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
						 cache->length);
		}

		scrub_pause_off(fs_info);
		down_write(&dev_replace->rwsem);
		dev_replace->cursor_right = found_key.offset + dev_extent_len;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
				  dev_extent_len);

		/*
		 * Flush and submit all pending read and write bios, then
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * Must be called before we decrease @scrub_paused.
		 * Make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		if (sctx->is_dev_replace &&
		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
						      cache, found_key.offset))
			ro_set = 0;

		down_write(&dev_replace->rwsem);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
		    !cache->ro && cache->reserved == 0 && cache->used == 0) {
			spin_unlock(&cache->lock);
			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
				btrfs_discard_queue_work(&fs_info->discard_ctl,
							 cache);
			else
				btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}
skip_unfreeze:
		btrfs_unfreeze_block_group(cache);
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + dev_extent_len;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}
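/*
 * Condensed sketch of the per-block-group protocol used by
 * scrub_enumerate_chunks() above (error handling and the dev-replace
 * details omitted; this is a summary of the code above, not additional
 * kernel code):
 *
 *	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
 *	btrfs_freeze_block_group(cache);     // keep logical address stable
 *	scrub_pause_on(fs_info);             // don't block transaction commit
 *	ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
 *	scrub_pause_off(fs_info);
 *	ret = scrub_chunk(sctx, cache, scrub_dev, ...);
 *	if (ro_set)
 *		btrfs_dec_block_group_ro(cache);
 *	btrfs_unfreeze_block_group(cache);
 *	btrfs_put_block_group(cache);
 */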
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;
		if (!btrfs_check_super_location(scrub_dev, bytenr))
			continue;

		ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				    scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				    NULL, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
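/*
 * For reference, the super block copies visited above live at fixed
 * offsets: mirror 0 sits at 64KiB and each further mirror is shifted by
 * 12 more bits (64MiB, then 256GiB). The helper below is an illustrative
 * stand-in for btrfs_sb_offset(), with hard-coded constants assumed for
 * the example only:
 */
#if 0	/* example only, never compiled */
static unsigned long long example_sb_offset(int mirror)
{
	if (mirror)
		return 16384ULL << (12 * mirror);	/* 64MiB, 256GiB */
	return 65536ULL;				/* 64KiB primary copy */
}
#endif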
static void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
					&fs_info->scrub_lock)) {
		struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
		struct workqueue_struct *scrub_wr_comp =
					fs_info->scrub_wr_completion_workers;
		struct workqueue_struct *scrub_parity =
					fs_info->scrub_parity_workers;

		fs_info->scrub_workers = NULL;
		fs_info->scrub_wr_completion_workers = NULL;
		fs_info->scrub_parity_workers = NULL;
		mutex_unlock(&fs_info->scrub_lock);

		if (scrub_workers)
			destroy_workqueue(scrub_workers);
		if (scrub_wr_comp)
			destroy_workqueue(scrub_wr_comp);
		if (scrub_parity)
			destroy_workqueue(scrub_parity);
	}
}

/*
 * Get a reference count on fs_info->scrub_workers. Start the workers if
 * necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	struct workqueue_struct *scrub_workers = NULL;
	struct workqueue_struct *scrub_wr_comp = NULL;
	struct workqueue_struct *scrub_parity = NULL;
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;
	int ret = -ENOMEM;

	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
		return 0;

	scrub_workers = alloc_workqueue("btrfs-scrub", flags,
					is_dev_replace ? 1 : max_active);
	if (!scrub_workers)
		goto fail_scrub_workers;

	scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active);
	if (!scrub_wr_comp)
		goto fail_scrub_wr_completion_workers;

	scrub_parity = alloc_workqueue("btrfs-scrubparity", flags, max_active);
	if (!scrub_parity)
		goto fail_scrub_parity_workers;

	mutex_lock(&fs_info->scrub_lock);
	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
		ASSERT(fs_info->scrub_workers == NULL &&
		       fs_info->scrub_wr_completion_workers == NULL &&
		       fs_info->scrub_parity_workers == NULL);
		fs_info->scrub_workers = scrub_workers;
		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
		fs_info->scrub_parity_workers = scrub_parity;
		refcount_set(&fs_info->scrub_workers_refcnt, 1);
		mutex_unlock(&fs_info->scrub_lock);
		return 0;
	}
	/* Another thread raced in and created the workers for us */
	refcount_inc(&fs_info->scrub_workers_refcnt);
	mutex_unlock(&fs_info->scrub_lock);

	ret = 0;
	destroy_workqueue(scrub_parity);
fail_scrub_parity_workers:
	destroy_workqueue(scrub_wr_comp);
fail_scrub_wr_completion_workers:
	destroy_workqueue(scrub_workers);
fail_scrub_workers:
	return ret;
}
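/*
 * The get/put pair above follows a common refcounted lazy-init pattern:
 * take a reference on the fast path, otherwise allocate everything with no
 * locks held, then publish under the lock, and destroy the allocations if
 * another thread won the race. In outline (pseudocode summarizing the code
 * above, not extra kernel code):
 *
 *	if (refcount_inc_not_zero(&refs))
 *		return 0;                    // fast path, already running
 *	new = allocate();                    // may sleep, no locks held
 *	lock();
 *	if (refcount_read(&refs) == 0) {
 *		publish(new);
 *		refcount_set(&refs, 1);      // we are the first user
 *		unlock();
 *		return 0;
 *	}
 *	refcount_inc(&refs);                 // lost the race
 *	unlock();
 *	destroy(new);                        // free the spare copy
 */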
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;
	bool need_commit = false;

	if (btrfs_fs_closing(fs_info))
		return -EAGAIN;

	/* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
	ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);

	/*
	 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
	 * value (max nodesize / min sectorsize), thus nodesize should always
	 * be fine.
	 */
	ASSERT(fs_info->nodesize <=
	       SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);

	/* Allocate outside of device_list_mutex */
	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
	if (IS_ERR(sctx))
		return PTR_ERR(sctx);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret)
		goto out_free_ctx;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -ENODEV;
		goto out;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info,
			"scrub on devid %llu: filesystem on %s is not writable",
				 devid, btrfs_dev_name(dev));
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EIO;
		goto out;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EINPROGRESS;
		goto out;
	}
	up_read(&fs_info->dev_replace.rwsem);

	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid racing between
	 * committing the transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
	 * invoked by our callees. The pausing request is done when the
	 * transaction commit starts, and it blocks the transaction until scrub
	 * is paused (done at specific points at scrub_stripe() or right above
	 * before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		u64 old_super_errors;

		spin_lock(&sctx->stat_lock);
		old_super_errors = sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);

		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
		/*
		 * By holding the device list mutex, we can
		 * kick off writing the super in a log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);

		spin_lock(&sctx->stat_lock);
		/*
		 * Super block errors found, but we can not commit a
		 * transaction in the current context, since
		 * btrfs_commit_transaction() needs to pause the currently
		 * running scrub (held by ourselves).
		 */
		if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
			need_commit = true;
		spin_unlock(&sctx->stat_lock);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	if (!is_dev_replace)
		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
			   ret ? "not finished" : "finished", devid, ret);

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_workers_put(fs_info);
	scrub_put_ctx(sctx);

	/*
	 * We found some super block errors before, now try to force a
	 * transaction commit, as scrub has finished.
	 */
	if (need_commit) {
		struct btrfs_trans_handle *trans;

		trans = btrfs_start_transaction(fs_info->tree_root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_err(fs_info,
	"scrub: failed to start transaction to fix super block errors: %d", ret);
			return ret;
		}
		ret = btrfs_commit_transaction(trans);
		if (ret < 0)
			btrfs_err(fs_info,
	"scrub: failed to commit transaction to fix super block errors: %d", ret);
	}
	return ret;
out:
	scrub_workers_put(fs_info);
out_free_ctx:
	scrub_free_ctx(sctx);

	return ret;
}
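/*
 * Illustrative caller of btrfs_scrub_dev() above: a sketch of what an ioctl
 * handler might do, not the real one; the helper name is made up for this
 * example.
 */
#if 0	/* example only, never compiled */
static int example_scrub_whole_device(struct btrfs_fs_info *fs_info, u64 devid)
{
	struct btrfs_scrub_progress progress = { 0 };

	/*
	 * Scrub the full device range, with repair enabled (not readonly)
	 * and outside of a device replace.
	 */
	return btrfs_scrub_dev(fs_info, devid, 0, U64_MAX, &progress,
			       0 /* readonly */, 0 /* is_dev_replace */);
}
#endif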
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
				 u64 extent_logical, u32 extent_len,
				 u64 *extent_physical,
				 struct btrfs_device **extent_dev,
				 int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_io_context *bioc = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bioc, 0);
	if (ret || !bioc || mapped_length < extent_len ||
	    !bioc->stripes[0].dev->bdev) {
		btrfs_put_bioc(bioc);
		return;
	}

	*extent_physical = bioc->stripes[0].physical;
	*extent_mirror_num = bioc->mirror_num;
	*extent_dev = bioc->stripes[0].dev;
	btrfs_put_bioc(bioc);
}
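/*
 * Illustrative polling of scrub statistics through btrfs_scrub_progress()
 * above: -ENODEV means the device is unknown, -ENOTCONN means no scrub is
 * currently running on it. The progress field name used below is an
 * assumption about the btrfs_scrub_progress ABI, for the example only:
 */
#if 0	/* example only, never compiled */
static void example_report_progress(struct btrfs_fs_info *fs_info, u64 devid)
{
	struct btrfs_scrub_progress prog;

	if (btrfs_scrub_progress(fs_info, devid, &prog) == 0)
		pr_info("scrub: %llu data bytes scrubbed on devid %llu\n",
			prog.data_bytes_scrubbed, devid);
}
#endif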