1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2012 Fusion-io All rights reserved. 4 * Copyright (C) 2012 Intel Corp. All rights reserved. 5 */ 6 7 #include <linux/sched.h> 8 #include <linux/bio.h> 9 #include <linux/slab.h> 10 #include <linux/blkdev.h> 11 #include <linux/raid/pq.h> 12 #include <linux/hash.h> 13 #include <linux/list_sort.h> 14 #include <linux/raid/xor.h> 15 #include <linux/mm.h> 16 #include "messages.h" 17 #include "ctree.h" 18 #include "disk-io.h" 19 #include "volumes.h" 20 #include "raid56.h" 21 #include "async-thread.h" 22 #include "file-item.h" 23 #include "btrfs_inode.h" 24 25 /* set when additional merges to this rbio are not allowed */ 26 #define RBIO_RMW_LOCKED_BIT 1 27 28 /* 29 * set when this rbio is sitting in the hash, but it is just a cache 30 * of past RMW 31 */ 32 #define RBIO_CACHE_BIT 2 33 34 /* 35 * set when it is safe to trust the stripe_pages for caching 36 */ 37 #define RBIO_CACHE_READY_BIT 3 38 39 #define RBIO_CACHE_SIZE 1024 40 41 #define BTRFS_STRIPE_HASH_TABLE_BITS 11 42 43 static void dump_bioc(const struct btrfs_fs_info *fs_info, const struct btrfs_io_context *bioc) 44 { 45 if (unlikely(!bioc)) { 46 btrfs_crit(fs_info, "bioc=NULL"); 47 return; 48 } 49 btrfs_crit(fs_info, 50 "bioc logical=%llu full_stripe=%llu size=%llu map_type=0x%llx mirror=%u replace_nr_stripes=%u replace_stripe_src=%d num_stripes=%u", 51 bioc->logical, bioc->full_stripe_logical, bioc->size, 52 bioc->map_type, bioc->mirror_num, bioc->replace_nr_stripes, 53 bioc->replace_stripe_src, bioc->num_stripes); 54 for (int i = 0; i < bioc->num_stripes; i++) { 55 btrfs_crit(fs_info, " nr=%d devid=%llu physical=%llu", 56 i, bioc->stripes[i].dev->devid, 57 bioc->stripes[i].physical); 58 } 59 } 60 61 static void btrfs_dump_rbio(const struct btrfs_fs_info *fs_info, 62 const struct btrfs_raid_bio *rbio) 63 { 64 if (!IS_ENABLED(CONFIG_BTRFS_ASSERT)) 65 return; 66 67 dump_bioc(fs_info, rbio->bioc); 68 btrfs_crit(fs_info, 69 "rbio flags=0x%lx nr_sectors=%u nr_data=%u real_stripes=%u stripe_nsectors=%u sector_nsteps=%u scrubp=%u dbitmap=0x%lx", 70 rbio->flags, rbio->nr_sectors, rbio->nr_data, 71 rbio->real_stripes, rbio->stripe_nsectors, 72 rbio->sector_nsteps, rbio->scrubp, rbio->dbitmap); 73 } 74 75 #define ASSERT_RBIO(expr, rbio) \ 76 ({ \ 77 if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \ 78 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \ 79 (rbio)->bioc->fs_info : NULL; \ 80 \ 81 btrfs_dump_rbio(__fs_info, (rbio)); \ 82 } \ 83 ASSERT((expr)); \ 84 }) 85 86 #define ASSERT_RBIO_STRIPE(expr, rbio, stripe_nr) \ 87 ({ \ 88 if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \ 89 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \ 90 (rbio)->bioc->fs_info : NULL; \ 91 \ 92 btrfs_dump_rbio(__fs_info, (rbio)); \ 93 btrfs_crit(__fs_info, "stripe_nr=%d", (stripe_nr)); \ 94 } \ 95 ASSERT((expr)); \ 96 }) 97 98 #define ASSERT_RBIO_SECTOR(expr, rbio, sector_nr) \ 99 ({ \ 100 if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \ 101 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \ 102 (rbio)->bioc->fs_info : NULL; \ 103 \ 104 btrfs_dump_rbio(__fs_info, (rbio)); \ 105 btrfs_crit(__fs_info, "sector_nr=%d", (sector_nr)); \ 106 } \ 107 ASSERT((expr)); \ 108 }) 109 110 #define ASSERT_RBIO_LOGICAL(expr, rbio, logical) \ 111 ({ \ 112 if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) { \ 113 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? 
\ 114 (rbio)->bioc->fs_info : NULL; \ 115 \ 116 btrfs_dump_rbio(__fs_info, (rbio)); \ 117 btrfs_crit(__fs_info, "logical=%llu", (logical)); \ 118 } \ 119 ASSERT((expr)); \ 120 }) 121 122 /* Used by the raid56 code to lock stripes for read/modify/write */ 123 struct btrfs_stripe_hash { 124 struct list_head hash_list; 125 spinlock_t lock; 126 }; 127 128 /* Used by the raid56 code to lock stripes for read/modify/write */ 129 struct btrfs_stripe_hash_table { 130 struct list_head stripe_cache; 131 spinlock_t cache_lock; 132 int cache_size; 133 struct btrfs_stripe_hash table[]; 134 }; 135 136 /* 137 * The PFN may still be valid, but our paddrs should always be block size 138 * aligned, thus such -1 paddr is definitely not a valid one. 139 */ 140 #define INVALID_PADDR (~(phys_addr_t)0) 141 142 static void rmw_rbio_work(struct work_struct *work); 143 static void rmw_rbio_work_locked(struct work_struct *work); 144 static void index_rbio_pages(struct btrfs_raid_bio *rbio); 145 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio); 146 147 static int finish_parity_scrub(struct btrfs_raid_bio *rbio); 148 static void scrub_rbio_work_locked(struct work_struct *work); 149 150 static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio) 151 { 152 bitmap_free(rbio->error_bitmap); 153 bitmap_free(rbio->stripe_uptodate_bitmap); 154 kfree(rbio->stripe_pages); 155 kfree(rbio->bio_paddrs); 156 kfree(rbio->stripe_paddrs); 157 kfree(rbio->finish_pointers); 158 } 159 160 static void free_raid_bio(struct btrfs_raid_bio *rbio) 161 { 162 int i; 163 164 if (!refcount_dec_and_test(&rbio->refs)) 165 return; 166 167 WARN_ON(!list_empty(&rbio->stripe_cache)); 168 WARN_ON(!list_empty(&rbio->hash_list)); 169 WARN_ON(!bio_list_empty(&rbio->bio_list)); 170 171 for (i = 0; i < rbio->nr_pages; i++) { 172 if (rbio->stripe_pages[i]) { 173 __free_page(rbio->stripe_pages[i]); 174 rbio->stripe_pages[i] = NULL; 175 } 176 } 177 178 btrfs_put_bioc(rbio->bioc); 179 free_raid_bio_pointers(rbio); 180 kfree(rbio); 181 } 182 183 static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func) 184 { 185 INIT_WORK(&rbio->work, work_func); 186 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work); 187 } 188 189 /* 190 * the stripe hash table is used for locking, and to collect 191 * bios in hopes of making a full stripe 192 */ 193 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) 194 { 195 struct btrfs_stripe_hash_table *table; 196 struct btrfs_stripe_hash_table *x; 197 struct btrfs_stripe_hash *cur; 198 struct btrfs_stripe_hash *h; 199 unsigned int num_entries = 1U << BTRFS_STRIPE_HASH_TABLE_BITS; 200 201 if (info->stripe_hash_table) 202 return 0; 203 204 /* 205 * The table is large, starting with order 4 and can go as high as 206 * order 7 in case lock debugging is turned on. 207 * 208 * Try harder to allocate and fallback to vmalloc to lower the chance 209 * of a failing mount. 
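 * (The kvmalloc family of helpers already handles the kmalloc-then-vmalloc
 * fallback for us, which is why the table is released with kvfree().)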
210 */ 211 table = kvzalloc_flex(*table, table, num_entries, GFP_KERNEL); 212 if (!table) 213 return -ENOMEM; 214 215 spin_lock_init(&table->cache_lock); 216 INIT_LIST_HEAD(&table->stripe_cache); 217 218 h = table->table; 219 220 for (unsigned int i = 0; i < num_entries; i++) { 221 cur = h + i; 222 INIT_LIST_HEAD(&cur->hash_list); 223 spin_lock_init(&cur->lock); 224 } 225 226 x = cmpxchg(&info->stripe_hash_table, NULL, table); 227 kvfree(x); 228 return 0; 229 } 230 231 static void memcpy_from_bio_to_stripe(struct btrfs_raid_bio *rbio, unsigned int sector_nr) 232 { 233 const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE); 234 235 ASSERT(sector_nr < rbio->nr_sectors); 236 for (int i = 0; i < rbio->sector_nsteps; i++) { 237 unsigned int index = sector_nr * rbio->sector_nsteps + i; 238 phys_addr_t dst = rbio->stripe_paddrs[index]; 239 phys_addr_t src = rbio->bio_paddrs[index]; 240 241 ASSERT(dst != INVALID_PADDR); 242 ASSERT(src != INVALID_PADDR); 243 244 memcpy_page(phys_to_page(dst), offset_in_page(dst), 245 phys_to_page(src), offset_in_page(src), step); 246 } 247 } 248 249 /* 250 * caching an rbio means to copy anything from the 251 * bio_sectors array into the stripe_pages array. We 252 * use the page uptodate bit in the stripe cache array 253 * to indicate if it has valid data 254 * 255 * once the caching is done, we set the cache ready 256 * bit. 257 */ 258 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) 259 { 260 int i; 261 int ret; 262 263 ret = alloc_rbio_pages(rbio); 264 if (ret) 265 return; 266 267 for (i = 0; i < rbio->nr_sectors; i++) { 268 /* Some range not covered by bio (partial write), skip it */ 269 if (rbio->bio_paddrs[i * rbio->sector_nsteps] == INVALID_PADDR) { 270 /* 271 * Even if the sector is not covered by bio, if it is 272 * a data sector it should still be uptodate as it is 273 * read from disk. 274 */ 275 if (i < rbio->nr_data * rbio->stripe_nsectors) 276 ASSERT(test_bit(i, rbio->stripe_uptodate_bitmap)); 277 continue; 278 } 279 280 memcpy_from_bio_to_stripe(rbio, i); 281 set_bit(i, rbio->stripe_uptodate_bitmap); 282 } 283 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 284 } 285 286 /* 287 * we hash on the first logical address of the stripe 288 */ 289 static int rbio_bucket(struct btrfs_raid_bio *rbio) 290 { 291 u64 num = rbio->bioc->full_stripe_logical; 292 293 /* 294 * we shift down quite a bit. We're using byte 295 * addressing, and most of the lower bits are zeros. 296 * This tends to upset hash_64, and it consistently 297 * returns just one or two different values. 298 * 299 * shifting off the lower bits fixes things. 300 */ 301 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS); 302 } 303 304 /* Get the sector number of the first sector covered by @page_nr. */ 305 static u32 page_nr_to_sector_nr(struct btrfs_raid_bio *rbio, unsigned int page_nr) 306 { 307 u32 sector_nr; 308 309 ASSERT(page_nr < rbio->nr_pages); 310 311 sector_nr = (page_nr << PAGE_SHIFT) >> rbio->bioc->fs_info->sectorsize_bits; 312 ASSERT(sector_nr < rbio->nr_sectors); 313 return sector_nr; 314 } 315 316 /* 317 * Get the number of sectors covered by @page_nr. 318 * 319 * For bs > ps cases, the result will always be 1. 320 * For bs <= ps cases, the result will be ps / bs. 
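 * E.g. a 4K block size with 64K pages gives 16 sectors per page, while a
 * 16K block size with 4K pages gives 1.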
 */
static u32 page_nr_to_num_sectors(struct btrfs_raid_bio *rbio, unsigned int page_nr)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	u32 nr_sectors;

	ASSERT(page_nr < rbio->nr_pages);

	nr_sectors = round_up(PAGE_SIZE, fs_info->sectorsize) >> fs_info->sectorsize_bits;
	ASSERT(nr_sectors > 0);
	return nr_sectors;
}

static __maybe_unused bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
						      unsigned int page_nr)
{
	const u32 sector_nr = page_nr_to_sector_nr(rbio, page_nr);
	const u32 nr_bits = page_nr_to_num_sectors(rbio, page_nr);
	int i;

	ASSERT(page_nr < rbio->nr_pages);
	ASSERT(sector_nr + nr_bits < rbio->nr_sectors);

	for (i = sector_nr; i < sector_nr + nr_bits; i++) {
		if (!test_bit(i, rbio->stripe_uptodate_bitmap))
			return false;
	}
	return true;
}

/*
 * Update the stripe_paddrs[] array with the correct physical addresses.
 *
 * Should be called every time any page pointer in stripe_pages[] got modified.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
	u32 offset;
	int i;

	for (i = 0, offset = 0; i < rbio->nr_sectors * rbio->sector_nsteps;
	     i++, offset += step) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		if (!rbio->stripe_pages[page_index])
			continue;

		rbio->stripe_paddrs[i] = page_to_phys(rbio->stripe_pages[page_index]) +
					 offset_in_page(offset);
	}
}

static void steal_rbio_page(struct btrfs_raid_bio *src,
			    struct btrfs_raid_bio *dest, int page_nr)
{
	const u32 sector_nr = page_nr_to_sector_nr(src, page_nr);
	const u32 nr_bits = page_nr_to_num_sectors(src, page_nr);

	ASSERT(page_nr < src->nr_pages);
	ASSERT(sector_nr + nr_bits < src->nr_sectors);

	if (dest->stripe_pages[page_nr])
		__free_page(dest->stripe_pages[page_nr]);
	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
	src->stripe_pages[page_nr] = NULL;

	/* Also update the stripe_uptodate_bitmap bits. */
	bitmap_set(dest->stripe_uptodate_bitmap, sector_nr, nr_bits);
}

static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
{
	const int sector_nr = page_nr_to_sector_nr(rbio, page_nr);

	/*
	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
	 * we won't have a page which is half data half parity.
	 *
	 * Thus if the first sector of the page belongs to data stripes, then
	 * the full page belongs to data stripes.
	 */
	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
}

/*
 * Stealing an rbio means taking all the uptodate pages from the stripe array
 * in the source rbio and putting them into the destination rbio.
 *
 * This will also update the involved stripe_paddrs[] entries which are
 * referring to the old pages.
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		struct page *p = src->stripe_pages[i];

		/*
		 * We don't need to steal P/Q pages as they will always be
		 * regenerated for RMW or full write anyway.
427 */ 428 if (!is_data_stripe_page(src, i)) 429 continue; 430 431 /* 432 * If @src already has RBIO_CACHE_READY_BIT, it should have 433 * all data stripe pages present and uptodate. 434 */ 435 ASSERT(p); 436 ASSERT(full_page_sectors_uptodate(src, i)); 437 steal_rbio_page(src, dest, i); 438 } 439 index_stripe_sectors(dest); 440 index_stripe_sectors(src); 441 } 442 443 /* 444 * merging means we take the bio_list from the victim and 445 * splice it into the destination. The victim should 446 * be discarded afterwards. 447 * 448 * must be called with dest->rbio_list_lock held 449 */ 450 static void merge_rbio(struct btrfs_raid_bio *dest, 451 struct btrfs_raid_bio *victim) 452 { 453 bio_list_merge_init(&dest->bio_list, &victim->bio_list); 454 dest->bio_list_bytes += victim->bio_list_bytes; 455 /* Also inherit the bitmaps from @victim. */ 456 bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap, 457 dest->stripe_nsectors); 458 } 459 460 /* 461 * used to prune items that are in the cache. The caller 462 * must hold the hash table lock. 463 */ 464 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 465 { 466 int bucket = rbio_bucket(rbio); 467 struct btrfs_stripe_hash_table *table; 468 struct btrfs_stripe_hash *h; 469 int freeit = 0; 470 471 /* 472 * check the bit again under the hash table lock. 473 */ 474 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 475 return; 476 477 table = rbio->bioc->fs_info->stripe_hash_table; 478 h = table->table + bucket; 479 480 /* hold the lock for the bucket because we may be 481 * removing it from the hash table 482 */ 483 spin_lock(&h->lock); 484 485 /* 486 * hold the lock for the bio list because we need 487 * to make sure the bio list is empty 488 */ 489 spin_lock(&rbio->bio_list_lock); 490 491 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { 492 list_del_init(&rbio->stripe_cache); 493 table->cache_size -= 1; 494 freeit = 1; 495 496 /* if the bio list isn't empty, this rbio is 497 * still involved in an IO. We take it out 498 * of the cache list, and drop the ref that 499 * was held for the list. 
500 * 501 * If the bio_list was empty, we also remove 502 * the rbio from the hash_table, and drop 503 * the corresponding ref 504 */ 505 if (bio_list_empty(&rbio->bio_list)) { 506 if (!list_empty(&rbio->hash_list)) { 507 list_del_init(&rbio->hash_list); 508 refcount_dec(&rbio->refs); 509 BUG_ON(!list_empty(&rbio->plug_list)); 510 } 511 } 512 } 513 514 spin_unlock(&rbio->bio_list_lock); 515 spin_unlock(&h->lock); 516 517 if (freeit) 518 free_raid_bio(rbio); 519 } 520 521 /* 522 * prune a given rbio from the cache 523 */ 524 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 525 { 526 struct btrfs_stripe_hash_table *table; 527 528 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 529 return; 530 531 table = rbio->bioc->fs_info->stripe_hash_table; 532 533 spin_lock(&table->cache_lock); 534 __remove_rbio_from_cache(rbio); 535 spin_unlock(&table->cache_lock); 536 } 537 538 /* 539 * remove everything in the cache 540 */ 541 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info) 542 { 543 struct btrfs_stripe_hash_table *table; 544 struct btrfs_raid_bio *rbio; 545 546 table = info->stripe_hash_table; 547 548 spin_lock(&table->cache_lock); 549 while (!list_empty(&table->stripe_cache)) { 550 rbio = list_first_entry(&table->stripe_cache, 551 struct btrfs_raid_bio, stripe_cache); 552 __remove_rbio_from_cache(rbio); 553 } 554 spin_unlock(&table->cache_lock); 555 } 556 557 /* 558 * remove all cached entries and free the hash table 559 * used by unmount 560 */ 561 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info) 562 { 563 if (!info->stripe_hash_table) 564 return; 565 btrfs_clear_rbio_cache(info); 566 kvfree(info->stripe_hash_table); 567 info->stripe_hash_table = NULL; 568 } 569 570 /* 571 * insert an rbio into the stripe cache. It 572 * must have already been prepared by calling 573 * cache_rbio_pages 574 * 575 * If this rbio was already cached, it gets 576 * moved to the front of the lru. 577 * 578 * If the size of the rbio cache is too big, we 579 * prune an item. 580 */ 581 static void cache_rbio(struct btrfs_raid_bio *rbio) 582 { 583 struct btrfs_stripe_hash_table *table; 584 585 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) 586 return; 587 588 table = rbio->bioc->fs_info->stripe_hash_table; 589 590 spin_lock(&table->cache_lock); 591 spin_lock(&rbio->bio_list_lock); 592 593 /* bump our ref if we were not in the list before */ 594 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) 595 refcount_inc(&rbio->refs); 596 597 if (!list_empty(&rbio->stripe_cache)){ 598 list_move(&rbio->stripe_cache, &table->stripe_cache); 599 } else { 600 list_add(&rbio->stripe_cache, &table->stripe_cache); 601 table->cache_size += 1; 602 } 603 604 spin_unlock(&rbio->bio_list_lock); 605 606 if (table->cache_size > RBIO_CACHE_SIZE) { 607 struct btrfs_raid_bio *found; 608 609 found = list_last_entry(&table->stripe_cache, 610 struct btrfs_raid_bio, 611 stripe_cache); 612 613 if (found != rbio) 614 __remove_rbio_from_cache(found); 615 } 616 617 spin_unlock(&table->cache_lock); 618 } 619 620 /* 621 * helper function to run the xor_blocks api. It is only 622 * able to do MAX_XOR_BLOCKS at a time, so we need to 623 * loop through. 
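 * The destination buffer is expected at pages[src_cnt], i.e. right after the
 * last source pointer.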
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock(&rbio->bio_list_lock);
	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
		ret = 0;
	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
	spin_unlock(&rbio->bio_list_lock);

	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us. We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical)
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * A parity scrub needs to read the full stripe from the drive,
	 * check and repair the parity, and write the new results back.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD)
		return 0;

	return 1;
}

/* Return the sector index for @stripe_nr and @sector_nr. */
static unsigned int rbio_sector_index(const struct btrfs_raid_bio *rbio,
				      unsigned int stripe_nr,
				      unsigned int sector_nr)
{
	unsigned int ret;

	ASSERT_RBIO_STRIPE(stripe_nr < rbio->real_stripes, rbio, stripe_nr);
	ASSERT_RBIO_SECTOR(sector_nr < rbio->stripe_nsectors, rbio, sector_nr);

	ret = stripe_nr * rbio->stripe_nsectors + sector_nr;
	ASSERT(ret < rbio->nr_sectors);
	return ret;
}

/* Return the paddr array index for @stripe_nr, @sector_nr and @step_nr.
*/ 725 static unsigned int rbio_paddr_index(const struct btrfs_raid_bio *rbio, 726 unsigned int stripe_nr, 727 unsigned int sector_nr, 728 unsigned int step_nr) 729 { 730 unsigned int ret; 731 732 ASSERT_RBIO_SECTOR(step_nr < rbio->sector_nsteps, rbio, step_nr); 733 734 ret = rbio_sector_index(rbio, stripe_nr, sector_nr) * rbio->sector_nsteps + step_nr; 735 ASSERT(ret < rbio->nr_sectors * rbio->sector_nsteps); 736 return ret; 737 } 738 739 static phys_addr_t rbio_stripe_paddr(const struct btrfs_raid_bio *rbio, 740 unsigned int stripe_nr, unsigned int sector_nr, 741 unsigned int step_nr) 742 { 743 return rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, step_nr)]; 744 } 745 746 static phys_addr_t rbio_pstripe_paddr(const struct btrfs_raid_bio *rbio, 747 unsigned int sector_nr, unsigned int step_nr) 748 { 749 return rbio_stripe_paddr(rbio, rbio->nr_data, sector_nr, step_nr); 750 } 751 752 static phys_addr_t rbio_qstripe_paddr(const struct btrfs_raid_bio *rbio, 753 unsigned int sector_nr, unsigned int step_nr) 754 { 755 if (rbio->nr_data + 1 == rbio->real_stripes) 756 return INVALID_PADDR; 757 return rbio_stripe_paddr(rbio, rbio->nr_data + 1, sector_nr, step_nr); 758 } 759 760 /* Return a paddr pointer into the rbio::stripe_paddrs[] for the specified sector. */ 761 static phys_addr_t *rbio_stripe_paddrs(const struct btrfs_raid_bio *rbio, 762 unsigned int stripe_nr, unsigned int sector_nr) 763 { 764 return &rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, 0)]; 765 } 766 767 /* 768 * The first stripe in the table for a logical address 769 * has the lock. rbios are added in one of three ways: 770 * 771 * 1) Nobody has the stripe locked yet. The rbio is given 772 * the lock and 0 is returned. The caller must start the IO 773 * themselves. 774 * 775 * 2) Someone has the stripe locked, but we're able to merge 776 * with the lock owner. The rbio is freed and the IO will 777 * start automatically along with the existing rbio. 1 is returned. 778 * 779 * 3) Someone has the stripe locked, but we're not able to merge. 780 * The rbio is added to the lock owner's plug list, or merged into 781 * an rbio already on the plug list. When the lock owner unlocks, 782 * the next rbio on the list is run and the IO is started automatically. 783 * 1 is returned 784 * 785 * If we return 0, the caller still owns the rbio and must continue with 786 * IO submission. If we return 1, the caller must assume the rbio has 787 * already been freed. 788 */ 789 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) 790 { 791 struct btrfs_stripe_hash *h; 792 struct btrfs_raid_bio *cur; 793 struct btrfs_raid_bio *pending; 794 struct btrfs_raid_bio *freeit = NULL; 795 struct btrfs_raid_bio *cache_drop = NULL; 796 int ret = 0; 797 798 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio); 799 800 spin_lock(&h->lock); 801 list_for_each_entry(cur, &h->hash_list, hash_list) { 802 if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical) 803 continue; 804 805 spin_lock(&cur->bio_list_lock); 806 807 /* Can we steal this cached rbio's pages? */ 808 if (bio_list_empty(&cur->bio_list) && 809 list_empty(&cur->plug_list) && 810 test_bit(RBIO_CACHE_BIT, &cur->flags) && 811 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { 812 list_del_init(&cur->hash_list); 813 refcount_dec(&cur->refs); 814 815 steal_rbio(cur, rbio); 816 cache_drop = cur; 817 spin_unlock(&cur->bio_list_lock); 818 819 goto lockit; 820 } 821 822 /* Can we merge into the lock owner? 
		 */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones. We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock(&h->lock);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		free_raid_bio(freeit);
	return ret;
}

static void recover_rbio_work_locked(struct work_struct *work);

/*
 * called when rmw or parity rebuild is completed. If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock(&h->lock);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock(&h->lock);

			if (next->operation == BTRFS_RBIO_READ_REBUILD) {
				start_async_work(next, recover_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_rbio_work_locked);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t status)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = status;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t status)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	kfree(rbio->csum_buf);
	bitmap_free(rbio->csum_bitmap);
	rbio->csum_buf = NULL;
	rbio->csum_bitmap = NULL;

	/*
	 * Clear the data bitmap, as the rbio may be cached for later usage.
	 * Do this before unlock_stripe() so there will be no new bio added
	 * for this rbio.
	 */
	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	free_raid_bio(rbio);

	rbio_endio_bio_list(cur, status);
	if (extra)
		rbio_endio_bio_list(extra, status);
}

/*
 * Get paddr pointer for the sector specified by its @stripe_nr and @sector_nr.
 *
 * @rbio:		The raid bio
 * @stripe_nr:		Stripe number, valid range [0, real_stripes)
 * @sector_nr:		Sector number inside the stripe,
 *			valid range [0, stripe_nsectors)
 * @bio_list_only:	Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only use stripe_paddrs as a fallback.
 *
 * Return NULL if bio_list_only is set but the specified sector has no
 * corresponding bio.
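 * Otherwise fall back to the rbio's own stripe_paddrs[] copy of the sector.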
 */
static phys_addr_t *sector_paddrs_in_rbio(struct btrfs_raid_bio *rbio,
					  int stripe_nr, int sector_nr,
					  bool bio_list_only)
{
	phys_addr_t *ret = NULL;
	const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, 0);

	ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps);

	scoped_guard(spinlock, &rbio->bio_list_lock) {
		if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) {
			/* Don't return sector without a valid page pointer */
			if (rbio->bio_paddrs[index] != INVALID_PADDR)
				ret = &rbio->bio_paddrs[index];
			return ret;
		}
	}
	return &rbio->stripe_paddrs[index];
}

/*
 * Similar to sector_paddrs_in_rbio(), but with extra consideration for
 * bs > ps cases, where we can have multiple steps for a fs block.
 */
static phys_addr_t sector_paddr_in_rbio(struct btrfs_raid_bio *rbio,
					int stripe_nr, int sector_nr, int step_nr,
					bool bio_list_only)
{
	phys_addr_t ret = INVALID_PADDR;
	const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, step_nr);

	ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps);

	scoped_guard(spinlock, &rbio->bio_list_lock) {
		if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) {
			/* Don't return sector without a valid page pointer */
			if (rbio->bio_paddrs[index] != INVALID_PADDR)
				ret = rbio->bio_paddrs[index];
			return ret;
		}
	}
	return rbio->stripe_paddrs[index];
}

/*
 * Allocation and initial setup for the btrfs_raid_bio. Note that this does
 * not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_io_context *bioc)
{
	const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes;
	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
	const unsigned int num_pages = stripe_npages * real_stripes;
	const unsigned int stripe_nsectors =
		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	const unsigned int num_sectors = stripe_nsectors * real_stripes;
	const unsigned int step = min(fs_info->sectorsize, PAGE_SIZE);
	const unsigned int sector_nsteps = fs_info->sectorsize / step;
	struct btrfs_raid_bio *rbio;

	/*
	 * For bs <= ps cases, ps must be aligned to bs.
	 * For bs > ps cases, bs must be aligned to ps.
	 */
	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize) ||
	       IS_ALIGNED(fs_info->sectorsize, PAGE_SIZE));
	/*
	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
	 * (at most 16) should be no larger than BITS_PER_LONG.
	 */
	ASSERT(stripe_nsectors <= BITS_PER_LONG);

	/*
	 * Real stripes must be between 2 (2 disks RAID5, aka RAID1) and 256
	 * (limited by u8).
1089 */ 1090 ASSERT(real_stripes >= 2); 1091 ASSERT(real_stripes <= U8_MAX); 1092 1093 rbio = kzalloc_obj(*rbio, GFP_NOFS); 1094 if (!rbio) 1095 return ERR_PTR(-ENOMEM); 1096 rbio->stripe_pages = kzalloc_objs(struct page *, num_pages, GFP_NOFS); 1097 rbio->bio_paddrs = kzalloc_objs(phys_addr_t, 1098 num_sectors * sector_nsteps, GFP_NOFS); 1099 rbio->stripe_paddrs = kzalloc_objs(phys_addr_t, 1100 num_sectors * sector_nsteps, 1101 GFP_NOFS); 1102 rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS); 1103 rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS); 1104 rbio->stripe_uptodate_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS); 1105 1106 if (!rbio->stripe_pages || !rbio->bio_paddrs || !rbio->stripe_paddrs || 1107 !rbio->finish_pointers || !rbio->error_bitmap || !rbio->stripe_uptodate_bitmap) { 1108 free_raid_bio_pointers(rbio); 1109 kfree(rbio); 1110 return ERR_PTR(-ENOMEM); 1111 } 1112 for (int i = 0; i < num_sectors * sector_nsteps; i++) { 1113 rbio->stripe_paddrs[i] = INVALID_PADDR; 1114 rbio->bio_paddrs[i] = INVALID_PADDR; 1115 } 1116 1117 bio_list_init(&rbio->bio_list); 1118 init_waitqueue_head(&rbio->io_wait); 1119 INIT_LIST_HEAD(&rbio->plug_list); 1120 spin_lock_init(&rbio->bio_list_lock); 1121 INIT_LIST_HEAD(&rbio->stripe_cache); 1122 INIT_LIST_HEAD(&rbio->hash_list); 1123 btrfs_get_bioc(bioc); 1124 rbio->bioc = bioc; 1125 rbio->nr_pages = num_pages; 1126 rbio->nr_sectors = num_sectors; 1127 rbio->real_stripes = real_stripes; 1128 rbio->stripe_npages = stripe_npages; 1129 rbio->stripe_nsectors = stripe_nsectors; 1130 rbio->sector_nsteps = sector_nsteps; 1131 refcount_set(&rbio->refs, 1); 1132 atomic_set(&rbio->stripes_pending, 0); 1133 1134 ASSERT(btrfs_nr_parity_stripes(bioc->map_type)); 1135 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type); 1136 ASSERT(rbio->nr_data > 0); 1137 1138 return rbio; 1139 } 1140 1141 /* allocate pages for all the stripes in the bio, including parity */ 1142 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) 1143 { 1144 int ret; 1145 1146 ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, false); 1147 if (ret < 0) 1148 return ret; 1149 /* Mapping all sectors */ 1150 index_stripe_sectors(rbio); 1151 return 0; 1152 } 1153 1154 /* only allocate pages for p/q stripes */ 1155 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) 1156 { 1157 const int data_pages = rbio->nr_data * rbio->stripe_npages; 1158 int ret; 1159 1160 ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages, 1161 rbio->stripe_pages + data_pages, false); 1162 if (ret < 0) 1163 return ret; 1164 1165 index_stripe_sectors(rbio); 1166 return 0; 1167 } 1168 1169 /* 1170 * Return the total number of errors found in the vertical stripe of @sector_nr. 1171 * 1172 * @faila and @failb will also be updated to the first and second stripe 1173 * number of the errors. 1174 */ 1175 static int get_rbio_vertical_errors(struct btrfs_raid_bio *rbio, int sector_nr, 1176 int *faila, int *failb) 1177 { 1178 int stripe_nr; 1179 int found_errors = 0; 1180 1181 if (faila || failb) { 1182 /* 1183 * Both @faila and @failb should be valid pointers if any of 1184 * them is specified. 1185 */ 1186 ASSERT(faila && failb); 1187 *faila = -1; 1188 *failb = -1; 1189 } 1190 1191 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { 1192 int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr; 1193 1194 if (test_bit(total_sector_nr, rbio->error_bitmap)) { 1195 found_errors++; 1196 if (faila) { 1197 /* Update faila and failb. 
*/ 1198 if (*faila < 0) 1199 *faila = stripe_nr; 1200 else if (*failb < 0) 1201 *failb = stripe_nr; 1202 } 1203 } 1204 } 1205 return found_errors; 1206 } 1207 1208 static int bio_add_paddrs(struct bio *bio, phys_addr_t *paddrs, unsigned int nr_steps, 1209 unsigned int step) 1210 { 1211 int added = 0; 1212 int ret; 1213 1214 for (int i = 0; i < nr_steps; i++) { 1215 ret = bio_add_page(bio, phys_to_page(paddrs[i]), step, 1216 offset_in_page(paddrs[i])); 1217 if (ret != step) 1218 goto revert; 1219 added += ret; 1220 } 1221 return added; 1222 revert: 1223 /* 1224 * We don't need to revert the bvec, as the bio will be submitted immediately, 1225 * as long as the size is reduced the extra bvec will not be accessed. 1226 */ 1227 bio->bi_iter.bi_size -= added; 1228 return 0; 1229 } 1230 1231 /* 1232 * Add a single sector @sector into our list of bios for IO. 1233 * 1234 * Return 0 if everything went well. 1235 * Return <0 for error, and no byte will be added to @rbio. 1236 */ 1237 static int rbio_add_io_paddrs(struct btrfs_raid_bio *rbio, struct bio_list *bio_list, 1238 phys_addr_t *paddrs, unsigned int stripe_nr, 1239 unsigned int sector_nr, enum req_op op) 1240 { 1241 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; 1242 const u32 step = min(sectorsize, PAGE_SIZE); 1243 struct bio *last = bio_list->tail; 1244 int ret; 1245 struct bio *bio; 1246 struct btrfs_io_stripe *stripe; 1247 u64 disk_start; 1248 1249 /* 1250 * Note: here stripe_nr has taken device replace into consideration, 1251 * thus it can be larger than rbio->real_stripe. 1252 * So here we check against bioc->num_stripes, not rbio->real_stripes. 1253 */ 1254 ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes, 1255 rbio, stripe_nr); 1256 ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors, 1257 rbio, sector_nr); 1258 ASSERT(paddrs != NULL); 1259 1260 stripe = &rbio->bioc->stripes[stripe_nr]; 1261 disk_start = stripe->physical + sector_nr * sectorsize; 1262 1263 /* if the device is missing, just fail this stripe */ 1264 if (!stripe->dev->bdev) { 1265 int found_errors; 1266 1267 set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr, 1268 rbio->error_bitmap); 1269 1270 /* Check if we have reached tolerance early. 
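		 * If this vertical stripe already has more errors than the
		 * profile can tolerate, error out now instead of queuing more IO.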
*/ 1271 found_errors = get_rbio_vertical_errors(rbio, sector_nr, 1272 NULL, NULL); 1273 if (unlikely(found_errors > rbio->bioc->max_errors)) 1274 return -EIO; 1275 return 0; 1276 } 1277 1278 /* see if we can add this page onto our existing bio */ 1279 if (last) { 1280 u64 last_end = last->bi_iter.bi_sector << SECTOR_SHIFT; 1281 last_end += last->bi_iter.bi_size; 1282 1283 /* 1284 * we can't merge these if they are from different 1285 * devices or if they are not contiguous 1286 */ 1287 if (last_end == disk_start && !last->bi_status && 1288 last->bi_bdev == stripe->dev->bdev) { 1289 ret = bio_add_paddrs(last, paddrs, rbio->sector_nsteps, step); 1290 if (ret == sectorsize) 1291 return 0; 1292 } 1293 } 1294 1295 /* put a new bio on the list */ 1296 bio = bio_alloc(stripe->dev->bdev, 1297 max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1), 1298 op, GFP_NOFS); 1299 bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT; 1300 bio->bi_private = rbio; 1301 1302 ret = bio_add_paddrs(bio, paddrs, rbio->sector_nsteps, step); 1303 ASSERT(ret == sectorsize); 1304 bio_list_add(bio_list, bio); 1305 return 0; 1306 } 1307 1308 static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio) 1309 { 1310 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 1311 const u32 step = min(fs_info->sectorsize, PAGE_SIZE); 1312 const u32 step_bits = min(fs_info->sectorsize_bits, PAGE_SHIFT); 1313 struct bvec_iter iter = bio->bi_iter; 1314 phys_addr_t paddr; 1315 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - 1316 rbio->bioc->full_stripe_logical; 1317 1318 btrfs_bio_for_each_block(paddr, bio, &iter, step) { 1319 unsigned int index = (offset >> step_bits); 1320 1321 rbio->bio_paddrs[index] = paddr; 1322 offset += step; 1323 } 1324 } 1325 1326 /* 1327 * helper function to walk our bio list and populate the bio_pages array with 1328 * the result. This seems expensive, but it is faster than constantly 1329 * searching through the bio list as we setup the IO in finish_rmw or stripe 1330 * reconstruction. 1331 * 1332 * This must be called before you trust the answers from page_in_rbio 1333 */ 1334 static void index_rbio_pages(struct btrfs_raid_bio *rbio) 1335 { 1336 struct bio *bio; 1337 1338 spin_lock(&rbio->bio_list_lock); 1339 bio_list_for_each(bio, &rbio->bio_list) 1340 index_one_bio(rbio, bio); 1341 1342 spin_unlock(&rbio->bio_list_lock); 1343 } 1344 1345 static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio, 1346 struct raid56_bio_trace_info *trace_info) 1347 { 1348 const struct btrfs_io_context *bioc = rbio->bioc; 1349 int i; 1350 1351 ASSERT(bioc); 1352 1353 /* We rely on bio->bi_bdev to find the stripe number. 
*/ 1354 if (!bio->bi_bdev) 1355 goto not_found; 1356 1357 for (i = 0; i < bioc->num_stripes; i++) { 1358 if (bio->bi_bdev != bioc->stripes[i].dev->bdev) 1359 continue; 1360 trace_info->stripe_nr = i; 1361 trace_info->devid = bioc->stripes[i].dev->devid; 1362 trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - 1363 bioc->stripes[i].physical; 1364 return; 1365 } 1366 1367 not_found: 1368 trace_info->devid = -1; 1369 trace_info->offset = -1; 1370 trace_info->stripe_nr = -1; 1371 } 1372 1373 static inline void bio_list_put(struct bio_list *bio_list) 1374 { 1375 struct bio *bio; 1376 1377 while ((bio = bio_list_pop(bio_list))) 1378 bio_put(bio); 1379 } 1380 1381 static void assert_rbio(struct btrfs_raid_bio *rbio) 1382 { 1383 if (!IS_ENABLED(CONFIG_BTRFS_ASSERT)) 1384 return; 1385 1386 /* 1387 * At least two stripes (2 disks RAID5), and since real_stripes is U8, 1388 * we won't go beyond 256 disks anyway. 1389 */ 1390 ASSERT_RBIO(rbio->real_stripes >= 2, rbio); 1391 ASSERT_RBIO(rbio->nr_data > 0, rbio); 1392 1393 /* 1394 * This is another check to make sure nr data stripes is smaller 1395 * than total stripes. 1396 */ 1397 ASSERT_RBIO(rbio->nr_data < rbio->real_stripes, rbio); 1398 } 1399 1400 static inline void *kmap_local_paddr(phys_addr_t paddr) 1401 { 1402 /* The sector pointer must have a page mapped to it. */ 1403 ASSERT(paddr != INVALID_PADDR); 1404 1405 return kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr); 1406 } 1407 1408 static void generate_pq_vertical_step(struct btrfs_raid_bio *rbio, unsigned int sector_nr, 1409 unsigned int step_nr) 1410 { 1411 void **pointers = rbio->finish_pointers; 1412 const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE); 1413 int stripe; 1414 const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6; 1415 1416 /* First collect one sector from each data stripe */ 1417 for (stripe = 0; stripe < rbio->nr_data; stripe++) 1418 pointers[stripe] = kmap_local_paddr( 1419 sector_paddr_in_rbio(rbio, stripe, sector_nr, step_nr, 0)); 1420 1421 /* Then add the parity stripe */ 1422 pointers[stripe++] = kmap_local_paddr(rbio_pstripe_paddr(rbio, sector_nr, step_nr)); 1423 1424 if (has_qstripe) { 1425 /* 1426 * RAID6, add the qstripe and call the library function 1427 * to fill in our p/q 1428 */ 1429 pointers[stripe++] = kmap_local_paddr( 1430 rbio_qstripe_paddr(rbio, sector_nr, step_nr)); 1431 1432 assert_rbio(rbio); 1433 raid6_call.gen_syndrome(rbio->real_stripes, step, pointers); 1434 } else { 1435 /* raid5 */ 1436 memcpy(pointers[rbio->nr_data], pointers[0], step); 1437 run_xor(pointers + 1, rbio->nr_data - 1, step); 1438 } 1439 for (stripe = stripe - 1; stripe >= 0; stripe--) 1440 kunmap_local(pointers[stripe]); 1441 } 1442 1443 /* Generate PQ for one vertical stripe. */ 1444 static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr) 1445 { 1446 const bool has_qstripe = (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6); 1447 1448 for (int i = 0; i < rbio->sector_nsteps; i++) 1449 generate_pq_vertical_step(rbio, sectornr, i); 1450 1451 set_bit(rbio_sector_index(rbio, rbio->nr_data, sectornr), 1452 rbio->stripe_uptodate_bitmap); 1453 if (has_qstripe) 1454 set_bit(rbio_sector_index(rbio, rbio->nr_data + 1, sectornr), 1455 rbio->stripe_uptodate_bitmap); 1456 } 1457 1458 static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio, 1459 struct bio_list *bio_list) 1460 { 1461 /* The total sector number inside the full stripe. 
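	 * (i.e. stripe * stripe_nsectors + sectornr)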
	 */
	int total_sector_nr;
	int sectornr;
	int stripe;
	int ret;

	ASSERT(bio_list_size(bio_list) == 0);

	/* We should have at least one data sector. */
	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));

	/*
	 * Reset errors, as we may have errors inherited from a degraded
	 * write.
	 */
	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/*
	 * Start assembly. Make bios for everything from the higher layers (the
	 * bio_list in our rbio) and our P/Q. Ignore everything else.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		phys_addr_t *paddrs;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1);
			if (paddrs == NULL)
				continue;
		} else {
			paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_paddrs(rbio, bio_list, paddrs, stripe,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	if (likely(!rbio->bioc->replace_nr_stripes))
		return 0;

	/*
	 * Make a copy for the replace target device.
	 *
	 * Thus the source stripe number (in replace_stripe_src) should be valid.
	 */
	ASSERT(rbio->bioc->replace_stripe_src >= 0);

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		phys_addr_t *paddrs;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/*
		 * For RAID56, there is only one device that can be replaced,
		 * and replace_stripe_src indicates the stripe number we
		 * need to copy from.
		 */
		if (stripe != rbio->bioc->replace_stripe_src) {
			/*
			 * We can skip the whole stripe completely, note
			 * total_sector_nr will be increased by one anyway.
			 */
			ASSERT(sectornr == 0);
			total_sector_nr += rbio->stripe_nsectors - 1;
			continue;
		}

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1);
			if (paddrs == NULL)
				continue;
		} else {
			paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_paddrs(rbio, bio_list, paddrs,
					 rbio->real_stripes,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	return 0;
error:
	bio_list_put(bio_list);
	return -EIO;
}

static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->full_stripe_logical;
	int total_nr_sector = offset >> fs_info->sectorsize_bits;

	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);

	bitmap_set(rbio->error_bitmap, total_nr_sector,
		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);

	/*
	 * Special handling for raid56_alloc_missing_rbio() used by
	 * scrub/replace. Unlike the call path in raid56_parity_recover(), they
	 * pass an empty bio here.
Thus we have to find out the missing device 1580 * and mark the stripe error instead. 1581 */ 1582 if (bio->bi_iter.bi_size == 0) { 1583 bool found_missing = false; 1584 int stripe_nr; 1585 1586 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { 1587 if (!rbio->bioc->stripes[stripe_nr].dev->bdev) { 1588 found_missing = true; 1589 bitmap_set(rbio->error_bitmap, 1590 stripe_nr * rbio->stripe_nsectors, 1591 rbio->stripe_nsectors); 1592 } 1593 } 1594 ASSERT(found_missing); 1595 } 1596 } 1597 1598 /* 1599 * Return the index inside the rbio->stripe_sectors[] array. 1600 * 1601 * Return -1 if not found. 1602 */ 1603 static int find_stripe_sector_nr(struct btrfs_raid_bio *rbio, phys_addr_t paddr) 1604 { 1605 for (int i = 0; i < rbio->nr_sectors; i++) { 1606 if (rbio->stripe_paddrs[i * rbio->sector_nsteps] == paddr) 1607 return i; 1608 } 1609 return -1; 1610 } 1611 1612 /* 1613 * this sets each page in the bio uptodate. It should only be used on private 1614 * rbio pages, nothing that comes in from the higher layers 1615 */ 1616 static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio) 1617 { 1618 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; 1619 const u32 step = min(sectorsize, PAGE_SIZE); 1620 u32 offset = 0; 1621 phys_addr_t paddr; 1622 1623 ASSERT(!bio_flagged(bio, BIO_CLONED)); 1624 1625 btrfs_bio_for_each_block_all(paddr, bio, step) { 1626 /* Hitting the first step of a sector. */ 1627 if (IS_ALIGNED(offset, sectorsize)) { 1628 int sector_nr = find_stripe_sector_nr(rbio, paddr); 1629 1630 ASSERT(sector_nr >= 0); 1631 if (sector_nr >= 0) 1632 set_bit(sector_nr, rbio->stripe_uptodate_bitmap); 1633 } 1634 offset += step; 1635 } 1636 } 1637 1638 static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio) 1639 { 1640 phys_addr_t bvec_paddr = bvec_phys(bio_first_bvec_all(bio)); 1641 int i; 1642 1643 for (i = 0; i < rbio->nr_sectors; i++) { 1644 if (rbio->stripe_paddrs[i * rbio->sector_nsteps] == bvec_paddr) 1645 break; 1646 if (rbio->bio_paddrs[i * rbio->sector_nsteps] == bvec_paddr) 1647 break; 1648 } 1649 ASSERT(i < rbio->nr_sectors); 1650 return i; 1651 } 1652 1653 static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio) 1654 { 1655 int total_sector_nr = get_bio_sector_nr(rbio, bio); 1656 u32 bio_size = 0; 1657 struct bio_vec *bvec; 1658 int i; 1659 1660 bio_for_each_bvec_all(bvec, bio, i) 1661 bio_size += bvec->bv_len; 1662 1663 /* 1664 * Since we can have multiple bios touching the error_bitmap, we cannot 1665 * call bitmap_set() without protection. 1666 * 1667 * Instead use set_bit() for each bit, as set_bit() itself is atomic. 1668 */ 1669 for (i = total_sector_nr; i < total_sector_nr + 1670 (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++) 1671 set_bit(i, rbio->error_bitmap); 1672 } 1673 1674 /* Verify the data sectors at read time. */ 1675 static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio, 1676 struct bio *bio) 1677 { 1678 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 1679 const u32 step = min(fs_info->sectorsize, PAGE_SIZE); 1680 const u32 nr_steps = rbio->sector_nsteps; 1681 int total_sector_nr = get_bio_sector_nr(rbio, bio); 1682 u32 offset = 0; 1683 phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE]; 1684 phys_addr_t paddr; 1685 1686 /* No data csum for the whole stripe, no need to verify. */ 1687 if (!rbio->csum_bitmap || !rbio->csum_buf) 1688 return; 1689 1690 /* P/Q stripes, they have no data csum to verify against. 
*/ 1691 if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors) 1692 return; 1693 1694 btrfs_bio_for_each_block_all(paddr, bio, step) { 1695 u8 csum_buf[BTRFS_CSUM_SIZE]; 1696 u8 *expected_csum; 1697 1698 paddrs[(offset / step) % nr_steps] = paddr; 1699 offset += step; 1700 1701 /* Not yet covering the full fs block, continue to the next step. */ 1702 if (!IS_ALIGNED(offset, fs_info->sectorsize)) 1703 continue; 1704 1705 /* No csum for this sector, skip to the next sector. */ 1706 if (!test_bit(total_sector_nr, rbio->csum_bitmap)) 1707 continue; 1708 1709 expected_csum = rbio->csum_buf + total_sector_nr * fs_info->csum_size; 1710 btrfs_calculate_block_csum_pages(fs_info, paddrs, csum_buf); 1711 if (unlikely(memcmp(csum_buf, expected_csum, fs_info->csum_size) != 0)) 1712 set_bit(total_sector_nr, rbio->error_bitmap); 1713 total_sector_nr++; 1714 } 1715 } 1716 1717 static void raid_wait_read_end_io(struct bio *bio) 1718 { 1719 struct btrfs_raid_bio *rbio = bio->bi_private; 1720 1721 if (bio->bi_status) { 1722 rbio_update_error_bitmap(rbio, bio); 1723 } else { 1724 set_bio_pages_uptodate(rbio, bio); 1725 verify_bio_data_sectors(rbio, bio); 1726 } 1727 1728 bio_put(bio); 1729 if (atomic_dec_and_test(&rbio->stripes_pending)) 1730 wake_up(&rbio->io_wait); 1731 } 1732 1733 static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio, 1734 struct bio_list *bio_list) 1735 { 1736 struct bio *bio; 1737 1738 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); 1739 while ((bio = bio_list_pop(bio_list))) { 1740 bio->bi_end_io = raid_wait_read_end_io; 1741 1742 if (trace_raid56_read_enabled()) { 1743 struct raid56_bio_trace_info trace_info = { 0 }; 1744 1745 bio_get_trace_info(rbio, bio, &trace_info); 1746 trace_raid56_read(rbio, bio, &trace_info); 1747 } 1748 submit_bio(bio); 1749 } 1750 1751 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); 1752 } 1753 1754 static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio) 1755 { 1756 const int data_pages = rbio->nr_data * rbio->stripe_npages; 1757 int ret; 1758 1759 ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, false); 1760 if (ret < 0) 1761 return ret; 1762 1763 index_stripe_sectors(rbio); 1764 return 0; 1765 } 1766 1767 /* 1768 * We use plugging call backs to collect full stripes. 1769 * Any time we get a partial stripe write while plugged 1770 * we collect it into a list. When the unplug comes down, 1771 * we sort the list by logical block number and merge 1772 * everything we can into the same rbios 1773 */ 1774 struct btrfs_plug_cb { 1775 struct blk_plug_cb cb; 1776 struct btrfs_fs_info *info; 1777 struct list_head rbio_list; 1778 }; 1779 1780 /* 1781 * rbios on the plug list are sorted for easier merging. 
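 * They are compared by the logical sector of the first bio in each rbio.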
1782 */ 1783 static int plug_cmp(void *priv, const struct list_head *a, 1784 const struct list_head *b) 1785 { 1786 const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio, 1787 plug_list); 1788 const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, 1789 plug_list); 1790 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; 1791 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; 1792 1793 if (a_sector < b_sector) 1794 return -1; 1795 if (a_sector > b_sector) 1796 return 1; 1797 return 0; 1798 } 1799 1800 static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule) 1801 { 1802 struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb); 1803 struct btrfs_raid_bio *cur; 1804 struct btrfs_raid_bio *last = NULL; 1805 1806 list_sort(NULL, &plug->rbio_list, plug_cmp); 1807 1808 while (!list_empty(&plug->rbio_list)) { 1809 cur = list_first_entry(&plug->rbio_list, 1810 struct btrfs_raid_bio, plug_list); 1811 list_del_init(&cur->plug_list); 1812 1813 if (rbio_is_full(cur)) { 1814 /* We have a full stripe, queue it down. */ 1815 start_async_work(cur, rmw_rbio_work); 1816 continue; 1817 } 1818 if (last) { 1819 if (rbio_can_merge(last, cur)) { 1820 merge_rbio(last, cur); 1821 free_raid_bio(cur); 1822 continue; 1823 } 1824 start_async_work(last, rmw_rbio_work); 1825 } 1826 last = cur; 1827 } 1828 if (last) 1829 start_async_work(last, rmw_rbio_work); 1830 kfree(plug); 1831 } 1832 1833 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */ 1834 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio) 1835 { 1836 const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 1837 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT; 1838 const u64 full_stripe_start = rbio->bioc->full_stripe_logical; 1839 const u32 orig_len = orig_bio->bi_iter.bi_size; 1840 const u32 sectorsize = fs_info->sectorsize; 1841 u64 cur_logical; 1842 1843 ASSERT_RBIO_LOGICAL(orig_logical >= full_stripe_start && 1844 orig_logical + orig_len <= full_stripe_start + 1845 rbio->nr_data * BTRFS_STRIPE_LEN, 1846 rbio, orig_logical); 1847 1848 bio_list_add(&rbio->bio_list, orig_bio); 1849 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; 1850 1851 /* Update the dbitmap. */ 1852 for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len; 1853 cur_logical += sectorsize) { 1854 int bit = ((u32)(cur_logical - full_stripe_start) >> 1855 fs_info->sectorsize_bits) % rbio->stripe_nsectors; 1856 1857 set_bit(bit, &rbio->dbitmap); 1858 } 1859 } 1860 1861 /* 1862 * our main entry point for writes from the rest of the FS. 
1863 */ 1864 void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc) 1865 { 1866 struct btrfs_fs_info *fs_info = bioc->fs_info; 1867 struct btrfs_raid_bio *rbio; 1868 struct btrfs_plug_cb *plug = NULL; 1869 struct blk_plug_cb *cb; 1870 1871 rbio = alloc_rbio(fs_info, bioc); 1872 if (IS_ERR(rbio)) { 1873 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); 1874 bio_endio(bio); 1875 return; 1876 } 1877 rbio->operation = BTRFS_RBIO_WRITE; 1878 rbio_add_bio(rbio, bio); 1879 1880 /* 1881 * Don't plug on full rbios, just get them out the door 1882 * as quickly as we can 1883 */ 1884 if (!rbio_is_full(rbio)) { 1885 cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug)); 1886 if (cb) { 1887 plug = container_of(cb, struct btrfs_plug_cb, cb); 1888 if (!plug->info) { 1889 plug->info = fs_info; 1890 INIT_LIST_HEAD(&plug->rbio_list); 1891 } 1892 list_add_tail(&rbio->plug_list, &plug->rbio_list); 1893 return; 1894 } 1895 } 1896 1897 /* 1898 * Either we don't have any existing plug, or we're doing a full stripe, 1899 * queue the rmw work now. 1900 */ 1901 start_async_work(rbio, rmw_rbio_work); 1902 } 1903 1904 static int verify_one_sector(struct btrfs_raid_bio *rbio, 1905 int stripe_nr, int sector_nr) 1906 { 1907 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 1908 phys_addr_t *paddrs; 1909 u8 csum_buf[BTRFS_CSUM_SIZE]; 1910 u8 *csum_expected; 1911 1912 if (!rbio->csum_bitmap || !rbio->csum_buf) 1913 return 0; 1914 1915 /* No way to verify P/Q as they are not covered by data csum. */ 1916 if (stripe_nr >= rbio->nr_data) 1917 return 0; 1918 /* 1919 * If we're rebuilding a read, we have to use pages from the 1920 * bio list if possible. 1921 */ 1922 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { 1923 paddrs = sector_paddrs_in_rbio(rbio, stripe_nr, sector_nr, 0); 1924 } else { 1925 paddrs = rbio_stripe_paddrs(rbio, stripe_nr, sector_nr); 1926 } 1927 1928 csum_expected = rbio->csum_buf + 1929 (stripe_nr * rbio->stripe_nsectors + sector_nr) * 1930 fs_info->csum_size; 1931 btrfs_calculate_block_csum_pages(fs_info, paddrs, csum_buf); 1932 if (unlikely(memcmp(csum_buf, csum_expected, fs_info->csum_size) != 0)) 1933 return -EIO; 1934 return 0; 1935 } 1936 1937 static void recover_vertical_step(struct btrfs_raid_bio *rbio, 1938 unsigned int sector_nr, 1939 unsigned int step_nr, 1940 int faila, int failb, 1941 void **pointers, void **unmap_array) 1942 { 1943 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 1944 const u32 step = min(fs_info->sectorsize, PAGE_SIZE); 1945 int stripe_nr; 1946 1947 ASSERT(step_nr < rbio->sector_nsteps); 1948 ASSERT(sector_nr < rbio->stripe_nsectors); 1949 1950 /* 1951 * Setup our array of pointers with sectors from each stripe 1952 * 1953 * NOTE: store a duplicate array of pointers to preserve the 1954 * pointer order. 1955 */ 1956 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { 1957 phys_addr_t paddr; 1958 1959 /* 1960 * If we're rebuilding a read, we have to use pages from the 1961 * bio list if possible. 
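		 * The bio list pages hold the data the caller will actually
		 * see, so they take precedence over our private stripe pages;
		 * for the other operations the stripe pages are authoritative.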
1962 */ 1963 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { 1964 paddr = sector_paddr_in_rbio(rbio, stripe_nr, sector_nr, step_nr, 0); 1965 } else { 1966 paddr = rbio_stripe_paddr(rbio, stripe_nr, sector_nr, step_nr); 1967 } 1968 pointers[stripe_nr] = kmap_local_paddr(paddr); 1969 unmap_array[stripe_nr] = pointers[stripe_nr]; 1970 } 1971 1972 /* All raid6 handling here */ 1973 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) { 1974 /* Single failure, rebuild from parity raid5 style */ 1975 if (failb < 0) { 1976 if (faila == rbio->nr_data) 1977 /* 1978 * Just the P stripe has failed, without 1979 * a bad data or Q stripe. 1980 * We have nothing to do, just skip the 1981 * recovery for this stripe. 1982 */ 1983 goto cleanup; 1984 /* 1985 * a single failure in raid6 is rebuilt 1986 * in the pstripe code below 1987 */ 1988 goto pstripe; 1989 } 1990 1991 /* 1992 * If the q stripe is failed, do a pstripe reconstruction from 1993 * the xors. 1994 * If both the q stripe and the P stripe are failed, we're 1995 * here due to a crc mismatch and we can't give them the 1996 * data they want. 1997 */ 1998 if (failb == rbio->real_stripes - 1) { 1999 if (faila == rbio->real_stripes - 2) 2000 /* 2001 * Only P and Q are corrupted. 2002 * We only care about data stripes recovery, 2003 * can skip this vertical stripe. 2004 */ 2005 goto cleanup; 2006 /* 2007 * Otherwise we have one bad data stripe and 2008 * a good P stripe. raid5! 2009 */ 2010 goto pstripe; 2011 } 2012 2013 if (failb == rbio->real_stripes - 2) { 2014 raid6_datap_recov(rbio->real_stripes, step, 2015 faila, pointers); 2016 } else { 2017 raid6_2data_recov(rbio->real_stripes, step, 2018 faila, failb, pointers); 2019 } 2020 } else { 2021 void *p; 2022 2023 /* Rebuild from P stripe here (raid5 or raid6). */ 2024 ASSERT(failb == -1); 2025 pstripe: 2026 /* Copy parity block into failed block to start with */ 2027 memcpy(pointers[faila], pointers[rbio->nr_data], step); 2028 2029 /* Rearrange the pointer array */ 2030 p = pointers[faila]; 2031 for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1; 2032 stripe_nr++) 2033 pointers[stripe_nr] = pointers[stripe_nr + 1]; 2034 pointers[rbio->nr_data - 1] = p; 2035 2036 /* Xor in the rest */ 2037 run_xor(pointers, rbio->nr_data - 1, step); 2038 } 2039 2040 cleanup: 2041 for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--) 2042 kunmap_local(unmap_array[stripe_nr]); 2043 } 2044 2045 /* 2046 * Recover a vertical stripe specified by @sector_nr. 2047 * @*pointers are the pre-allocated pointers by the caller, so we don't 2048 * need to allocate/free the pointers again and again. 2049 */ 2050 static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr, 2051 void **pointers, void **unmap_array) 2052 { 2053 int found_errors; 2054 int faila; 2055 int failb; 2056 int ret = 0; 2057 2058 /* 2059 * Now we just use bitmap to mark the horizontal stripes in 2060 * which we have data when doing parity scrub. 2061 */ 2062 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && 2063 !test_bit(sector_nr, &rbio->dbitmap)) 2064 return 0; 2065 2066 found_errors = get_rbio_vertical_errors(rbio, sector_nr, &faila, 2067 &failb); 2068 /* 2069 * No errors in the vertical stripe, skip it. Can happen for recovery 2070 * which only part of a stripe failed csum check. 
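	 * Conversely, if too many sectors of this vertical stripe have failed
	 * (more than bioc->max_errors, e.g. more than two for RAID6), it can
	 * not be rebuilt and we return -EIO below.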
2071 */ 2072 if (!found_errors) 2073 return 0; 2074 2075 if (unlikely(found_errors > rbio->bioc->max_errors)) 2076 return -EIO; 2077 2078 for (int i = 0; i < rbio->sector_nsteps; i++) 2079 recover_vertical_step(rbio, sector_nr, i, faila, failb, 2080 pointers, unmap_array); 2081 if (faila >= 0) { 2082 ret = verify_one_sector(rbio, faila, sector_nr); 2083 if (ret < 0) 2084 return ret; 2085 2086 set_bit(rbio_sector_index(rbio, faila, sector_nr), 2087 rbio->stripe_uptodate_bitmap); 2088 } 2089 if (failb >= 0) { 2090 ret = verify_one_sector(rbio, failb, sector_nr); 2091 if (ret < 0) 2092 return ret; 2093 2094 set_bit(rbio_sector_index(rbio, failb, sector_nr), 2095 rbio->stripe_uptodate_bitmap); 2096 } 2097 return ret; 2098 } 2099 2100 static int recover_sectors(struct btrfs_raid_bio *rbio) 2101 { 2102 void **pointers = NULL; 2103 void **unmap_array = NULL; 2104 int sectornr; 2105 int ret = 0; 2106 2107 /* 2108 * @pointers array stores the pointer for each sector. 2109 * 2110 * @unmap_array stores copy of pointers that does not get reordered 2111 * during reconstruction so that kunmap_local works. 2112 */ 2113 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); 2114 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); 2115 if (!pointers || !unmap_array) { 2116 ret = -ENOMEM; 2117 goto out; 2118 } 2119 2120 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { 2121 spin_lock(&rbio->bio_list_lock); 2122 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 2123 spin_unlock(&rbio->bio_list_lock); 2124 } 2125 2126 index_rbio_pages(rbio); 2127 2128 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { 2129 ret = recover_vertical(rbio, sectornr, pointers, unmap_array); 2130 if (ret < 0) 2131 break; 2132 } 2133 2134 out: 2135 kfree(pointers); 2136 kfree(unmap_array); 2137 return ret; 2138 } 2139 2140 static void recover_rbio(struct btrfs_raid_bio *rbio) 2141 { 2142 struct bio_list bio_list = BIO_EMPTY_LIST; 2143 int total_sector_nr; 2144 int ret = 0; 2145 2146 /* 2147 * Either we're doing recover for a read failure or degraded write, 2148 * caller should have set error bitmap correctly. 2149 */ 2150 ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors)); 2151 2152 /* For recovery, we need to read all sectors including P/Q. */ 2153 ret = alloc_rbio_pages(rbio); 2154 if (ret < 0) 2155 goto out; 2156 2157 index_rbio_pages(rbio); 2158 2159 /* 2160 * Read everything that hasn't failed. However this time we will 2161 * not trust any cached sector. 2162 * As we may read out some stale data but higher layer is not reading 2163 * that stale part. 2164 * 2165 * So here we always re-read everything in recovery path. 2166 */ 2167 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; 2168 total_sector_nr++) { 2169 int stripe = total_sector_nr / rbio->stripe_nsectors; 2170 int sectornr = total_sector_nr % rbio->stripe_nsectors; 2171 phys_addr_t *paddrs; 2172 2173 /* 2174 * Skip the range which has error. It can be a range which is 2175 * marked error (for csum mismatch), or it can be a missing 2176 * device. 2177 */ 2178 if (!rbio->bioc->stripes[stripe].dev->bdev || 2179 test_bit(total_sector_nr, rbio->error_bitmap)) { 2180 /* 2181 * Also set the error bit for missing device, which 2182 * may not yet have its error bit set. 
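			 * A missing device has no bdev to read from, so
			 * marking its sectors here lets recover_sectors()
			 * treat them like any other per-sector error.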
			 */
			set_bit(total_sector_nr, rbio->error_bitmap);
			continue;
		}

		paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr);
		ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe,
					 sectornr, REQ_OP_READ);
		if (ret < 0) {
			bio_list_put(&bio_list);
			goto out;
		}
	}

	submit_read_wait_bio_list(rbio, &bio_list);
	ret = recover_sectors(rbio);
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void recover_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	if (!lock_stripe_add(rbio))
		recover_rbio(rbio);
}

static void recover_rbio_work_locked(struct work_struct *work)
{
	recover_rbio(container_of(work, struct btrfs_raid_bio, work));
}

static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
{
	bool found = false;
	int sector_nr;

	/*
	 * This is for extra RAID6 recovery tries, thus the mirror number
	 * should be larger than 2.
	 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
	 * RAID5 methods.
	 */
	ASSERT(mirror_num > 2);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;
		int faila;
		int failb;

		found_errors = get_rbio_vertical_errors(rbio, sector_nr,
							&faila, &failb);
		/* This vertical stripe doesn't have errors. */
		if (!found_errors)
			continue;

		/*
		 * If we found errors, there should be only one error marked
		 * by the previous set_rbio_range_error().
		 */
		ASSERT(found_errors == 1);
		found = true;

		/* Now select another stripe to mark as error. */
		failb = rbio->real_stripes - (mirror_num - 1);
		if (failb <= faila)
			failb--;

		/* Set the extra bit in the error bitmap. */
		if (failb >= 0)
			set_bit(failb * rbio->stripe_nsectors + sector_nr,
				rbio->error_bitmap);
	}

	/* We should have found at least one vertical stripe with an error. */
	ASSERT(found);
}

/*
 * The main entry point for reads from the higher layers. This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
			   int mirror_num)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
		bio_endio(bio);
		return;
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	rbio_add_bio(rbio, bio);

	set_rbio_range_error(rbio, bio);

	/*
	 * Loop retry:
	 * for 'mirror_num == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
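	 *
	 * As a purely illustrative example, for a 4-disk RAID6 profile
	 * (real_stripes == 4) with data stripe 0 already marked bad:
	 * mirror 3 additionally fails the P stripe (rebuild from Q), and
	 * mirror 4 additionally fails data stripe 1 (rebuild from P + Q).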
2290 */ 2291 if (mirror_num > 2) 2292 set_rbio_raid6_extra_error(rbio, mirror_num); 2293 2294 start_async_work(rbio, recover_rbio_work); 2295 } 2296 2297 static void fill_data_csums(struct btrfs_raid_bio *rbio) 2298 { 2299 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 2300 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, 2301 rbio->bioc->full_stripe_logical); 2302 const u64 start = rbio->bioc->full_stripe_logical; 2303 const u32 len = (rbio->nr_data * rbio->stripe_nsectors) << 2304 fs_info->sectorsize_bits; 2305 int ret; 2306 2307 /* The rbio should not have its csum buffer initialized. */ 2308 ASSERT(!rbio->csum_buf && !rbio->csum_bitmap); 2309 2310 /* 2311 * Skip the csum search if: 2312 * 2313 * - The rbio doesn't belong to data block groups 2314 * Then we are doing IO for tree blocks, no need to search csums. 2315 * 2316 * - The rbio belongs to mixed block groups 2317 * This is to avoid deadlock, as we're already holding the full 2318 * stripe lock, if we trigger a metadata read, and it needs to do 2319 * raid56 recovery, we will deadlock. 2320 */ 2321 if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) || 2322 rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA) 2323 return; 2324 2325 rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors * 2326 fs_info->csum_size, GFP_NOFS); 2327 rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors, 2328 GFP_NOFS); 2329 if (!rbio->csum_buf || !rbio->csum_bitmap) { 2330 ret = -ENOMEM; 2331 goto error; 2332 } 2333 2334 ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1, 2335 rbio->csum_buf, rbio->csum_bitmap); 2336 if (ret < 0) 2337 goto error; 2338 if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits)) 2339 goto no_csum; 2340 return; 2341 2342 error: 2343 /* 2344 * We failed to allocate memory or grab the csum, but it's not fatal, 2345 * we can still continue. But better to warn users that RMW is no 2346 * longer safe for this particular sub-stripe write. 2347 */ 2348 btrfs_warn_rl(fs_info, 2349 "sub-stripe write for full stripe %llu is not safe, failed to get csum: %d", 2350 rbio->bioc->full_stripe_logical, ret); 2351 no_csum: 2352 kfree(rbio->csum_buf); 2353 bitmap_free(rbio->csum_bitmap); 2354 rbio->csum_buf = NULL; 2355 rbio->csum_bitmap = NULL; 2356 } 2357 2358 static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio) 2359 { 2360 struct bio_list bio_list = BIO_EMPTY_LIST; 2361 int total_sector_nr; 2362 int ret = 0; 2363 2364 /* 2365 * Fill the data csums we need for data verification. We need to fill 2366 * the csum_bitmap/csum_buf first, as our endio function will try to 2367 * verify the data sectors. 2368 */ 2369 fill_data_csums(rbio); 2370 2371 /* 2372 * Build a list of bios to read all sectors (including data and P/Q). 2373 * 2374 * This behavior is to compensate the later csum verification and recovery. 2375 */ 2376 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; 2377 total_sector_nr++) { 2378 int stripe = total_sector_nr / rbio->stripe_nsectors; 2379 int sectornr = total_sector_nr % rbio->stripe_nsectors; 2380 phys_addr_t *paddrs; 2381 2382 paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr); 2383 ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe, 2384 sectornr, REQ_OP_READ); 2385 if (ret) { 2386 bio_list_put(&bio_list); 2387 return ret; 2388 } 2389 } 2390 2391 /* 2392 * We may or may not have any corrupted sectors (including missing dev 2393 * and csum mismatch), just let recover_sectors() to handle them all. 
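	 * Read failures and csum mismatches have already been recorded in the
	 * error bitmap by the read end_io path, so recover_sectors() below
	 * only rebuilds the vertical stripes that actually have errors.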
2394 */ 2395 submit_read_wait_bio_list(rbio, &bio_list); 2396 return recover_sectors(rbio); 2397 } 2398 2399 static void raid_wait_write_end_io(struct bio *bio) 2400 { 2401 struct btrfs_raid_bio *rbio = bio->bi_private; 2402 2403 if (bio->bi_status) 2404 rbio_update_error_bitmap(rbio, bio); 2405 bio_put(bio); 2406 if (atomic_dec_and_test(&rbio->stripes_pending)) 2407 wake_up(&rbio->io_wait); 2408 } 2409 2410 static void submit_write_bios(struct btrfs_raid_bio *rbio, 2411 struct bio_list *bio_list) 2412 { 2413 struct bio *bio; 2414 2415 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); 2416 while ((bio = bio_list_pop(bio_list))) { 2417 bio->bi_end_io = raid_wait_write_end_io; 2418 2419 if (trace_raid56_write_enabled()) { 2420 struct raid56_bio_trace_info trace_info = { 0 }; 2421 2422 bio_get_trace_info(rbio, bio, &trace_info); 2423 trace_raid56_write(rbio, bio, &trace_info); 2424 } 2425 submit_bio(bio); 2426 } 2427 } 2428 2429 /* 2430 * To determine if we need to read any sector from the disk. 2431 * Should only be utilized in RMW path, to skip cached rbio. 2432 */ 2433 static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio) 2434 { 2435 int i; 2436 2437 for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) { 2438 phys_addr_t paddr = rbio->stripe_paddrs[i * rbio->sector_nsteps]; 2439 2440 /* 2441 * We have a sector which doesn't have page nor uptodate, 2442 * thus this rbio can not be cached one, as cached one must 2443 * have all its data sectors present and uptodate. 2444 */ 2445 if (paddr == INVALID_PADDR || 2446 !test_bit(i, rbio->stripe_uptodate_bitmap)) 2447 return true; 2448 } 2449 return false; 2450 } 2451 2452 static void rmw_rbio(struct btrfs_raid_bio *rbio) 2453 { 2454 struct bio_list bio_list; 2455 int sectornr; 2456 int ret = 0; 2457 2458 /* 2459 * Allocate the pages for parity first, as P/Q pages will always be 2460 * needed for both full-stripe and sub-stripe writes. 2461 */ 2462 ret = alloc_rbio_parity_pages(rbio); 2463 if (ret < 0) 2464 goto out; 2465 2466 /* 2467 * Either full stripe write, or we have every data sector already 2468 * cached, can go to write path immediately. 2469 */ 2470 if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) { 2471 /* 2472 * Now we're doing sub-stripe write, also need all data stripes 2473 * to do the full RMW. 2474 */ 2475 ret = alloc_rbio_data_pages(rbio); 2476 if (ret < 0) 2477 goto out; 2478 2479 index_rbio_pages(rbio); 2480 2481 ret = rmw_read_wait_recover(rbio); 2482 if (ret < 0) 2483 goto out; 2484 } 2485 2486 /* 2487 * At this stage we're not allowed to add any new bios to the 2488 * bio list any more, anyone else that wants to change this stripe 2489 * needs to do their own rmw. 2490 */ 2491 spin_lock(&rbio->bio_list_lock); 2492 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 2493 spin_unlock(&rbio->bio_list_lock); 2494 2495 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); 2496 2497 index_rbio_pages(rbio); 2498 2499 /* 2500 * We don't cache full rbios because we're assuming 2501 * the higher layers are unlikely to use this area of 2502 * the disk again soon. If they do use it again, 2503 * hopefully they will send another full bio. 
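	 * Keeping a partial rbio cached means its stripe pages stay uptodate,
	 * so a later sub-stripe write to the same full stripe can skip the
	 * read half of the RMW cycle (see need_read_stripe_sectors()).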
	 */
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
		generate_pq_vertical(rbio, sectornr);

	bio_list_init(&bio_list);
	ret = rmw_assemble_write_bios(rbio, &bio_list);
	if (ret < 0)
		goto out;

	/* We should have at least one bio assembled. */
	ASSERT(bio_list_size(&bio_list));
	submit_write_bios(rbio, &bio_list);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);

	/* We may have more errors than our tolerance during the write. */
	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		int found_errors;

		found_errors = get_rbio_vertical_errors(rbio, sectornr, NULL, NULL);
		if (unlikely(found_errors > rbio->bioc->max_errors)) {
			ret = -EIO;
			break;
		}
	}
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void rmw_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	if (lock_stripe_add(rbio) == 0)
		rmw_rbio(rbio);
}

static void rmw_rbio_work_locked(struct work_struct *work)
{
	rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bioc.
 *
 * Note: We need to make sure all the pages added into the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace.
 * That is, those pages just hold metadata or file data with checksums.
 */

struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
				struct btrfs_io_context *bioc,
				struct btrfs_device *scrub_dev,
				unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
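	 * E.g. for a 4-disk RAID6 the stripes array is laid out as
	 * [D0, D1, P, Q], so only indices nr_data .. real_stripes - 1 need
	 * to be checked against @scrub_dev.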
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bioc->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT_RBIO_STRIPE(i < rbio->real_stripes, rbio, i);

	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
	return rbio;
}

static int alloc_rbio_sector_pages(struct btrfs_raid_bio *rbio,
				   int sector_nr)
{
	const u32 step = min(PAGE_SIZE, rbio->bioc->fs_info->sectorsize);
	const u32 base = sector_nr * rbio->sector_nsteps;

	for (int i = base; i < base + rbio->sector_nsteps; i++) {
		const unsigned int page_index = (i * step) >> PAGE_SHIFT;
		struct page *page;

		if (rbio->stripe_pages[page_index])
			continue;
		page = alloc_page(GFP_NOFS);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[page_index] = page;
	}
	return 0;
}

/*
 * We only scrub the parity of horizontal stripes for which we have correct
 * data, so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int total_sector_nr;

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int ret;

		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;
		ret = alloc_rbio_sector_pages(rbio, total_sector_nr);
		if (ret < 0)
			return ret;
	}
	index_stripe_sectors(rbio);
	return 0;
}

/* Return true if the content of the step matches the calculated one. */
static bool verify_one_parity_step(struct btrfs_raid_bio *rbio,
				   void *pointers[], unsigned int sector_nr,
				   unsigned int step_nr)
{
	const unsigned int nr_data = rbio->nr_data;
	const bool has_qstripe = (rbio->real_stripes - rbio->nr_data == 2);
	const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
	void *parity;
	bool ret = false;

	ASSERT(step_nr < rbio->sector_nsteps);

	/* First collect one page from each data stripe. */
	for (int stripe = 0; stripe < nr_data; stripe++)
		pointers[stripe] = kmap_local_paddr(
				sector_paddr_in_rbio(rbio, stripe, sector_nr,
						     step_nr, 0));

	if (has_qstripe) {
		assert_rbio(rbio);
		/* RAID6, call the library function to fill in our P/Q. */
		raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
	} else {
		/* RAID5. */
		memcpy(pointers[nr_data], pointers[0], step);
		run_xor(pointers + 1, nr_data - 1, step);
	}

	/* Check the parity being scrubbed and repair it. */
	parity = kmap_local_paddr(rbio_stripe_paddr(rbio, rbio->scrubp, sector_nr, step_nr));
	if (memcmp(parity, pointers[rbio->scrubp], step) != 0)
		memcpy(parity, pointers[rbio->scrubp], step);
	else
		ret = true;
	kunmap_local(parity);

	for (int stripe = nr_data - 1; stripe >= 0; stripe--)
		kunmap_local(pointers[stripe]);
	return ret;
}

/*
 * The @pointers array should have the P/Q parity already mapped.
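 *
 * Each step is verified (and repaired in place) independently; the dbitmap
 * bit for this sector is cleared only when every step already matched, so a
 * single mismatching step keeps the whole sector scheduled for writeback.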
2685 */ 2686 static void verify_one_parity_sector(struct btrfs_raid_bio *rbio, 2687 void *pointers[], unsigned int sector_nr) 2688 { 2689 bool found_error = false; 2690 2691 for (int step_nr = 0; step_nr < rbio->sector_nsteps; step_nr++) { 2692 bool match; 2693 2694 match = verify_one_parity_step(rbio, pointers, sector_nr, step_nr); 2695 if (!match) 2696 found_error = true; 2697 } 2698 if (!found_error) 2699 bitmap_clear(&rbio->dbitmap, sector_nr, 1); 2700 } 2701 2702 static int finish_parity_scrub(struct btrfs_raid_bio *rbio) 2703 { 2704 struct btrfs_io_context *bioc = rbio->bioc; 2705 void **pointers = rbio->finish_pointers; 2706 unsigned long *pbitmap = &rbio->finish_pbitmap; 2707 int nr_data = rbio->nr_data; 2708 int sectornr; 2709 bool has_qstripe; 2710 struct page *page; 2711 phys_addr_t p_paddr = INVALID_PADDR; 2712 phys_addr_t q_paddr = INVALID_PADDR; 2713 struct bio_list bio_list; 2714 int is_replace = 0; 2715 int ret; 2716 2717 bio_list_init(&bio_list); 2718 2719 if (rbio->real_stripes - rbio->nr_data == 1) 2720 has_qstripe = false; 2721 else if (rbio->real_stripes - rbio->nr_data == 2) 2722 has_qstripe = true; 2723 else 2724 BUG(); 2725 2726 /* 2727 * Replace is running and our P/Q stripe is being replaced, then we 2728 * need to duplicate the final write to replace target. 2729 */ 2730 if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) { 2731 is_replace = 1; 2732 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors); 2733 } 2734 2735 /* 2736 * Because the higher layers(scrubber) are unlikely to 2737 * use this area of the disk again soon, so don't cache 2738 * it. 2739 */ 2740 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 2741 2742 page = alloc_page(GFP_NOFS); 2743 if (!page) 2744 return -ENOMEM; 2745 p_paddr = page_to_phys(page); 2746 page = NULL; 2747 pointers[nr_data] = kmap_local_paddr(p_paddr); 2748 2749 if (has_qstripe) { 2750 /* RAID6, allocate and map temp space for the Q stripe */ 2751 page = alloc_page(GFP_NOFS); 2752 if (!page) { 2753 __free_page(phys_to_page(p_paddr)); 2754 p_paddr = INVALID_PADDR; 2755 return -ENOMEM; 2756 } 2757 q_paddr = page_to_phys(page); 2758 page = NULL; 2759 pointers[rbio->real_stripes - 1] = kmap_local_paddr(q_paddr); 2760 } 2761 2762 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); 2763 2764 /* Map the parity stripe just once */ 2765 2766 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) 2767 verify_one_parity_sector(rbio, pointers, sectornr); 2768 2769 kunmap_local(pointers[nr_data]); 2770 __free_page(phys_to_page(p_paddr)); 2771 p_paddr = INVALID_PADDR; 2772 if (q_paddr != INVALID_PADDR) { 2773 __free_page(phys_to_page(q_paddr)); 2774 q_paddr = INVALID_PADDR; 2775 } 2776 2777 /* 2778 * time to start writing. Make bios for everything from the 2779 * higher layers (the bio_list in our rbio) and our p/q. Ignore 2780 * everything else. 2781 */ 2782 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { 2783 phys_addr_t *paddrs; 2784 2785 paddrs = rbio_stripe_paddrs(rbio, rbio->scrubp, sectornr); 2786 ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, rbio->scrubp, 2787 sectornr, REQ_OP_WRITE); 2788 if (ret) 2789 goto cleanup; 2790 } 2791 2792 if (!is_replace) 2793 goto submit_write; 2794 2795 /* 2796 * Replace is running and our parity stripe needs to be duplicated to 2797 * the target device. Check we have a valid source stripe number. 
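	 * Note that @pbitmap was copied from the dbitmap before the
	 * verification pass cleared the bits of already-matching sectors, so
	 * the replace target receives every sector we originally intended to
	 * scrub, not only the repaired ones.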
	 */
	ASSERT_RBIO(rbio->bioc->replace_stripe_src >= 0, rbio);
	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
		phys_addr_t *paddrs;

		paddrs = rbio_stripe_paddrs(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, rbio->real_stripes,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

submit_write:
	submit_write_bios(rbio, &bio_list);
	return 0;

cleanup:
	bio_list_put(&bio_list);
	return ret;
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sector_nr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores copy of pointers that does not get reordered
	 * during reconstruction so that kunmap_local works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int dfail = 0, failp = -1;
		int faila;
		int failb;
		int found_errors;

		found_errors = get_rbio_vertical_errors(rbio, sector_nr,
							&faila, &failb);
		if (unlikely(found_errors > rbio->bioc->max_errors)) {
			ret = -EIO;
			goto out;
		}
		if (found_errors == 0)
			continue;

		/* We should have at least one error here. */
		ASSERT(faila >= 0 || failb >= 0);

		if (is_data_stripe(rbio, faila))
			dfail++;
		else if (is_parity_stripe(faila))
			failp = faila;

		if (is_data_stripe(rbio, failb))
			dfail++;
		else if (is_parity_stripe(failb))
			failp = failb;
		/*
		 * Because we cannot use the parity being scrubbed to repair
		 * the data, our repair capability is reduced by one. (In the
		 * RAID5 case, we cannot repair anything.)
		 */
		if (unlikely(dfail > rbio->bioc->max_errors - 1)) {
			ret = -EIO;
			goto out;
		}
		/*
		 * If all the data is good and only the parity is corrupted,
		 * just repair the parity, there is no need to recover any
		 * data stripes.
		 */
		if (dfail == 0)
			continue;

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6. If the corrupted parity is the one being
		 * scrubbed, we can luckily use the other parity to repair
		 * the data; otherwise we cannot repair the data stripe.
		 */
		if (unlikely(failp != rbio->scrubp)) {
			ret = -EIO;
			goto out;
		}

		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
		if (ret < 0)
			goto out;
	}
out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/* Build a list of bios to read all the missing parts.
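	 * Only vertical stripes with a bit set in the dbitmap matter for the
	 * parity scrub; sectors already supplied by the bio list or already
	 * marked uptodate are skipped below.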
*/ 2917 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; 2918 total_sector_nr++) { 2919 int sectornr = total_sector_nr % rbio->stripe_nsectors; 2920 int stripe = total_sector_nr / rbio->stripe_nsectors; 2921 phys_addr_t *paddrs; 2922 2923 /* No data in the vertical stripe, no need to read. */ 2924 if (!test_bit(sectornr, &rbio->dbitmap)) 2925 continue; 2926 2927 /* 2928 * We want to find all the sectors missing from the rbio and 2929 * read them from the disk. If sector_paddr_in_rbio() finds a sector 2930 * in the bio list we don't need to read it off the stripe. 2931 */ 2932 paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1); 2933 if (paddrs == NULL) 2934 continue; 2935 2936 paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr); 2937 /* 2938 * The bio cache may have handed us an uptodate sector. If so, 2939 * use it. 2940 */ 2941 if (test_bit(rbio_sector_index(rbio, stripe, sectornr), 2942 rbio->stripe_uptodate_bitmap)) 2943 continue; 2944 2945 ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe, 2946 sectornr, REQ_OP_READ); 2947 if (ret) { 2948 bio_list_put(&bio_list); 2949 return ret; 2950 } 2951 } 2952 2953 submit_read_wait_bio_list(rbio, &bio_list); 2954 return 0; 2955 } 2956 2957 static void scrub_rbio(struct btrfs_raid_bio *rbio) 2958 { 2959 int sector_nr; 2960 int ret; 2961 2962 ret = alloc_rbio_essential_pages(rbio); 2963 if (ret) 2964 goto out; 2965 2966 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); 2967 2968 ret = scrub_assemble_read_bios(rbio); 2969 if (ret < 0) 2970 goto out; 2971 2972 /* We may have some failures, recover the failed sectors first. */ 2973 ret = recover_scrub_rbio(rbio); 2974 if (ret < 0) 2975 goto out; 2976 2977 /* 2978 * We have every sector properly prepared. Can finish the scrub 2979 * and writeback the good content. 2980 */ 2981 ret = finish_parity_scrub(rbio); 2982 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); 2983 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { 2984 int found_errors; 2985 2986 found_errors = get_rbio_vertical_errors(rbio, sector_nr, NULL, NULL); 2987 if (unlikely(found_errors > rbio->bioc->max_errors)) { 2988 ret = -EIO; 2989 break; 2990 } 2991 } 2992 out: 2993 rbio_orig_end_io(rbio, errno_to_blk_status(ret)); 2994 } 2995 2996 static void scrub_rbio_work_locked(struct work_struct *work) 2997 { 2998 scrub_rbio(container_of(work, struct btrfs_raid_bio, work)); 2999 } 3000 3001 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) 3002 { 3003 if (!lock_stripe_add(rbio)) 3004 start_async_work(rbio, scrub_rbio_work_locked); 3005 } 3006 3007 /* 3008 * This is for scrub call sites where we already have correct data contents. 3009 * This allows us to avoid reading data stripes again. 3010 * 3011 * Unfortunately here we have to do folio copy, other than reusing the pages. 3012 * This is due to the fact rbio has its own page management for its cache. 3013 */ 3014 void raid56_parity_cache_data_folios(struct btrfs_raid_bio *rbio, 3015 struct folio **data_folios, u64 data_logical) 3016 { 3017 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 3018 const u64 offset_in_full_stripe = data_logical - 3019 rbio->bioc->full_stripe_logical; 3020 unsigned int findex = 0; 3021 unsigned int foffset = 0; 3022 int ret; 3023 3024 /* 3025 * If we hit ENOMEM temporarily, but later at 3026 * raid56_parity_submit_scrub_rbio() time it succeeded, we just do 3027 * the extra read, not a big deal. 
	 *
	 * If we hit ENOMEM later at raid56_parity_submit_scrub_rbio() time,
	 * the bio will get a proper error number set.
	 */
	ret = alloc_rbio_data_pages(rbio);
	if (ret < 0)
		return;

	/* data_logical must be at stripe boundary and inside the full stripe. */
	ASSERT(IS_ALIGNED(offset_in_full_stripe, BTRFS_STRIPE_LEN));
	ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT));

	for (unsigned int cur_off = offset_in_full_stripe;
	     cur_off < offset_in_full_stripe + BTRFS_STRIPE_LEN;
	     cur_off += PAGE_SIZE) {
		const unsigned int pindex = cur_off >> PAGE_SHIFT;
		void *kaddr;

		kaddr = kmap_local_page(rbio->stripe_pages[pindex]);
		memcpy_from_folio(kaddr, data_folios[findex], foffset, PAGE_SIZE);
		kunmap_local(kaddr);

		foffset += PAGE_SIZE;
		ASSERT(foffset <= folio_size(data_folios[findex]));
		if (foffset == folio_size(data_folios[findex])) {
			findex++;
			foffset = 0;
		}
	}
	bitmap_set(rbio->stripe_uptodate_bitmap,
		   offset_in_full_stripe >> fs_info->sectorsize_bits,
		   BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
}
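
/*
 * Illustrative note for raid56_parity_cache_data_folios() above, assuming 4K
 * pages and a 4K sectorsize: caching data at full_stripe_logical + 64K copies
 * one BTRFS_STRIPE_LEN (64K) worth of folio data into stripe_pages[16..31]
 * and sets bits 16..31 of stripe_uptodate_bitmap, so the scrub path does not
 * have to re-read that data stripe from disk.
 */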