// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "file-item.h"
#include "btrfs_inode.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS	11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

/*
 * A bvec like structure to present a sector inside a page.
 *
 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 */
struct sector_ptr {
	struct page *page;
	unsigned int pgoff:24;
	unsigned int uptodate:8;
};
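/*
 * Editor's illustrative note (not part of the original source): with the
 * fixed 64K BTRFS_STRIPE_LEN and an assumed 4K sectorsize, each stripe holds
 * 64K / 4K = 16 sectors, and every sector lives entirely inside one page, so
 * pgoff:24 comfortably covers any in-page offset (even 64K pages only need
 * 16 bits).
 */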
static void rmw_rbio_work(struct work_struct *work);
static void rmw_rbio_work_locked(struct work_struct *work);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static int finish_parity_scrub(struct btrfs_raid_bio *rbio);
static void scrub_rbio_work_locked(struct work_struct *work);

static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
{
	bitmap_free(rbio->error_bitmap);
	kfree(rbio->stripe_pages);
	kfree(rbio->bio_sectors);
	kfree(rbio->stripe_sectors);
	kfree(rbio->finish_pointers);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bioc(rbio->bioc);
	free_raid_bio_pointers(rbio);
	kfree(rbio);
}

static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
{
	INIT_WORK(&rbio->work, work_func);
	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_sectors array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_sectors; i++) {
		/* Some range not covered by bio (partial write), skip it */
		if (!rbio->bio_sectors[i].page) {
			/*
			 * Even if the sector is not covered by bio, if it is
			 * a data sector it should still be uptodate as it is
			 * read from disk.
			 */
			if (i < rbio->nr_data * rbio->stripe_nsectors)
				ASSERT(rbio->stripe_sectors[i].uptodate);
			continue;
		}

		ASSERT(rbio->stripe_sectors[i].page);
		memcpy_page(rbio->stripe_sectors[i].page,
			    rbio->stripe_sectors[i].pgoff,
			    rbio->bio_sectors[i].page,
			    rbio->bio_sectors[i].pgoff,
			    rbio->bioc->fs_info->sectorsize);
		rbio->stripe_sectors[i].uptodate = 1;
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bioc->full_stripe_logical;

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
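/*
 * Editor's illustrative sketch of the bucketing above (not part of the
 * original source): with BTRFS_STRIPE_HASH_TABLE_BITS == 11 there are 2048
 * buckets, so a full stripe starting at logical 5368709120 (5 GiB) maps to
 * hash_64(5368709120 >> 16, 11), i.e. some bucket in [0, 2047].
 */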
static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
				       unsigned int page_nr)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	ASSERT(page_nr < rbio->nr_pages);

	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page;
	     i++) {
		if (!rbio->stripe_sectors[i].uptodate)
			return false;
	}
	return true;
}

/*
 * Update the stripe_sectors[] array to use correct page and pgoff
 *
 * Should be called every time any page pointer in stripe_pages[] is modified.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	u32 offset;
	int i;

	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
	}
}

static void steal_rbio_page(struct btrfs_raid_bio *src,
			    struct btrfs_raid_bio *dest, int page_nr)
{
	const u32 sectorsize = src->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	if (dest->stripe_pages[page_nr])
		__free_page(dest->stripe_pages[page_nr]);
	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
	src->stripe_pages[page_nr] = NULL;

	/* Also update the sector->uptodate bits. */
	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page; i++)
		dest->stripe_sectors[i].uptodate = true;
}

static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
{
	const int sector_nr = (page_nr << PAGE_SHIFT) >>
			      rbio->bioc->fs_info->sectorsize_bits;

	/*
	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
	 * we won't have a page which is half data half parity.
	 *
	 * Thus if the first sector of the page belongs to data stripes, then
	 * the full page belongs to data stripes.
	 */
	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
}

/*
 * Stealing an rbio means taking all the uptodate pages from the stripe array
 * in the source rbio and putting them into the destination rbio.
 *
 * This will also update the involved stripe_sectors[] which are referring to
 * the old pages.
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		struct page *p = src->stripe_pages[i];

		/*
		 * We don't need to steal P/Q pages as they will always be
		 * regenerated for RMW or full write anyway.
		 */
		if (!is_data_stripe_page(src, i))
			continue;

		/*
		 * If @src already has RBIO_CACHE_READY_BIT, it should have
		 * all data stripe pages present and uptodate.
		 */
		ASSERT(p);
		ASSERT(full_page_sectors_uptodate(src, i));
		steal_rbio_page(src, dest, i);
	}
	index_stripe_sectors(dest);
	index_stripe_sectors(src);
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge_init(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	/* Also inherit the bitmaps from @victim. */
	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
		  dest->stripe_nsectors);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock(&table->cache_lock);
	__remove_rbio_from_cache(rbio);
	spin_unlock(&table->cache_lock);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock(&table->cache_lock);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock(&table->cache_lock);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock(&table->cache_lock);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock(&table->cache_lock);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock(&rbio->bio_list_lock);
	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
		ret = 0;
	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
	spin_unlock(&rbio->bio_list_lock);

	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical)
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * A parity scrub reads the full stripe from the drive, then checks
	 * and repairs the parity and writes the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD)
		return 0;

	return 1;
}

static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	ASSERT(stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr < rbio->stripe_nsectors);

	return stripe_nr * rbio->stripe_nsectors + sector_nr;
}

/* Return a sector from rbio->stripe_sectors, not from the bio list */
static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
							      sector_nr)];
}

/* Grab a sector inside P stripe */
static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
}

/* Grab a sector inside Q stripe, return NULL if not RAID6 */
static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
}
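/*
 * Editor's note with illustrative numbers (not in the original source): the
 * helpers above index the flat stripe_sectors[] array as
 * stripe_nr * stripe_nsectors + sector_nr.  E.g. for a 6-device RAID6 with
 * 4 data stripes and 16 sectors per stripe, data sectors occupy indexes
 * [0, 64), the P stripe [64, 80) and the Q stripe [80, 96).
 */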
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock(&h->lock);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical)
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones.  We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock(&h->lock);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		free_raid_bio(freeit);
	return ret;
}

static void recover_rbio_work_locked(struct work_struct *work);

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock(&h->lock);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock(&h->lock);

			if (next->operation == BTRFS_RBIO_READ_REBUILD) {
				start_async_work(next, recover_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_rbio_work_locked);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	kfree(rbio->csum_buf);
	bitmap_free(rbio->csum_bitmap);
	rbio->csum_buf = NULL;
	rbio->csum_bitmap = NULL;

	/*
	 * Clear the data bitmap, as the rbio may be cached for later usage.
	 * Do this before unlock_stripe() so no new bios can be queued for
	 * this rbio.
	 */
	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
 *
 * @rbio:               The raid bio
 * @stripe_nr:          Stripe number, valid range [0, real_stripes)
 * @sector_nr:          Sector number inside the stripe,
 *                      valid range [0, stripe_nsectors)
 * @bio_list_only:      Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only use stripe_sectors as fallback.
 */
static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
					 int stripe_nr, int sector_nr,
					 bool bio_list_only)
{
	struct sector_ptr *sector;
	int index;

	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);

	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
	ASSERT(index >= 0 && index < rbio->nr_sectors);

	spin_lock(&rbio->bio_list_lock);
	sector = &rbio->bio_sectors[index];
	if (sector->page || bio_list_only) {
		/* Don't return sector without a valid page pointer */
		if (!sector->page)
			sector = NULL;
		spin_unlock(&rbio->bio_list_lock);
		return sector;
	}
	spin_unlock(&rbio->bio_list_lock);

	return &rbio->stripe_sectors[index];
}

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that this does
 * not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_io_context *bioc)
{
	const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes;
	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
	const unsigned int num_pages = stripe_npages * real_stripes;
	const unsigned int stripe_nsectors =
		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	const unsigned int num_sectors = stripe_nsectors * real_stripes;
	struct btrfs_raid_bio *rbio;

	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
	/*
	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
	 * (at most 16) should be no larger than BITS_PER_LONG.
	 */
	ASSERT(stripe_nsectors <= BITS_PER_LONG);

	/*
	 * Real stripes must be between 2 (2 disks RAID5, aka RAID1) and 256
	 * (limited by u8).
	 */
	ASSERT(real_stripes >= 2);
	ASSERT(real_stripes <= U8_MAX);

	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);
	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
				     GFP_NOFS);
	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				    GFP_NOFS);
	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				       GFP_NOFS);
	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
	rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);

	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
	    !rbio->finish_pointers || !rbio->error_bitmap) {
		free_raid_bio_pointers(rbio);
		kfree(rbio);
		return ERR_PTR(-ENOMEM);
	}

	bio_list_init(&rbio->bio_list);
	init_waitqueue_head(&rbio->io_wait);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	btrfs_get_bioc(bioc);
	rbio->bioc = bioc;
	rbio->nr_pages = num_pages;
	rbio->nr_sectors = num_sectors;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->stripe_nsectors = stripe_nsectors;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->stripes_pending, 0);

	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);
	ASSERT(rbio->nr_data > 0);

	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, 0);
	if (ret < 0)
		return ret;
	/* Mapping all sectors */
	index_stripe_sectors(rbio);
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
				     rbio->stripe_pages + data_pages, 0);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * Return the total number of errors found in the vertical stripe of @sector_nr.
 *
 * @faila and @failb will also be updated to the first and second stripe
 * number of the errors.
 */
static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
				     int *faila, int *failb)
{
	int stripe_nr;
	int found_errors = 0;

	if (faila || failb) {
		/*
		 * Both @faila and @failb should be valid pointers if any of
		 * them is specified.
		 */
		ASSERT(faila && failb);
		*faila = -1;
		*failb = -1;
	}

	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
		int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;

		if (test_bit(total_sector_nr, rbio->error_bitmap)) {
			found_errors++;
			if (faila) {
				/* Update faila and failb. */
				if (*faila < 0)
					*faila = stripe_nr;
				else if (*failb < 0)
					*failb = stripe_nr;
			}
		}
	}
	return found_errors;
}
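/*
 * Editor's illustrative note (not in the original source): for one vertical
 * stripe the helper above simply counts set bits in error_bitmap.  E.g. on a
 * RAID6 full stripe (max_errors == 2) with bad sectors in stripes 1 and 4,
 * it returns 2 with *faila == 1 and *failb == 4, which is still recoverable;
 * a third bad stripe would push the count past max_errors and fail the IO.
 */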
/*
 * Add a single sector @sector into our list of bios for IO.
 *
 * Return 0 if everything went well.
 * Return <0 for error.
 */
static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list,
			      struct sector_ptr *sector,
			      unsigned int stripe_nr,
			      unsigned int sector_nr,
			      enum req_op op)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_io_stripe *stripe;
	u64 disk_start;

	/*
	 * Note: here stripe_nr has taken device replace into consideration,
	 * thus it can be larger than rbio->real_stripes.
	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
	 */
	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
	ASSERT(sector->page);

	stripe = &rbio->bioc->stripes[stripe_nr];
	disk_start = stripe->physical + sector_nr * sectorsize;

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev) {
		int found_errors;

		set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
			rbio->error_bitmap);

		/* Check if we have reached tolerance early. */
		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 NULL, NULL);
		if (found_errors > rbio->bioc->max_errors)
			return -EIO;
		return 0;
	}

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = last->bi_iter.bi_sector << SECTOR_SHIFT;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, sector->page, sectorsize,
					   sector->pgoff);
			if (ret == sectorsize)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = bio_alloc(stripe->dev->bdev,
			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
			op, GFP_NOFS);
	bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT;
	bio->bi_private = rbio;

	__bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
	bio_list_add(bio_list, bio);
	return 0;
}

static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec bvec;
	struct bvec_iter iter;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->full_stripe_logical;

	bio_for_each_segment(bvec, bio, iter) {
		u32 bvec_offset;

		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
		     bvec_offset += sectorsize, offset += sectorsize) {
			int index = offset / sectorsize;
			struct sector_ptr *sector = &rbio->bio_sectors[index];

			sector->page = bvec.bv_page;
			sector->pgoff = bvec.bv_offset + bvec_offset;
			ASSERT(sector->pgoff < PAGE_SIZE);
		}
	}
}
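/*
 * Editor's worked example for index_one_bio() (illustrative, not from the
 * original source): with a 4K sectorsize, a bio starting 12K past
 * full_stripe_logical begins at offset 12288, so its first sector lands in
 * bio_sectors[12288 / 4096] == bio_sectors[3], and each following sector
 * advances the index by one.
 */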
/*
 * helper function to walk our bio list and populate the bio_sectors array
 * with the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we set up the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from sector_in_rbio()
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;

	spin_lock(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list)
		index_one_bio(rbio, bio);

	spin_unlock(&rbio->bio_list_lock);
}

static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
			       struct raid56_bio_trace_info *trace_info)
{
	const struct btrfs_io_context *bioc = rbio->bioc;
	int i;

	ASSERT(bioc);

	/* We rely on bio->bi_bdev to find the stripe number. */
	if (!bio->bi_bdev)
		goto not_found;

	for (i = 0; i < bioc->num_stripes; i++) {
		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
			continue;
		trace_info->stripe_nr = i;
		trace_info->devid = bioc->stripes[i].dev->devid;
		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
				     bioc->stripes[i].physical;
		return;
	}

not_found:
	trace_info->devid = -1;
	trace_info->offset = -1;
	trace_info->stripe_nr = -1;
}

static inline void bio_list_put(struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		bio_put(bio);
}

static void assert_rbio(struct btrfs_raid_bio *rbio)
{
	if (!IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
	    !IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	/*
	 * At least two stripes (2 disks RAID5), and since real_stripes is U8,
	 * we won't go beyond 256 disks anyway.
	 */
	ASSERT(rbio->real_stripes >= 2);
	ASSERT(rbio->nr_data > 0);

	/*
	 * This is another check to make sure nr data stripes is smaller
	 * than total stripes.
	 */
	ASSERT(rbio->nr_data < rbio->real_stripes);
}

/* Generate PQ for one vertical stripe. */
static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
{
	void **pointers = rbio->finish_pointers;
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct sector_ptr *sector;
	int stripe;
	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;

	/* First collect one sector from each data stripe */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
		pointers[stripe] = kmap_local_page(sector->page) +
				   sector->pgoff;
	}

	/* Then add the parity stripe */
	sector = rbio_pstripe_sector(rbio, sectornr);
	sector->uptodate = 1;
	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;

	if (has_qstripe) {
		/*
		 * RAID6, add the qstripe and call the library function
		 * to fill in our p/q
		 */
		sector = rbio_qstripe_sector(rbio, sectornr);
		sector->uptodate = 1;
		pointers[stripe++] = kmap_local_page(sector->page) +
				     sector->pgoff;

		assert_rbio(rbio);
		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
					pointers);
	} else {
		/* raid5 */
		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
	}
	for (stripe = stripe - 1; stripe >= 0; stripe--)
		kunmap_local(pointers[stripe]);
}
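/*
 * Editor's summary of the parity math used above (illustrative, not from the
 * original source): for data sectors D0..Dn-1 in one vertical stripe, RAID5
 * keeps P = D0 ^ D1 ^ ... ^ Dn-1, while RAID6 additionally keeps the
 * Reed-Solomon syndrome Q = g^0*D0 + g^1*D1 + ... + g^(n-1)*Dn-1 computed
 * over GF(2^8), which is what raid6_call.gen_syndrome() produces.
 */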
static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
				   struct bio_list *bio_list)
{
	/* The total sector number inside the full stripe. */
	int total_sector_nr;
	int sectornr;
	int stripe;
	int ret;

	ASSERT(bio_list_size(bio_list) == 0);

	/* We should have at least one data sector. */
	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));

	/*
	 * Reset errors, as we may have errors inherited from a degraded
	 * write.
	 */
	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/*
	 * Start assembly.  Make bios for everything from the higher layers (the
	 * bio_list in our rbio) and our P/Q.  Ignore everything else.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	if (likely(!rbio->bioc->replace_nr_stripes))
		return 0;

	/*
	 * Make a copy for the replace target device.
	 *
	 * The source stripe number (in replace_stripe_src) should be valid
	 * in this case.
	 */
	ASSERT(rbio->bioc->replace_stripe_src >= 0);

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/*
		 * For RAID56, there is only one device that can be replaced,
		 * and replace_stripe_src indicates the stripe number we
		 * need to copy from.
		 */
		if (stripe != rbio->bioc->replace_stripe_src) {
			/*
			 * We can skip the whole stripe completely, note
			 * total_sector_nr will be increased by one anyway.
			 */
			ASSERT(sectornr == 0);
			total_sector_nr += rbio->stripe_nsectors - 1;
			continue;
		}

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector,
					 rbio->real_stripes,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	return 0;
error:
	bio_list_put(bio_list);
	return -EIO;
}
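/*
 * Editor's illustrative example (not in the original source): a single 4K
 * sub-stripe write to the second data stripe sets exactly one bit in
 * dbitmap, so the loops above emit write bios only for that vertical
 * stripe: the touched data sector plus its P (and, on RAID6, Q) sector,
 * and optionally one more copy to the device-replace target.
 */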
static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->full_stripe_logical;
	int total_nr_sector = offset >> fs_info->sectorsize_bits;

	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);

	bitmap_set(rbio->error_bitmap, total_nr_sector,
		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);

	/*
	 * Special handling for raid56_alloc_missing_rbio() used by
	 * scrub/replace.  Unlike the call path in raid56_parity_recover(),
	 * they pass an empty bio here.  Thus we have to find out the missing
	 * device and mark the stripe error instead.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bool found_missing = false;
		int stripe_nr;

		for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
			if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
				found_missing = true;
				bitmap_set(rbio->error_bitmap,
					   stripe_nr * rbio->stripe_nsectors,
					   rbio->stripe_nsectors);
			}
		}
		ASSERT(found_missing);
	}
}

/*
 * For subpage case, we can no longer set page Up-to-date directly for
 * stripe_pages[], thus we need to locate the sector.
 */
static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
					     struct page *page,
					     unsigned int pgoff)
{
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector = &rbio->stripe_sectors[i];

		if (sector->page == page && sector->pgoff == pgoff)
			return sector;
	}
	return NULL;
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct sector_ptr *sector;
		int pgoff;

		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
		     pgoff += sectorsize) {
			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
			ASSERT(sector);
			if (sector)
				sector->uptodate = 1;
		}
	}
}

static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct bio_vec *bv = bio_first_bvec_all(bio);
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector;

		sector = &rbio->stripe_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
		sector = &rbio->bio_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
	}
	ASSERT(i < rbio->nr_sectors);
	return i;
}

static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	u32 bio_size = 0;
	struct bio_vec *bvec;
	int i;

	bio_for_each_bvec_all(bvec, bio, i)
		bio_size += bvec->bv_len;

	/*
	 * Since we can have multiple bios touching the error_bitmap, we cannot
	 * call bitmap_set() without protection.
	 *
	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
	 */
	for (i = total_sector_nr; i < total_sector_nr +
	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
		set_bit(i, rbio->error_bitmap);
}

/* Verify the data sectors at read time. */
static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
				    struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/* No data csum for the whole stripe, no need to verify. */
	if (!rbio->csum_bitmap || !rbio->csum_buf)
		return;

	/* P/Q stripes, they have no data csum to verify against. */
	if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		int bv_offset;

		for (bv_offset = bvec->bv_offset;
		     bv_offset < bvec->bv_offset + bvec->bv_len;
		     bv_offset += fs_info->sectorsize, total_sector_nr++) {
			u8 csum_buf[BTRFS_CSUM_SIZE];
			u8 *expected_csum = rbio->csum_buf +
					    total_sector_nr * fs_info->csum_size;
			int ret;

			/* No csum for this sector, skip to the next sector. */
			if (!test_bit(total_sector_nr, rbio->csum_bitmap))
				continue;

			ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
						      bv_offset, csum_buf, expected_csum);
			if (ret < 0)
				set_bit(total_sector_nr, rbio->error_bitmap);
		}
	}
}

static void raid_wait_read_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status) {
		rbio_update_error_bitmap(rbio, bio);
	} else {
		set_bio_pages_uptodate(rbio, bio);
		verify_bio_data_sectors(rbio, bio);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&rbio->stripes_pending))
		wake_up(&rbio->io_wait);
}

static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
				      struct bio_list *bio_list)
{
	struct bio *bio;

	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
	while ((bio = bio_list_pop(bio_list))) {
		bio->bi_end_io = raid_wait_read_end_io;

		if (trace_raid56_read_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_read(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}

	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
}

static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, 0);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * We use plugging callbacks to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						       plug_list);
	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						       plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	list_sort(NULL, &plug->rbio_list, plug_cmp);

	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* We have a full stripe, queue it down. */
			start_async_work(cur, rmw_rbio_work);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				free_raid_bio(cur);
				continue;
			}
			start_async_work(last, rmw_rbio_work);
		}
		last = cur;
	}
	if (last)
		start_async_work(last, rmw_rbio_work);
	kfree(plug);
}

/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
{
	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
	const u64 full_stripe_start = rbio->bioc->full_stripe_logical;
	const u32 orig_len = orig_bio->bi_iter.bi_size;
	const u32 sectorsize = fs_info->sectorsize;
	u64 cur_logical;

	ASSERT(orig_logical >= full_stripe_start &&
	       orig_logical + orig_len <= full_stripe_start +
	       rbio->nr_data * BTRFS_STRIPE_LEN);

	bio_list_add(&rbio->bio_list, orig_bio);
	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;

	/* Update the dbitmap. */
	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
	     cur_logical += sectorsize) {
		int bit = ((u32)(cur_logical - full_stripe_start) >>
			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;

		set_bit(bit, &rbio->dbitmap);
	}
}
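/*
 * Editor's worked example for the dbitmap update above (illustrative, not
 * from the original source): with 4K sectors and 16 sectors per stripe, a
 * write at full_stripe_start + 68K has (68K >> 12) % 16 == 17 % 16 == 1,
 * i.e. it sets bit 1 because that offset is the second sector of the
 * second data stripe and dbitmap only tracks the vertical position.
 */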
/*
 * our main entry point for writes from the rest of the FS.
 */
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
		bio_endio(bio);
		return;
	}
	rbio->operation = BTRFS_RBIO_WRITE;
	rbio_add_bio(rbio, bio);

	/*
	 * Don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (!rbio_is_full(rbio)) {
		cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
		if (cb) {
			plug = container_of(cb, struct btrfs_plug_cb, cb);
			if (!plug->info) {
				plug->info = fs_info;
				INIT_LIST_HEAD(&plug->rbio_list);
			}
			list_add_tail(&rbio->plug_list, &plug->rbio_list);
			return;
		}
	}

	/*
	 * Either we don't have any existing plug, or we're doing a full stripe,
	 * queue the rmw work now.
	 */
	start_async_work(rbio, rmw_rbio_work);
}

static int verify_one_sector(struct btrfs_raid_bio *rbio,
			     int stripe_nr, int sector_nr)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct sector_ptr *sector;
	u8 csum_buf[BTRFS_CSUM_SIZE];
	u8 *csum_expected;
	int ret;

	if (!rbio->csum_bitmap || !rbio->csum_buf)
		return 0;

	/* No way to verify P/Q as they are not covered by data csum. */
	if (stripe_nr >= rbio->nr_data)
		return 0;
	/*
	 * If we're rebuilding a read, we have to use pages from the
	 * bio list if possible.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
	} else {
		sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
	}

	ASSERT(sector->page);

	csum_expected = rbio->csum_buf +
			(stripe_nr * rbio->stripe_nsectors + sector_nr) *
			fs_info->csum_size;
	ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
				      csum_buf, csum_expected);
	return ret;
}
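/*
 * Editor's illustrative note on the csum indexing above (not in the original
 * source): csum_buf holds one checksum per data sector, laid out in the same
 * stripe_nr * stripe_nsectors + sector_nr order.  Assuming crc32c (4-byte
 * csums) and 16 sectors per stripe, the csum for stripe 2, sector 5 lives at
 * byte offset (2 * 16 + 5) * 4 == 148.
 */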
/*
 * Recover a vertical stripe specified by @sector_nr.
 * @*pointers are the pre-allocated pointers by the caller, so we don't
 * need to allocate/free the pointers again and again.
 */
static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
			    void **pointers, void **unmap_array)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct sector_ptr *sector;
	const u32 sectorsize = fs_info->sectorsize;
	int found_errors;
	int faila;
	int failb;
	int stripe_nr;
	int ret = 0;

	/*
	 * Now we just use bitmap to mark the horizontal stripes in
	 * which we have data when doing parity scrub.
	 */
	if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
	    !test_bit(sector_nr, &rbio->dbitmap))
		return 0;

	found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
						 &failb);
	/*
	 * No errors in the vertical stripe, skip it.  Can happen for recovery
	 * where only part of a stripe failed the csum check.
	 */
	if (!found_errors)
		return 0;

	if (found_errors > rbio->bioc->max_errors)
		return -EIO;

	/*
	 * Setup our array of pointers with sectors from each stripe
	 *
	 * NOTE: store a duplicate array of pointers to preserve the
	 * pointer order.
	 */
	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
		/*
		 * If we're rebuilding a read, we have to use pages from the
		 * bio list if possible.
		 */
		if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
			sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
		} else {
			sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
		}
		ASSERT(sector->page);
		pointers[stripe_nr] = kmap_local_page(sector->page) +
				      sector->pgoff;
		unmap_array[stripe_nr] = pointers[stripe_nr];
	}

	/* All raid6 handling here */
	if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
		/* Single failure, rebuild from parity raid5 style */
		if (failb < 0) {
			if (faila == rbio->nr_data)
				/*
				 * Just the P stripe has failed, without
				 * a bad data or Q stripe.
				 * We have nothing to do, just skip the
				 * recovery for this stripe.
				 */
				goto cleanup;
			/*
			 * a single failure in raid6 is rebuilt
			 * in the pstripe code below
			 */
			goto pstripe;
		}

		/*
		 * If the q stripe is failed, do a pstripe reconstruction from
		 * the xors.
		 * If both the q stripe and the P stripe are failed, we're
		 * here due to a crc mismatch and we can't give them the
		 * data they want.
		 */
		if (failb == rbio->real_stripes - 1) {
			if (faila == rbio->real_stripes - 2)
				/*
				 * Only P and Q are corrupted.
				 * We only care about data stripes recovery,
				 * can skip this vertical stripe.
				 */
				goto cleanup;
			/*
			 * Otherwise we have one bad data stripe and
			 * a good P stripe.  raid5!
			 */
			goto pstripe;
		}

		if (failb == rbio->real_stripes - 2) {
			raid6_datap_recov(rbio->real_stripes, sectorsize,
					  faila, pointers);
		} else {
			raid6_2data_recov(rbio->real_stripes, sectorsize,
					  faila, failb, pointers);
		}
	} else {
		void *p;

		/* Rebuild from P stripe here (raid5 or raid6). */
		ASSERT(failb == -1);
pstripe:
		/* Copy parity block into failed block to start with */
		memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);

		/* Rearrange the pointer array */
		p = pointers[faila];
		for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
		     stripe_nr++)
			pointers[stripe_nr] = pointers[stripe_nr + 1];
		pointers[rbio->nr_data - 1] = p;

		/* Xor in the rest */
		run_xor(pointers, rbio->nr_data - 1, sectorsize);
	}

	/*
	 * No matter if this is a RMW or recovery, we should have all
	 * failed sectors repaired in the vertical stripe, thus they are now
	 * uptodate.
	 * Especially if we determine to cache the rbio, we need to
	 * have at least all data sectors uptodate.
	 *
	 * If possible, also check if the repaired sector matches its data
	 * checksum.
	 */
	if (faila >= 0) {
		ret = verify_one_sector(rbio, faila, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, faila, sector_nr);
		sector->uptodate = 1;
	}
	if (failb >= 0) {
		ret = verify_one_sector(rbio, failb, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, failb, sector_nr);
		sector->uptodate = 1;
	}

cleanup:
	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
		kunmap_local(unmap_array[stripe_nr]);
	return ret;
}

static int recover_sectors(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sectornr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores copy of pointers that does not get reordered
	 * during reconstruction so that kunmap_local works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		spin_lock(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
		if (ret < 0)
			break;
	}

out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static void recover_rbio(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/*
	 * Either we're doing recovery for a read failure or a degraded write,
	 * the caller should have set the error bitmap correctly.
	 */
	ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));

	/* For recovery, we need to read all sectors including P/Q. */
	ret = alloc_rbio_pages(rbio);
	if (ret < 0)
		goto out;

	index_rbio_pages(rbio);

	/*
	 * Read everything that hasn't failed.  However this time we will
	 * not trust any cached sector, as it may contain stale data that the
	 * higher layer is not reading.
	 *
	 * So here we always re-read everything in the recovery path.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/*
		 * Skip the range which has error.  It can be a range which is
		 * marked error (for csum mismatch), or it can be a missing
		 * device.
		 */
		if (!rbio->bioc->stripes[stripe].dev->bdev ||
		    test_bit(total_sector_nr, rbio->error_bitmap)) {
			/*
			 * Also set the error bit for missing device, which
			 * may not yet have its error bit set.
			 */
			set_bit(total_sector_nr, rbio->error_bitmap);
			continue;
		}

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret < 0) {
			bio_list_put(&bio_list);
			goto out;
		}
	}

	submit_read_wait_bio_list(rbio, &bio_list);
	ret = recover_sectors(rbio);
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void recover_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	if (!lock_stripe_add(rbio))
		recover_rbio(rbio);
}

static void recover_rbio_work_locked(struct work_struct *work)
{
	recover_rbio(container_of(work, struct btrfs_raid_bio, work));
}

static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
{
	bool found = false;
	int sector_nr;

	/*
	 * This is for RAID6 extra recovery tries, thus the mirror number
	 * should be larger than 2.
	 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
	 * RAID5 methods.
	 */
	ASSERT(mirror_num > 2);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;
		int faila;
		int failb;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 &faila, &failb);
		/* This vertical stripe doesn't have errors. */
		if (!found_errors)
			continue;

		/*
		 * If we found errors, there should be only one error marked
		 * by the previous set_rbio_range_error().
		 */
		ASSERT(found_errors == 1);
		found = true;

		/* Now select another stripe to mark as error. */
		failb = rbio->real_stripes - (mirror_num - 1);
		if (failb <= faila)
			failb--;

		/* Set the extra bit in the error bitmap. */
		if (failb >= 0)
			set_bit(failb * rbio->stripe_nsectors + sector_nr,
				rbio->error_bitmap);
	}

	/* We should have found at least one vertical stripe with an error. */
	ASSERT(found);
}

/*
 * The main entry point for reads from the higher layers. This is really only
 * called when the normal read path had a failure, so we assume the bio they
 * send down corresponds to a failed part of the drive.
 */
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
			   int mirror_num)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
		bio_endio(bio);
		return;
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	rbio_add_bio(rbio, bio);

	set_rbio_range_error(rbio, bio);

	/*
	 * Loop retry:
	 * for 'mirror_num == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
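	 *
	 * For example, on a four-stripe RAID6 (two data stripes plus P and
	 * Q), mirror 3 would additionally mark the P stripe as failed and
	 * mirror 4 the other data stripe, so each retry reconstructs from a
	 * different combination of the surviving stripes.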
	 */
	if (mirror_num > 2)
		set_rbio_raid6_extra_error(rbio, mirror_num);

	start_async_work(rbio, recover_rbio_work);
}

static void fill_data_csums(struct btrfs_raid_bio *rbio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
					rbio->bioc->full_stripe_logical);
	const u64 start = rbio->bioc->full_stripe_logical;
	const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
			fs_info->sectorsize_bits;
	int ret;

	/* The rbio should not have its csum buffer initialized. */
	ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);

	/*
	 * Skip the csum search if:
	 *
	 * - The rbio doesn't belong to data block groups
	 *   Then we are doing IO for tree blocks, no need to search csums.
	 *
	 * - The rbio belongs to mixed block groups
	 *   This is to avoid deadlock, as we're already holding the full
	 *   stripe lock: if we trigger a metadata read that needs to do
	 *   raid56 recovery, we will deadlock.
	 */
	if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
	    rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
		return;

	rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
				 fs_info->csum_size, GFP_NOFS);
	rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
					  GFP_NOFS);
	if (!rbio->csum_buf || !rbio->csum_bitmap) {
		ret = -ENOMEM;
		goto error;
	}

	ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1,
					rbio->csum_buf, rbio->csum_bitmap);
	if (ret < 0)
		goto error;
	if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
		goto no_csum;
	return;

error:
	/*
	 * We failed to allocate memory or grab the csum, but it's not fatal,
	 * we can still continue. But better to warn users that RMW is no
	 * longer safe for this particular sub-stripe write.
	 */
	btrfs_warn_rl(fs_info,
"sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
		      rbio->bioc->full_stripe_logical, ret);
no_csum:
	kfree(rbio->csum_buf);
	bitmap_free(rbio->csum_bitmap);
	rbio->csum_buf = NULL;
	rbio->csum_bitmap = NULL;
}

static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/*
	 * Fill the data csums we need for data verification. We need to fill
	 * the csum_bitmap/csum_buf first, as our endio function will try to
	 * verify the data sectors.
	 */
	fill_data_csums(rbio);

	/*
	 * Build a list of bios to read all sectors (including data and P/Q).
	 *
	 * This is to compensate for the later csum verification and recovery.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector,
					 stripe, sectornr, REQ_OP_READ);
		if (ret) {
			bio_list_put(&bio_list);
			return ret;
		}
	}

	/*
	 * We may or may not have any corrupted sectors (including missing
	 * devices and csum mismatches), just let recover_sectors() handle
	 * them all.
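	 * Any sector that failed the read or did not match its csum is
	 * expected to have been marked in rbio->error_bitmap by the read
	 * completion path, which is what recover_sectors() works from.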
	 */
	submit_read_wait_bio_list(rbio, &bio_list);
	return recover_sectors(rbio);
}

static void raid_wait_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;

	if (err)
		rbio_update_error_bitmap(rbio, bio);
	bio_put(bio);
	if (atomic_dec_and_test(&rbio->stripes_pending))
		wake_up(&rbio->io_wait);
}

static void submit_write_bios(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list)
{
	struct bio *bio;

	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
	while ((bio = bio_list_pop(bio_list))) {
		bio->bi_end_io = raid_wait_write_end_io;

		if (trace_raid56_write_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_write(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}
}

/*
 * Determine if we need to read any sector from the disk.
 * Should only be used in the RMW path, to skip a cached rbio.
 */
static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	int i;

	for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
		struct sector_ptr *sector = &rbio->stripe_sectors[i];

		/*
		 * We have a sector which either has no page or is not
		 * uptodate, thus this rbio cannot be a cached one, as a
		 * cached one must have all its data sectors present and
		 * uptodate.
		 */
		if (!sector->page || !sector->uptodate)
			return true;
	}
	return false;
}

static void rmw_rbio(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list;
	int sectornr;
	int ret = 0;

	/*
	 * Allocate the pages for parity first, as P/Q pages will always be
	 * needed for both full-stripe and sub-stripe writes.
	 */
	ret = alloc_rbio_parity_pages(rbio);
	if (ret < 0)
		goto out;

	/*
	 * Either it is a full stripe write, or we have every data sector
	 * already cached, so we can go to the write path immediately.
	 */
	if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) {
		/*
		 * Now we're doing a sub-stripe write, so we also need all
		 * data stripes to do the full RMW.
		 */
		ret = alloc_rbio_data_pages(rbio);
		if (ret < 0)
			goto out;

		index_rbio_pages(rbio);

		ret = rmw_read_wait_recover(rbio);
		if (ret < 0)
			goto out;
	}

	/*
	 * At this stage we're not allowed to add any new bios to the
	 * bio list any more, anyone else that wants to change this stripe
	 * needs to do their own rmw.
	 */
	spin_lock(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock(&rbio->bio_list_lock);

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	index_rbio_pages(rbio);

	/*
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon. If they do use it again,
	 * hopefully they will send another full bio.
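	 * Either way the parity is regenerated below for every vertical
	 * stripe before the write bios are assembled.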
	 */
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
		generate_pq_vertical(rbio, sectornr);

	bio_list_init(&bio_list);
	ret = rmw_assemble_write_bios(rbio, &bio_list);
	if (ret < 0)
		goto out;

	/* We should have at least one bio assembled. */
	ASSERT(bio_list_size(&bio_list));
	submit_write_bios(rbio, &bio_list);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);

	/* We may have more errors than our tolerance during the read. */
	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			break;
		}
	}
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void rmw_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	if (lock_stripe_add(rbio) == 0)
		rmw_rbio(rbio);
}

static void rmw_rbio_work_locked(struct work_struct *work)
{
	rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bioc.
 *
 * Note: We need to make sure all the pages that are added to the scrub/
 * replace raid bio are correct and do not change during the scrub/replace.
 * That is, those pages only hold metadata or file data with checksums.
 */

struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
				struct btrfs_io_context *bioc,
				struct btrfs_device *scrub_dev,
				unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping the bioc with BTRFS_MAP_WRITE, parities have been
	 * sorted to the end position, so this search can start from the
	 * first parity stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bioc->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
	return rbio;
}

/*
 * We only scrub the parity for which we have correct data on the same
 * horizontal stripe, so we don't need to allocate pages for all the
 * stripes.
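 *
 * Only the sectors set in rbio->dbitmap get stripe pages allocated below;
 * everything else is left NULL.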
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	int total_sector_nr;

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct page *page;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;

		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;
		if (rbio->stripe_pages[index])
			continue;
		page = alloc_page(GFP_NOFS);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[index] = page;
	}
	index_stripe_sectors(rbio);
	return 0;
}

static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
{
	struct btrfs_io_context *bioc = rbio->bioc;
	const u32 sectorsize = bioc->fs_info->sectorsize;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = &rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int sectornr;
	bool has_qstripe;
	struct sector_ptr p_sector = { 0 };
	struct sector_ptr q_sector = { 0 };
	struct bio_list bio_list;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	/*
	 * If replace is running and our P/Q stripe is being replaced, then we
	 * need to duplicate the final write to the replace target.
	 */
	if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) {
		is_replace = 1;
		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
	}

	/*
	 * The higher layers (scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
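	 *
	 * Clearing RBIO_CACHE_READY_BIT below means a later lookup will not
	 * treat whatever is left in the stripe pages as a valid cache.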
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	p_sector.page = alloc_page(GFP_NOFS);
	if (!p_sector.page)
		return -ENOMEM;
	p_sector.pgoff = 0;
	p_sector.uptodate = 1;

	if (has_qstripe) {
		/* RAID6, allocate and map temp space for the Q stripe */
		q_sector.page = alloc_page(GFP_NOFS);
		if (!q_sector.page) {
			__free_page(p_sector.page);
			p_sector.page = NULL;
			return -ENOMEM;
		}
		q_sector.pgoff = 0;
		q_sector.uptodate = 1;
		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
	}

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/* Map the parity stripe just once */
	pointers[nr_data] = kmap_local_page(p_sector.page);

	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;
		void *parity;

		/* First collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
			pointers[stripe] = kmap_local_page(sector->page) +
					   sector->pgoff;
		}

		if (has_qstripe) {
			assert_rbio(rbio);
			/* RAID6, call the library function to fill in our P/Q */
			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], sectorsize);
			run_xor(pointers + 1, nr_data - 1, sectorsize);
		}

		/* Check the parity being scrubbed and repair it */
		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		parity = kmap_local_page(sector->page) + sector->pgoff;
		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
			memcpy(parity, pointers[rbio->scrubp], sectorsize);
		else
			/* Parity is right, no need to write it back */
			bitmap_clear(&rbio->dbitmap, sectornr, 1);
		kunmap_local(parity);

		for (stripe = nr_data - 1; stripe >= 0; stripe--)
			kunmap_local(pointers[stripe]);
	}

	kunmap_local(pointers[nr_data]);
	__free_page(p_sector.page);
	p_sector.page = NULL;
	if (q_sector.page) {
		kunmap_local(pointers[rbio->real_stripes - 1]);
		__free_page(q_sector.page);
		q_sector.page = NULL;
	}

	/*
	 * Time to start writing. Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our P/Q. Ignore
	 * everything else.
	 */
	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	/*
	 * Replace is running and our parity stripe needs to be duplicated to
	 * the target device. Check we have a valid source stripe number.
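	 *
	 * The duplicated write below targets stripe index rbio->real_stripes,
	 * which is where the replace target device is expected to sit, right
	 * after the real stripes in the bioc.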
	 */
	ASSERT(rbio->bioc->replace_stripe_src >= 0);
	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector,
					 rbio->real_stripes,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

submit_write:
	submit_write_bios(rbio, &bio_list);
	return 0;

cleanup:
	bio_list_put(&bio_list);
	return ret;
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sector_nr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores a copy of the pointers that does not get
	 * reordered during reconstruction, so that kunmap_local() works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int dfail = 0, failp = -1;
		int faila;
		int failb;
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 &faila, &failb);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			goto out;
		}
		if (found_errors == 0)
			continue;

		/* We should have at least one error here. */
		ASSERT(faila >= 0 || failb >= 0);

		if (is_data_stripe(rbio, faila))
			dfail++;
		else if (is_parity_stripe(faila))
			failp = faila;

		if (is_data_stripe(rbio, failb))
			dfail++;
		else if (is_parity_stripe(failb))
			failp = failb;
		/*
		 * Because we cannot use the parity being scrubbed to repair
		 * the data, our repair capability is reduced. (In the RAID5
		 * case, we cannot repair anything.)
		 */
		if (dfail > rbio->bioc->max_errors - 1) {
			ret = -EIO;
			goto out;
		}
		/*
		 * If all the data is good, then only the parity can be wrong;
		 * just repair the parity, no need to recover data stripes.
		 */
		if (dfail == 0)
			continue;

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6. If the corrupted parity is the one being
		 * scrubbed, we can luckily use the other parity to repair
		 * the data; otherwise we cannot repair the data stripe.
		 */
		if (failp != rbio->scrubp) {
			ret = -EIO;
			goto out;
		}

		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
		if (ret < 0)
			goto out;
	}
out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/* Build a list of bios to read all the missing parts. */
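	/*
	 * A sector is skipped if it is outside rbio->dbitmap, already
	 * supplied by the bio list, or already uptodate from the cache;
	 * everything else is read from disk.
	 */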
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/* No data in the vertical stripe, no need to read. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		/*
		 * We want to find all the sectors missing from the rbio and
		 * read them from the disk. If sector_in_rbio() finds a sector
		 * in the bio list we don't need to read it off the stripe.
		 */
		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
		if (sector)
			continue;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		/*
		 * The bio cache may have handed us an uptodate sector. If so,
		 * use it.
		 */
		if (sector->uptodate)
			continue;

		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret) {
			bio_list_put(&bio_list);
			return ret;
		}
	}

	submit_read_wait_bio_list(rbio, &bio_list);
	return 0;
}

static void scrub_rbio(struct btrfs_raid_bio *rbio)
{
	int sector_nr;
	int ret;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto out;

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	ret = scrub_assemble_read_bios(rbio);
	if (ret < 0)
		goto out;

	/* We may have some failures, recover the failed sectors first. */
	ret = recover_scrub_rbio(rbio);
	if (ret < 0)
		goto out;

	/*
	 * Every sector is properly prepared. We can finish the scrub and
	 * write back the good content.
	 */
	ret = finish_parity_scrub(rbio);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			break;
		}
	}
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void scrub_rbio_work_locked(struct work_struct *work)
{
	scrub_rbio(container_of(work, struct btrfs_raid_bio, work));
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_rbio_work_locked);
}

/*
 * This is for scrub call sites where we already have correct data contents.
 * This allows us to avoid reading data stripes again.
 *
 * Unfortunately we have to copy the pages here rather than reusing them,
 * because the rbio has its own page management for its cache.
 */
void raid56_parity_cache_data_pages(struct btrfs_raid_bio *rbio,
				    struct page **data_pages, u64 data_logical)
{
	const u64 offset_in_full_stripe = data_logical -
					  rbio->bioc->full_stripe_logical;
	const int page_index = offset_in_full_stripe >> PAGE_SHIFT;
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int ret;

	/*
	 * If we hit ENOMEM temporarily, but later at
	 * raid56_parity_submit_scrub_rbio() time it succeeds, we just do
	 * the extra read, not a big deal.
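	 *
	 * In other words, failing to allocate the data pages here only loses
	 * the read-avoidance optimization, never correctness.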
	 *
	 * If we hit ENOMEM later at raid56_parity_submit_scrub_rbio() time,
	 * the bio will get a proper error number set.
	 */
	ret = alloc_rbio_data_pages(rbio);
	if (ret < 0)
		return;

	/* data_logical must be at a stripe boundary and inside the full stripe. */
	ASSERT(IS_ALIGNED(offset_in_full_stripe, BTRFS_STRIPE_LEN));
	ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT));

	for (int page_nr = 0; page_nr < (BTRFS_STRIPE_LEN >> PAGE_SHIFT); page_nr++) {
		struct page *dst = rbio->stripe_pages[page_nr + page_index];
		struct page *src = data_pages[page_nr];

		memcpy_page(dst, 0, src, 0, PAGE_SIZE);
		for (int sector_nr = sectors_per_page * page_index;
		     sector_nr < sectors_per_page * (page_index + 1);
		     sector_nr++)
			rbio->stripe_sectors[sector_nr].uptodate = true;
	}
}