// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS				11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

/*
 * A bvec like structure to present a sector inside a page.
 *
 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 */
struct sector_ptr {
	struct page *page;
	unsigned int pgoff:24;
	unsigned int uptodate:8;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct work_struct *work);
static void read_rebuild_work(struct work_struct *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct work_struct *work);

static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
{
	kfree(rbio->stripe_pages);
	kfree(rbio->bio_sectors);
	kfree(rbio->stripe_sectors);
	kfree(rbio->finish_pointers);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bioc(rbio->bioc);
	free_raid_bio_pointers(rbio);
	kfree(rbio);
}

static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
{
	INIT_WORK(&rbio->work, work_func);
	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
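
	/*
	 * Sizing example (illustrative, assuming 64-bit pointers and no lock
	 * debugging): BTRFS_STRIPE_HASH_TABLE_BITS is 11, so num_entries is
	 * 2048 buckets and the table is roughly 48K, which matches the
	 * "order 4" note below and is why the allocation falls back to
	 * vmalloc via kvzalloc().
	 */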

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_sectors array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_sectors; i++) {
		/* Some range not covered by bio (partial write), skip it */
		if (!rbio->bio_sectors[i].page) {
			/*
			 * Even if the sector is not covered by bio, if it is
			 * a data sector it should still be uptodate as it is
			 * read from disk.
			 */
			if (i < rbio->nr_data * rbio->stripe_nsectors)
				ASSERT(rbio->stripe_sectors[i].uptodate);
			continue;
		}

		ASSERT(rbio->stripe_sectors[i].page);
		memcpy_page(rbio->stripe_sectors[i].page,
			    rbio->stripe_sectors[i].pgoff,
			    rbio->bio_sectors[i].page,
			    rbio->bio_sectors[i].pgoff,
			    rbio->bioc->fs_info->sectorsize);
		rbio->stripe_sectors[i].uptodate = 1;
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bioc->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
				       unsigned int page_nr)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	ASSERT(page_nr < rbio->nr_pages);

	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page;
	     i++) {
		if (!rbio->stripe_sectors[i].uptodate)
			return false;
	}
	return true;
}

/*
 * Update the stripe_sectors[] array to use correct page and pgoff
 *
 * Should be called every time any page pointer in stripe_pages[] got modified.
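 *
 * A worked example (illustrative numbers, not part of the original comment):
 * with a 4K sectorsize and 4K pages each sector maps 1:1 to a page at
 * pgoff 0, while with 64K pages (subpage case) sectors 0-15 all map to
 * page 0 at pgoff 0, 4K, 8K, ... 60K.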
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	u32 offset;
	int i;

	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
	}
}

static void steal_rbio_page(struct btrfs_raid_bio *src,
			    struct btrfs_raid_bio *dest, int page_nr)
{
	const u32 sectorsize = src->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	if (dest->stripe_pages[page_nr])
		__free_page(dest->stripe_pages[page_nr]);
	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
	src->stripe_pages[page_nr] = NULL;

	/* Also update the sector->uptodate bits. */
	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page; i++)
		dest->stripe_sectors[i].uptodate = true;
}

static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
{
	const int sector_nr = (page_nr << PAGE_SHIFT) >>
			      rbio->bioc->fs_info->sectorsize_bits;

	/*
	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
	 * we won't have a page which is half data half parity.
	 *
	 * Thus if the first sector of the page belongs to data stripes, then
	 * the full page belongs to data stripes.
	 */
	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
}

/*
 * Stealing an rbio means taking all the uptodate pages from the stripe array
 * in the source rbio and putting them into the destination rbio.
 *
 * This will also update the involved stripe_sectors[] which are referring to
 * the old pages.
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		struct page *p = src->stripe_pages[i];

		/*
		 * We don't need to steal P/Q pages as they will always be
		 * regenerated for RMW or full write anyway.
		 */
		if (!is_data_stripe_page(src, i))
			continue;

		/*
		 * If @src already has RBIO_CACHE_READY_BIT, it should have
		 * all data stripe pages present and uptodate.
		 */
		ASSERT(p);
		ASSERT(full_page_sectors_uptodate(src, i));
		steal_rbio_page(src, dest, i);
	}
	index_stripe_sectors(dest);
	index_stripe_sectors(src);
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	/* Also inherit the bitmaps from @victim. */
	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
		  dest->stripe_nsectors);
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.
		 * We take it out of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe
 * cache.  It must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
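 *
 * For example, on a RAID5 layout with two data stripes and the fixed 64K
 * BTRFS_STRIPE_LEN, the bio list must carry exactly 128K of data for the
 * write to skip the read path entirely (numbers are illustrative).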
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
		ret = 0;
	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * We've read the full stripe from the drive, and will check and
	 * repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}

static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	ASSERT(stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr < rbio->stripe_nsectors);

	return stripe_nr * rbio->stripe_nsectors + sector_nr;
}

/* Return a sector from rbio->stripe_sectors, not from the bio list */
static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
							      sector_nr)];
}

/* Grab a sector inside P stripe */
static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
}

/* Grab a sector inside Q stripe, return NULL if not RAID6 */
static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
}

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 *    with the lock owner.
 *    The rbio is freed and the IO will start automatically along with the
 *    existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 *    The rbio is added to the lock owner's plug list, or merged into
 *    an rbio already on the plug list.  When the lock owner unlocks,
 *    the next rbio on the list is run and the IO is started automatically.
 *    1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones.
		 * We don't have to check for rmw_locked because there is no
		 * way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		free_raid_bio(freeit);
	return ret;
}

/*
 * called as rmw or parity rebuild is completed.
 * If the plug list has more rbios waiting for this stripe, the next one on
 * the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
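		 * The chosen rbio is requeued via start_async_work() with
		 * the handler matching its operation (read rebuild, rmw or
		 * parity scrub), so its IO starts without the original
		 * submitter retrying.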
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	/*
	 * Clear the data bitmap, as the rbio may be cached for later usage.
	 * Do this before unlock_stripe() so there will be no new bio for
	 * this rbio.
	 */
	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bioc->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/*
 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
 *
 * @rbio:               The raid bio
 * @stripe_nr:          Stripe number, valid range [0, real_stripe)
 * @sector_nr:		Sector number inside the stripe,
 *			valid range [0, stripe_nsectors)
 * @bio_list_only:      Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only use stripe_sectors as fallback.
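 *
 * Index math, for illustration: the flat index below is
 * stripe_nr * stripe_nsectors + sector_nr, so with 16 sectors per stripe
 * the sector (stripe 2, sector 3) sits at index 35.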
 */
static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
					 int stripe_nr, int sector_nr,
					 bool bio_list_only)
{
	struct sector_ptr *sector;
	int index;

	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);

	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
	ASSERT(index >= 0 && index < rbio->nr_sectors);

	spin_lock_irq(&rbio->bio_list_lock);
	sector = &rbio->bio_sectors[index];
	if (sector->page || bio_list_only) {
		/* Don't return sector without a valid page pointer */
		if (!sector->page)
			sector = NULL;
		spin_unlock_irq(&rbio->bio_list_lock);
		return sector;
	}
	spin_unlock_irq(&rbio->bio_list_lock);

	return &rbio->stripe_sectors[index];
}

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_io_context *bioc)
{
	const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
	const unsigned int num_pages = stripe_npages * real_stripes;
	const unsigned int stripe_nsectors =
		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	const unsigned int num_sectors = stripe_nsectors * real_stripes;
	struct btrfs_raid_bio *rbio;

	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
	/*
	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
	 * (at most 16) should be no larger than BITS_PER_LONG.
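	 *
	 * E.g. with a 4K sectorsize that is 64K / 4K == 16 sectors per
	 * stripe, which fits the unsigned long bitmaps (dbitmap) with room
	 * to spare.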
	 */
	ASSERT(stripe_nsectors <= BITS_PER_LONG);

	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);
	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
				     GFP_NOFS);
	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				    GFP_NOFS);
	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				       GFP_NOFS);
	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);

	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
	    !rbio->finish_pointers) {
		free_raid_bio_pointers(rbio);
		kfree(rbio);
		return ERR_PTR(-ENOMEM);
	}

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	btrfs_get_bioc(bioc);
	rbio->bioc = bioc;
	rbio->nr_pages = num_pages;
	rbio->nr_sectors = num_sectors;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->stripe_nsectors = stripe_nsectors;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);

	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
	if (ret < 0)
		return ret;
	/* Mapping all sectors */
	index_stripe_sectors(rbio);
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
				     rbio->stripe_pages + data_pages);
	if (ret < 0)
		return ret;

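	/*
	 * Only the P/Q pages were (re)allocated above; the data stripe pages
	 * are left untouched.  Re-index so stripe_sectors[] picks up the new
	 * page pointers.
	 */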
	index_stripe_sectors(rbio);
	return 0;
}

/*
 * Add a single sector @sector into our list of bios for IO.
 *
 * Return 0 if everything went well.
 * Return <0 for error.
 */
static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list,
			      struct sector_ptr *sector,
			      unsigned int stripe_nr,
			      unsigned int sector_nr,
			      enum req_op op)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_io_stripe *stripe;
	u64 disk_start;

	/*
	 * Note: here stripe_nr has taken device replace into consideration,
	 * thus it can be larger than rbio->real_stripes.
	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
	 */
	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
	ASSERT(sector->page);

	stripe = &rbio->bioc->stripes[stripe_nr];
	disk_start = stripe->physical + sector_nr * sectorsize;

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, sector->page, sectorsize,
					   sector->pgoff);
			if (ret == sectorsize)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = bio_alloc(stripe->dev->bdev,
			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
			op, GFP_NOFS);
	bio->bi_iter.bi_sector = disk_start >> 9;
	bio->bi_private = rbio;

	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
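	/*
	 * A freshly allocated bio with at least one bio_vec always has room
	 * for a single sector, so the return value is not checked here.
	 */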
	bio_list_add(bio_list, bio);
	return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec bvec;
	struct bvec_iter iter;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->raid_map[0];

	bio_for_each_segment(bvec, bio, iter) {
		u32 bvec_offset;

		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
		     bvec_offset += sectorsize, offset += sectorsize) {
			int index = offset / sectorsize;
			struct sector_ptr *sector = &rbio->bio_sectors[index];

			sector->page = bvec.bv_page;
			sector->pgoff = bvec.bv_offset + bvec_offset;
			ASSERT(sector->pgoff < PAGE_SIZE);
		}
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
115453b381b3SDavid Woodhouse * 115553b381b3SDavid Woodhouse * This must be called before you trust the answers from page_in_rbio 115653b381b3SDavid Woodhouse */ 115753b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio) 115853b381b3SDavid Woodhouse { 115953b381b3SDavid Woodhouse struct bio *bio; 116053b381b3SDavid Woodhouse 116153b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 116200425dd9SQu Wenruo bio_list_for_each(bio, &rbio->bio_list) 116300425dd9SQu Wenruo index_one_bio(rbio, bio); 116400425dd9SQu Wenruo 116553b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 116653b381b3SDavid Woodhouse } 116753b381b3SDavid Woodhouse 1168b8bea09aSQu Wenruo static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio, 1169b8bea09aSQu Wenruo struct raid56_bio_trace_info *trace_info) 1170b8bea09aSQu Wenruo { 1171b8bea09aSQu Wenruo const struct btrfs_io_context *bioc = rbio->bioc; 1172b8bea09aSQu Wenruo int i; 1173b8bea09aSQu Wenruo 1174b8bea09aSQu Wenruo ASSERT(bioc); 1175b8bea09aSQu Wenruo 1176b8bea09aSQu Wenruo /* We rely on bio->bi_bdev to find the stripe number. */ 1177b8bea09aSQu Wenruo if (!bio->bi_bdev) 1178b8bea09aSQu Wenruo goto not_found; 1179b8bea09aSQu Wenruo 1180b8bea09aSQu Wenruo for (i = 0; i < bioc->num_stripes; i++) { 1181b8bea09aSQu Wenruo if (bio->bi_bdev != bioc->stripes[i].dev->bdev) 1182b8bea09aSQu Wenruo continue; 1183b8bea09aSQu Wenruo trace_info->stripe_nr = i; 1184b8bea09aSQu Wenruo trace_info->devid = bioc->stripes[i].dev->devid; 1185b8bea09aSQu Wenruo trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - 1186b8bea09aSQu Wenruo bioc->stripes[i].physical; 1187b8bea09aSQu Wenruo return; 1188b8bea09aSQu Wenruo } 1189b8bea09aSQu Wenruo 1190b8bea09aSQu Wenruo not_found: 1191b8bea09aSQu Wenruo trace_info->devid = -1; 1192b8bea09aSQu Wenruo trace_info->offset = -1; 1193b8bea09aSQu Wenruo trace_info->stripe_nr = -1; 1194b8bea09aSQu Wenruo } 1195b8bea09aSQu Wenruo 119653b381b3SDavid Woodhouse /* 119753b381b3SDavid Woodhouse * this is called from one of two situations. We either 119853b381b3SDavid Woodhouse * have a full stripe from the higher layers, or we've read all 119953b381b3SDavid Woodhouse * the missing bits off disk. 120053b381b3SDavid Woodhouse * 120153b381b3SDavid Woodhouse * This will calculate the parity and then send down any 120253b381b3SDavid Woodhouse * changed blocks. 120353b381b3SDavid Woodhouse */ 120453b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio) 120553b381b3SDavid Woodhouse { 12064c664611SQu Wenruo struct btrfs_io_context *bioc = rbio->bioc; 12071145059aSQu Wenruo const u32 sectorsize = bioc->fs_info->sectorsize; 12081389053eSKees Cook void **pointers = rbio->finish_pointers; 120953b381b3SDavid Woodhouse int nr_data = rbio->nr_data; 121036920044SQu Wenruo /* The total sector number inside the full stripe. */ 121136920044SQu Wenruo int total_sector_nr; 121253b381b3SDavid Woodhouse int stripe; 121336920044SQu Wenruo /* Sector number inside a stripe. 
*/ 12143e77605dSQu Wenruo int sectornr; 1215c17af965SDavid Sterba bool has_qstripe; 121653b381b3SDavid Woodhouse struct bio_list bio_list; 121753b381b3SDavid Woodhouse struct bio *bio; 121853b381b3SDavid Woodhouse int ret; 121953b381b3SDavid Woodhouse 122053b381b3SDavid Woodhouse bio_list_init(&bio_list); 122153b381b3SDavid Woodhouse 1222c17af965SDavid Sterba if (rbio->real_stripes - rbio->nr_data == 1) 1223c17af965SDavid Sterba has_qstripe = false; 1224c17af965SDavid Sterba else if (rbio->real_stripes - rbio->nr_data == 2) 1225c17af965SDavid Sterba has_qstripe = true; 1226c17af965SDavid Sterba else 122753b381b3SDavid Woodhouse BUG(); 122853b381b3SDavid Woodhouse 1229bd8f7e62SQu Wenruo /* We should have at least one data sector. */ 1230bd8f7e62SQu Wenruo ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors)); 1231bd8f7e62SQu Wenruo 123253b381b3SDavid Woodhouse /* at this point we either have a full stripe, 123353b381b3SDavid Woodhouse * or we've read the full stripe from the drive. 123453b381b3SDavid Woodhouse * recalculate the parity and write the new results. 123553b381b3SDavid Woodhouse * 123653b381b3SDavid Woodhouse * We're not allowed to add any new bios to the 123753b381b3SDavid Woodhouse * bio list here, anyone else that wants to 123853b381b3SDavid Woodhouse * change this stripe needs to do their own rmw. 123953b381b3SDavid Woodhouse */ 124053b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 124153b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 124253b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 124353b381b3SDavid Woodhouse 1244b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 124553b381b3SDavid Woodhouse 124653b381b3SDavid Woodhouse /* 124753b381b3SDavid Woodhouse * now that we've set rmw_locked, run through the 124853b381b3SDavid Woodhouse * bio list one last time and map the page pointers 12494ae10b3aSChris Mason * 12504ae10b3aSChris Mason * We don't cache full rbios because we're assuming 12514ae10b3aSChris Mason * the higher layers are unlikely to use this area of 12524ae10b3aSChris Mason * the disk again soon. If they do use it again, 12534ae10b3aSChris Mason * hopefully they will send another full bio. 
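 *
 * The loop below then walks every vertical stripe: pointers[] collects one
 * sector from each data stripe plus the P (and, for RAID6, Q) sector, and
 * parity is computed roughly as
 *   P = D0 ^ D1 ^ ... ^ D(nr_data - 1)        (memcpy + run_xor, RAID5)
 *   P,Q = raid6_call.gen_syndrome(...)        (RAID6)
 * before the mappings are dropped again with kunmap_local().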
125453b381b3SDavid Woodhouse */ 125553b381b3SDavid Woodhouse index_rbio_pages(rbio); 12564ae10b3aSChris Mason if (!rbio_is_full(rbio)) 12574ae10b3aSChris Mason cache_rbio_pages(rbio); 12584ae10b3aSChris Mason else 12594ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 126053b381b3SDavid Woodhouse 12613e77605dSQu Wenruo for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { 12621145059aSQu Wenruo struct sector_ptr *sector; 12631145059aSQu Wenruo 12641145059aSQu Wenruo /* First collect one sector from each data stripe */ 126553b381b3SDavid Woodhouse for (stripe = 0; stripe < nr_data; stripe++) { 12661145059aSQu Wenruo sector = sector_in_rbio(rbio, stripe, sectornr, 0); 12671145059aSQu Wenruo pointers[stripe] = kmap_local_page(sector->page) + 12681145059aSQu Wenruo sector->pgoff; 126953b381b3SDavid Woodhouse } 127053b381b3SDavid Woodhouse 12711145059aSQu Wenruo /* Then add the parity stripe */ 12721145059aSQu Wenruo sector = rbio_pstripe_sector(rbio, sectornr); 12731145059aSQu Wenruo sector->uptodate = 1; 12741145059aSQu Wenruo pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff; 127553b381b3SDavid Woodhouse 1276c17af965SDavid Sterba if (has_qstripe) { 127753b381b3SDavid Woodhouse /* 12781145059aSQu Wenruo * RAID6, add the qstripe and call the library function 12791145059aSQu Wenruo * to fill in our p/q 128053b381b3SDavid Woodhouse */ 12811145059aSQu Wenruo sector = rbio_qstripe_sector(rbio, sectornr); 12821145059aSQu Wenruo sector->uptodate = 1; 12831145059aSQu Wenruo pointers[stripe++] = kmap_local_page(sector->page) + 12841145059aSQu Wenruo sector->pgoff; 128553b381b3SDavid Woodhouse 12861145059aSQu Wenruo raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, 128753b381b3SDavid Woodhouse pointers); 128853b381b3SDavid Woodhouse } else { 128953b381b3SDavid Woodhouse /* raid5 */ 12901145059aSQu Wenruo memcpy(pointers[nr_data], pointers[0], sectorsize); 12911145059aSQu Wenruo run_xor(pointers + 1, nr_data - 1, sectorsize); 129253b381b3SDavid Woodhouse } 129394a0b58dSIra Weiny for (stripe = stripe - 1; stripe >= 0; stripe--) 129494a0b58dSIra Weiny kunmap_local(pointers[stripe]); 129553b381b3SDavid Woodhouse } 129653b381b3SDavid Woodhouse 129753b381b3SDavid Woodhouse /* 129836920044SQu Wenruo * Start writing. Make bios for everything from the higher layers (the 129936920044SQu Wenruo * bio_list in our rbio) and our P/Q. Ignore everything else. 130053b381b3SDavid Woodhouse */ 130136920044SQu Wenruo for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; 130236920044SQu Wenruo total_sector_nr++) { 13033e77605dSQu Wenruo struct sector_ptr *sector; 13043e77605dSQu Wenruo 130536920044SQu Wenruo stripe = total_sector_nr / rbio->stripe_nsectors; 130636920044SQu Wenruo sectornr = total_sector_nr % rbio->stripe_nsectors; 130736920044SQu Wenruo 1308bd8f7e62SQu Wenruo /* This vertical stripe has no data, skip it. 
*/ 1309bd8f7e62SQu Wenruo if (!test_bit(sectornr, &rbio->dbitmap)) 1310bd8f7e62SQu Wenruo continue; 1311bd8f7e62SQu Wenruo 131253b381b3SDavid Woodhouse if (stripe < rbio->nr_data) { 13133e77605dSQu Wenruo sector = sector_in_rbio(rbio, stripe, sectornr, 1); 13143e77605dSQu Wenruo if (!sector) 131553b381b3SDavid Woodhouse continue; 131653b381b3SDavid Woodhouse } else { 13173e77605dSQu Wenruo sector = rbio_stripe_sector(rbio, stripe, sectornr); 131853b381b3SDavid Woodhouse } 131953b381b3SDavid Woodhouse 13203e77605dSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe, 1321ff18a4afSChristoph Hellwig sectornr, REQ_OP_WRITE); 132253b381b3SDavid Woodhouse if (ret) 132353b381b3SDavid Woodhouse goto cleanup; 132453b381b3SDavid Woodhouse } 132553b381b3SDavid Woodhouse 13264c664611SQu Wenruo if (likely(!bioc->num_tgtdevs)) 13272c8cdd6eSMiao Xie goto write_data; 13282c8cdd6eSMiao Xie 132936920044SQu Wenruo for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; 133036920044SQu Wenruo total_sector_nr++) { 13313e77605dSQu Wenruo struct sector_ptr *sector; 13323e77605dSQu Wenruo 133336920044SQu Wenruo stripe = total_sector_nr / rbio->stripe_nsectors; 133436920044SQu Wenruo sectornr = total_sector_nr % rbio->stripe_nsectors; 133536920044SQu Wenruo 133636920044SQu Wenruo if (!bioc->tgtdev_map[stripe]) { 133736920044SQu Wenruo /* 133836920044SQu Wenruo * We can skip the whole stripe completely, note 133936920044SQu Wenruo * total_sector_nr will be increased by one anyway. 134036920044SQu Wenruo */ 134136920044SQu Wenruo ASSERT(sectornr == 0); 134236920044SQu Wenruo total_sector_nr += rbio->stripe_nsectors - 1; 134336920044SQu Wenruo continue; 134436920044SQu Wenruo } 134536920044SQu Wenruo 1346bd8f7e62SQu Wenruo /* This vertical stripe has no data, skip it. 
*/ 1347bd8f7e62SQu Wenruo if (!test_bit(sectornr, &rbio->dbitmap)) 1348bd8f7e62SQu Wenruo continue; 1349bd8f7e62SQu Wenruo 13502c8cdd6eSMiao Xie if (stripe < rbio->nr_data) { 13513e77605dSQu Wenruo sector = sector_in_rbio(rbio, stripe, sectornr, 1); 13523e77605dSQu Wenruo if (!sector) 13532c8cdd6eSMiao Xie continue; 13542c8cdd6eSMiao Xie } else { 13553e77605dSQu Wenruo sector = rbio_stripe_sector(rbio, stripe, sectornr); 13562c8cdd6eSMiao Xie } 13572c8cdd6eSMiao Xie 13583e77605dSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, 13594c664611SQu Wenruo rbio->bioc->tgtdev_map[stripe], 1360ff18a4afSChristoph Hellwig sectornr, REQ_OP_WRITE); 13612c8cdd6eSMiao Xie if (ret) 13622c8cdd6eSMiao Xie goto cleanup; 13632c8cdd6eSMiao Xie } 13642c8cdd6eSMiao Xie 13652c8cdd6eSMiao Xie write_data: 1366b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); 1367b89e1b01SMiao Xie BUG_ON(atomic_read(&rbio->stripes_pending) == 0); 136853b381b3SDavid Woodhouse 1369bf28a605SNikolay Borisov while ((bio = bio_list_pop(&bio_list))) { 137053b381b3SDavid Woodhouse bio->bi_end_io = raid_write_end_io; 13714e49ea4aSMike Christie 1372b8bea09aSQu Wenruo if (trace_raid56_write_stripe_enabled()) { 1373b8bea09aSQu Wenruo struct raid56_bio_trace_info trace_info = { 0 }; 1374b8bea09aSQu Wenruo 1375b8bea09aSQu Wenruo bio_get_trace_info(rbio, bio, &trace_info); 1376b8bea09aSQu Wenruo trace_raid56_write_stripe(rbio, bio, &trace_info); 1377b8bea09aSQu Wenruo } 13784e49ea4aSMike Christie submit_bio(bio); 137953b381b3SDavid Woodhouse } 138053b381b3SDavid Woodhouse return; 138153b381b3SDavid Woodhouse 138253b381b3SDavid Woodhouse cleanup: 138358efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 1384785884fcSLiu Bo 1385785884fcSLiu Bo while ((bio = bio_list_pop(&bio_list))) 1386785884fcSLiu Bo bio_put(bio); 138753b381b3SDavid Woodhouse } 138853b381b3SDavid Woodhouse 138953b381b3SDavid Woodhouse /* 139053b381b3SDavid Woodhouse * helper to find the stripe number for a given bio. Used to figure out which 139153b381b3SDavid Woodhouse * stripe has failed. This expects the bio to correspond to a physical disk, 139253b381b3SDavid Woodhouse * so it looks up based on physical sector numbers. 139353b381b3SDavid Woodhouse */ 139453b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio, 139553b381b3SDavid Woodhouse struct bio *bio) 139653b381b3SDavid Woodhouse { 13974f024f37SKent Overstreet u64 physical = bio->bi_iter.bi_sector; 139853b381b3SDavid Woodhouse int i; 13994c664611SQu Wenruo struct btrfs_io_stripe *stripe; 140053b381b3SDavid Woodhouse 140153b381b3SDavid Woodhouse physical <<= 9; 140253b381b3SDavid Woodhouse 14034c664611SQu Wenruo for (i = 0; i < rbio->bioc->num_stripes; i++) { 14044c664611SQu Wenruo stripe = &rbio->bioc->stripes[i]; 1405ff18a4afSChristoph Hellwig if (in_range(physical, stripe->physical, BTRFS_STRIPE_LEN) && 1406309dca30SChristoph Hellwig stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) { 140753b381b3SDavid Woodhouse return i; 140853b381b3SDavid Woodhouse } 140953b381b3SDavid Woodhouse } 141053b381b3SDavid Woodhouse return -1; 141153b381b3SDavid Woodhouse } 141253b381b3SDavid Woodhouse 141353b381b3SDavid Woodhouse /* 141453b381b3SDavid Woodhouse * helper to find the stripe number for a given 141553b381b3SDavid Woodhouse * bio (before mapping). Used to figure out which stripe has 141653b381b3SDavid Woodhouse * failed. This looks up based on logical block numbers. 
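 *
 * In effect, data stripe i covers the logical range
 *   [raid_map[i], raid_map[i] + BTRFS_STRIPE_LEN)
 * so, assuming the usual 64K stripe length, a bio starting 70K past
 * raid_map[0] is attributed to data stripe 1.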
141753b381b3SDavid Woodhouse */ 141853b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, 141953b381b3SDavid Woodhouse struct bio *bio) 142053b381b3SDavid Woodhouse { 14211201b58bSDavid Sterba u64 logical = bio->bi_iter.bi_sector << 9; 142253b381b3SDavid Woodhouse int i; 142353b381b3SDavid Woodhouse 142453b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_data; i++) { 14254c664611SQu Wenruo u64 stripe_start = rbio->bioc->raid_map[i]; 142683025863SNikolay Borisov 1427ff18a4afSChristoph Hellwig if (in_range(logical, stripe_start, BTRFS_STRIPE_LEN)) 142853b381b3SDavid Woodhouse return i; 142953b381b3SDavid Woodhouse } 143053b381b3SDavid Woodhouse return -1; 143153b381b3SDavid Woodhouse } 143253b381b3SDavid Woodhouse 143353b381b3SDavid Woodhouse /* 143453b381b3SDavid Woodhouse * returns -EIO if we had too many failures 143553b381b3SDavid Woodhouse */ 143653b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) 143753b381b3SDavid Woodhouse { 143853b381b3SDavid Woodhouse unsigned long flags; 143953b381b3SDavid Woodhouse int ret = 0; 144053b381b3SDavid Woodhouse 144153b381b3SDavid Woodhouse spin_lock_irqsave(&rbio->bio_list_lock, flags); 144253b381b3SDavid Woodhouse 144353b381b3SDavid Woodhouse /* we already know this stripe is bad, move on */ 144453b381b3SDavid Woodhouse if (rbio->faila == failed || rbio->failb == failed) 144553b381b3SDavid Woodhouse goto out; 144653b381b3SDavid Woodhouse 144753b381b3SDavid Woodhouse if (rbio->faila == -1) { 144853b381b3SDavid Woodhouse /* first failure on this rbio */ 144953b381b3SDavid Woodhouse rbio->faila = failed; 1450b89e1b01SMiao Xie atomic_inc(&rbio->error); 145153b381b3SDavid Woodhouse } else if (rbio->failb == -1) { 145253b381b3SDavid Woodhouse /* second failure on this rbio */ 145353b381b3SDavid Woodhouse rbio->failb = failed; 1454b89e1b01SMiao Xie atomic_inc(&rbio->error); 145553b381b3SDavid Woodhouse } else { 145653b381b3SDavid Woodhouse ret = -EIO; 145753b381b3SDavid Woodhouse } 145853b381b3SDavid Woodhouse out: 145953b381b3SDavid Woodhouse spin_unlock_irqrestore(&rbio->bio_list_lock, flags); 146053b381b3SDavid Woodhouse 146153b381b3SDavid Woodhouse return ret; 146253b381b3SDavid Woodhouse } 146353b381b3SDavid Woodhouse 146453b381b3SDavid Woodhouse /* 146553b381b3SDavid Woodhouse * helper to fail a stripe based on a physical disk 146653b381b3SDavid Woodhouse * bio. 146753b381b3SDavid Woodhouse */ 146853b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, 146953b381b3SDavid Woodhouse struct bio *bio) 147053b381b3SDavid Woodhouse { 147153b381b3SDavid Woodhouse int failed = find_bio_stripe(rbio, bio); 147253b381b3SDavid Woodhouse 147353b381b3SDavid Woodhouse if (failed < 0) 147453b381b3SDavid Woodhouse return -EIO; 147553b381b3SDavid Woodhouse 147653b381b3SDavid Woodhouse return fail_rbio_index(rbio, failed); 147753b381b3SDavid Woodhouse } 147853b381b3SDavid Woodhouse 147953b381b3SDavid Woodhouse /* 14805fdb7afcSQu Wenruo * For subpage case, we can no longer set page Uptodate directly for 14815fdb7afcSQu Wenruo * stripe_pages[], thus we need to locate the sector. 
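 * E.g. with 64K pages and a 4K sector size one page backs 16 sectors, so a
 * (page, pgoff) pair rather than the page alone identifies a sector; the
 * linear scan below matches on both.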
14825fdb7afcSQu Wenruo */ 14835fdb7afcSQu Wenruo static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio, 14845fdb7afcSQu Wenruo struct page *page, 14855fdb7afcSQu Wenruo unsigned int pgoff) 14865fdb7afcSQu Wenruo { 14875fdb7afcSQu Wenruo int i; 14885fdb7afcSQu Wenruo 14895fdb7afcSQu Wenruo for (i = 0; i < rbio->nr_sectors; i++) { 14905fdb7afcSQu Wenruo struct sector_ptr *sector = &rbio->stripe_sectors[i]; 14915fdb7afcSQu Wenruo 14925fdb7afcSQu Wenruo if (sector->page == page && sector->pgoff == pgoff) 14935fdb7afcSQu Wenruo return sector; 14945fdb7afcSQu Wenruo } 14955fdb7afcSQu Wenruo return NULL; 14965fdb7afcSQu Wenruo } 14975fdb7afcSQu Wenruo 14985fdb7afcSQu Wenruo /* 149953b381b3SDavid Woodhouse * this sets each page in the bio uptodate. It should only be used on private 150053b381b3SDavid Woodhouse * rbio pages, nothing that comes in from the higher layers 150153b381b3SDavid Woodhouse */ 15025fdb7afcSQu Wenruo static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio) 150353b381b3SDavid Woodhouse { 15045fdb7afcSQu Wenruo const u32 sectorsize = rbio->bioc->fs_info->sectorsize; 15050198e5b7SLiu Bo struct bio_vec *bvec; 15066dc4f100SMing Lei struct bvec_iter_all iter_all; 150753b381b3SDavid Woodhouse 15080198e5b7SLiu Bo ASSERT(!bio_flagged(bio, BIO_CLONED)); 15096592e58cSFilipe Manana 15105fdb7afcSQu Wenruo bio_for_each_segment_all(bvec, bio, iter_all) { 15115fdb7afcSQu Wenruo struct sector_ptr *sector; 15125fdb7afcSQu Wenruo int pgoff; 15135fdb7afcSQu Wenruo 15145fdb7afcSQu Wenruo for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len; 15155fdb7afcSQu Wenruo pgoff += sectorsize) { 15165fdb7afcSQu Wenruo sector = find_stripe_sector(rbio, bvec->bv_page, pgoff); 15175fdb7afcSQu Wenruo ASSERT(sector); 15185fdb7afcSQu Wenruo if (sector) 15195fdb7afcSQu Wenruo sector->uptodate = 1; 15205fdb7afcSQu Wenruo } 15215fdb7afcSQu Wenruo } 152253b381b3SDavid Woodhouse } 152353b381b3SDavid Woodhouse 1524d34e123dSChristoph Hellwig static void raid56_bio_end_io(struct bio *bio) 152553b381b3SDavid Woodhouse { 152653b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 152753b381b3SDavid Woodhouse 15284e4cbee9SChristoph Hellwig if (bio->bi_status) 152953b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 153053b381b3SDavid Woodhouse else 15315fdb7afcSQu Wenruo set_bio_pages_uptodate(rbio, bio); 153253b381b3SDavid Woodhouse 153353b381b3SDavid Woodhouse bio_put(bio); 153453b381b3SDavid Woodhouse 1535d34e123dSChristoph Hellwig if (atomic_dec_and_test(&rbio->stripes_pending)) 1536d34e123dSChristoph Hellwig queue_work(rbio->bioc->fs_info->endio_raid56_workers, 1537d34e123dSChristoph Hellwig &rbio->end_io_work); 1538d34e123dSChristoph Hellwig } 153953b381b3SDavid Woodhouse 154053b381b3SDavid Woodhouse /* 1541d34e123dSChristoph Hellwig * End io handler for the read phase of the RMW cycle. All the bios here are 1542d34e123dSChristoph Hellwig * physical stripe bios we've read from the disk so we can recalculate the 1543d34e123dSChristoph Hellwig * parity of the stripe. 
1544d34e123dSChristoph Hellwig * 1545d34e123dSChristoph Hellwig * This will usually kick off finish_rmw once all the bios are read in, but it 1546d34e123dSChristoph Hellwig * may trigger parity reconstruction if we had any errors along the way 1547d34e123dSChristoph Hellwig */ 1548d34e123dSChristoph Hellwig static void raid56_rmw_end_io_work(struct work_struct *work) 1549d34e123dSChristoph Hellwig { 1550d34e123dSChristoph Hellwig struct btrfs_raid_bio *rbio = 1551d34e123dSChristoph Hellwig container_of(work, struct btrfs_raid_bio, end_io_work); 1552d34e123dSChristoph Hellwig 1553d34e123dSChristoph Hellwig if (atomic_read(&rbio->error) > rbio->bioc->max_errors) { 1554d34e123dSChristoph Hellwig rbio_orig_end_io(rbio, BLK_STS_IOERR); 1555d34e123dSChristoph Hellwig return; 1556d34e123dSChristoph Hellwig } 1557d34e123dSChristoph Hellwig 1558d34e123dSChristoph Hellwig /* 1559d34e123dSChristoph Hellwig * This will normally call finish_rmw to start our write but if there 1560d34e123dSChristoph Hellwig * are any failed stripes we'll reconstruct from parity first. 156153b381b3SDavid Woodhouse */ 156253b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 156353b381b3SDavid Woodhouse } 156453b381b3SDavid Woodhouse 156553b381b3SDavid Woodhouse /* 156653b381b3SDavid Woodhouse * the stripe must be locked by the caller. It will 156753b381b3SDavid Woodhouse * unlock after all the writes are done 156853b381b3SDavid Woodhouse */ 156953b381b3SDavid Woodhouse static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) 157053b381b3SDavid Woodhouse { 157153b381b3SDavid Woodhouse int bios_to_read = 0; 157253b381b3SDavid Woodhouse struct bio_list bio_list; 1573550cdeb3SQu Wenruo const int nr_data_sectors = rbio->stripe_nsectors * rbio->nr_data; 157453b381b3SDavid Woodhouse int ret; 1575550cdeb3SQu Wenruo int total_sector_nr; 157653b381b3SDavid Woodhouse struct bio *bio; 157753b381b3SDavid Woodhouse 157853b381b3SDavid Woodhouse bio_list_init(&bio_list); 157953b381b3SDavid Woodhouse 158053b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 158153b381b3SDavid Woodhouse if (ret) 158253b381b3SDavid Woodhouse goto cleanup; 158353b381b3SDavid Woodhouse 158453b381b3SDavid Woodhouse index_rbio_pages(rbio); 158553b381b3SDavid Woodhouse 1586b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 1587550cdeb3SQu Wenruo /* Build a list of bios to read all the missing data sectors. */ 1588550cdeb3SQu Wenruo for (total_sector_nr = 0; total_sector_nr < nr_data_sectors; 1589550cdeb3SQu Wenruo total_sector_nr++) { 15903e77605dSQu Wenruo struct sector_ptr *sector; 1591550cdeb3SQu Wenruo int stripe = total_sector_nr / rbio->stripe_nsectors; 1592550cdeb3SQu Wenruo int sectornr = total_sector_nr % rbio->stripe_nsectors; 15933e77605dSQu Wenruo 159453b381b3SDavid Woodhouse /* 1595550cdeb3SQu Wenruo * We want to find all the sectors missing from the rbio and 1596550cdeb3SQu Wenruo * read them from the disk. If sector_in_rbio() finds a page 1597550cdeb3SQu Wenruo * in the bio list we don't need to read it off the stripe. 159853b381b3SDavid Woodhouse */ 15993e77605dSQu Wenruo sector = sector_in_rbio(rbio, stripe, sectornr, 1); 16003e77605dSQu Wenruo if (sector) 160153b381b3SDavid Woodhouse continue; 160253b381b3SDavid Woodhouse 16033e77605dSQu Wenruo sector = rbio_stripe_sector(rbio, stripe, sectornr); 16044ae10b3aSChris Mason /* 1605550cdeb3SQu Wenruo * The bio cache may have handed us an uptodate page. If so, 1606550cdeb3SQu Wenruo * use it. 
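 * (A sector is uptodate here either because a previous RMW/recovery cached
 * it via cache_rbio_pages() or because an earlier read completed and
 * set_bio_pages_uptodate() marked it; either way the read can be skipped.)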
16074ae10b3aSChris Mason */ 16083e77605dSQu Wenruo if (sector->uptodate) 16094ae10b3aSChris Mason continue; 16104ae10b3aSChris Mason 16113e77605dSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, 1612ff18a4afSChristoph Hellwig stripe, sectornr, REQ_OP_READ); 161353b381b3SDavid Woodhouse if (ret) 161453b381b3SDavid Woodhouse goto cleanup; 161553b381b3SDavid Woodhouse } 161653b381b3SDavid Woodhouse 161753b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 161853b381b3SDavid Woodhouse if (!bios_to_read) { 161953b381b3SDavid Woodhouse /* 162053b381b3SDavid Woodhouse * this can happen if others have merged with 162153b381b3SDavid Woodhouse * us, it means there is nothing left to read. 162253b381b3SDavid Woodhouse * But if there are missing devices it may not be 162353b381b3SDavid Woodhouse * safe to do the full stripe write yet. 162453b381b3SDavid Woodhouse */ 162553b381b3SDavid Woodhouse goto finish; 162653b381b3SDavid Woodhouse } 162753b381b3SDavid Woodhouse 162853b381b3SDavid Woodhouse /* 16294c664611SQu Wenruo * The bioc may be freed once we submit the last bio. Make sure not to 16304c664611SQu Wenruo * touch it after that. 163153b381b3SDavid Woodhouse */ 1632b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bios_to_read); 1633d34e123dSChristoph Hellwig INIT_WORK(&rbio->end_io_work, raid56_rmw_end_io_work); 1634bf28a605SNikolay Borisov while ((bio = bio_list_pop(&bio_list))) { 1635d34e123dSChristoph Hellwig bio->bi_end_io = raid56_bio_end_io; 163653b381b3SDavid Woodhouse 1637b8bea09aSQu Wenruo if (trace_raid56_read_partial_enabled()) { 1638b8bea09aSQu Wenruo struct raid56_bio_trace_info trace_info = { 0 }; 163953b381b3SDavid Woodhouse 1640b8bea09aSQu Wenruo bio_get_trace_info(rbio, bio, &trace_info); 1641b8bea09aSQu Wenruo trace_raid56_read_partial(rbio, bio, &trace_info); 1642b8bea09aSQu Wenruo } 16434e49ea4aSMike Christie submit_bio(bio); 164453b381b3SDavid Woodhouse } 164553b381b3SDavid Woodhouse /* the actual write will happen once the reads are done */ 164653b381b3SDavid Woodhouse return 0; 164753b381b3SDavid Woodhouse 164853b381b3SDavid Woodhouse cleanup: 164958efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 1650785884fcSLiu Bo 1651785884fcSLiu Bo while ((bio = bio_list_pop(&bio_list))) 1652785884fcSLiu Bo bio_put(bio); 1653785884fcSLiu Bo 165453b381b3SDavid Woodhouse return -EIO; 165553b381b3SDavid Woodhouse 165653b381b3SDavid Woodhouse finish: 165753b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 165853b381b3SDavid Woodhouse return 0; 165953b381b3SDavid Woodhouse } 166053b381b3SDavid Woodhouse 166153b381b3SDavid Woodhouse /* 166253b381b3SDavid Woodhouse * if the upper layers pass in a full stripe, we thank them by only allocating 166353b381b3SDavid Woodhouse * enough pages to hold the parity, and sending it all down quickly. 
166453b381b3SDavid Woodhouse */ 166553b381b3SDavid Woodhouse static int full_stripe_write(struct btrfs_raid_bio *rbio) 166653b381b3SDavid Woodhouse { 166753b381b3SDavid Woodhouse int ret; 166853b381b3SDavid Woodhouse 166953b381b3SDavid Woodhouse ret = alloc_rbio_parity_pages(rbio); 1670ab4c54c6SQu Wenruo if (ret) 167153b381b3SDavid Woodhouse return ret; 167253b381b3SDavid Woodhouse 167353b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 167453b381b3SDavid Woodhouse if (ret == 0) 167553b381b3SDavid Woodhouse finish_rmw(rbio); 167653b381b3SDavid Woodhouse return 0; 167753b381b3SDavid Woodhouse } 167853b381b3SDavid Woodhouse 167953b381b3SDavid Woodhouse /* 168053b381b3SDavid Woodhouse * partial stripe writes get handed over to async helpers. 168153b381b3SDavid Woodhouse * We're really hoping to merge a few more writes into this 168253b381b3SDavid Woodhouse * rbio before calculating new parity 168353b381b3SDavid Woodhouse */ 168453b381b3SDavid Woodhouse static int partial_stripe_write(struct btrfs_raid_bio *rbio) 168553b381b3SDavid Woodhouse { 168653b381b3SDavid Woodhouse int ret; 168753b381b3SDavid Woodhouse 168853b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 168953b381b3SDavid Woodhouse if (ret == 0) 1690cf6a4a75SDavid Sterba start_async_work(rbio, rmw_work); 169153b381b3SDavid Woodhouse return 0; 169253b381b3SDavid Woodhouse } 169353b381b3SDavid Woodhouse 169453b381b3SDavid Woodhouse /* 169553b381b3SDavid Woodhouse * sometimes while we were reading from the drive to 169653b381b3SDavid Woodhouse * recalculate parity, enough new bios come into create 169753b381b3SDavid Woodhouse * a full stripe. So we do a check here to see if we can 169853b381b3SDavid Woodhouse * go directly to finish_rmw 169953b381b3SDavid Woodhouse */ 170053b381b3SDavid Woodhouse static int __raid56_parity_write(struct btrfs_raid_bio *rbio) 170153b381b3SDavid Woodhouse { 170253b381b3SDavid Woodhouse /* head off into rmw land if we don't have a full stripe */ 170353b381b3SDavid Woodhouse if (!rbio_is_full(rbio)) 170453b381b3SDavid Woodhouse return partial_stripe_write(rbio); 170553b381b3SDavid Woodhouse return full_stripe_write(rbio); 170653b381b3SDavid Woodhouse } 170753b381b3SDavid Woodhouse 170853b381b3SDavid Woodhouse /* 17096ac0f488SChris Mason * We use plugging call backs to collect full stripes. 17106ac0f488SChris Mason * Any time we get a partial stripe write while plugged 17116ac0f488SChris Mason * we collect it into a list. When the unplug comes down, 17126ac0f488SChris Mason * we sort the list by logical block number and merge 17136ac0f488SChris Mason * everything we can into the same rbios 17146ac0f488SChris Mason */ 17156ac0f488SChris Mason struct btrfs_plug_cb { 17166ac0f488SChris Mason struct blk_plug_cb cb; 17176ac0f488SChris Mason struct btrfs_fs_info *info; 17186ac0f488SChris Mason struct list_head rbio_list; 1719385de0efSChristoph Hellwig struct work_struct work; 17206ac0f488SChris Mason }; 17216ac0f488SChris Mason 17226ac0f488SChris Mason /* 17236ac0f488SChris Mason * rbios on the plug list are sorted for easier merging. 
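 * The sort key is simply the first bio's bi_iter.bi_sector, so after
 * list_sort() rbios touching the same full stripe end up adjacent and
 * run_plug() can fold them together with rbio_can_merge()/merge_rbio()
 * before any parity is computed.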
17246ac0f488SChris Mason */ 17254f0f586bSSami Tolvanen static int plug_cmp(void *priv, const struct list_head *a, 17264f0f586bSSami Tolvanen const struct list_head *b) 17276ac0f488SChris Mason { 1728214cc184SDavid Sterba const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio, 17296ac0f488SChris Mason plug_list); 1730214cc184SDavid Sterba const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, 17316ac0f488SChris Mason plug_list); 17324f024f37SKent Overstreet u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; 17334f024f37SKent Overstreet u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; 17346ac0f488SChris Mason 17356ac0f488SChris Mason if (a_sector < b_sector) 17366ac0f488SChris Mason return -1; 17376ac0f488SChris Mason if (a_sector > b_sector) 17386ac0f488SChris Mason return 1; 17396ac0f488SChris Mason return 0; 17406ac0f488SChris Mason } 17416ac0f488SChris Mason 17426ac0f488SChris Mason static void run_plug(struct btrfs_plug_cb *plug) 17436ac0f488SChris Mason { 17446ac0f488SChris Mason struct btrfs_raid_bio *cur; 17456ac0f488SChris Mason struct btrfs_raid_bio *last = NULL; 17466ac0f488SChris Mason 17476ac0f488SChris Mason /* 17486ac0f488SChris Mason * sort our plug list then try to merge 17496ac0f488SChris Mason * everything we can in hopes of creating full 17506ac0f488SChris Mason * stripes. 17516ac0f488SChris Mason */ 17526ac0f488SChris Mason list_sort(NULL, &plug->rbio_list, plug_cmp); 17536ac0f488SChris Mason while (!list_empty(&plug->rbio_list)) { 17546ac0f488SChris Mason cur = list_entry(plug->rbio_list.next, 17556ac0f488SChris Mason struct btrfs_raid_bio, plug_list); 17566ac0f488SChris Mason list_del_init(&cur->plug_list); 17576ac0f488SChris Mason 17586ac0f488SChris Mason if (rbio_is_full(cur)) { 1759c7b562c5SDavid Sterba int ret; 1760c7b562c5SDavid Sterba 17616ac0f488SChris Mason /* we have a full stripe, send it down */ 1762c7b562c5SDavid Sterba ret = full_stripe_write(cur); 1763c7b562c5SDavid Sterba BUG_ON(ret); 17646ac0f488SChris Mason continue; 17656ac0f488SChris Mason } 17666ac0f488SChris Mason if (last) { 17676ac0f488SChris Mason if (rbio_can_merge(last, cur)) { 17686ac0f488SChris Mason merge_rbio(last, cur); 1769ff2b64a2SQu Wenruo free_raid_bio(cur); 17706ac0f488SChris Mason continue; 17716ac0f488SChris Mason 17726ac0f488SChris Mason } 17736ac0f488SChris Mason __raid56_parity_write(last); 17746ac0f488SChris Mason } 17756ac0f488SChris Mason last = cur; 17766ac0f488SChris Mason } 17776ac0f488SChris Mason if (last) { 17786ac0f488SChris Mason __raid56_parity_write(last); 17796ac0f488SChris Mason } 17806ac0f488SChris Mason kfree(plug); 17816ac0f488SChris Mason } 17826ac0f488SChris Mason 17836ac0f488SChris Mason /* 17846ac0f488SChris Mason * if the unplug comes from schedule, we have to push the 17856ac0f488SChris Mason * work off to a helper thread 17866ac0f488SChris Mason */ 1787385de0efSChristoph Hellwig static void unplug_work(struct work_struct *work) 17886ac0f488SChris Mason { 17896ac0f488SChris Mason struct btrfs_plug_cb *plug; 17906ac0f488SChris Mason plug = container_of(work, struct btrfs_plug_cb, work); 17916ac0f488SChris Mason run_plug(plug); 17926ac0f488SChris Mason } 17936ac0f488SChris Mason 17946ac0f488SChris Mason static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) 17956ac0f488SChris Mason { 17966ac0f488SChris Mason struct btrfs_plug_cb *plug; 17976ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 17986ac0f488SChris Mason 17996ac0f488SChris Mason if (from_schedule) { 
1800385de0efSChristoph Hellwig INIT_WORK(&plug->work, unplug_work); 1801385de0efSChristoph Hellwig queue_work(plug->info->rmw_workers, &plug->work); 18026ac0f488SChris Mason return; 18036ac0f488SChris Mason } 18046ac0f488SChris Mason run_plug(plug); 18056ac0f488SChris Mason } 18066ac0f488SChris Mason 1807bd8f7e62SQu Wenruo /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */ 1808bd8f7e62SQu Wenruo static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio) 1809bd8f7e62SQu Wenruo { 1810bd8f7e62SQu Wenruo const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 1811bd8f7e62SQu Wenruo const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT; 1812bd8f7e62SQu Wenruo const u64 full_stripe_start = rbio->bioc->raid_map[0]; 1813bd8f7e62SQu Wenruo const u32 orig_len = orig_bio->bi_iter.bi_size; 1814bd8f7e62SQu Wenruo const u32 sectorsize = fs_info->sectorsize; 1815bd8f7e62SQu Wenruo u64 cur_logical; 1816bd8f7e62SQu Wenruo 1817bd8f7e62SQu Wenruo ASSERT(orig_logical >= full_stripe_start && 1818bd8f7e62SQu Wenruo orig_logical + orig_len <= full_stripe_start + 1819ff18a4afSChristoph Hellwig rbio->nr_data * BTRFS_STRIPE_LEN); 1820bd8f7e62SQu Wenruo 1821bd8f7e62SQu Wenruo bio_list_add(&rbio->bio_list, orig_bio); 1822bd8f7e62SQu Wenruo rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; 1823bd8f7e62SQu Wenruo 1824bd8f7e62SQu Wenruo /* Update the dbitmap. */ 1825bd8f7e62SQu Wenruo for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len; 1826bd8f7e62SQu Wenruo cur_logical += sectorsize) { 1827bd8f7e62SQu Wenruo int bit = ((u32)(cur_logical - full_stripe_start) >> 1828bd8f7e62SQu Wenruo fs_info->sectorsize_bits) % rbio->stripe_nsectors; 1829bd8f7e62SQu Wenruo 1830bd8f7e62SQu Wenruo set_bit(bit, &rbio->dbitmap); 1831bd8f7e62SQu Wenruo } 1832bd8f7e62SQu Wenruo } 1833bd8f7e62SQu Wenruo 18346ac0f488SChris Mason /* 183553b381b3SDavid Woodhouse * our main entry point for writes from the rest of the FS. 
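 *
 * Roughly: allocate an rbio for the bioc, queue the bio on it with
 * rbio_add_bio(), then either write a full stripe immediately
 * (full_stripe_write()), park the rbio on the current plug so more writes
 * can merge in, or fall back to __raid56_parity_write().  Any failure ends
 * the original bio with the error status.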
183653b381b3SDavid Woodhouse */ 183731683f4aSChristoph Hellwig void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc) 183853b381b3SDavid Woodhouse { 18396a258d72SQu Wenruo struct btrfs_fs_info *fs_info = bioc->fs_info; 184053b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 18416ac0f488SChris Mason struct btrfs_plug_cb *plug = NULL; 18426ac0f488SChris Mason struct blk_plug_cb *cb; 184331683f4aSChristoph Hellwig int ret = 0; 184453b381b3SDavid Woodhouse 1845ff18a4afSChristoph Hellwig rbio = alloc_rbio(fs_info, bioc); 1846af8e2d1dSMiao Xie if (IS_ERR(rbio)) { 184731683f4aSChristoph Hellwig ret = PTR_ERR(rbio); 1848f1c29379SChristoph Hellwig goto fail; 1849af8e2d1dSMiao Xie } 18501b94b556SMiao Xie rbio->operation = BTRFS_RBIO_WRITE; 1851bd8f7e62SQu Wenruo rbio_add_bio(rbio, bio); 18526ac0f488SChris Mason 18536ac0f488SChris Mason /* 18546ac0f488SChris Mason * don't plug on full rbios, just get them out the door 18556ac0f488SChris Mason * as quickly as we can 18566ac0f488SChris Mason */ 18574245215dSMiao Xie if (rbio_is_full(rbio)) { 18584245215dSMiao Xie ret = full_stripe_write(rbio); 1859ab4c54c6SQu Wenruo if (ret) { 1860ff2b64a2SQu Wenruo free_raid_bio(rbio); 1861f1c29379SChristoph Hellwig goto fail; 1862ab4c54c6SQu Wenruo } 186331683f4aSChristoph Hellwig return; 18644245215dSMiao Xie } 18656ac0f488SChris Mason 18660b246afaSJeff Mahoney cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug)); 18676ac0f488SChris Mason if (cb) { 18686ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 18696ac0f488SChris Mason if (!plug->info) { 18700b246afaSJeff Mahoney plug->info = fs_info; 18716ac0f488SChris Mason INIT_LIST_HEAD(&plug->rbio_list); 18726ac0f488SChris Mason } 18736ac0f488SChris Mason list_add_tail(&rbio->plug_list, &plug->rbio_list); 18746ac0f488SChris Mason } else { 18754245215dSMiao Xie ret = __raid56_parity_write(rbio); 1876ab4c54c6SQu Wenruo if (ret) { 1877ff2b64a2SQu Wenruo free_raid_bio(rbio); 1878f1c29379SChristoph Hellwig goto fail; 187953b381b3SDavid Woodhouse } 1880ab4c54c6SQu Wenruo } 188131683f4aSChristoph Hellwig 188231683f4aSChristoph Hellwig return; 188331683f4aSChristoph Hellwig 1884f1c29379SChristoph Hellwig fail: 188531683f4aSChristoph Hellwig bio->bi_status = errno_to_blk_status(ret); 188631683f4aSChristoph Hellwig bio_endio(bio); 18876ac0f488SChris Mason } 188853b381b3SDavid Woodhouse 188953b381b3SDavid Woodhouse /* 1890*9c5ff9b4SQu Wenruo * Recover a vertical stripe specified by @sector_nr. 1891*9c5ff9b4SQu Wenruo * @*pointers are the pre-allocated pointers by the caller, so we don't 1892*9c5ff9b4SQu Wenruo * need to allocate/free the pointers again and again. 1893*9c5ff9b4SQu Wenruo */ 1894*9c5ff9b4SQu Wenruo static void recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr, 1895*9c5ff9b4SQu Wenruo void **pointers, void **unmap_array) 1896*9c5ff9b4SQu Wenruo { 1897*9c5ff9b4SQu Wenruo struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 1898*9c5ff9b4SQu Wenruo struct sector_ptr *sector; 1899*9c5ff9b4SQu Wenruo const u32 sectorsize = fs_info->sectorsize; 1900*9c5ff9b4SQu Wenruo const int faila = rbio->faila; 1901*9c5ff9b4SQu Wenruo const int failb = rbio->failb; 1902*9c5ff9b4SQu Wenruo int stripe_nr; 1903*9c5ff9b4SQu Wenruo 1904*9c5ff9b4SQu Wenruo /* 1905*9c5ff9b4SQu Wenruo * Now we just use bitmap to mark the horizontal stripes in 1906*9c5ff9b4SQu Wenruo * which we have data when doing parity scrub. 
1907*9c5ff9b4SQu Wenruo */ 1908*9c5ff9b4SQu Wenruo if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && 1909*9c5ff9b4SQu Wenruo !test_bit(sector_nr, &rbio->dbitmap)) 1910*9c5ff9b4SQu Wenruo return; 1911*9c5ff9b4SQu Wenruo 1912*9c5ff9b4SQu Wenruo /* 1913*9c5ff9b4SQu Wenruo * Setup our array of pointers with sectors from each stripe 1914*9c5ff9b4SQu Wenruo * 1915*9c5ff9b4SQu Wenruo * NOTE: store a duplicate array of pointers to preserve the 1916*9c5ff9b4SQu Wenruo * pointer order. 1917*9c5ff9b4SQu Wenruo */ 1918*9c5ff9b4SQu Wenruo for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { 1919*9c5ff9b4SQu Wenruo /* 1920*9c5ff9b4SQu Wenruo * If we're rebuilding a read, we have to use 1921*9c5ff9b4SQu Wenruo * pages from the bio list 1922*9c5ff9b4SQu Wenruo */ 1923*9c5ff9b4SQu Wenruo if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || 1924*9c5ff9b4SQu Wenruo rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && 1925*9c5ff9b4SQu Wenruo (stripe_nr == faila || stripe_nr == failb)) { 1926*9c5ff9b4SQu Wenruo sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0); 1927*9c5ff9b4SQu Wenruo } else { 1928*9c5ff9b4SQu Wenruo sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr); 1929*9c5ff9b4SQu Wenruo } 1930*9c5ff9b4SQu Wenruo ASSERT(sector->page); 1931*9c5ff9b4SQu Wenruo pointers[stripe_nr] = kmap_local_page(sector->page) + 1932*9c5ff9b4SQu Wenruo sector->pgoff; 1933*9c5ff9b4SQu Wenruo unmap_array[stripe_nr] = pointers[stripe_nr]; 1934*9c5ff9b4SQu Wenruo } 1935*9c5ff9b4SQu Wenruo 1936*9c5ff9b4SQu Wenruo /* All raid6 handling here */ 1937*9c5ff9b4SQu Wenruo if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) { 1938*9c5ff9b4SQu Wenruo /* Single failure, rebuild from parity raid5 style */ 1939*9c5ff9b4SQu Wenruo if (failb < 0) { 1940*9c5ff9b4SQu Wenruo if (faila == rbio->nr_data) 1941*9c5ff9b4SQu Wenruo /* 1942*9c5ff9b4SQu Wenruo * Just the P stripe has failed, without 1943*9c5ff9b4SQu Wenruo * a bad data or Q stripe. 1944*9c5ff9b4SQu Wenruo * We have nothing to do, just skip the 1945*9c5ff9b4SQu Wenruo * recovery for this stripe. 1946*9c5ff9b4SQu Wenruo */ 1947*9c5ff9b4SQu Wenruo goto cleanup; 1948*9c5ff9b4SQu Wenruo /* 1949*9c5ff9b4SQu Wenruo * a single failure in raid6 is rebuilt 1950*9c5ff9b4SQu Wenruo * in the pstripe code below 1951*9c5ff9b4SQu Wenruo */ 1952*9c5ff9b4SQu Wenruo goto pstripe; 1953*9c5ff9b4SQu Wenruo } 1954*9c5ff9b4SQu Wenruo 1955*9c5ff9b4SQu Wenruo /* 1956*9c5ff9b4SQu Wenruo * If the q stripe is failed, do a pstripe reconstruction from 1957*9c5ff9b4SQu Wenruo * the xors. 1958*9c5ff9b4SQu Wenruo * If both the q stripe and the P stripe are failed, we're 1959*9c5ff9b4SQu Wenruo * here due to a crc mismatch and we can't give them the 1960*9c5ff9b4SQu Wenruo * data they want. 1961*9c5ff9b4SQu Wenruo */ 1962*9c5ff9b4SQu Wenruo if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) { 1963*9c5ff9b4SQu Wenruo if (rbio->bioc->raid_map[faila] == 1964*9c5ff9b4SQu Wenruo RAID5_P_STRIPE) 1965*9c5ff9b4SQu Wenruo /* 1966*9c5ff9b4SQu Wenruo * Only P and Q are corrupted. 1967*9c5ff9b4SQu Wenruo * We only care about data stripes recovery, 1968*9c5ff9b4SQu Wenruo * can skip this vertical stripe. 1969*9c5ff9b4SQu Wenruo */ 1970*9c5ff9b4SQu Wenruo goto cleanup; 1971*9c5ff9b4SQu Wenruo /* 1972*9c5ff9b4SQu Wenruo * Otherwise we have one bad data stripe and 1973*9c5ff9b4SQu Wenruo * a good P stripe. raid5! 
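 * (So this case degenerates to the single-failure XOR rebuild below; the
 * remaining RAID6 cases, a data stripe plus P or two data stripes lost,
 * are handled right after via raid6_datap_recov() / raid6_2data_recov().)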
1974*9c5ff9b4SQu Wenruo */ 1975*9c5ff9b4SQu Wenruo goto pstripe; 1976*9c5ff9b4SQu Wenruo } 1977*9c5ff9b4SQu Wenruo 1978*9c5ff9b4SQu Wenruo if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) { 1979*9c5ff9b4SQu Wenruo raid6_datap_recov(rbio->real_stripes, sectorsize, 1980*9c5ff9b4SQu Wenruo faila, pointers); 1981*9c5ff9b4SQu Wenruo } else { 1982*9c5ff9b4SQu Wenruo raid6_2data_recov(rbio->real_stripes, sectorsize, 1983*9c5ff9b4SQu Wenruo faila, failb, pointers); 1984*9c5ff9b4SQu Wenruo } 1985*9c5ff9b4SQu Wenruo } else { 1986*9c5ff9b4SQu Wenruo void *p; 1987*9c5ff9b4SQu Wenruo 1988*9c5ff9b4SQu Wenruo /* Rebuild from P stripe here (raid5 or raid6). */ 1989*9c5ff9b4SQu Wenruo ASSERT(failb == -1); 1990*9c5ff9b4SQu Wenruo pstripe: 1991*9c5ff9b4SQu Wenruo /* Copy parity block into failed block to start with */ 1992*9c5ff9b4SQu Wenruo memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize); 1993*9c5ff9b4SQu Wenruo 1994*9c5ff9b4SQu Wenruo /* Rearrange the pointer array */ 1995*9c5ff9b4SQu Wenruo p = pointers[faila]; 1996*9c5ff9b4SQu Wenruo for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1; 1997*9c5ff9b4SQu Wenruo stripe_nr++) 1998*9c5ff9b4SQu Wenruo pointers[stripe_nr] = pointers[stripe_nr + 1]; 1999*9c5ff9b4SQu Wenruo pointers[rbio->nr_data - 1] = p; 2000*9c5ff9b4SQu Wenruo 2001*9c5ff9b4SQu Wenruo /* Xor in the rest */ 2002*9c5ff9b4SQu Wenruo run_xor(pointers, rbio->nr_data - 1, sectorsize); 2003*9c5ff9b4SQu Wenruo 2004*9c5ff9b4SQu Wenruo } 2005*9c5ff9b4SQu Wenruo 2006*9c5ff9b4SQu Wenruo /* 2007*9c5ff9b4SQu Wenruo * No matter if this is a RMW or recovery, we should have all 2008*9c5ff9b4SQu Wenruo * failed sectors repaired in the vertical stripe, thus they are now 2009*9c5ff9b4SQu Wenruo * uptodate. 2010*9c5ff9b4SQu Wenruo * Especially if we determine to cache the rbio, we need to 2011*9c5ff9b4SQu Wenruo * have at least all data sectors uptodate. 2012*9c5ff9b4SQu Wenruo */ 2013*9c5ff9b4SQu Wenruo if (rbio->faila >= 0) { 2014*9c5ff9b4SQu Wenruo sector = rbio_stripe_sector(rbio, rbio->faila, sector_nr); 2015*9c5ff9b4SQu Wenruo sector->uptodate = 1; 2016*9c5ff9b4SQu Wenruo } 2017*9c5ff9b4SQu Wenruo if (rbio->failb >= 0) { 2018*9c5ff9b4SQu Wenruo sector = rbio_stripe_sector(rbio, rbio->failb, sector_nr); 2019*9c5ff9b4SQu Wenruo sector->uptodate = 1; 2020*9c5ff9b4SQu Wenruo } 2021*9c5ff9b4SQu Wenruo 2022*9c5ff9b4SQu Wenruo cleanup: 2023*9c5ff9b4SQu Wenruo for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--) 2024*9c5ff9b4SQu Wenruo kunmap_local(unmap_array[stripe_nr]); 2025*9c5ff9b4SQu Wenruo } 2026*9c5ff9b4SQu Wenruo 2027*9c5ff9b4SQu Wenruo /* 202853b381b3SDavid Woodhouse * all parity reconstruction happens here. We've read in everything 202953b381b3SDavid Woodhouse * we can find from the drives and this does the heavy lifting of 203053b381b3SDavid Woodhouse * sorting the good from the bad. 
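 *
 * In outline: allocate pointers[] and unmap_array[] (one slot per stripe),
 * order faila <= failb, then run recover_vertical() on every vertical
 * stripe.  For read-rebuild the rbio may then be cached and the original
 * bio completed; for write/scrub the flow resumes with finish_rmw() or
 * finish_parity_scrub().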
203153b381b3SDavid Woodhouse */ 203253b381b3SDavid Woodhouse static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) 203353b381b3SDavid Woodhouse { 2034*9c5ff9b4SQu Wenruo int sectornr; 2035*9c5ff9b4SQu Wenruo void **pointers = NULL; 2036*9c5ff9b4SQu Wenruo void **unmap_array = NULL; 203758efbc9fSOmar Sandoval blk_status_t err; 203853b381b3SDavid Woodhouse 203907e4d380SQu Wenruo /* 204007e4d380SQu Wenruo * This array stores the pointer for each sector, thus it has the extra 204107e4d380SQu Wenruo * pgoff value added from each sector 204207e4d380SQu Wenruo */ 204331e818feSDavid Sterba pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); 204453b381b3SDavid Woodhouse if (!pointers) { 204558efbc9fSOmar Sandoval err = BLK_STS_RESOURCE; 2046*9c5ff9b4SQu Wenruo goto cleanup; 204753b381b3SDavid Woodhouse } 204853b381b3SDavid Woodhouse 204994a0b58dSIra Weiny /* 205094a0b58dSIra Weiny * Store copy of pointers that does not get reordered during 205194a0b58dSIra Weiny * reconstruction so that kunmap_local works. 205294a0b58dSIra Weiny */ 205394a0b58dSIra Weiny unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); 205494a0b58dSIra Weiny if (!unmap_array) { 205594a0b58dSIra Weiny err = BLK_STS_RESOURCE; 2056*9c5ff9b4SQu Wenruo goto cleanup; 205794a0b58dSIra Weiny } 205894a0b58dSIra Weiny 2059*9c5ff9b4SQu Wenruo /* Make sure faila and fail b are in order. */ 2060*9c5ff9b4SQu Wenruo if (rbio->faila >= 0 && rbio->failb >= 0 && rbio->faila > rbio->failb) 2061*9c5ff9b4SQu Wenruo swap(rbio->faila, rbio->failb); 206253b381b3SDavid Woodhouse 2063b4ee1782SOmar Sandoval if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 2064b4ee1782SOmar Sandoval rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { 206553b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 206653b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 206753b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 206853b381b3SDavid Woodhouse } 206953b381b3SDavid Woodhouse 207053b381b3SDavid Woodhouse index_rbio_pages(rbio); 207153b381b3SDavid Woodhouse 2072*9c5ff9b4SQu Wenruo for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) 2073*9c5ff9b4SQu Wenruo recover_vertical(rbio, sectornr, pointers, unmap_array); 207453b381b3SDavid Woodhouse 207558efbc9fSOmar Sandoval err = BLK_STS_OK; 2076*9c5ff9b4SQu Wenruo 207753b381b3SDavid Woodhouse cleanup: 207894a0b58dSIra Weiny kfree(unmap_array); 207953b381b3SDavid Woodhouse kfree(pointers); 208053b381b3SDavid Woodhouse 2081580c6efaSLiu Bo /* 2082580c6efaSLiu Bo * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a 2083580c6efaSLiu Bo * valid rbio which is consistent with ondisk content, thus such a 2084580c6efaSLiu Bo * valid rbio can be cached to avoid further disk reads. 2085580c6efaSLiu Bo */ 2086580c6efaSLiu Bo if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 2087580c6efaSLiu Bo rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { 208844ac474dSLiu Bo /* 208944ac474dSLiu Bo * - In case of two failures, where rbio->failb != -1: 209044ac474dSLiu Bo * 209144ac474dSLiu Bo * Do not cache this rbio since the above read reconstruction 209244ac474dSLiu Bo * (raid6_datap_recov() or raid6_2data_recov()) may have 209344ac474dSLiu Bo * changed some content of stripes which are not identical to 209444ac474dSLiu Bo * on-disk content any more, otherwise, a later write/recover 209544ac474dSLiu Bo * may steal stripe_pages from this rbio and end up with 209644ac474dSLiu Bo * corruptions or rebuild failures. 
209744ac474dSLiu Bo * 209844ac474dSLiu Bo * - In case of single failure, where rbio->failb == -1: 209944ac474dSLiu Bo * 210044ac474dSLiu Bo * Cache this rbio iff the above read reconstruction is 210152042d8eSAndrea Gelmini * executed without problems. 210244ac474dSLiu Bo */ 210344ac474dSLiu Bo if (err == BLK_STS_OK && rbio->failb < 0) 21044ae10b3aSChris Mason cache_rbio_pages(rbio); 21054ae10b3aSChris Mason else 21064ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 21074ae10b3aSChris Mason 21084246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, err); 210958efbc9fSOmar Sandoval } else if (err == BLK_STS_OK) { 211053b381b3SDavid Woodhouse rbio->faila = -1; 211153b381b3SDavid Woodhouse rbio->failb = -1; 21125a6ac9eaSMiao Xie 21135a6ac9eaSMiao Xie if (rbio->operation == BTRFS_RBIO_WRITE) 211453b381b3SDavid Woodhouse finish_rmw(rbio); 21155a6ac9eaSMiao Xie else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) 21165a6ac9eaSMiao Xie finish_parity_scrub(rbio, 0); 21175a6ac9eaSMiao Xie else 21185a6ac9eaSMiao Xie BUG(); 211953b381b3SDavid Woodhouse } else { 21204246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, err); 212153b381b3SDavid Woodhouse } 212253b381b3SDavid Woodhouse } 212353b381b3SDavid Woodhouse 212453b381b3SDavid Woodhouse /* 2125d34e123dSChristoph Hellwig * This is called only for stripes we've read from disk to reconstruct the 2126d34e123dSChristoph Hellwig * parity. 212753b381b3SDavid Woodhouse */ 2128d34e123dSChristoph Hellwig static void raid_recover_end_io_work(struct work_struct *work) 212953b381b3SDavid Woodhouse { 2130d34e123dSChristoph Hellwig struct btrfs_raid_bio *rbio = 2131d34e123dSChristoph Hellwig container_of(work, struct btrfs_raid_bio, end_io_work); 213253b381b3SDavid Woodhouse 21334c664611SQu Wenruo if (atomic_read(&rbio->error) > rbio->bioc->max_errors) 213458efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 213553b381b3SDavid Woodhouse else 213653b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 213753b381b3SDavid Woodhouse } 213853b381b3SDavid Woodhouse 213953b381b3SDavid Woodhouse /* 214053b381b3SDavid Woodhouse * reads everything we need off the disk to reconstruct 214153b381b3SDavid Woodhouse * the parity. endio handlers trigger final reconstruction 214253b381b3SDavid Woodhouse * when the IO is done. 214353b381b3SDavid Woodhouse * 214453b381b3SDavid Woodhouse * This is used both for reads from the higher layers and for 214553b381b3SDavid Woodhouse * parity construction required to finish a rmw cycle. 214653b381b3SDavid Woodhouse */ 214753b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) 214853b381b3SDavid Woodhouse { 214953b381b3SDavid Woodhouse int bios_to_read = 0; 215053b381b3SDavid Woodhouse struct bio_list bio_list; 215153b381b3SDavid Woodhouse int ret; 2152ef340fccSQu Wenruo int total_sector_nr; 215353b381b3SDavid Woodhouse struct bio *bio; 215453b381b3SDavid Woodhouse 215553b381b3SDavid Woodhouse bio_list_init(&bio_list); 215653b381b3SDavid Woodhouse 215753b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 215853b381b3SDavid Woodhouse if (ret) 215953b381b3SDavid Woodhouse goto cleanup; 216053b381b3SDavid Woodhouse 2161b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 216253b381b3SDavid Woodhouse 216353b381b3SDavid Woodhouse /* 2164f6065f8eSQu Wenruo * Read everything that hasn't failed. However this time we will 2165f6065f8eSQu Wenruo * not trust any cached sector. 
2166f6065f8eSQu Wenruo * As we may read out some stale data but higher layer is not reading 2167f6065f8eSQu Wenruo * that stale part. 2168f6065f8eSQu Wenruo * 2169f6065f8eSQu Wenruo * So here we always re-read everything in recovery path. 217053b381b3SDavid Woodhouse */ 2171ef340fccSQu Wenruo for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; 2172ef340fccSQu Wenruo total_sector_nr++) { 2173ef340fccSQu Wenruo int stripe = total_sector_nr / rbio->stripe_nsectors; 2174ef340fccSQu Wenruo int sectornr = total_sector_nr % rbio->stripe_nsectors; 21753e77605dSQu Wenruo struct sector_ptr *sector; 217653b381b3SDavid Woodhouse 2177ef340fccSQu Wenruo if (rbio->faila == stripe || rbio->failb == stripe) { 2178ef340fccSQu Wenruo atomic_inc(&rbio->error); 2179ef340fccSQu Wenruo /* Skip the current stripe. */ 2180ef340fccSQu Wenruo ASSERT(sectornr == 0); 2181ef340fccSQu Wenruo total_sector_nr += rbio->stripe_nsectors - 1; 218253b381b3SDavid Woodhouse continue; 2183ef340fccSQu Wenruo } 218453b381b3SDavid Woodhouse sector = rbio_stripe_sector(rbio, stripe, sectornr); 2185ef340fccSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe, 2186ff18a4afSChristoph Hellwig sectornr, REQ_OP_READ); 218753b381b3SDavid Woodhouse if (ret < 0) 218853b381b3SDavid Woodhouse goto cleanup; 218953b381b3SDavid Woodhouse } 219053b381b3SDavid Woodhouse 219153b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 219253b381b3SDavid Woodhouse if (!bios_to_read) { 219353b381b3SDavid Woodhouse /* 219453b381b3SDavid Woodhouse * we might have no bios to read just because the pages 219553b381b3SDavid Woodhouse * were up to date, or we might have no bios to read because 219653b381b3SDavid Woodhouse * the devices were gone. 219753b381b3SDavid Woodhouse */ 21984c664611SQu Wenruo if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) { 219953b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 2200813f8a0eSNikolay Borisov return 0; 220153b381b3SDavid Woodhouse } else { 220253b381b3SDavid Woodhouse goto cleanup; 220353b381b3SDavid Woodhouse } 220453b381b3SDavid Woodhouse } 220553b381b3SDavid Woodhouse 220653b381b3SDavid Woodhouse /* 22074c664611SQu Wenruo * The bioc may be freed once we submit the last bio. Make sure not to 22084c664611SQu Wenruo * touch it after that. 
220953b381b3SDavid Woodhouse */ 2210b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bios_to_read); 2211d34e123dSChristoph Hellwig INIT_WORK(&rbio->end_io_work, raid_recover_end_io_work); 2212bf28a605SNikolay Borisov while ((bio = bio_list_pop(&bio_list))) { 2213d34e123dSChristoph Hellwig bio->bi_end_io = raid56_bio_end_io; 221453b381b3SDavid Woodhouse 2215b8bea09aSQu Wenruo if (trace_raid56_scrub_read_recover_enabled()) { 2216b8bea09aSQu Wenruo struct raid56_bio_trace_info trace_info = { 0 }; 221753b381b3SDavid Woodhouse 2218b8bea09aSQu Wenruo bio_get_trace_info(rbio, bio, &trace_info); 2219b8bea09aSQu Wenruo trace_raid56_scrub_read_recover(rbio, bio, &trace_info); 2220b8bea09aSQu Wenruo } 22214e49ea4aSMike Christie submit_bio(bio); 222253b381b3SDavid Woodhouse } 2223813f8a0eSNikolay Borisov 222453b381b3SDavid Woodhouse return 0; 222553b381b3SDavid Woodhouse 222653b381b3SDavid Woodhouse cleanup: 2227b4ee1782SOmar Sandoval if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 2228b4ee1782SOmar Sandoval rbio->operation == BTRFS_RBIO_REBUILD_MISSING) 222958efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 2230785884fcSLiu Bo 2231785884fcSLiu Bo while ((bio = bio_list_pop(&bio_list))) 2232785884fcSLiu Bo bio_put(bio); 2233785884fcSLiu Bo 223453b381b3SDavid Woodhouse return -EIO; 223553b381b3SDavid Woodhouse } 223653b381b3SDavid Woodhouse 223753b381b3SDavid Woodhouse /* 223853b381b3SDavid Woodhouse * the main entry point for reads from the higher layers. This 223953b381b3SDavid Woodhouse * is really only called when the normal read path had a failure, 224053b381b3SDavid Woodhouse * so we assume the bio they send down corresponds to a failed part 224153b381b3SDavid Woodhouse * of the drive. 224253b381b3SDavid Woodhouse */ 22436065fd95SChristoph Hellwig void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc, 2244f1c29379SChristoph Hellwig int mirror_num) 224553b381b3SDavid Woodhouse { 22466a258d72SQu Wenruo struct btrfs_fs_info *fs_info = bioc->fs_info; 224753b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 224853b381b3SDavid Woodhouse 2249ff18a4afSChristoph Hellwig rbio = alloc_rbio(fs_info, bioc); 2250af8e2d1dSMiao Xie if (IS_ERR(rbio)) { 22516065fd95SChristoph Hellwig bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); 22526065fd95SChristoph Hellwig goto out_end_bio; 2253af8e2d1dSMiao Xie } 225453b381b3SDavid Woodhouse 22551b94b556SMiao Xie rbio->operation = BTRFS_RBIO_READ_REBUILD; 2256bd8f7e62SQu Wenruo rbio_add_bio(rbio, bio); 225753b381b3SDavid Woodhouse 225853b381b3SDavid Woodhouse rbio->faila = find_logical_bio_stripe(rbio, bio); 225953b381b3SDavid Woodhouse if (rbio->faila == -1) { 22600b246afaSJeff Mahoney btrfs_warn(fs_info, 22614c664611SQu Wenruo "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bioc has map_type %llu)", 22621201b58bSDavid Sterba __func__, bio->bi_iter.bi_sector << 9, 22634c664611SQu Wenruo (u64)bio->bi_iter.bi_size, bioc->map_type); 2264ff2b64a2SQu Wenruo free_raid_bio(rbio); 22656065fd95SChristoph Hellwig bio->bi_status = BLK_STS_IOERR; 22666065fd95SChristoph Hellwig goto out_end_bio; 226753b381b3SDavid Woodhouse } 226853b381b3SDavid Woodhouse 226953b381b3SDavid Woodhouse /* 22708810f751SLiu Bo * Loop retry: 22718810f751SLiu Bo * for 'mirror == 2', reconstruct from all other stripes. 22728810f751SLiu Bo * for 'mirror_num > 2', select a stripe to fail on every retry. 
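 * With the formula below, failb = real_stripes - (mirror_num - 1):
 * e.g. on a 4-device RAID6, mirror_num 3 fails stripe 2 (the P stripe)
 * and mirror_num 4 fails data stripe 1 (shifted down by one if it would
 * collide with faila).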
227353b381b3SDavid Woodhouse */ 22748810f751SLiu Bo if (mirror_num > 2) { 22758810f751SLiu Bo /* 22768810f751SLiu Bo * 'mirror == 3' is to fail the p stripe and 22778810f751SLiu Bo * reconstruct from the q stripe. 'mirror > 3' is to 22788810f751SLiu Bo * fail a data stripe and reconstruct from p+q stripe. 22798810f751SLiu Bo */ 22808810f751SLiu Bo rbio->failb = rbio->real_stripes - (mirror_num - 1); 22818810f751SLiu Bo ASSERT(rbio->failb > 0); 22828810f751SLiu Bo if (rbio->failb <= rbio->faila) 22838810f751SLiu Bo rbio->failb--; 22848810f751SLiu Bo } 228553b381b3SDavid Woodhouse 22866065fd95SChristoph Hellwig if (lock_stripe_add(rbio)) 22876065fd95SChristoph Hellwig return; 228853b381b3SDavid Woodhouse 228953b381b3SDavid Woodhouse /* 22906065fd95SChristoph Hellwig * This adds our rbio to the list of rbios that will be handled after 22916065fd95SChristoph Hellwig * the current lock owner is done. 229253b381b3SDavid Woodhouse */ 229353b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 22946065fd95SChristoph Hellwig return; 229553b381b3SDavid Woodhouse 22966065fd95SChristoph Hellwig out_end_bio: 22976065fd95SChristoph Hellwig bio_endio(bio); 229853b381b3SDavid Woodhouse } 229953b381b3SDavid Woodhouse 2300385de0efSChristoph Hellwig static void rmw_work(struct work_struct *work) 230153b381b3SDavid Woodhouse { 230253b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 230353b381b3SDavid Woodhouse 230453b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 230553b381b3SDavid Woodhouse raid56_rmw_stripe(rbio); 230653b381b3SDavid Woodhouse } 230753b381b3SDavid Woodhouse 2308385de0efSChristoph Hellwig static void read_rebuild_work(struct work_struct *work) 230953b381b3SDavid Woodhouse { 231053b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 231153b381b3SDavid Woodhouse 231253b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 231353b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 231453b381b3SDavid Woodhouse } 23155a6ac9eaSMiao Xie 23165a6ac9eaSMiao Xie /* 23175a6ac9eaSMiao Xie * The following code is used to scrub/replace the parity stripe 23185a6ac9eaSMiao Xie * 23194c664611SQu Wenruo * Caller must have already increased bio_counter for getting @bioc. 2320ae6529c3SQu Wenruo * 23215a6ac9eaSMiao Xie * Note: We need make sure all the pages that add into the scrub/replace 23225a6ac9eaSMiao Xie * raid bio are correct and not be changed during the scrub/replace. That 23235a6ac9eaSMiao Xie * is those pages just hold metadata or file data with checksum. 

static void rmw_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bioc.
 *
 * Note: We need to make sure that all the pages added to the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace.
 * That is, those pages only hold metadata or file data with checksum.
 */

struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
				struct btrfs_io_context *bioc,
				struct btrfs_device *scrub_dev,
				unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio look similar to the other rbio types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bioc->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    unsigned int pgoff, u64 logical)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bioc->raid_map[0]);
	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
				       BTRFS_STRIPE_LEN * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
	index = stripe_offset / sectorsize;
	rbio->bio_sectors[index].page = page;
	rbio->bio_sectors[index].pgoff = pgoff;
}
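
/*
 * Editorial illustration (not part of the driver, names are hypothetical):
 * the index arithmetic used by raid56_add_scrub_pages() above.  The sector
 * index is just the byte offset from the start of the full stripe
 * (bioc->raid_map[0]) divided by the sector size, e.g. with a 4KiB sector
 * size a logical address 12KiB into the full stripe lands at index 3.
 */
static inline int demo_scrub_sector_index(u64 logical, u64 full_stripe_start,
					  u32 sectorsize)
{
	int stripe_offset = (int)(logical - full_stripe_start);

	return stripe_offset / sectorsize;
}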

/*
 * We only scrub parity for the horizontal rows where we have correct data,
 * so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	int total_sector_nr;

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct page *page;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;

		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;
		if (rbio->stripe_pages[index])
			continue;
		page = alloc_page(GFP_NOFS);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[index] = page;
	}
	index_stripe_sectors(rbio);
	return 0;
}
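
/*
 * Editorial illustration (not part of the driver, names are hypothetical):
 * how the flat sector counter walked by alloc_rbio_essential_pages() above
 * (and by raid56_parity_scrub_stripe() below) breaks down into a stripe
 * number, a vertical position inside that stripe, and an index into
 * rbio->stripe_pages, assuming sectors are numbered stripe by stripe with
 * stripe_nsectors sectors per stripe.
 */
struct demo_sector_pos {
	int stripe;		/* which stripe the sector belongs to */
	int sectornr;		/* vertical position inside that stripe */
	int page_index;		/* index into the stripe_pages array */
};

static inline struct demo_sector_pos demo_decompose_sector(int total_sector_nr,
							    int stripe_nsectors,
							    u32 sectorsize)
{
	struct demo_sector_pos pos;

	pos.stripe = total_sector_nr / stripe_nsectors;
	pos.sectornr = total_sector_nr % stripe_nsectors;
	pos.page_index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
	return pos;
}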

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_io_context *bioc = rbio->bioc;
	const u32 sectorsize = bioc->fs_info->sectorsize;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = &rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int sectornr;
	bool has_qstripe;
	struct sector_ptr p_sector = { 0 };
	struct sector_ptr q_sector = { 0 };
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_sector.page = alloc_page(GFP_NOFS);
	if (!p_sector.page)
		goto cleanup;
	p_sector.pgoff = 0;
	p_sector.uptodate = 1;

	if (has_qstripe) {
		/* RAID6, allocate and map temp space for the Q stripe */
		q_sector.page = alloc_page(GFP_NOFS);
		if (!q_sector.page) {
			__free_page(p_sector.page);
			p_sector.page = NULL;
			goto cleanup;
		}
		q_sector.pgoff = 0;
		q_sector.uptodate = 1;
		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
	}

	atomic_set(&rbio->error, 0);

	/* Map the parity stripe just once */
	pointers[nr_data] = kmap_local_page(p_sector.page);

	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
			pointers[stripe] = kmap_local_page(sector->page) +
					   sector->pgoff;
		}

		if (has_qstripe) {
			/* RAID6, call the library function to fill in our P/Q */
			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], sectorsize);
			run_xor(pointers + 1, nr_data - 1, sectorsize);
		}

		/* Check scrubbing parity and repair it */
		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		parity = kmap_local_page(sector->page) + sector->pgoff;
		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
			memcpy(parity, pointers[rbio->scrubp], sectorsize);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(&rbio->dbitmap, sectornr, 1);
		kunmap_local(parity);

		for (stripe = nr_data - 1; stripe >= 0; stripe--)
			kunmap_local(pointers[stripe]);
	}

	kunmap_local(pointers[nr_data]);
	__free_page(p_sector.page);
	p_sector.page = NULL;
	if (q_sector.page) {
		kunmap_local(pointers[rbio->real_stripes - 1]);
		__free_page(q_sector.page);
		q_sector.page = NULL;
	}

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector,
					 bioc->tgtdev_map[rbio->scrubp],
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_end_io = raid_write_end_io;

		if (trace_raid56_scrub_write_stripe_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_scrub_write_stripe(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
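
/*
 * Editorial illustration (not part of the driver, names are hypothetical):
 * a minimal sketch of the RAID5 branch of the parity check done in
 * finish_parity_scrub() above.  The expected parity is the byte-wise XOR of
 * all data sectors in the row (a plain loop stands in for run_xor()); if the
 * on-disk parity differs it is rewritten, and the caller would leave the
 * sector marked in dbitmap so it gets written back.
 */
static inline bool demo_check_raid5_parity(void **data, int nr_data,
					   u8 *parity, u8 *scratch,
					   u32 sectorsize)
{
	u32 off;
	int i;

	/* scratch must hold at least sectorsize bytes */
	memcpy(scratch, data[0], sectorsize);
	for (i = 1; i < nr_data; i++) {
		const u8 *src = data[i];

		for (off = 0; off < sectorsize; off++)
			scratch[off] ^= src[off];
	}

	if (memcmp(parity, scratch, sectorsize) != 0) {
		/* on-disk parity was stale, repair it in place */
		memcpy(parity, scratch, sectorsize);
		return false;
	}
	/* parity already correct, nothing to write back */
	return true;
}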

/*
 * While we're doing the parity check and repair, we could have errors in
 * reading pages off the disk.  This checks for errors and if we're not able
 * to read the page it'll trigger parity reconstruction.  The parity scrub
 * will be finished after we've reconstructed the failed stripes.
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * We cannot use the parity stripe that is being scrubbed to
		 * repair data, so the repair capability is reduced by one.
		 * (In the case of RAID5, we cannot repair anything.)
		 */
		if (dfail > rbio->bioc->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is bad, just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity stripe (RAID6).  If the corrupted parity is the one
		 * being scrubbed, we can use the other parity to repair the
		 * data; otherwise the data stripe cannot be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
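
/*
 * Editorial illustration (not part of the driver, names are hypothetical):
 * the decision made by validate_rbio_for_parity_scrub() above, restated as
 * a pure function so the four possible outcomes are easy to see.
 */
enum demo_scrub_next_step {
	DEMO_SCRUB_ABORT,		/* rbio_orig_end_io(rbio, BLK_STS_IOERR) */
	DEMO_SCRUB_FINISH_NO_CHECK,	/* finish_parity_scrub(rbio, 0) */
	DEMO_SCRUB_FINISH_WITH_CHECK,	/* finish_parity_scrub(rbio, 1) */
	DEMO_SCRUB_RECOVER_FIRST,	/* __raid_recover_end_io(rbio) */
};

static inline enum demo_scrub_next_step
demo_pick_scrub_next_step(int nr_errors, int max_errors, bool has_failure,
			  int dfail, int failp, int scrubp)
{
	if (nr_errors > max_errors)
		return DEMO_SCRUB_ABORT;
	if (!has_failure)
		return DEMO_SCRUB_FINISH_WITH_CHECK;
	/* a failed data stripe eats one unit of the repair capability */
	if (dfail > max_errors - 1)
		return DEMO_SCRUB_ABORT;
	if (dfail == 0)
		return DEMO_SCRUB_FINISH_NO_CHECK;
	/* data + parity failed: only the scrubbed parity may be the bad one */
	if (failp != scrubp)
		return DEMO_SCRUB_ABORT;
	return DEMO_SCRUB_RECOVER_FIRST;
}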

/*
 * End io for the read phase of the scrub rmw cycle.  All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate the
 * parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way.
 */
static void raid56_parity_scrub_end_io_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio =
		container_of(work, struct btrfs_raid_bio, end_io_work);

	/*
	 * This will normally call finish_parity_scrub to start our write, but
	 * if there are any failed stripes we'll reconstruct from parity first.
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int total_sector_nr;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/* Build a list of bios to read all the missing parts. */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/* No data in the vertical stripe, no need to read. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		/*
		 * We want to find all the sectors missing from the rbio and
		 * read them from the disk. If sector_in_rbio() finds a sector
		 * in the bio list we don't need to read it off the stripe.
		 */
		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
		if (sector)
			continue;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		/*
		 * The bio cache may have handed us an uptodate sector. If so,
		 * use it.
		 */
		if (sector->uptodate)
			continue;

		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret)
			goto cleanup;
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * This can happen if others have merged with us; it means
		 * there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * The bioc may be freed once we submit the last bio. Make sure not to
	 * touch it after that.
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	INIT_WORK(&rbio->end_io_work, raid56_parity_scrub_end_io_work);
	while ((bio = bio_list_pop(&bio_list))) {
		bio->bi_end_io = raid56_bio_end_io;

		if (trace_raid56_scrub_read_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_scrub_read(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_parity_work);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio look similar to the other rbio types.
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn_rl(fs_info,
	"can not determine the failed stripe number for full stripe %llu",
			      bioc->raid_map[0]);
		free_raid_bio(rbio);
		return NULL;
	}

	return rbio;
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, read_rebuild_work);
}
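
/*
 * Editorial illustration (not part of the driver, names are hypothetical):
 * a minimal sketch of why a single missing stripe is recoverable in RAID5,
 * which is what the recovery and missing-device paths above ultimately rely
 * on.  With parity P = D0 ^ D1 ^ ... ^ Dn-1, any one missing data sector is
 * the XOR of the parity and the remaining data sectors.  RAID6 additionally
 * keeps the Q syndrome (see raid6_call.gen_syndrome()) so it can survive
 * two failures; that math is not reproduced here.
 */
static inline void demo_raid5_rebuild_one(void **stripes, int nr_stripes,
					  int faila, u8 *out, u32 sectorsize)
{
	bool first = true;
	u32 off;
	int i;

	/* XOR together every surviving stripe, including the parity */
	for (i = 0; i < nr_stripes; i++) {
		const u8 *src = stripes[i];

		if (i == faila)
			continue;
		if (first) {
			memcpy(out, src, sectorsize);
			first = false;
			continue;
		}
		for (off = 0; off < sectorsize; off++)
			out[off] ^= src[off];
	}
}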