// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS	11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};
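
/*
 * With BTRFS_STRIPE_HASH_TABLE_BITS == 11 the table above has
 * 1 << 11 == 2048 buckets, each with its own list head and spinlock,
 * so full stripes that hash to different buckets can be locked and
 * modified independently.
 */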

/*
 * A bvec-like structure to represent a sector inside a page.
 *
 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 */
struct sector_ptr {
	struct page *page;
	unsigned int pgoff:24;
	unsigned int uptodate:8;
};
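
/*
 * The bitfields above pack the in-page offset and the uptodate flag
 * into a single 32 bit word: 24 bits is plenty for an offset inside
 * one page (at most 64K with the largest supported page size), and
 * uptodate only ever holds 0 or 1.
 */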

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_io_context *bioc;

	/*
	 * While we're doing rmw on a stripe we put it into a hash table so
	 * we can lock the stripe and merge more rbios into it.
	 */
	struct list_head hash_list;

	/* LRU list for the stripe cache */
	struct list_head stripe_cache;

	/* for scheduling work in the helper threads */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used to add more bios into the
	 * stripe in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/*
	 * Also protected by the bio_list_lock, the plug list is used by the
	 * plugging code to collect partial bios while plugged.  The stripe
	 * locking code also uses it to hand off the stripe lock to the next
	 * pending IO.
	 */
	struct list_head plug_list;

	/* flags that tell us if it is safe to merge with this bio */
	unsigned long flags;

	/*
	 * Set if we're doing a parity rebuild for a read from higher up,
	 * which is handled differently from a parity rebuild as part of rmw.
	 */
	enum btrfs_rbio_ops operation;

	/* Size of each individual stripe on disk */
	u32 stripe_len;

	/* How many pages there are for the full stripe including P/Q */
	u16 nr_pages;

	/* How many sectors there are for the full stripe including P/Q */
	u16 nr_sectors;

	/* Number of data stripes (no p/q) */
	u8 nr_data;

	/* Number of all stripes (including P/Q) */
	u8 real_stripes;

	/* How many pages there are for each stripe */
	u8 stripe_npages;

	/* How many sectors there are for each stripe */
	u8 stripe_nsectors;

	/* First bad stripe, -1 means no corruption */
	s8 faila;

	/* Second bad stripe (for RAID6 use) */
	s8 failb;

	/* Stripe number that we're scrubbing */
	u8 scrubp;

	/*
	 * Size of all the bios in the bio_list.  This helps us decide if the
	 * rbio maps to a full stripe or not.
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;
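
	/*
	 * stripes_pending counts the bios still in flight for this rbio;
	 * the end_io handlers use atomic_dec_and_test() on it so only the
	 * last completion finishes the rbio.  error counts failed stripes
	 * and is compared against bioc->max_errors at completion time.
	 */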
	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * These are two arrays of pointers.  We allocate the rbio big enough
	 * to hold them both and set up their locations when the rbio is
	 * allocated.
	 */

	/*
	 * Pointers to pages that we allocated for reading/writing stripes
	 * directly from the disk (including P/Q).
	 */
	struct page **stripe_pages;

	/* Pointers to the sectors in the bio_list, for faster lookup */
	struct sector_ptr *bio_sectors;

	/*
	 * Pointers to the pages in the bio_list.  Stored here for faster
	 * lookup.
	 */
	struct page **bio_pages;

	/*
	 * For subpage support, we need to map each sector to above
	 * stripe_pages.
	 */
	struct sector_ptr *stripe_sectors;

	/* Bitmap to record which horizontal stripe has data */
	unsigned long *dbitmap;

	/* Allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;

	/* Allocated with stripe_nsectors-many bits for finish_*() calls */
	unsigned long *finish_pbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct btrfs_work *work);

static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
	btrfs_init_work(&rbio->work, work_func, NULL, NULL);
	btrfs_queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}

/*
 * The stripe hash table is used for locking, and to collect bios in hopes of
 * making a full stripe.
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fall back to vmalloc to lower the
	 * chance of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	kvfree(x);
	return 0;
}

/*
 * Caching an rbio means to copy anything from the bio_pages array into the
 * stripe_pages array.  We use the page uptodate bit in the stripe cache
 * array to indicate if it has valid data.
 *
 * Once the caching is done, we set the cache ready bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		copy_highpage(rbio->stripe_pages[i], rbio->bio_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}

	/*
	 * This work is duplicated with the above loop, will be removed when
	 * the switch is done.
	 */
	for (i = 0; i < rbio->nr_sectors; i++) {
		/* Some range not covered by bio (partial write), skip it */
		if (!rbio->bio_sectors[i].page)
			continue;

		ASSERT(rbio->stripe_sectors[i].page);
		memcpy_page(rbio->stripe_sectors[i].page,
			    rbio->stripe_sectors[i].pgoff,
			    rbio->bio_sectors[i].page,
			    rbio->bio_sectors[i].pgoff,
			    rbio->bioc->fs_info->sectorsize);
		rbio->stripe_sectors[i].uptodate = 1;
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * We hash on the first logical address of the stripe.
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bioc->raid_map[0];

	/*
	 * We shift down quite a bit.  We're using byte addressing, and most
	 * of the lower bits are zeros.  This tends to upset hash_64, and it
	 * consistently returns just one or two different values.
	 *
	 * Shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
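
/*
 * Example: full stripe starts are at least 64K aligned, so for two
 * neighboring full stripes at logical 1M and 1M + 64K the values fed
 * to hash_64() are 0x10 and 0x11; the low zero bits that would
 * otherwise dominate the hash are shifted away.
 */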

/*
 * Update the stripe_sectors[] array to use the correct page and pgoff.
 *
 * Should be called every time any page pointer in stripe_pages[] is modified.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	u32 offset;
	int i;

	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
	}
}
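
/*
 * Example with 4K sectorsize and 64K pages: sectors 0-15 all map to
 * stripe_pages[0] with pgoff 0, 4K, ..., 60K, then sector 16 moves on
 * to stripe_pages[1] at pgoff 0 again, and so on.
 */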

/*
 * Stealing an rbio means taking all the uptodate pages from the stripe array
 * in the source rbio and putting them into the destination rbio.
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
	index_stripe_sectors(dest);
	index_stripe_sectors(src);
}

/*
 * Merging means we take the bio_list from the victim and splice it into the
 * destination.  The victim should be discarded afterwards.
 *
 * Must be called with dest->bio_list_lock held.
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * Used to prune items that are in the cache.  The caller must hold the hash
 * table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/*
	 * Hold the lock for the bucket because we may be removing it from
	 * the hash table.
	 */
	spin_lock(&h->lock);

	/*
	 * Hold the lock for the bio list because we need to make sure the
	 * bio list is empty.
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/*
		 * If the bio list isn't empty, this rbio is still involved
		 * in an IO.  We take it out of the cache list, and drop the
		 * ref that was held for the list.
		 *
		 * If the bio_list was empty, we also remove the rbio from
		 * the hash_table, and drop the corresponding ref.
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * Prune a given rbio from the cache.
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
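
/*
 * Lock ordering for the cache is cache_lock -> h->lock ->
 * rbio->bio_list_lock: remove_rbio_from_cache() takes the cache lock
 * before __remove_rbio_from_cache() takes the bucket and bio list
 * locks, and cache_rbio() below takes cache_lock before
 * bio_list_lock.
 */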

/*
 * Remove everything in the cache.
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * Remove all cached entries and free the hash table.  Used by unmount.
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * Insert an rbio into the stripe cache.  It must have already been prepared
 * by calling cache_rbio_pages.
 *
 * If this rbio was already cached, it gets moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * Helper function to run the xor_blocks api.  It is only able to do
 * MAX_XOR_BLOCKS at a time, so we need to loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
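
/*
 * Note that xor_blocks() xors the sources into the destination, so
 * pages[src_cnt] must be seeded first.  The raid5 path in finish_rmw()
 * does that by copying the first data block into the P page and then
 * xoring in the remaining nr_data - 1 blocks with run_xor().
 */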

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * Returns 1 if it is safe to merge two rbios together.  The merging is safe
 * if the two rbios correspond to the same stripe and if they are both going
 * in the same direction (read vs write), and if neither one is locked for
 * final IO.
 *
 * The caller is responsible for locking such that rmw_locked is safe to test.
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * We can't merge with cached rbios, since the idea is that when we
	 * merge the destination rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;

	/*
	 * Parity scrub needs to read the full stripe from the drive, then
	 * check and repair the parity and write the new results.
	 *
	 * While that is happening we're not allowed to add any new bios to
	 * the bio list; anyone else that wants to change this stripe needs
	 * to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}

static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	ASSERT(stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr < rbio->stripe_nsectors);

	return stripe_nr * rbio->stripe_nsectors + sector_nr;
}

/* Return a sector from rbio->stripe_sectors, not from the bio list */
static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
							      sector_nr)];
}

/* Grab a sector inside P stripe */
static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
}

/* Grab a sector inside Q stripe, return NULL if not RAID6 */
static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}
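
/*
 * Stripe numbering used by the helpers above, e.g. for a six device
 * RAID6 full stripe: data stripes are 0-3 (nr_data == 4), P is stripe
 * nr_data == 4 and Q is stripe nr_data + 1 == 5.  On RAID5 there is no
 * Q, which is why rbio_qstripe_sector() returns NULL when
 * nr_data + 1 == real_stripes.
 */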

/*
 * The first stripe in the table for a logical address has the lock.  rbios
 * are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given the lock and 0 is
 *    returned.  The caller must start the IO themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge with the lock
 *    owner.  The rbio is freed and the IO will start automatically along
 *    with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.  The rbio
 *    is added to the lock owner's plug list, or merged into an rbio already
 *    on the plug list.  When the lock owner unlocks, the next rbio on the
 *    list is run and the IO is started automatically.  1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with IO
 * submission.  If we return 1, the caller must assume the rbio has already
 * been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can
		 * merge with the pending ones.  We don't have to check for
		 * rmw_locked because there is no way they are inside
		 * finish_rmw right now.
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks.
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
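
/*
 * Typical calling pattern (the full stripe write path in this file
 * follows it): a return value of 1 means the rbio was merged or
 * plugged and must not be touched again, 0 means we own the stripe
 * lock and must submit the IO ourselves, e.g.:
 *
 *	if (lock_stripe_add(rbio) == 0)
 *		finish_rmw(rbio);
 */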

/*
 * Called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started.
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * If we're still cached and there is no other IO to perform,
		 * just leave this rbio here for others to steal from later.
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * We use the plug list to hold all the rbios waiting for the
		 * chance to lock this stripe.  Hand the lock over to one of
		 * them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}
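
/*
 * Refcount recap: an rbio starts with one reference from alloc_rbio();
 * membership on the hash list (lock_stripe_add()) and on the stripe
 * cache (cache_rbio()) each add another, dropped again when the rbio
 * leaves the corresponding list.  __free_raid_bio() below only really
 * frees once the last reference is put.
 */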

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bioc(rbio->bioc);
	kfree(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * This frees the rbio and runs through all the bios in the bio_list and
 * calls end_io on them.
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->bioc->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does
	 * not always have RBIO_RMW_LOCKED_BIT set and rbio is still linked
	 * on the hash list, rbio may be merged with others so that
	 * rbio->bio_list becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated
	 * any more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * End io function used by finish_rmw.  When we finally get here, we've
 * written a full stripe.
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bioc->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}
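
/*
 * max_errors is zero for parity scrub above because every write has to
 * succeed for the freshly computed parity to be trusted; for regular
 * writes the bioc's tolerance applies (one missing stripe on RAID5,
 * two on RAID6).
 */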

/**
 * sector_in_rbio - get a sector pointer specified by its @stripe_nr and
 * @sector_nr
 *
 * @rbio:               The raid bio
 * @stripe_nr:          Stripe number, valid range [0, real_stripes)
 * @sector_nr:          Sector number inside the stripe,
 *                      valid range [0, stripe_nsectors)
 * @bio_list_only:      Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only uses stripe_sectors as a fallback.
 */
static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
					 int stripe_nr, int sector_nr,
					 bool bio_list_only)
{
	struct sector_ptr *sector;
	int index;

	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);

	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
	ASSERT(index >= 0 && index < rbio->nr_sectors);

	spin_lock_irq(&rbio->bio_list_lock);
	sector = &rbio->bio_sectors[index];
	if (sector->page || bio_list_only) {
		/* Don't return sector without a valid page pointer */
		if (!sector->page)
			sector = NULL;
		spin_unlock_irq(&rbio->bio_list_lock);
		return sector;
	}
	spin_unlock_irq(&rbio->bio_list_lock);

	return &rbio->stripe_sectors[index];
}

/*
 * Allocation and initial setup for the btrfs_raid_bio.  Note that this does
 * not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_io_context *bioc,
					 u32 stripe_len)
{
	const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
	const unsigned int stripe_npages = stripe_len >> PAGE_SHIFT;
	const unsigned int num_pages = stripe_npages * real_stripes;
	const unsigned int stripe_nsectors = stripe_len >> fs_info->sectorsize_bits;
	const unsigned int num_sectors = stripe_nsectors * real_stripes;
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	void *p;

	ASSERT(IS_ALIGNED(stripe_len, PAGE_SIZE));
	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
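
	/*
	 * Everything below is carved out of a single allocation: the struct
	 * is followed, in CONSUME_ALLOC() order, by the stripe_pages,
	 * bio_pages, bio_sectors, stripe_sectors, finish_pointers, dbitmap
	 * and finish_pbitmap arrays.  One kzalloc() means one failure point
	 * and a single kfree() in __free_raid_bio().
	 */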
	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_pages) * num_pages +
		       sizeof(*rbio->bio_sectors) * num_sectors +
		       sizeof(*rbio->stripe_sectors) * num_sectors +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_nsectors) +
		       sizeof(*rbio->finish_pbitmap) * BITS_TO_LONGS(stripe_nsectors),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bioc = bioc;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->nr_sectors = num_sectors;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->stripe_nsectors = stripe_nsectors;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * The stripe_pages, bio_pages, etc arrays point to the extra memory
	 * we allocated past the end of the rbio.
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_sectors, num_sectors);
	CONSUME_ALLOC(rbio->stripe_sectors, num_sectors);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_nsectors));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_nsectors));
#undef CONSUME_ALLOC

	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
	if (ret < 0)
		return ret;
	/* Mapping all sectors */
	index_stripe_sectors(rbio);
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int data_pages = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
				     rbio->stripe_pages + data_pages);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * Add a single sector @sector into our list of bios for IO.
 *
 * Return 0 if everything went well.
 * Return <0 for error.
 */
static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list,
			      struct sector_ptr *sector,
			      unsigned int stripe_nr,
			      unsigned int sector_nr,
			      unsigned long bio_max_len,
			      unsigned int opf)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_io_stripe *stripe;
	u64 disk_start;

	/*
	 * Note: here stripe_nr has taken device replace into consideration,
	 * thus it can be larger than rbio->real_stripes.
	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
	 */
	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
	ASSERT(sector->page);

	/* We don't yet support subpage, thus pgoff should always be 0 */
	ASSERT(sector->pgoff == 0);

	stripe = &rbio->bioc->stripes[stripe_nr];
	disk_start = stripe->physical + sector_nr * sectorsize;

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * We can't merge these if they are from different devices or
		 * if they are not contiguous.
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, sector->page, sectorsize,
					   sector->pgoff);
			if (ret == sectorsize)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = bio_alloc(stripe->dev->bdev, max(bio_max_len >> PAGE_SHIFT, 1UL),
			opf, GFP_NOFS);
	bio->bi_iter.bi_sector = disk_start >> 9;
	bio->bi_private = rbio;

	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
	bio_list_add(bio_list, bio);
	return 0;
}
9; 1193e01bf588SChristoph Hellwig bio->bi_private = rbio; 119453b381b3SDavid Woodhouse 11953e77605dSQu Wenruo bio_add_page(bio, sector->page, sectorsize, sector->pgoff); 119653b381b3SDavid Woodhouse bio_list_add(bio_list, bio); 119753b381b3SDavid Woodhouse return 0; 119853b381b3SDavid Woodhouse } 119953b381b3SDavid Woodhouse 120053b381b3SDavid Woodhouse /* 120153b381b3SDavid Woodhouse * while we're doing the read/modify/write cycle, we could 120253b381b3SDavid Woodhouse * have errors in reading pages off the disk. This checks 120353b381b3SDavid Woodhouse * for errors and if we're not able to read the page it'll 120453b381b3SDavid Woodhouse * trigger parity reconstruction. The rmw will be finished 120553b381b3SDavid Woodhouse * after we've reconstructed the failed stripes 120653b381b3SDavid Woodhouse */ 120753b381b3SDavid Woodhouse static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) 120853b381b3SDavid Woodhouse { 120953b381b3SDavid Woodhouse if (rbio->faila >= 0 || rbio->failb >= 0) { 12102c8cdd6eSMiao Xie BUG_ON(rbio->faila == rbio->real_stripes - 1); 121153b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 121253b381b3SDavid Woodhouse } else { 121353b381b3SDavid Woodhouse finish_rmw(rbio); 121453b381b3SDavid Woodhouse } 121553b381b3SDavid Woodhouse } 121653b381b3SDavid Woodhouse 121700425dd9SQu Wenruo static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio) 121800425dd9SQu Wenruo { 121900425dd9SQu Wenruo const u32 sectorsize = rbio->bioc->fs_info->sectorsize; 122000425dd9SQu Wenruo struct bio_vec bvec; 122100425dd9SQu Wenruo struct bvec_iter iter; 122200425dd9SQu Wenruo u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - 122300425dd9SQu Wenruo rbio->bioc->raid_map[0]; 122400425dd9SQu Wenruo 122500425dd9SQu Wenruo if (bio_flagged(bio, BIO_CLONED)) 122600425dd9SQu Wenruo bio->bi_iter = btrfs_bio(bio)->iter; 122700425dd9SQu Wenruo 122800425dd9SQu Wenruo bio_for_each_segment(bvec, bio, iter) { 122900425dd9SQu Wenruo u32 bvec_offset; 123000425dd9SQu Wenruo 123100425dd9SQu Wenruo for (bvec_offset = 0; bvec_offset < bvec.bv_len; 123200425dd9SQu Wenruo bvec_offset += sectorsize, offset += sectorsize) { 123300425dd9SQu Wenruo int index = offset / sectorsize; 123400425dd9SQu Wenruo struct sector_ptr *sector = &rbio->bio_sectors[index]; 123500425dd9SQu Wenruo 123600425dd9SQu Wenruo sector->page = bvec.bv_page; 123700425dd9SQu Wenruo sector->pgoff = bvec.bv_offset + bvec_offset; 123800425dd9SQu Wenruo ASSERT(sector->pgoff < PAGE_SIZE); 123900425dd9SQu Wenruo } 124000425dd9SQu Wenruo } 124100425dd9SQu Wenruo } 124200425dd9SQu Wenruo 124353b381b3SDavid Woodhouse /* 124453b381b3SDavid Woodhouse * helper function to walk our bio list and populate the bio_pages array with 124553b381b3SDavid Woodhouse * the result. This seems expensive, but it is faster than constantly 124653b381b3SDavid Woodhouse * searching through the bio list as we setup the IO in finish_rmw or stripe 124753b381b3SDavid Woodhouse * reconstruction. 
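 * (A single pass over the bio list here is cheaper than searching that list again for every page later.)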
124853b381b3SDavid Woodhouse * 124953b381b3SDavid Woodhouse * This must be called before you trust the answers from page_in_rbio 125053b381b3SDavid Woodhouse */ 125153b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio) 125253b381b3SDavid Woodhouse { 125353b381b3SDavid Woodhouse struct bio *bio; 125453b381b3SDavid Woodhouse u64 start; 125553b381b3SDavid Woodhouse unsigned long stripe_offset; 125653b381b3SDavid Woodhouse unsigned long page_index; 125753b381b3SDavid Woodhouse 125853b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 125953b381b3SDavid Woodhouse bio_list_for_each(bio, &rbio->bio_list) { 12606592e58cSFilipe Manana struct bio_vec bvec; 12616592e58cSFilipe Manana struct bvec_iter iter; 12626592e58cSFilipe Manana int i = 0; 12636592e58cSFilipe Manana 12641201b58bSDavid Sterba start = bio->bi_iter.bi_sector << 9; 12654c664611SQu Wenruo stripe_offset = start - rbio->bioc->raid_map[0]; 126609cbfeafSKirill A. Shutemov page_index = stripe_offset >> PAGE_SHIFT; 126753b381b3SDavid Woodhouse 12686592e58cSFilipe Manana bio_for_each_segment(bvec, bio, iter) { 12696592e58cSFilipe Manana rbio->bio_pages[page_index + i] = bvec.bv_page; 12706592e58cSFilipe Manana i++; 12716592e58cSFilipe Manana } 127253b381b3SDavid Woodhouse } 127300425dd9SQu Wenruo /* This loop will replace above loop when the full switch is done */ 127400425dd9SQu Wenruo bio_list_for_each(bio, &rbio->bio_list) 127500425dd9SQu Wenruo index_one_bio(rbio, bio); 127600425dd9SQu Wenruo 127753b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 127853b381b3SDavid Woodhouse } 127953b381b3SDavid Woodhouse 128053b381b3SDavid Woodhouse /* 128153b381b3SDavid Woodhouse * this is called from one of two situations. We either 128253b381b3SDavid Woodhouse * have a full stripe from the higher layers, or we've read all 128353b381b3SDavid Woodhouse * the missing bits off disk. 128453b381b3SDavid Woodhouse * 128553b381b3SDavid Woodhouse * This will calculate the parity and then send down any 128653b381b3SDavid Woodhouse * changed blocks. 128753b381b3SDavid Woodhouse */ 128853b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio) 128953b381b3SDavid Woodhouse { 12904c664611SQu Wenruo struct btrfs_io_context *bioc = rbio->bioc; 1291*1145059aSQu Wenruo const u32 sectorsize = bioc->fs_info->sectorsize; 12921389053eSKees Cook void **pointers = rbio->finish_pointers; 129353b381b3SDavid Woodhouse int nr_data = rbio->nr_data; 129453b381b3SDavid Woodhouse int stripe; 12953e77605dSQu Wenruo int sectornr; 1296c17af965SDavid Sterba bool has_qstripe; 129753b381b3SDavid Woodhouse struct bio_list bio_list; 129853b381b3SDavid Woodhouse struct bio *bio; 129953b381b3SDavid Woodhouse int ret; 130053b381b3SDavid Woodhouse 130153b381b3SDavid Woodhouse bio_list_init(&bio_list); 130253b381b3SDavid Woodhouse 1303c17af965SDavid Sterba if (rbio->real_stripes - rbio->nr_data == 1) 1304c17af965SDavid Sterba has_qstripe = false; 1305c17af965SDavid Sterba else if (rbio->real_stripes - rbio->nr_data == 2) 1306c17af965SDavid Sterba has_qstripe = true; 1307c17af965SDavid Sterba else 130853b381b3SDavid Woodhouse BUG(); 130953b381b3SDavid Woodhouse 131053b381b3SDavid Woodhouse /* at this point we either have a full stripe, 131153b381b3SDavid Woodhouse * or we've read the full stripe from the drive. 131253b381b3SDavid Woodhouse * recalculate the parity and write the new results. 
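 * For raid5 the parity is the byte-wise XOR of the data sectors; for raid6 the Q stripe is the Reed-Solomon syndrome filled in by raid6_call.gen_syndrome() below.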
131353b381b3SDavid Woodhouse * 131453b381b3SDavid Woodhouse * We're not allowed to add any new bios to the 131553b381b3SDavid Woodhouse * bio list here, anyone else that wants to 131653b381b3SDavid Woodhouse * change this stripe needs to do their own rmw. 131753b381b3SDavid Woodhouse */ 131853b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 131953b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 132053b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 132153b381b3SDavid Woodhouse 1322b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 132353b381b3SDavid Woodhouse 132453b381b3SDavid Woodhouse /* 132553b381b3SDavid Woodhouse * now that we've set rmw_locked, run through the 132653b381b3SDavid Woodhouse * bio list one last time and map the page pointers 13274ae10b3aSChris Mason * 13284ae10b3aSChris Mason * We don't cache full rbios because we're assuming 13294ae10b3aSChris Mason * the higher layers are unlikely to use this area of 13304ae10b3aSChris Mason * the disk again soon. If they do use it again, 13314ae10b3aSChris Mason * hopefully they will send another full bio. 133253b381b3SDavid Woodhouse */ 133353b381b3SDavid Woodhouse index_rbio_pages(rbio); 13344ae10b3aSChris Mason if (!rbio_is_full(rbio)) 13354ae10b3aSChris Mason cache_rbio_pages(rbio); 13364ae10b3aSChris Mason else 13374ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 133853b381b3SDavid Woodhouse 13393e77605dSQu Wenruo for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { 1340*1145059aSQu Wenruo struct sector_ptr *sector; 1341*1145059aSQu Wenruo 1342*1145059aSQu Wenruo /* First collect one sector from each data stripe */ 134353b381b3SDavid Woodhouse for (stripe = 0; stripe < nr_data; stripe++) { 1344*1145059aSQu Wenruo sector = sector_in_rbio(rbio, stripe, sectornr, 0); 1345*1145059aSQu Wenruo pointers[stripe] = kmap_local_page(sector->page) + 1346*1145059aSQu Wenruo sector->pgoff; 134753b381b3SDavid Woodhouse } 134853b381b3SDavid Woodhouse 1349*1145059aSQu Wenruo /* Then add the parity stripe */ 1350*1145059aSQu Wenruo sector = rbio_pstripe_sector(rbio, sectornr); 1351*1145059aSQu Wenruo sector->uptodate = 1; 1352*1145059aSQu Wenruo pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff; 135353b381b3SDavid Woodhouse 1354c17af965SDavid Sterba if (has_qstripe) { 135553b381b3SDavid Woodhouse /* 1356*1145059aSQu Wenruo * RAID6, add the qstripe and call the library function 1357*1145059aSQu Wenruo * to fill in our p/q 135853b381b3SDavid Woodhouse */ 1359*1145059aSQu Wenruo sector = rbio_qstripe_sector(rbio, sectornr); 1360*1145059aSQu Wenruo sector->uptodate = 1; 1361*1145059aSQu Wenruo pointers[stripe++] = kmap_local_page(sector->page) + 1362*1145059aSQu Wenruo sector->pgoff; 136353b381b3SDavid Woodhouse 1364*1145059aSQu Wenruo raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, 136553b381b3SDavid Woodhouse pointers); 136653b381b3SDavid Woodhouse } else { 136753b381b3SDavid Woodhouse /* raid5 */ 1368*1145059aSQu Wenruo memcpy(pointers[nr_data], pointers[0], sectorsize); 1369*1145059aSQu Wenruo run_xor(pointers + 1, nr_data - 1, sectorsize); 137053b381b3SDavid Woodhouse } 137194a0b58dSIra Weiny for (stripe = stripe - 1; stripe >= 0; stripe--) 137294a0b58dSIra Weiny kunmap_local(pointers[stripe]); 137353b381b3SDavid Woodhouse } 137453b381b3SDavid Woodhouse 137553b381b3SDavid Woodhouse /* 137653b381b3SDavid Woodhouse * time to start writing. 
Make bios for everything from the 137753b381b3SDavid Woodhouse * higher layers (the bio_list in our rbio) and our p/q. Ignore 137853b381b3SDavid Woodhouse * everything else. 137953b381b3SDavid Woodhouse */ 13802c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 13813e77605dSQu Wenruo for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { 13823e77605dSQu Wenruo struct sector_ptr *sector; 13833e77605dSQu Wenruo 138453b381b3SDavid Woodhouse if (stripe < rbio->nr_data) { 13853e77605dSQu Wenruo sector = sector_in_rbio(rbio, stripe, sectornr, 1); 13863e77605dSQu Wenruo if (!sector) 138753b381b3SDavid Woodhouse continue; 138853b381b3SDavid Woodhouse } else { 13893e77605dSQu Wenruo sector = rbio_stripe_sector(rbio, stripe, sectornr); 139053b381b3SDavid Woodhouse } 139153b381b3SDavid Woodhouse 13923e77605dSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe, 13933e77605dSQu Wenruo sectornr, rbio->stripe_len, 1394e01bf588SChristoph Hellwig REQ_OP_WRITE); 139553b381b3SDavid Woodhouse if (ret) 139653b381b3SDavid Woodhouse goto cleanup; 139753b381b3SDavid Woodhouse } 139853b381b3SDavid Woodhouse } 139953b381b3SDavid Woodhouse 14004c664611SQu Wenruo if (likely(!bioc->num_tgtdevs)) 14012c8cdd6eSMiao Xie goto write_data; 14022c8cdd6eSMiao Xie 14032c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 14044c664611SQu Wenruo if (!bioc->tgtdev_map[stripe]) 14052c8cdd6eSMiao Xie continue; 14062c8cdd6eSMiao Xie 14073e77605dSQu Wenruo for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { 14083e77605dSQu Wenruo struct sector_ptr *sector; 14093e77605dSQu Wenruo 14102c8cdd6eSMiao Xie if (stripe < rbio->nr_data) { 14113e77605dSQu Wenruo sector = sector_in_rbio(rbio, stripe, sectornr, 1); 14123e77605dSQu Wenruo if (!sector) 14132c8cdd6eSMiao Xie continue; 14142c8cdd6eSMiao Xie } else { 14153e77605dSQu Wenruo sector = rbio_stripe_sector(rbio, stripe, sectornr); 14162c8cdd6eSMiao Xie } 14172c8cdd6eSMiao Xie 14183e77605dSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, 14194c664611SQu Wenruo rbio->bioc->tgtdev_map[stripe], 14203e77605dSQu Wenruo sectornr, rbio->stripe_len, 1421e01bf588SChristoph Hellwig REQ_OP_WRITE); 14222c8cdd6eSMiao Xie if (ret) 14232c8cdd6eSMiao Xie goto cleanup; 14242c8cdd6eSMiao Xie } 14252c8cdd6eSMiao Xie } 14262c8cdd6eSMiao Xie 14272c8cdd6eSMiao Xie write_data: 1428b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); 1429b89e1b01SMiao Xie BUG_ON(atomic_read(&rbio->stripes_pending) == 0); 143053b381b3SDavid Woodhouse 1431bf28a605SNikolay Borisov while ((bio = bio_list_pop(&bio_list))) { 143253b381b3SDavid Woodhouse bio->bi_end_io = raid_write_end_io; 14334e49ea4aSMike Christie 14344e49ea4aSMike Christie submit_bio(bio); 143553b381b3SDavid Woodhouse } 143653b381b3SDavid Woodhouse return; 143753b381b3SDavid Woodhouse 143853b381b3SDavid Woodhouse cleanup: 143958efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 1440785884fcSLiu Bo 1441785884fcSLiu Bo while ((bio = bio_list_pop(&bio_list))) 1442785884fcSLiu Bo bio_put(bio); 144353b381b3SDavid Woodhouse } 144453b381b3SDavid Woodhouse 144553b381b3SDavid Woodhouse /* 144653b381b3SDavid Woodhouse * helper to find the stripe number for a given bio. Used to figure out which 144753b381b3SDavid Woodhouse * stripe has failed. This expects the bio to correspond to a physical disk, 144853b381b3SDavid Woodhouse * so it looks up based on physical sector numbers. 
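 * A stripe only matches when both the bdev and the physical range agree, since different devices can hold identical physical offsets.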
144953b381b3SDavid Woodhouse */ 145053b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio, 145153b381b3SDavid Woodhouse struct bio *bio) 145253b381b3SDavid Woodhouse { 14534f024f37SKent Overstreet u64 physical = bio->bi_iter.bi_sector; 145453b381b3SDavid Woodhouse int i; 14554c664611SQu Wenruo struct btrfs_io_stripe *stripe; 145653b381b3SDavid Woodhouse 145753b381b3SDavid Woodhouse physical <<= 9; 145853b381b3SDavid Woodhouse 14594c664611SQu Wenruo for (i = 0; i < rbio->bioc->num_stripes; i++) { 14604c664611SQu Wenruo stripe = &rbio->bioc->stripes[i]; 146183025863SNikolay Borisov if (in_range(physical, stripe->physical, rbio->stripe_len) && 1462309dca30SChristoph Hellwig stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) { 146353b381b3SDavid Woodhouse return i; 146453b381b3SDavid Woodhouse } 146553b381b3SDavid Woodhouse } 146653b381b3SDavid Woodhouse return -1; 146753b381b3SDavid Woodhouse } 146853b381b3SDavid Woodhouse 146953b381b3SDavid Woodhouse /* 147053b381b3SDavid Woodhouse * helper to find the stripe number for a given 147153b381b3SDavid Woodhouse * bio (before mapping). Used to figure out which stripe has 147253b381b3SDavid Woodhouse * failed. This looks up based on logical block numbers. 147353b381b3SDavid Woodhouse */ 147453b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, 147553b381b3SDavid Woodhouse struct bio *bio) 147653b381b3SDavid Woodhouse { 14771201b58bSDavid Sterba u64 logical = bio->bi_iter.bi_sector << 9; 147853b381b3SDavid Woodhouse int i; 147953b381b3SDavid Woodhouse 148053b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_data; i++) { 14814c664611SQu Wenruo u64 stripe_start = rbio->bioc->raid_map[i]; 148283025863SNikolay Borisov 148383025863SNikolay Borisov if (in_range(logical, stripe_start, rbio->stripe_len)) 148453b381b3SDavid Woodhouse return i; 148553b381b3SDavid Woodhouse } 148653b381b3SDavid Woodhouse return -1; 148753b381b3SDavid Woodhouse } 148853b381b3SDavid Woodhouse 148953b381b3SDavid Woodhouse /* 149053b381b3SDavid Woodhouse * returns -EIO if we had too many failures 149153b381b3SDavid Woodhouse */ 149253b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) 149353b381b3SDavid Woodhouse { 149453b381b3SDavid Woodhouse unsigned long flags; 149553b381b3SDavid Woodhouse int ret = 0; 149653b381b3SDavid Woodhouse 149753b381b3SDavid Woodhouse spin_lock_irqsave(&rbio->bio_list_lock, flags); 149853b381b3SDavid Woodhouse 149953b381b3SDavid Woodhouse /* we already know this stripe is bad, move on */ 150053b381b3SDavid Woodhouse if (rbio->faila == failed || rbio->failb == failed) 150153b381b3SDavid Woodhouse goto out; 150253b381b3SDavid Woodhouse 150353b381b3SDavid Woodhouse if (rbio->faila == -1) { 150453b381b3SDavid Woodhouse /* first failure on this rbio */ 150553b381b3SDavid Woodhouse rbio->faila = failed; 1506b89e1b01SMiao Xie atomic_inc(&rbio->error); 150753b381b3SDavid Woodhouse } else if (rbio->failb == -1) { 150853b381b3SDavid Woodhouse /* second failure on this rbio */ 150953b381b3SDavid Woodhouse rbio->failb = failed; 1510b89e1b01SMiao Xie atomic_inc(&rbio->error); 151153b381b3SDavid Woodhouse } else { 151253b381b3SDavid Woodhouse ret = -EIO; 151353b381b3SDavid Woodhouse } 151453b381b3SDavid Woodhouse out: 151553b381b3SDavid Woodhouse spin_unlock_irqrestore(&rbio->bio_list_lock, flags); 151653b381b3SDavid Woodhouse 151753b381b3SDavid Woodhouse return ret; 151853b381b3SDavid Woodhouse } 151953b381b3SDavid Woodhouse 152053b381b3SDavid Woodhouse /* 
152153b381b3SDavid Woodhouse * helper to fail a stripe based on a physical disk 152253b381b3SDavid Woodhouse * bio. 152353b381b3SDavid Woodhouse */ 152453b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, 152553b381b3SDavid Woodhouse struct bio *bio) 152653b381b3SDavid Woodhouse { 152753b381b3SDavid Woodhouse int failed = find_bio_stripe(rbio, bio); 152853b381b3SDavid Woodhouse 152953b381b3SDavid Woodhouse if (failed < 0) 153053b381b3SDavid Woodhouse return -EIO; 153153b381b3SDavid Woodhouse 153253b381b3SDavid Woodhouse return fail_rbio_index(rbio, failed); 153353b381b3SDavid Woodhouse } 153453b381b3SDavid Woodhouse 153553b381b3SDavid Woodhouse /* 153653b381b3SDavid Woodhouse * this sets each page in the bio uptodate. It should only be used on private 153753b381b3SDavid Woodhouse * rbio pages, nothing that comes in from the higher layers 153853b381b3SDavid Woodhouse */ 153953b381b3SDavid Woodhouse static void set_bio_pages_uptodate(struct bio *bio) 154053b381b3SDavid Woodhouse { 15410198e5b7SLiu Bo struct bio_vec *bvec; 15426dc4f100SMing Lei struct bvec_iter_all iter_all; 154353b381b3SDavid Woodhouse 15440198e5b7SLiu Bo ASSERT(!bio_flagged(bio, BIO_CLONED)); 15456592e58cSFilipe Manana 15462b070cfeSChristoph Hellwig bio_for_each_segment_all(bvec, bio, iter_all) 15470198e5b7SLiu Bo SetPageUptodate(bvec->bv_page); 154853b381b3SDavid Woodhouse } 154953b381b3SDavid Woodhouse 155053b381b3SDavid Woodhouse /* 155153b381b3SDavid Woodhouse * end io for the read phase of the rmw cycle. All the bios here are physical 155253b381b3SDavid Woodhouse * stripe bios we've read from the disk so we can recalculate the parity of the 155353b381b3SDavid Woodhouse * stripe. 155453b381b3SDavid Woodhouse * 155553b381b3SDavid Woodhouse * This will usually kick off finish_rmw once all the bios are read in, but it 155653b381b3SDavid Woodhouse * may trigger parity reconstruction if we had any errors along the way 155753b381b3SDavid Woodhouse */ 15584246a0b6SChristoph Hellwig static void raid_rmw_end_io(struct bio *bio) 155953b381b3SDavid Woodhouse { 156053b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 156153b381b3SDavid Woodhouse 15624e4cbee9SChristoph Hellwig if (bio->bi_status) 156353b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 156453b381b3SDavid Woodhouse else 156553b381b3SDavid Woodhouse set_bio_pages_uptodate(bio); 156653b381b3SDavid Woodhouse 156753b381b3SDavid Woodhouse bio_put(bio); 156853b381b3SDavid Woodhouse 1569b89e1b01SMiao Xie if (!atomic_dec_and_test(&rbio->stripes_pending)) 157053b381b3SDavid Woodhouse return; 157153b381b3SDavid Woodhouse 15724c664611SQu Wenruo if (atomic_read(&rbio->error) > rbio->bioc->max_errors) 157353b381b3SDavid Woodhouse goto cleanup; 157453b381b3SDavid Woodhouse 157553b381b3SDavid Woodhouse /* 157653b381b3SDavid Woodhouse * this will normally call finish_rmw to start our write 157753b381b3SDavid Woodhouse * but if there are any failed stripes we'll reconstruct 157853b381b3SDavid Woodhouse * from parity first 157953b381b3SDavid Woodhouse */ 158053b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 158153b381b3SDavid Woodhouse return; 158253b381b3SDavid Woodhouse 158353b381b3SDavid Woodhouse cleanup: 158453b381b3SDavid Woodhouse 158558efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 158653b381b3SDavid Woodhouse } 158753b381b3SDavid Woodhouse 158853b381b3SDavid Woodhouse /* 158953b381b3SDavid Woodhouse * the stripe must be locked by the caller. 
It will 159053b381b3SDavid Woodhouse * unlock after all the writes are done 159153b381b3SDavid Woodhouse */ 159253b381b3SDavid Woodhouse static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) 159353b381b3SDavid Woodhouse { 159453b381b3SDavid Woodhouse int bios_to_read = 0; 159553b381b3SDavid Woodhouse struct bio_list bio_list; 159653b381b3SDavid Woodhouse int ret; 15973e77605dSQu Wenruo int sectornr; 159853b381b3SDavid Woodhouse int stripe; 159953b381b3SDavid Woodhouse struct bio *bio; 160053b381b3SDavid Woodhouse 160153b381b3SDavid Woodhouse bio_list_init(&bio_list); 160253b381b3SDavid Woodhouse 160353b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 160453b381b3SDavid Woodhouse if (ret) 160553b381b3SDavid Woodhouse goto cleanup; 160653b381b3SDavid Woodhouse 160753b381b3SDavid Woodhouse index_rbio_pages(rbio); 160853b381b3SDavid Woodhouse 1609b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 161053b381b3SDavid Woodhouse /* 161153b381b3SDavid Woodhouse * build a list of bios to read all the missing parts of this 161253b381b3SDavid Woodhouse * stripe 161353b381b3SDavid Woodhouse */ 161453b381b3SDavid Woodhouse for (stripe = 0; stripe < rbio->nr_data; stripe++) { 16153e77605dSQu Wenruo for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { 16163e77605dSQu Wenruo struct sector_ptr *sector; 16173e77605dSQu Wenruo 161853b381b3SDavid Woodhouse /* 16193e77605dSQu Wenruo * We want to find all the sectors missing from the 16203e77605dSQu Wenruo * rbio and read them from the disk. If * sector_in_rbio() 16213e77605dSQu Wenruo * finds a page in the bio list we don't need to read 16223e77605dSQu Wenruo * it off the stripe. 162353b381b3SDavid Woodhouse */ 16243e77605dSQu Wenruo sector = sector_in_rbio(rbio, stripe, sectornr, 1); 16253e77605dSQu Wenruo if (sector) 162653b381b3SDavid Woodhouse continue; 162753b381b3SDavid Woodhouse 16283e77605dSQu Wenruo sector = rbio_stripe_sector(rbio, stripe, sectornr); 16294ae10b3aSChris Mason /* 16303e77605dSQu Wenruo * The bio cache may have handed us an uptodate page. 16313e77605dSQu Wenruo * If so, be happy and use it. 16324ae10b3aSChris Mason */ 16333e77605dSQu Wenruo if (sector->uptodate) 16344ae10b3aSChris Mason continue; 16354ae10b3aSChris Mason 16363e77605dSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, 16373e77605dSQu Wenruo stripe, sectornr, rbio->stripe_len, 1638e01bf588SChristoph Hellwig REQ_OP_READ); 163953b381b3SDavid Woodhouse if (ret) 164053b381b3SDavid Woodhouse goto cleanup; 164153b381b3SDavid Woodhouse } 164253b381b3SDavid Woodhouse } 164353b381b3SDavid Woodhouse 164453b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 164553b381b3SDavid Woodhouse if (!bios_to_read) { 164653b381b3SDavid Woodhouse /* 164753b381b3SDavid Woodhouse * this can happen if others have merged with 164853b381b3SDavid Woodhouse * us, it means there is nothing left to read. 164953b381b3SDavid Woodhouse * But if there are missing devices it may not be 165053b381b3SDavid Woodhouse * safe to do the full stripe write yet. 165153b381b3SDavid Woodhouse */ 165253b381b3SDavid Woodhouse goto finish; 165353b381b3SDavid Woodhouse } 165453b381b3SDavid Woodhouse 165553b381b3SDavid Woodhouse /* 16564c664611SQu Wenruo * The bioc may be freed once we submit the last bio. Make sure not to 16574c664611SQu Wenruo * touch it after that. 
165853b381b3SDavid Woodhouse */ 1659b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bios_to_read); 1660bf28a605SNikolay Borisov while ((bio = bio_list_pop(&bio_list))) { 166153b381b3SDavid Woodhouse bio->bi_end_io = raid_rmw_end_io; 166253b381b3SDavid Woodhouse 16636a258d72SQu Wenruo btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); 166453b381b3SDavid Woodhouse 16654e49ea4aSMike Christie submit_bio(bio); 166653b381b3SDavid Woodhouse } 166753b381b3SDavid Woodhouse /* the actual write will happen once the reads are done */ 166853b381b3SDavid Woodhouse return 0; 166953b381b3SDavid Woodhouse 167053b381b3SDavid Woodhouse cleanup: 167158efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 1672785884fcSLiu Bo 1673785884fcSLiu Bo while ((bio = bio_list_pop(&bio_list))) 1674785884fcSLiu Bo bio_put(bio); 1675785884fcSLiu Bo 167653b381b3SDavid Woodhouse return -EIO; 167753b381b3SDavid Woodhouse 167853b381b3SDavid Woodhouse finish: 167953b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 168053b381b3SDavid Woodhouse return 0; 168153b381b3SDavid Woodhouse } 168253b381b3SDavid Woodhouse 168353b381b3SDavid Woodhouse /* 168453b381b3SDavid Woodhouse * if the upper layers pass in a full stripe, we thank them by only allocating 168553b381b3SDavid Woodhouse * enough pages to hold the parity, and sending it all down quickly. 168653b381b3SDavid Woodhouse */ 168753b381b3SDavid Woodhouse static int full_stripe_write(struct btrfs_raid_bio *rbio) 168853b381b3SDavid Woodhouse { 168953b381b3SDavid Woodhouse int ret; 169053b381b3SDavid Woodhouse 169153b381b3SDavid Woodhouse ret = alloc_rbio_parity_pages(rbio); 16923cd846d1SMiao Xie if (ret) { 16933cd846d1SMiao Xie __free_raid_bio(rbio); 169453b381b3SDavid Woodhouse return ret; 16953cd846d1SMiao Xie } 169653b381b3SDavid Woodhouse 169753b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 169853b381b3SDavid Woodhouse if (ret == 0) 169953b381b3SDavid Woodhouse finish_rmw(rbio); 170053b381b3SDavid Woodhouse return 0; 170153b381b3SDavid Woodhouse } 170253b381b3SDavid Woodhouse 170353b381b3SDavid Woodhouse /* 170453b381b3SDavid Woodhouse * partial stripe writes get handed over to async helpers. 170553b381b3SDavid Woodhouse * We're really hoping to merge a few more writes into this 170653b381b3SDavid Woodhouse * rbio before calculating new parity 170753b381b3SDavid Woodhouse */ 170853b381b3SDavid Woodhouse static int partial_stripe_write(struct btrfs_raid_bio *rbio) 170953b381b3SDavid Woodhouse { 171053b381b3SDavid Woodhouse int ret; 171153b381b3SDavid Woodhouse 171253b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 171353b381b3SDavid Woodhouse if (ret == 0) 1714cf6a4a75SDavid Sterba start_async_work(rbio, rmw_work); 171553b381b3SDavid Woodhouse return 0; 171653b381b3SDavid Woodhouse } 171753b381b3SDavid Woodhouse 171853b381b3SDavid Woodhouse /* 171953b381b3SDavid Woodhouse * sometimes while we were reading from the drive to 172053b381b3SDavid Woodhouse * recalculate parity, enough new bios come in to create 172153b381b3SDavid Woodhouse * a full stripe. 
So we do a check here to see if we can 172253b381b3SDavid Woodhouse * go directly to finish_rmw 172353b381b3SDavid Woodhouse */ 172453b381b3SDavid Woodhouse static int __raid56_parity_write(struct btrfs_raid_bio *rbio) 172553b381b3SDavid Woodhouse { 172653b381b3SDavid Woodhouse /* head off into rmw land if we don't have a full stripe */ 172753b381b3SDavid Woodhouse if (!rbio_is_full(rbio)) 172853b381b3SDavid Woodhouse return partial_stripe_write(rbio); 172953b381b3SDavid Woodhouse return full_stripe_write(rbio); 173053b381b3SDavid Woodhouse } 173153b381b3SDavid Woodhouse 173253b381b3SDavid Woodhouse /* 17336ac0f488SChris Mason * We use plugging callbacks to collect full stripes. 17346ac0f488SChris Mason * Any time we get a partial stripe write while plugged 17356ac0f488SChris Mason * we collect it into a list. When the unplug comes down, 17366ac0f488SChris Mason * we sort the list by logical block number and merge 17376ac0f488SChris Mason * everything we can into the same rbios 17386ac0f488SChris Mason */ 17396ac0f488SChris Mason struct btrfs_plug_cb { 17406ac0f488SChris Mason struct blk_plug_cb cb; 17416ac0f488SChris Mason struct btrfs_fs_info *info; 17426ac0f488SChris Mason struct list_head rbio_list; 17436ac0f488SChris Mason struct btrfs_work work; 17446ac0f488SChris Mason }; 17456ac0f488SChris Mason 17466ac0f488SChris Mason /* 17476ac0f488SChris Mason * rbios on the plug list are sorted for easier merging. 17486ac0f488SChris Mason */ 17494f0f586bSSami Tolvanen static int plug_cmp(void *priv, const struct list_head *a, 17504f0f586bSSami Tolvanen const struct list_head *b) 17516ac0f488SChris Mason { 1752214cc184SDavid Sterba const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio, 17536ac0f488SChris Mason plug_list); 1754214cc184SDavid Sterba const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, 17556ac0f488SChris Mason plug_list); 17564f024f37SKent Overstreet u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; 17574f024f37SKent Overstreet u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; 17586ac0f488SChris Mason 17596ac0f488SChris Mason if (a_sector < b_sector) 17606ac0f488SChris Mason return -1; 17616ac0f488SChris Mason if (a_sector > b_sector) 17626ac0f488SChris Mason return 1; 17636ac0f488SChris Mason return 0; 17646ac0f488SChris Mason } 17656ac0f488SChris Mason 17666ac0f488SChris Mason static void run_plug(struct btrfs_plug_cb *plug) 17676ac0f488SChris Mason { 17686ac0f488SChris Mason struct btrfs_raid_bio *cur; 17696ac0f488SChris Mason struct btrfs_raid_bio *last = NULL; 17706ac0f488SChris Mason 17716ac0f488SChris Mason /* 17726ac0f488SChris Mason * sort our plug list then try to merge 17736ac0f488SChris Mason * everything we can in hopes of creating full 17746ac0f488SChris Mason * stripes. 
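 * Full rbios get written immediately, mergeable neighbours are combined, and whatever remains takes the normal partial write path.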
17756ac0f488SChris Mason */ 17766ac0f488SChris Mason list_sort(NULL, &plug->rbio_list, plug_cmp); 17776ac0f488SChris Mason while (!list_empty(&plug->rbio_list)) { 17786ac0f488SChris Mason cur = list_entry(plug->rbio_list.next, 17796ac0f488SChris Mason struct btrfs_raid_bio, plug_list); 17806ac0f488SChris Mason list_del_init(&cur->plug_list); 17816ac0f488SChris Mason 17826ac0f488SChris Mason if (rbio_is_full(cur)) { 1783c7b562c5SDavid Sterba int ret; 1784c7b562c5SDavid Sterba 17856ac0f488SChris Mason /* we have a full stripe, send it down */ 1786c7b562c5SDavid Sterba ret = full_stripe_write(cur); 1787c7b562c5SDavid Sterba BUG_ON(ret); 17886ac0f488SChris Mason continue; 17896ac0f488SChris Mason } 17906ac0f488SChris Mason if (last) { 17916ac0f488SChris Mason if (rbio_can_merge(last, cur)) { 17926ac0f488SChris Mason merge_rbio(last, cur); 17936ac0f488SChris Mason __free_raid_bio(cur); 17946ac0f488SChris Mason continue; 17956ac0f488SChris Mason 17966ac0f488SChris Mason } 17976ac0f488SChris Mason __raid56_parity_write(last); 17986ac0f488SChris Mason } 17996ac0f488SChris Mason last = cur; 18006ac0f488SChris Mason } 18016ac0f488SChris Mason if (last) { 18026ac0f488SChris Mason __raid56_parity_write(last); 18036ac0f488SChris Mason } 18046ac0f488SChris Mason kfree(plug); 18056ac0f488SChris Mason } 18066ac0f488SChris Mason 18076ac0f488SChris Mason /* 18086ac0f488SChris Mason * if the unplug comes from schedule, we have to push the 18096ac0f488SChris Mason * work off to a helper thread 18106ac0f488SChris Mason */ 18116ac0f488SChris Mason static void unplug_work(struct btrfs_work *work) 18126ac0f488SChris Mason { 18136ac0f488SChris Mason struct btrfs_plug_cb *plug; 18146ac0f488SChris Mason plug = container_of(work, struct btrfs_plug_cb, work); 18156ac0f488SChris Mason run_plug(plug); 18166ac0f488SChris Mason } 18176ac0f488SChris Mason 18186ac0f488SChris Mason static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) 18196ac0f488SChris Mason { 18206ac0f488SChris Mason struct btrfs_plug_cb *plug; 18216ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 18226ac0f488SChris Mason 18236ac0f488SChris Mason if (from_schedule) { 1824a0cac0ecSOmar Sandoval btrfs_init_work(&plug->work, unplug_work, NULL, NULL); 1825d05a33acSQu Wenruo btrfs_queue_work(plug->info->rmw_workers, 18266ac0f488SChris Mason &plug->work); 18276ac0f488SChris Mason return; 18286ac0f488SChris Mason } 18296ac0f488SChris Mason run_plug(plug); 18306ac0f488SChris Mason } 18316ac0f488SChris Mason 18326ac0f488SChris Mason /* 183353b381b3SDavid Woodhouse * our main entry point for writes from the rest of the FS. 
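 * Full stripes are sent straight down via full_stripe_write(); partial stripes are parked on the plug list (or handed to the async rmw helpers) in hopes of merging with later writes.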
183453b381b3SDavid Woodhouse */ 1835cc353a8bSQu Wenruo int raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc, u32 stripe_len) 183653b381b3SDavid Woodhouse { 18376a258d72SQu Wenruo struct btrfs_fs_info *fs_info = bioc->fs_info; 183853b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 18396ac0f488SChris Mason struct btrfs_plug_cb *plug = NULL; 18406ac0f488SChris Mason struct blk_plug_cb *cb; 18414245215dSMiao Xie int ret; 184253b381b3SDavid Woodhouse 18434c664611SQu Wenruo rbio = alloc_rbio(fs_info, bioc, stripe_len); 1844af8e2d1dSMiao Xie if (IS_ERR(rbio)) { 18454c664611SQu Wenruo btrfs_put_bioc(bioc); 184653b381b3SDavid Woodhouse return PTR_ERR(rbio); 1847af8e2d1dSMiao Xie } 184853b381b3SDavid Woodhouse bio_list_add(&rbio->bio_list, bio); 18494f024f37SKent Overstreet rbio->bio_list_bytes = bio->bi_iter.bi_size; 18501b94b556SMiao Xie rbio->operation = BTRFS_RBIO_WRITE; 18516ac0f488SChris Mason 18520b246afaSJeff Mahoney btrfs_bio_counter_inc_noblocked(fs_info); 18534245215dSMiao Xie rbio->generic_bio_cnt = 1; 18544245215dSMiao Xie 18556ac0f488SChris Mason /* 18566ac0f488SChris Mason * don't plug on full rbios, just get them out the door 18576ac0f488SChris Mason * as quickly as we can 18586ac0f488SChris Mason */ 18594245215dSMiao Xie if (rbio_is_full(rbio)) { 18604245215dSMiao Xie ret = full_stripe_write(rbio); 18614245215dSMiao Xie if (ret) 18620b246afaSJeff Mahoney btrfs_bio_counter_dec(fs_info); 18634245215dSMiao Xie return ret; 18644245215dSMiao Xie } 18656ac0f488SChris Mason 18660b246afaSJeff Mahoney cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug)); 18676ac0f488SChris Mason if (cb) { 18686ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 18696ac0f488SChris Mason if (!plug->info) { 18700b246afaSJeff Mahoney plug->info = fs_info; 18716ac0f488SChris Mason INIT_LIST_HEAD(&plug->rbio_list); 18726ac0f488SChris Mason } 18736ac0f488SChris Mason list_add_tail(&rbio->plug_list, &plug->rbio_list); 18744245215dSMiao Xie ret = 0; 18756ac0f488SChris Mason } else { 18764245215dSMiao Xie ret = __raid56_parity_write(rbio); 18774245215dSMiao Xie if (ret) 18780b246afaSJeff Mahoney btrfs_bio_counter_dec(fs_info); 187953b381b3SDavid Woodhouse } 18804245215dSMiao Xie return ret; 18816ac0f488SChris Mason } 188253b381b3SDavid Woodhouse 188353b381b3SDavid Woodhouse /* 188453b381b3SDavid Woodhouse * all parity reconstruction happens here. We've read in everything 188553b381b3SDavid Woodhouse * we can find from the drives and this does the heavy lifting of 188653b381b3SDavid Woodhouse * sorting the good from the bad. 
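 * faila/failb were filled in by the failed-read accounting; each sector of a dead stripe is rebuilt below from the surviving stripes.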
188753b381b3SDavid Woodhouse */ 188853b381b3SDavid Woodhouse static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) 188953b381b3SDavid Woodhouse { 189007e4d380SQu Wenruo const u32 sectorsize = rbio->bioc->fs_info->sectorsize; 189107e4d380SQu Wenruo int sectornr, stripe; 189253b381b3SDavid Woodhouse void **pointers; 189394a0b58dSIra Weiny void **unmap_array; 189453b381b3SDavid Woodhouse int faila = -1, failb = -1; 189558efbc9fSOmar Sandoval blk_status_t err; 189653b381b3SDavid Woodhouse int i; 189753b381b3SDavid Woodhouse 189807e4d380SQu Wenruo /* 189907e4d380SQu Wenruo * This array stores the pointer for each sector, thus it has the extra 190007e4d380SQu Wenruo * pgoff value added from each sector 190107e4d380SQu Wenruo */ 190231e818feSDavid Sterba pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); 190353b381b3SDavid Woodhouse if (!pointers) { 190458efbc9fSOmar Sandoval err = BLK_STS_RESOURCE; 190553b381b3SDavid Woodhouse goto cleanup_io; 190653b381b3SDavid Woodhouse } 190753b381b3SDavid Woodhouse 190894a0b58dSIra Weiny /* 190994a0b58dSIra Weiny * Store copy of pointers that does not get reordered during 191094a0b58dSIra Weiny * reconstruction so that kunmap_local works. 191194a0b58dSIra Weiny */ 191294a0b58dSIra Weiny unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); 191394a0b58dSIra Weiny if (!unmap_array) { 191494a0b58dSIra Weiny err = BLK_STS_RESOURCE; 191594a0b58dSIra Weiny goto cleanup_pointers; 191694a0b58dSIra Weiny } 191794a0b58dSIra Weiny 191853b381b3SDavid Woodhouse faila = rbio->faila; 191953b381b3SDavid Woodhouse failb = rbio->failb; 192053b381b3SDavid Woodhouse 1921b4ee1782SOmar Sandoval if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 1922b4ee1782SOmar Sandoval rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { 192353b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 192453b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 192553b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 192653b381b3SDavid Woodhouse } 192753b381b3SDavid Woodhouse 192853b381b3SDavid Woodhouse index_rbio_pages(rbio); 192953b381b3SDavid Woodhouse 193007e4d380SQu Wenruo for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { 193107e4d380SQu Wenruo struct sector_ptr *sector; 193207e4d380SQu Wenruo 19335a6ac9eaSMiao Xie /* 19345a6ac9eaSMiao Xie * Now we just use bitmap to mark the horizontal stripes in 19355a6ac9eaSMiao Xie * which we have data when doing parity scrub. 
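 * Sectors whose dbitmap bit is clear were not covered by this scrub and are simply skipped.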
19365a6ac9eaSMiao Xie */ 19375a6ac9eaSMiao Xie if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && 193807e4d380SQu Wenruo !test_bit(sectornr, rbio->dbitmap)) 19395a6ac9eaSMiao Xie continue; 19405a6ac9eaSMiao Xie 194194a0b58dSIra Weiny /* 194207e4d380SQu Wenruo * Setup our array of pointers with sectors from each stripe 194394a0b58dSIra Weiny * 194494a0b58dSIra Weiny * NOTE: store a duplicate array of pointers to preserve the 194594a0b58dSIra Weiny * pointer order 194653b381b3SDavid Woodhouse */ 19472c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 194853b381b3SDavid Woodhouse /* 194907e4d380SQu Wenruo * If we're rebuilding a read, we have to use 195053b381b3SDavid Woodhouse * pages from the bio list 195153b381b3SDavid Woodhouse */ 1952b4ee1782SOmar Sandoval if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || 1953b4ee1782SOmar Sandoval rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && 195453b381b3SDavid Woodhouse (stripe == faila || stripe == failb)) { 195507e4d380SQu Wenruo sector = sector_in_rbio(rbio, stripe, sectornr, 0); 195653b381b3SDavid Woodhouse } else { 195707e4d380SQu Wenruo sector = rbio_stripe_sector(rbio, stripe, sectornr); 195853b381b3SDavid Woodhouse } 195907e4d380SQu Wenruo ASSERT(sector->page); 196007e4d380SQu Wenruo pointers[stripe] = kmap_local_page(sector->page) + 196107e4d380SQu Wenruo sector->pgoff; 196294a0b58dSIra Weiny unmap_array[stripe] = pointers[stripe]; 196353b381b3SDavid Woodhouse } 196453b381b3SDavid Woodhouse 196507e4d380SQu Wenruo /* All raid6 handling here */ 19664c664611SQu Wenruo if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) { 196707e4d380SQu Wenruo /* Single failure, rebuild from parity raid5 style */ 196853b381b3SDavid Woodhouse if (failb < 0) { 196953b381b3SDavid Woodhouse if (faila == rbio->nr_data) { 197053b381b3SDavid Woodhouse /* 197153b381b3SDavid Woodhouse * Just the P stripe has failed, without 197253b381b3SDavid Woodhouse * a bad data or Q stripe. 197353b381b3SDavid Woodhouse * TODO, we should redo the xor here. 197453b381b3SDavid Woodhouse */ 197558efbc9fSOmar Sandoval err = BLK_STS_IOERR; 197653b381b3SDavid Woodhouse goto cleanup; 197753b381b3SDavid Woodhouse } 197853b381b3SDavid Woodhouse /* 197953b381b3SDavid Woodhouse * a single failure in raid6 is rebuilt 198053b381b3SDavid Woodhouse * in the pstripe code below 198153b381b3SDavid Woodhouse */ 198253b381b3SDavid Woodhouse goto pstripe; 198353b381b3SDavid Woodhouse } 198453b381b3SDavid Woodhouse 198553b381b3SDavid Woodhouse /* make sure our ps and qs are in order */ 1986b7d2083aSNikolay Borisov if (faila > failb) 1987b7d2083aSNikolay Borisov swap(faila, failb); 198853b381b3SDavid Woodhouse 198953b381b3SDavid Woodhouse /* if the q stripe is failed, do a pstripe reconstruction 199053b381b3SDavid Woodhouse * from the xors. 199153b381b3SDavid Woodhouse * If both the q stripe and the P stripe are failed, we're 199253b381b3SDavid Woodhouse * here due to a crc mismatch and we can't give them the 199353b381b3SDavid Woodhouse * data they want 199453b381b3SDavid Woodhouse */ 19954c664611SQu Wenruo if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) { 19964c664611SQu Wenruo if (rbio->bioc->raid_map[faila] == 19978e5cfb55SZhao Lei RAID5_P_STRIPE) { 199858efbc9fSOmar Sandoval err = BLK_STS_IOERR; 199953b381b3SDavid Woodhouse goto cleanup; 200053b381b3SDavid Woodhouse } 200153b381b3SDavid Woodhouse /* 200253b381b3SDavid Woodhouse * otherwise we have one bad data stripe and 200353b381b3SDavid Woodhouse * a good P stripe. raid5! 
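 * (the lost data is just P XORed with every surviving data sector, which is what the memcpy() plus run_xor() below compute)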
200453b381b3SDavid Woodhouse */ 200553b381b3SDavid Woodhouse goto pstripe; 200653b381b3SDavid Woodhouse } 200753b381b3SDavid Woodhouse 20084c664611SQu Wenruo if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) { 20092c8cdd6eSMiao Xie raid6_datap_recov(rbio->real_stripes, 201007e4d380SQu Wenruo sectorsize, faila, pointers); 201153b381b3SDavid Woodhouse } else { 20122c8cdd6eSMiao Xie raid6_2data_recov(rbio->real_stripes, 201307e4d380SQu Wenruo sectorsize, faila, failb, 201453b381b3SDavid Woodhouse pointers); 201553b381b3SDavid Woodhouse } 201653b381b3SDavid Woodhouse } else { 201753b381b3SDavid Woodhouse void *p; 201853b381b3SDavid Woodhouse 201953b381b3SDavid Woodhouse /* rebuild from P stripe here (raid5 or raid6) */ 202053b381b3SDavid Woodhouse BUG_ON(failb != -1); 202153b381b3SDavid Woodhouse pstripe: 202253b381b3SDavid Woodhouse /* Copy parity block into failed block to start with */ 202307e4d380SQu Wenruo memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize); 202453b381b3SDavid Woodhouse 202553b381b3SDavid Woodhouse /* rearrange the pointer array */ 202653b381b3SDavid Woodhouse p = pointers[faila]; 202753b381b3SDavid Woodhouse for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) 202853b381b3SDavid Woodhouse pointers[stripe] = pointers[stripe + 1]; 202953b381b3SDavid Woodhouse pointers[rbio->nr_data - 1] = p; 203053b381b3SDavid Woodhouse 203153b381b3SDavid Woodhouse /* xor in the rest */ 203207e4d380SQu Wenruo run_xor(pointers, rbio->nr_data - 1, sectorsize); 203353b381b3SDavid Woodhouse } 203453b381b3SDavid Woodhouse /* if we're doing this rebuild as part of an rmw, go through 203553b381b3SDavid Woodhouse * and set all of our private rbio pages in the 203653b381b3SDavid Woodhouse * failed stripes as uptodate. This way finish_rmw will 203753b381b3SDavid Woodhouse * know they can be trusted. If this was a read reconstruction, 203853b381b3SDavid Woodhouse * other endio functions will fiddle the uptodate bits 203953b381b3SDavid Woodhouse */ 20401b94b556SMiao Xie if (rbio->operation == BTRFS_RBIO_WRITE) { 204107e4d380SQu Wenruo for (i = 0; i < rbio->stripe_nsectors; i++) { 204253b381b3SDavid Woodhouse if (faila != -1) { 204307e4d380SQu Wenruo sector = rbio_stripe_sector(rbio, faila, i); 204407e4d380SQu Wenruo sector->uptodate = 1; 204553b381b3SDavid Woodhouse } 204653b381b3SDavid Woodhouse if (failb != -1) { 204707e4d380SQu Wenruo sector = rbio_stripe_sector(rbio, failb, i); 204807e4d380SQu Wenruo sector->uptodate = 1; 204953b381b3SDavid Woodhouse } 205053b381b3SDavid Woodhouse } 205153b381b3SDavid Woodhouse } 205294a0b58dSIra Weiny for (stripe = rbio->real_stripes - 1; stripe >= 0; stripe--) 205394a0b58dSIra Weiny kunmap_local(unmap_array[stripe]); 205453b381b3SDavid Woodhouse } 205553b381b3SDavid Woodhouse 205658efbc9fSOmar Sandoval err = BLK_STS_OK; 205753b381b3SDavid Woodhouse cleanup: 205894a0b58dSIra Weiny kfree(unmap_array); 205994a0b58dSIra Weiny cleanup_pointers: 206053b381b3SDavid Woodhouse kfree(pointers); 206153b381b3SDavid Woodhouse 206253b381b3SDavid Woodhouse cleanup_io: 2063580c6efaSLiu Bo /* 2064580c6efaSLiu Bo * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a 2065580c6efaSLiu Bo * valid rbio which is consistent with ondisk content, thus such a 2066580c6efaSLiu Bo * valid rbio can be cached to avoid further disk reads. 
2067580c6efaSLiu Bo */ 2068580c6efaSLiu Bo if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 2069580c6efaSLiu Bo rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { 207044ac474dSLiu Bo /* 207144ac474dSLiu Bo * - In case of two failures, where rbio->failb != -1: 207244ac474dSLiu Bo * 207344ac474dSLiu Bo * Do not cache this rbio since the above read reconstruction 207444ac474dSLiu Bo * (raid6_datap_recov() or raid6_2data_recov()) may have 207544ac474dSLiu Bo * changed some content of stripes which are not identical to 207644ac474dSLiu Bo * on-disk content any more, otherwise, a later write/recover 207744ac474dSLiu Bo * may steal stripe_pages from this rbio and end up with 207844ac474dSLiu Bo * corruptions or rebuild failures. 207944ac474dSLiu Bo * 208044ac474dSLiu Bo * - In case of single failure, where rbio->failb == -1: 208144ac474dSLiu Bo * 208244ac474dSLiu Bo * Cache this rbio iff the above read reconstruction is 208352042d8eSAndrea Gelmini * executed without problems. 208444ac474dSLiu Bo */ 208544ac474dSLiu Bo if (err == BLK_STS_OK && rbio->failb < 0) 20864ae10b3aSChris Mason cache_rbio_pages(rbio); 20874ae10b3aSChris Mason else 20884ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 20894ae10b3aSChris Mason 20904246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, err); 209158efbc9fSOmar Sandoval } else if (err == BLK_STS_OK) { 209253b381b3SDavid Woodhouse rbio->faila = -1; 209353b381b3SDavid Woodhouse rbio->failb = -1; 20945a6ac9eaSMiao Xie 20955a6ac9eaSMiao Xie if (rbio->operation == BTRFS_RBIO_WRITE) 209653b381b3SDavid Woodhouse finish_rmw(rbio); 20975a6ac9eaSMiao Xie else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) 20985a6ac9eaSMiao Xie finish_parity_scrub(rbio, 0); 20995a6ac9eaSMiao Xie else 21005a6ac9eaSMiao Xie BUG(); 210153b381b3SDavid Woodhouse } else { 21024246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, err); 210353b381b3SDavid Woodhouse } 210453b381b3SDavid Woodhouse } 210553b381b3SDavid Woodhouse 210653b381b3SDavid Woodhouse /* 210753b381b3SDavid Woodhouse * This is called only for stripes we've read from disk to 210853b381b3SDavid Woodhouse * reconstruct the parity. 210953b381b3SDavid Woodhouse */ 21104246a0b6SChristoph Hellwig static void raid_recover_end_io(struct bio *bio) 211153b381b3SDavid Woodhouse { 211253b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 211353b381b3SDavid Woodhouse 211453b381b3SDavid Woodhouse /* 211553b381b3SDavid Woodhouse * we only read stripe pages off the disk, set them 211653b381b3SDavid Woodhouse * up to date if there were no errors 211753b381b3SDavid Woodhouse */ 21184e4cbee9SChristoph Hellwig if (bio->bi_status) 211953b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 212053b381b3SDavid Woodhouse else 212153b381b3SDavid Woodhouse set_bio_pages_uptodate(bio); 212253b381b3SDavid Woodhouse bio_put(bio); 212353b381b3SDavid Woodhouse 2124b89e1b01SMiao Xie if (!atomic_dec_and_test(&rbio->stripes_pending)) 212553b381b3SDavid Woodhouse return; 212653b381b3SDavid Woodhouse 21274c664611SQu Wenruo if (atomic_read(&rbio->error) > rbio->bioc->max_errors) 212858efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 212953b381b3SDavid Woodhouse else 213053b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 213153b381b3SDavid Woodhouse } 213253b381b3SDavid Woodhouse 213353b381b3SDavid Woodhouse /* 213453b381b3SDavid Woodhouse * reads everything we need off the disk to reconstruct 213553b381b3SDavid Woodhouse * the parity. endio handlers trigger final reconstruction 213653b381b3SDavid Woodhouse * when the IO is done. 
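 * Stripes recorded in faila/failb are skipped and only counted in rbio->error, since reading them is exactly what failed before.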
213753b381b3SDavid Woodhouse * 213853b381b3SDavid Woodhouse * This is used both for reads from the higher layers and for 213953b381b3SDavid Woodhouse * parity construction required to finish a rmw cycle. 214053b381b3SDavid Woodhouse */ 214153b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) 214253b381b3SDavid Woodhouse { 214353b381b3SDavid Woodhouse int bios_to_read = 0; 214453b381b3SDavid Woodhouse struct bio_list bio_list; 214553b381b3SDavid Woodhouse int ret; 21463e77605dSQu Wenruo int sectornr; 214753b381b3SDavid Woodhouse int stripe; 214853b381b3SDavid Woodhouse struct bio *bio; 214953b381b3SDavid Woodhouse 215053b381b3SDavid Woodhouse bio_list_init(&bio_list); 215153b381b3SDavid Woodhouse 215253b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 215353b381b3SDavid Woodhouse if (ret) 215453b381b3SDavid Woodhouse goto cleanup; 215553b381b3SDavid Woodhouse 2156b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 215753b381b3SDavid Woodhouse 215853b381b3SDavid Woodhouse /* 21594ae10b3aSChris Mason * read everything that hasn't failed. Thanks to the 21604ae10b3aSChris Mason * stripe cache, it is possible that some or all of these 21614ae10b3aSChris Mason * pages are going to be uptodate. 216253b381b3SDavid Woodhouse */ 21632c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 21645588383eSLiu Bo if (rbio->faila == stripe || rbio->failb == stripe) { 2165b89e1b01SMiao Xie atomic_inc(&rbio->error); 216653b381b3SDavid Woodhouse continue; 21675588383eSLiu Bo } 216853b381b3SDavid Woodhouse 21693e77605dSQu Wenruo for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { 21703e77605dSQu Wenruo struct sector_ptr *sector; 217153b381b3SDavid Woodhouse 217253b381b3SDavid Woodhouse /* 217353b381b3SDavid Woodhouse * the rmw code may have already read this 217453b381b3SDavid Woodhouse * page in 217553b381b3SDavid Woodhouse */ 21763e77605dSQu Wenruo sector = rbio_stripe_sector(rbio, stripe, sectornr); 21773e77605dSQu Wenruo if (sector->uptodate) 217853b381b3SDavid Woodhouse continue; 217953b381b3SDavid Woodhouse 21803e77605dSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, 21813e77605dSQu Wenruo stripe, sectornr, rbio->stripe_len, 2182e01bf588SChristoph Hellwig REQ_OP_READ); 218353b381b3SDavid Woodhouse if (ret < 0) 218453b381b3SDavid Woodhouse goto cleanup; 218553b381b3SDavid Woodhouse } 218653b381b3SDavid Woodhouse } 218753b381b3SDavid Woodhouse 218853b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 218953b381b3SDavid Woodhouse if (!bios_to_read) { 219053b381b3SDavid Woodhouse /* 219153b381b3SDavid Woodhouse * we might have no bios to read just because the pages 219253b381b3SDavid Woodhouse * were up to date, or we might have no bios to read because 219353b381b3SDavid Woodhouse * the devices were gone. 219453b381b3SDavid Woodhouse */ 21954c664611SQu Wenruo if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) { 219653b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 2197813f8a0eSNikolay Borisov return 0; 219853b381b3SDavid Woodhouse } else { 219953b381b3SDavid Woodhouse goto cleanup; 220053b381b3SDavid Woodhouse } 220153b381b3SDavid Woodhouse } 220253b381b3SDavid Woodhouse 220353b381b3SDavid Woodhouse /* 22044c664611SQu Wenruo * The bioc may be freed once we submit the last bio. Make sure not to 22054c664611SQu Wenruo * touch it after that. 
220653b381b3SDavid Woodhouse */ 2207b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bios_to_read); 2208bf28a605SNikolay Borisov while ((bio = bio_list_pop(&bio_list))) { 220953b381b3SDavid Woodhouse bio->bi_end_io = raid_recover_end_io; 221053b381b3SDavid Woodhouse 22116a258d72SQu Wenruo btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); 221253b381b3SDavid Woodhouse 22134e49ea4aSMike Christie submit_bio(bio); 221453b381b3SDavid Woodhouse } 2215813f8a0eSNikolay Borisov 221653b381b3SDavid Woodhouse return 0; 221753b381b3SDavid Woodhouse 221853b381b3SDavid Woodhouse cleanup: 2219b4ee1782SOmar Sandoval if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 2220b4ee1782SOmar Sandoval rbio->operation == BTRFS_RBIO_REBUILD_MISSING) 222158efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 2222785884fcSLiu Bo 2223785884fcSLiu Bo while ((bio = bio_list_pop(&bio_list))) 2224785884fcSLiu Bo bio_put(bio); 2225785884fcSLiu Bo 222653b381b3SDavid Woodhouse return -EIO; 222753b381b3SDavid Woodhouse } 222853b381b3SDavid Woodhouse 222953b381b3SDavid Woodhouse /* 223053b381b3SDavid Woodhouse * the main entry point for reads from the higher layers. This 223153b381b3SDavid Woodhouse * is really only called when the normal read path had a failure, 223253b381b3SDavid Woodhouse * so we assume the bio they send down corresponds to a failed part 223353b381b3SDavid Woodhouse * of the drive. 223453b381b3SDavid Woodhouse */ 22356a258d72SQu Wenruo int raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc, 2236cc353a8bSQu Wenruo u32 stripe_len, int mirror_num, int generic_io) 223753b381b3SDavid Woodhouse { 22386a258d72SQu Wenruo struct btrfs_fs_info *fs_info = bioc->fs_info; 223953b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 224053b381b3SDavid Woodhouse int ret; 224153b381b3SDavid Woodhouse 2242abad60c6SLiu Bo if (generic_io) { 22434c664611SQu Wenruo ASSERT(bioc->mirror_num == mirror_num); 2244c3a3b19bSQu Wenruo btrfs_bio(bio)->mirror_num = mirror_num; 2245abad60c6SLiu Bo } 2246abad60c6SLiu Bo 22474c664611SQu Wenruo rbio = alloc_rbio(fs_info, bioc, stripe_len); 2248af8e2d1dSMiao Xie if (IS_ERR(rbio)) { 22496e9606d2SZhao Lei if (generic_io) 22504c664611SQu Wenruo btrfs_put_bioc(bioc); 225153b381b3SDavid Woodhouse return PTR_ERR(rbio); 2252af8e2d1dSMiao Xie } 225353b381b3SDavid Woodhouse 22541b94b556SMiao Xie rbio->operation = BTRFS_RBIO_READ_REBUILD; 225553b381b3SDavid Woodhouse bio_list_add(&rbio->bio_list, bio); 22564f024f37SKent Overstreet rbio->bio_list_bytes = bio->bi_iter.bi_size; 225753b381b3SDavid Woodhouse 225853b381b3SDavid Woodhouse rbio->faila = find_logical_bio_stripe(rbio, bio); 225953b381b3SDavid Woodhouse if (rbio->faila == -1) { 22600b246afaSJeff Mahoney btrfs_warn(fs_info, 22614c664611SQu Wenruo "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bioc has map_type %llu)", 22621201b58bSDavid Sterba __func__, bio->bi_iter.bi_sector << 9, 22634c664611SQu Wenruo (u64)bio->bi_iter.bi_size, bioc->map_type); 22646e9606d2SZhao Lei if (generic_io) 22654c664611SQu Wenruo btrfs_put_bioc(bioc); 226653b381b3SDavid Woodhouse kfree(rbio); 226753b381b3SDavid Woodhouse return -EIO; 226853b381b3SDavid Woodhouse } 226953b381b3SDavid Woodhouse 22704245215dSMiao Xie if (generic_io) { 22710b246afaSJeff Mahoney btrfs_bio_counter_inc_noblocked(fs_info); 22724245215dSMiao Xie rbio->generic_bio_cnt = 1; 22734245215dSMiao Xie } else { 22744c664611SQu Wenruo btrfs_get_bioc(bioc); 22754245215dSMiao Xie } 22764245215dSMiao Xie 
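/* * Worked example for the retry logic below (a rough sketch that ignores * the faila adjustment): on a 4-stripe raid6 (two data stripes plus P * and Q), mirror_num == 3 fails the P stripe and mirror_num == 4 fails * a data stripe, forcing recovery from P+Q. */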
227753b381b3SDavid Woodhouse /* 22788810f751SLiu Bo * Loop retry: 22798810f751SLiu Bo * for 'mirror == 2', reconstruct from all other stripes. 22808810f751SLiu Bo * for 'mirror_num > 2', select a stripe to fail on every retry. 228153b381b3SDavid Woodhouse */ 22828810f751SLiu Bo if (mirror_num > 2) { 22838810f751SLiu Bo /* 22848810f751SLiu Bo * 'mirror == 3' is to fail the p stripe and 22858810f751SLiu Bo * reconstruct from the q stripe. 'mirror > 3' is to 22868810f751SLiu Bo * fail a data stripe and reconstruct from p+q stripe. 22878810f751SLiu Bo */ 22888810f751SLiu Bo rbio->failb = rbio->real_stripes - (mirror_num - 1); 22898810f751SLiu Bo ASSERT(rbio->failb > 0); 22908810f751SLiu Bo if (rbio->failb <= rbio->faila) 22918810f751SLiu Bo rbio->failb--; 22928810f751SLiu Bo } 229353b381b3SDavid Woodhouse 229453b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 229553b381b3SDavid Woodhouse 229653b381b3SDavid Woodhouse /* 229753b381b3SDavid Woodhouse * __raid56_parity_recover will end the bio with 229853b381b3SDavid Woodhouse * any errors it hits. We don't want to return 229953b381b3SDavid Woodhouse * its error value up the stack because our caller 230053b381b3SDavid Woodhouse * will end up calling bio_endio with any nonzero 230153b381b3SDavid Woodhouse * return 230253b381b3SDavid Woodhouse */ 230353b381b3SDavid Woodhouse if (ret == 0) 230453b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 230553b381b3SDavid Woodhouse /* 230653b381b3SDavid Woodhouse * our rbio has been added to the list of 230753b381b3SDavid Woodhouse * rbios that will be handled after the 230853b381b3SDavid Woodhouse * current lock owner is done 230953b381b3SDavid Woodhouse */ 231053b381b3SDavid Woodhouse return 0; 231153b381b3SDavid Woodhouse 231253b381b3SDavid Woodhouse } 231353b381b3SDavid Woodhouse 231453b381b3SDavid Woodhouse static void rmw_work(struct btrfs_work *work) 231553b381b3SDavid Woodhouse { 231653b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 231753b381b3SDavid Woodhouse 231853b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 231953b381b3SDavid Woodhouse raid56_rmw_stripe(rbio); 232053b381b3SDavid Woodhouse } 232153b381b3SDavid Woodhouse 232253b381b3SDavid Woodhouse static void read_rebuild_work(struct btrfs_work *work) 232353b381b3SDavid Woodhouse { 232453b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 232553b381b3SDavid Woodhouse 232653b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 232753b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 232853b381b3SDavid Woodhouse } 23295a6ac9eaSMiao Xie 23305a6ac9eaSMiao Xie /* 23315a6ac9eaSMiao Xie * The following code is used to scrub/replace the parity stripe 23325a6ac9eaSMiao Xie * 23334c664611SQu Wenruo * Caller must have already increased bio_counter for getting @bioc. 2334ae6529c3SQu Wenruo * 23355a6ac9eaSMiao Xie * Note: We need to make sure all the pages that are added into the scrub/replace 23365a6ac9eaSMiao Xie * raid bio are correct and won't be changed during the scrub/replace. That 23375a6ac9eaSMiao Xie * is, those pages just hold metadata or file data with checksums. 
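 * (In other words, the data sectors fed into a scrub rbio are already trusted; only the parity is in question.)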
23385a6ac9eaSMiao Xie  */
23395a6ac9eaSMiao Xie 
23406a258d72SQu Wenruo struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
23416a258d72SQu Wenruo 				struct btrfs_io_context *bioc,
2342cc353a8bSQu Wenruo 				u32 stripe_len, struct btrfs_device *scrub_dev,
23435a6ac9eaSMiao Xie 				unsigned long *dbitmap, int stripe_nsectors)
23445a6ac9eaSMiao Xie {
23456a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
23465a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
23475a6ac9eaSMiao Xie 	int i;
23485a6ac9eaSMiao Xie 
23494c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
23505a6ac9eaSMiao Xie 	if (IS_ERR(rbio))
23515a6ac9eaSMiao Xie 		return NULL;
23525a6ac9eaSMiao Xie 	bio_list_add(&rbio->bio_list, bio);
23535a6ac9eaSMiao Xie 	/*
23545a6ac9eaSMiao Xie 	 * This is a special bio which is used to hold the completion handler
23555a6ac9eaSMiao Xie 	 * and make the scrub rbio look similar to the other rbio types.
23565a6ac9eaSMiao Xie 	 */
23575a6ac9eaSMiao Xie 	ASSERT(!bio->bi_iter.bi_size);
23585a6ac9eaSMiao Xie 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
23595a6ac9eaSMiao Xie 
23609cd3a7ebSLiu Bo 	/*
23614c664611SQu Wenruo 	 * After mapping bioc with BTRFS_MAP_WRITE, the parity stripes have been
23629cd3a7ebSLiu Bo 	 * sorted to the end, so this search can start from the first parity
23639cd3a7ebSLiu Bo 	 * stripe.
23649cd3a7ebSLiu Bo 	 */
23659cd3a7ebSLiu Bo 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
23664c664611SQu Wenruo 		if (bioc->stripes[i].dev == scrub_dev) {
23675a6ac9eaSMiao Xie 			rbio->scrubp = i;
23685a6ac9eaSMiao Xie 			break;
23695a6ac9eaSMiao Xie 		}
23705a6ac9eaSMiao Xie 	}
23719cd3a7ebSLiu Bo 	ASSERT(i < rbio->real_stripes);
23725a6ac9eaSMiao Xie 
23735a6ac9eaSMiao Xie 	/* For now we only support sectorsize equal to PAGE_SIZE */
23740b246afaSJeff Mahoney 	ASSERT(fs_info->sectorsize == PAGE_SIZE);
23755a6ac9eaSMiao Xie 	ASSERT(rbio->stripe_npages == stripe_nsectors);
23765a6ac9eaSMiao Xie 	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
23775a6ac9eaSMiao Xie 
2378ae6529c3SQu Wenruo 	/*
23794c664611SQu Wenruo 	 * We have already increased bio_counter when getting bioc, record it
2380ae6529c3SQu Wenruo 	 * so we can free it at rbio_orig_end_io().
2381ae6529c3SQu Wenruo 	 */
2382ae6529c3SQu Wenruo 	rbio->generic_bio_cnt = 1;
2383ae6529c3SQu Wenruo 
23845a6ac9eaSMiao Xie 	return rbio;
23855a6ac9eaSMiao Xie }
23865a6ac9eaSMiao Xie 
2387b4ee1782SOmar Sandoval /* Used for both parity scrub and rebuilding a missing device. */
2388b4ee1782SOmar Sandoval void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2389b4ee1782SOmar Sandoval 			   u64 logical)
23905a6ac9eaSMiao Xie {
23915a6ac9eaSMiao Xie 	int stripe_offset;
23925a6ac9eaSMiao Xie 	int index;
23935a6ac9eaSMiao Xie 
23944c664611SQu Wenruo 	ASSERT(logical >= rbio->bioc->raid_map[0]);
23954c664611SQu Wenruo 	ASSERT(logical + PAGE_SIZE <= rbio->bioc->raid_map[0] +
23965a6ac9eaSMiao Xie 				rbio->stripe_len * rbio->nr_data);
23974c664611SQu Wenruo 	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
239809cbfeafSKirill A. Shutemov 	index = stripe_offset >> PAGE_SHIFT;
23995a6ac9eaSMiao Xie 	rbio->bio_pages[index] = page;
24005a6ac9eaSMiao Xie }
24015a6ac9eaSMiao Xie 
24025a6ac9eaSMiao Xie /*
24035a6ac9eaSMiao Xie  * We only scrub the parity for horizontal stripes where we have correct
24045a6ac9eaSMiao Xie  * data, so we don't need to allocate pages for all the stripes.
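 *
 * Editor's example with hypothetical numbers: with a 64K stripe_len and 4K
 * pages, stripe_npages is 16; if dbitmap has only bits 3 and 7 set, the
 * loop below allocates just stripe_pages[i * 16 + 3] and
 * stripe_pages[i * 16 + 7] for each of the real_stripes stripes, instead
 * of all 16 pages per stripe.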
24055a6ac9eaSMiao Xie  */
24065a6ac9eaSMiao Xie static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
24075a6ac9eaSMiao Xie {
24085a6ac9eaSMiao Xie 	int i;
24095a6ac9eaSMiao Xie 	int bit;
24105a6ac9eaSMiao Xie 	int index;
24115a6ac9eaSMiao Xie 	struct page *page;
24125a6ac9eaSMiao Xie 
24135a6ac9eaSMiao Xie 	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
24142c8cdd6eSMiao Xie 		for (i = 0; i < rbio->real_stripes; i++) {
24155a6ac9eaSMiao Xie 			index = i * rbio->stripe_npages + bit;
24165a6ac9eaSMiao Xie 			if (rbio->stripe_pages[index])
24175a6ac9eaSMiao Xie 				continue;
24185a6ac9eaSMiao Xie 
2419b0ee5e1eSDavid Sterba 			page = alloc_page(GFP_NOFS);
24205a6ac9eaSMiao Xie 			if (!page)
24215a6ac9eaSMiao Xie 				return -ENOMEM;
24225a6ac9eaSMiao Xie 			rbio->stripe_pages[index] = page;
24235a6ac9eaSMiao Xie 		}
24245a6ac9eaSMiao Xie 	}
2425eb357060SQu Wenruo 	index_stripe_sectors(rbio);
24265a6ac9eaSMiao Xie 	return 0;
24275a6ac9eaSMiao Xie }
24285a6ac9eaSMiao Xie 
24295a6ac9eaSMiao Xie static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
24305a6ac9eaSMiao Xie 					 int need_check)
24315a6ac9eaSMiao Xie {
24324c664611SQu Wenruo 	struct btrfs_io_context *bioc = rbio->bioc;
243346900662SQu Wenruo 	const u32 sectorsize = bioc->fs_info->sectorsize;
24341389053eSKees Cook 	void **pointers = rbio->finish_pointers;
24351389053eSKees Cook 	unsigned long *pbitmap = rbio->finish_pbitmap;
24365a6ac9eaSMiao Xie 	int nr_data = rbio->nr_data;
24375a6ac9eaSMiao Xie 	int stripe;
24383e77605dSQu Wenruo 	int sectornr;
2439c17af965SDavid Sterba 	bool has_qstripe;
244046900662SQu Wenruo 	struct sector_ptr p_sector = { 0 };
244146900662SQu Wenruo 	struct sector_ptr q_sector = { 0 };
24425a6ac9eaSMiao Xie 	struct bio_list bio_list;
24435a6ac9eaSMiao Xie 	struct bio *bio;
244476035976SMiao Xie 	int is_replace = 0;
24455a6ac9eaSMiao Xie 	int ret;
24465a6ac9eaSMiao Xie 
24475a6ac9eaSMiao Xie 	bio_list_init(&bio_list);
24485a6ac9eaSMiao Xie 
2449c17af965SDavid Sterba 	if (rbio->real_stripes - rbio->nr_data == 1)
2450c17af965SDavid Sterba 		has_qstripe = false;
2451c17af965SDavid Sterba 	else if (rbio->real_stripes - rbio->nr_data == 2)
2452c17af965SDavid Sterba 		has_qstripe = true;
2453c17af965SDavid Sterba 	else
24545a6ac9eaSMiao Xie 		BUG();
24555a6ac9eaSMiao Xie 
24564c664611SQu Wenruo 	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
245776035976SMiao Xie 		is_replace = 1;
24583e77605dSQu Wenruo 		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_nsectors);
245976035976SMiao Xie 	}
246076035976SMiao Xie 
24615a6ac9eaSMiao Xie 	/*
24625a6ac9eaSMiao Xie 	 * The higher layers (the scrubber) are unlikely to use this
24635a6ac9eaSMiao Xie 	 * area of the disk again soon, so don't cache
24645a6ac9eaSMiao Xie 	 * it.
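	 *
	 * (Editor's note, not upstream text: clearing RBIO_CACHE_READY_BIT
	 * just below is what opts this rbio out of the stripe cache, since
	 * its stripe pages are no longer marked as trustworthy for caching.)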
24655a6ac9eaSMiao Xie 	 */
24665a6ac9eaSMiao Xie 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
24675a6ac9eaSMiao Xie 
24685a6ac9eaSMiao Xie 	if (!need_check)
24695a6ac9eaSMiao Xie 		goto writeback;
24705a6ac9eaSMiao Xie 
247146900662SQu Wenruo 	p_sector.page = alloc_page(GFP_NOFS);
247246900662SQu Wenruo 	if (!p_sector.page)
24735a6ac9eaSMiao Xie 		goto cleanup;
247446900662SQu Wenruo 	p_sector.pgoff = 0;
247546900662SQu Wenruo 	p_sector.uptodate = 1;
24765a6ac9eaSMiao Xie 
2477c17af965SDavid Sterba 	if (has_qstripe) {
2478d70cef0dSIra Weiny 		/* RAID6, allocate and map temp space for the Q stripe */
247946900662SQu Wenruo 		q_sector.page = alloc_page(GFP_NOFS);
248046900662SQu Wenruo 		if (!q_sector.page) {
248146900662SQu Wenruo 			__free_page(p_sector.page);
248246900662SQu Wenruo 			p_sector.page = NULL;
24835a6ac9eaSMiao Xie 			goto cleanup;
24845a6ac9eaSMiao Xie 		}
248546900662SQu Wenruo 		q_sector.pgoff = 0;
248646900662SQu Wenruo 		q_sector.uptodate = 1;
248746900662SQu Wenruo 		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
24885a6ac9eaSMiao Xie 	}
24895a6ac9eaSMiao Xie 
24905a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
24915a6ac9eaSMiao Xie 
2492d70cef0dSIra Weiny 	/* Map the parity stripe just once */
249346900662SQu Wenruo 	pointers[nr_data] = kmap_local_page(p_sector.page);
2494d70cef0dSIra Weiny 
24953e77605dSQu Wenruo 	for_each_set_bit(sectornr, rbio->dbitmap, rbio->stripe_nsectors) {
249646900662SQu Wenruo 		struct sector_ptr *sector;
24975a6ac9eaSMiao Xie 		void *parity;
249846900662SQu Wenruo 
24995a6ac9eaSMiao Xie 		/* First collect one sector from each data stripe */
25005a6ac9eaSMiao Xie 		for (stripe = 0; stripe < nr_data; stripe++) {
250146900662SQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
250246900662SQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
250346900662SQu Wenruo 					   sector->pgoff;
25045a6ac9eaSMiao Xie 		}
25055a6ac9eaSMiao Xie 
2506c17af965SDavid Sterba 		if (has_qstripe) {
2507d70cef0dSIra Weiny 			/* RAID6, call the library function to fill in our P/Q */
250846900662SQu Wenruo 			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
25095a6ac9eaSMiao Xie 						pointers);
25105a6ac9eaSMiao Xie 		} else {
25115a6ac9eaSMiao Xie 			/* raid5 */
251246900662SQu Wenruo 			memcpy(pointers[nr_data], pointers[0], sectorsize);
251346900662SQu Wenruo 			run_xor(pointers + 1, nr_data - 1, sectorsize);
25145a6ac9eaSMiao Xie 		}
25155a6ac9eaSMiao Xie 
251601327610SNicholas D Steeves 		/* Check the parity being scrubbed and repair it if it is wrong */
251746900662SQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
251846900662SQu Wenruo 		parity = kmap_local_page(sector->page) + sector->pgoff;
251946900662SQu Wenruo 		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
252046900662SQu Wenruo 			memcpy(parity, pointers[rbio->scrubp], sectorsize);
25215a6ac9eaSMiao Xie 		else
25225a6ac9eaSMiao Xie 			/* Parity is right, no need to write it back */
25233e77605dSQu Wenruo 			bitmap_clear(rbio->dbitmap, sectornr, 1);
252458c1a35cSIra Weiny 		kunmap_local(parity);
25255a6ac9eaSMiao Xie 
252694a0b58dSIra Weiny 		for (stripe = nr_data - 1; stripe >= 0; stripe--)
252794a0b58dSIra Weiny 			kunmap_local(pointers[stripe]);
25285a6ac9eaSMiao Xie 	}
25295a6ac9eaSMiao Xie 
253094a0b58dSIra Weiny 	kunmap_local(pointers[nr_data]);
253146900662SQu Wenruo 	__free_page(p_sector.page);
253246900662SQu Wenruo 	p_sector.page = NULL;
253346900662SQu Wenruo 	if (q_sector.page) {
253494a0b58dSIra Weiny 		kunmap_local(pointers[rbio->real_stripes - 1]);
253546900662SQu Wenruo 		__free_page(q_sector.page);
253646900662SQu Wenruo 		q_sector.page = NULL;
2537d70cef0dSIra
Weiny } 25385a6ac9eaSMiao Xie 25395a6ac9eaSMiao Xie writeback: 25405a6ac9eaSMiao Xie /* 25415a6ac9eaSMiao Xie * time to start writing. Make bios for everything from the 25425a6ac9eaSMiao Xie * higher layers (the bio_list in our rbio) and our p/q. Ignore 25435a6ac9eaSMiao Xie * everything else. 25445a6ac9eaSMiao Xie */ 25453e77605dSQu Wenruo for_each_set_bit(sectornr, rbio->dbitmap, rbio->stripe_nsectors) { 25463e77605dSQu Wenruo struct sector_ptr *sector; 25475a6ac9eaSMiao Xie 25483e77605dSQu Wenruo sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); 25493e77605dSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp, 25503e77605dSQu Wenruo sectornr, rbio->stripe_len, REQ_OP_WRITE); 25515a6ac9eaSMiao Xie if (ret) 25525a6ac9eaSMiao Xie goto cleanup; 25535a6ac9eaSMiao Xie } 25545a6ac9eaSMiao Xie 255576035976SMiao Xie if (!is_replace) 255676035976SMiao Xie goto submit_write; 255776035976SMiao Xie 25583e77605dSQu Wenruo for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) { 25593e77605dSQu Wenruo struct sector_ptr *sector; 256076035976SMiao Xie 25613e77605dSQu Wenruo sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); 25623e77605dSQu Wenruo ret = rbio_add_io_sector(rbio, &bio_list, sector, 25634c664611SQu Wenruo bioc->tgtdev_map[rbio->scrubp], 25643e77605dSQu Wenruo sectornr, rbio->stripe_len, REQ_OP_WRITE); 256576035976SMiao Xie if (ret) 256676035976SMiao Xie goto cleanup; 256776035976SMiao Xie } 256876035976SMiao Xie 256976035976SMiao Xie submit_write: 25705a6ac9eaSMiao Xie nr_data = bio_list_size(&bio_list); 25715a6ac9eaSMiao Xie if (!nr_data) { 25725a6ac9eaSMiao Xie /* Every parity is right */ 257358efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_OK); 25745a6ac9eaSMiao Xie return; 25755a6ac9eaSMiao Xie } 25765a6ac9eaSMiao Xie 25775a6ac9eaSMiao Xie atomic_set(&rbio->stripes_pending, nr_data); 25785a6ac9eaSMiao Xie 2579bf28a605SNikolay Borisov while ((bio = bio_list_pop(&bio_list))) { 2580a6111d11SZhao Lei bio->bi_end_io = raid_write_end_io; 25814e49ea4aSMike Christie 25824e49ea4aSMike Christie submit_bio(bio); 25835a6ac9eaSMiao Xie } 25845a6ac9eaSMiao Xie return; 25855a6ac9eaSMiao Xie 25865a6ac9eaSMiao Xie cleanup: 258758efbc9fSOmar Sandoval rbio_orig_end_io(rbio, BLK_STS_IOERR); 2588785884fcSLiu Bo 2589785884fcSLiu Bo while ((bio = bio_list_pop(&bio_list))) 2590785884fcSLiu Bo bio_put(bio); 25915a6ac9eaSMiao Xie } 25925a6ac9eaSMiao Xie 25935a6ac9eaSMiao Xie static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) 25945a6ac9eaSMiao Xie { 25955a6ac9eaSMiao Xie if (stripe >= 0 && stripe < rbio->nr_data) 25965a6ac9eaSMiao Xie return 1; 25975a6ac9eaSMiao Xie return 0; 25985a6ac9eaSMiao Xie } 25995a6ac9eaSMiao Xie 26005a6ac9eaSMiao Xie /* 26015a6ac9eaSMiao Xie * While we're doing the parity check and repair, we could have errors 26025a6ac9eaSMiao Xie * in reading pages off the disk. This checks for errors and if we're 26035a6ac9eaSMiao Xie * not able to read the page it'll trigger parity reconstruction. 
26045a6ac9eaSMiao Xie  * The parity scrub will be finished after we've reconstructed the failed
26055a6ac9eaSMiao Xie  * stripes.
26065a6ac9eaSMiao Xie  */
26075a6ac9eaSMiao Xie static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
26085a6ac9eaSMiao Xie {
26094c664611SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
26105a6ac9eaSMiao Xie 		goto cleanup;
26115a6ac9eaSMiao Xie 
26125a6ac9eaSMiao Xie 	if (rbio->faila >= 0 || rbio->failb >= 0) {
26135a6ac9eaSMiao Xie 		int dfail = 0, failp = -1;
26145a6ac9eaSMiao Xie 
26155a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->faila))
26165a6ac9eaSMiao Xie 			dfail++;
26175a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->faila))
26185a6ac9eaSMiao Xie 			failp = rbio->faila;
26195a6ac9eaSMiao Xie 
26205a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->failb))
26215a6ac9eaSMiao Xie 			dfail++;
26225a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->failb))
26235a6ac9eaSMiao Xie 			failp = rbio->failb;
26245a6ac9eaSMiao Xie 
26255a6ac9eaSMiao Xie 		/*
26265a6ac9eaSMiao Xie 		 * Because we can not use the parity being scrubbed to repair
26275a6ac9eaSMiao Xie 		 * the data, our repair capability is reduced.
26285a6ac9eaSMiao Xie 		 * (In the case of RAID5, we can not repair anything.)
26295a6ac9eaSMiao Xie 		 */
26304c664611SQu Wenruo 		if (dfail > rbio->bioc->max_errors - 1)
26315a6ac9eaSMiao Xie 			goto cleanup;
26325a6ac9eaSMiao Xie 
26335a6ac9eaSMiao Xie 		/*
26345a6ac9eaSMiao Xie 		 * If all the data is good, then only the parity is bad and
26355a6ac9eaSMiao Xie 		 * we just repair the parity.
26365a6ac9eaSMiao Xie 		 */
26375a6ac9eaSMiao Xie 		if (dfail == 0) {
26385a6ac9eaSMiao Xie 			finish_parity_scrub(rbio, 0);
26395a6ac9eaSMiao Xie 			return;
26405a6ac9eaSMiao Xie 		}
26415a6ac9eaSMiao Xie 
26425a6ac9eaSMiao Xie 		/*
26435a6ac9eaSMiao Xie 		 * At this point we have one corrupted data stripe and one
26445a6ac9eaSMiao Xie 		 * corrupted parity on RAID6. If the corrupted parity is the
264501327610SNicholas D Steeves 		 * one being scrubbed, we can luckily use the other parity to
26465a6ac9eaSMiao Xie 		 * repair the data; otherwise we can not repair the data stripe.
26475a6ac9eaSMiao Xie 		 */
26485a6ac9eaSMiao Xie 		if (failp != rbio->scrubp)
26495a6ac9eaSMiao Xie 			goto cleanup;
26505a6ac9eaSMiao Xie 
26515a6ac9eaSMiao Xie 		__raid_recover_end_io(rbio);
26525a6ac9eaSMiao Xie 	} else {
26535a6ac9eaSMiao Xie 		finish_parity_scrub(rbio, 1);
26545a6ac9eaSMiao Xie 	}
26555a6ac9eaSMiao Xie 	return;
26565a6ac9eaSMiao Xie 
26575a6ac9eaSMiao Xie cleanup:
265858efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
26595a6ac9eaSMiao Xie }
26605a6ac9eaSMiao Xie 
26615a6ac9eaSMiao Xie /*
26625a6ac9eaSMiao Xie  * End io for the read phase of the parity scrub cycle. All the bios here
26635a6ac9eaSMiao Xie  * are physical stripe bios we've read from the disk so we can recalculate
26645a6ac9eaSMiao Xie  * the parity of the stripe.
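 *
 * (Editor's note, not upstream text: each of those bios carries the rbio in
 * bi_private, and stripes_pending was set to the number of bios in flight,
 * so only the completion that drops stripes_pending to zero may continue;
 * that is what the atomic_dec_and_test() below implements.)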
26655a6ac9eaSMiao Xie  *
26665a6ac9eaSMiao Xie  * This will usually kick off finish_parity_scrub once all the bios are read
26675a6ac9eaSMiao Xie  * in, but it may trigger parity reconstruction if we had any errors along the way.
26685a6ac9eaSMiao Xie  */
26694246a0b6SChristoph Hellwig static void raid56_parity_scrub_end_io(struct bio *bio)
26705a6ac9eaSMiao Xie {
26715a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio = bio->bi_private;
26725a6ac9eaSMiao Xie 
26734e4cbee9SChristoph Hellwig 	if (bio->bi_status)
26745a6ac9eaSMiao Xie 		fail_bio_stripe(rbio, bio);
26755a6ac9eaSMiao Xie 	else
26765a6ac9eaSMiao Xie 		set_bio_pages_uptodate(bio);
26775a6ac9eaSMiao Xie 
26785a6ac9eaSMiao Xie 	bio_put(bio);
26795a6ac9eaSMiao Xie 
26805a6ac9eaSMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
26815a6ac9eaSMiao Xie 		return;
26825a6ac9eaSMiao Xie 
26835a6ac9eaSMiao Xie 	/*
26845a6ac9eaSMiao Xie 	 * This will normally call finish_parity_scrub to start our write,
26855a6ac9eaSMiao Xie 	 * but if there are any failed stripes we'll reconstruct
26865a6ac9eaSMiao Xie 	 * from parity first.
26875a6ac9eaSMiao Xie 	 */
26885a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
26895a6ac9eaSMiao Xie }
26905a6ac9eaSMiao Xie 
26915a6ac9eaSMiao Xie static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
26925a6ac9eaSMiao Xie {
26935a6ac9eaSMiao Xie 	int bios_to_read = 0;
26945a6ac9eaSMiao Xie 	struct bio_list bio_list;
26955a6ac9eaSMiao Xie 	int ret;
26963e77605dSQu Wenruo 	int sectornr;
26975a6ac9eaSMiao Xie 	int stripe;
26985a6ac9eaSMiao Xie 	struct bio *bio;
26995a6ac9eaSMiao Xie 
2700785884fcSLiu Bo 	bio_list_init(&bio_list);
2701785884fcSLiu Bo 
27025a6ac9eaSMiao Xie 	ret = alloc_rbio_essential_pages(rbio);
27035a6ac9eaSMiao Xie 	if (ret)
27045a6ac9eaSMiao Xie 		goto cleanup;
27055a6ac9eaSMiao Xie 
27065a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
27075a6ac9eaSMiao Xie 	/*
27085a6ac9eaSMiao Xie 	 * Build a list of bios to read all the missing parts of this
27095a6ac9eaSMiao Xie 	 * stripe.
27105a6ac9eaSMiao Xie 	 */
27112c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
27123e77605dSQu Wenruo 		for_each_set_bit(sectornr, rbio->dbitmap, rbio->stripe_nsectors) {
27133e77605dSQu Wenruo 			struct sector_ptr *sector;
27145a6ac9eaSMiao Xie 			/*
27153e77605dSQu Wenruo 			 * We want to find all the sectors missing from the
27163e77605dSQu Wenruo 			 * rbio and read them from the disk. If sector_in_rbio()
27173e77605dSQu Wenruo 			 * finds a sector in the bio list we don't need to read
27183e77605dSQu Wenruo 			 * it off the stripe.
27195a6ac9eaSMiao Xie 			 */
27203e77605dSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
27213e77605dSQu Wenruo 			if (sector)
27225a6ac9eaSMiao Xie 				continue;
27235a6ac9eaSMiao Xie 
27243e77605dSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
27255a6ac9eaSMiao Xie 			/*
27263e77605dSQu Wenruo 			 * The bio cache may have handed us an uptodate sector.
27273e77605dSQu Wenruo 			 * If so, be happy and use it.
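			 * (Editor's note, not upstream text: an uptodate
			 * sector here typically comes from the stripe cache,
			 * i.e. pages kept around from an earlier RMW on this
			 * stripe, so re-reading it would be wasted I/O.)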
27285a6ac9eaSMiao Xie 			 */
27293e77605dSQu Wenruo 			if (sector->uptodate)
27305a6ac9eaSMiao Xie 				continue;
27315a6ac9eaSMiao Xie 
27323e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector,
27333e77605dSQu Wenruo 						 stripe, sectornr, rbio->stripe_len,
27343e77605dSQu Wenruo 						 REQ_OP_READ);
27355a6ac9eaSMiao Xie 			if (ret)
27365a6ac9eaSMiao Xie 				goto cleanup;
27375a6ac9eaSMiao Xie 		}
27385a6ac9eaSMiao Xie 	}
27395a6ac9eaSMiao Xie 
27405a6ac9eaSMiao Xie 	bios_to_read = bio_list_size(&bio_list);
27415a6ac9eaSMiao Xie 	if (!bios_to_read) {
27425a6ac9eaSMiao Xie 		/*
27435a6ac9eaSMiao Xie 		 * This can happen if others have merged with us; it means
27445a6ac9eaSMiao Xie 		 * there is nothing left to read.
27455a6ac9eaSMiao Xie 		 * But if there are missing devices it may not be
27465a6ac9eaSMiao Xie 		 * safe to do the full stripe write yet.
27475a6ac9eaSMiao Xie 		 */
27485a6ac9eaSMiao Xie 		goto finish;
27495a6ac9eaSMiao Xie 	}
27505a6ac9eaSMiao Xie 
27515a6ac9eaSMiao Xie 	/*
27524c664611SQu Wenruo 	 * The bioc may be freed once we submit the last bio. Make sure not to
27534c664611SQu Wenruo 	 * touch it after that.
27545a6ac9eaSMiao Xie 	 */
27555a6ac9eaSMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
2756bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
27575a6ac9eaSMiao Xie 		bio->bi_end_io = raid56_parity_scrub_end_io;
27585a6ac9eaSMiao Xie 
27596a258d72SQu Wenruo 		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
27605a6ac9eaSMiao Xie 
27614e49ea4aSMike Christie 		submit_bio(bio);
27625a6ac9eaSMiao Xie 	}
27635a6ac9eaSMiao Xie 	/* The actual write will happen once the reads are done */
27645a6ac9eaSMiao Xie 	return;
27655a6ac9eaSMiao Xie 
27665a6ac9eaSMiao Xie cleanup:
276758efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2768785884fcSLiu Bo 
2769785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2770785884fcSLiu Bo 		bio_put(bio);
2771785884fcSLiu Bo 
27725a6ac9eaSMiao Xie 	return;
27735a6ac9eaSMiao Xie 
27745a6ac9eaSMiao Xie finish:
27755a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
27765a6ac9eaSMiao Xie }
27775a6ac9eaSMiao Xie 
27785a6ac9eaSMiao Xie static void scrub_parity_work(struct btrfs_work *work)
27795a6ac9eaSMiao Xie {
27805a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
27815a6ac9eaSMiao Xie 
27825a6ac9eaSMiao Xie 	rbio = container_of(work, struct btrfs_raid_bio, work);
27835a6ac9eaSMiao Xie 	raid56_parity_scrub_stripe(rbio);
27845a6ac9eaSMiao Xie }
27855a6ac9eaSMiao Xie 
27865a6ac9eaSMiao Xie void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
27875a6ac9eaSMiao Xie {
27885a6ac9eaSMiao Xie 	if (!lock_stripe_add(rbio))
2789a81b747dSDavid Sterba 		start_async_work(rbio, scrub_parity_work);
27905a6ac9eaSMiao Xie }
2791b4ee1782SOmar Sandoval 
2792b4ee1782SOmar Sandoval /* The following code is used for dev replace of a missing RAID 5/6 device.
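 *
 * Editor's sketch of the expected caller sequence (an assumption from the
 * API shape, cf. the scrub code which drives it):
 *
 *	rbio = raid56_alloc_missing_rbio(bio, bioc, length);
 *	(for each page recovered from the remaining copies)
 *	raid56_add_scrub_pages(rbio, page, logical);
 *	raid56_submit_missing_rbio(rbio);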
 */
2793b4ee1782SOmar Sandoval 
2794b4ee1782SOmar Sandoval struct btrfs_raid_bio *
27956a258d72SQu Wenruo raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc,
27966a258d72SQu Wenruo 			  u64 length)
2797b4ee1782SOmar Sandoval {
27986a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
2799b4ee1782SOmar Sandoval 	struct btrfs_raid_bio *rbio;
2800b4ee1782SOmar Sandoval 
28014c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, length);
2802b4ee1782SOmar Sandoval 	if (IS_ERR(rbio))
2803b4ee1782SOmar Sandoval 		return NULL;
2804b4ee1782SOmar Sandoval 
2805b4ee1782SOmar Sandoval 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2806b4ee1782SOmar Sandoval 	bio_list_add(&rbio->bio_list, bio);
2807b4ee1782SOmar Sandoval 	/*
2808b4ee1782SOmar Sandoval 	 * This is a special bio which is used to hold the completion handler
2809b4ee1782SOmar Sandoval 	 * and make the rebuild rbio look similar to the other rbio types.
2810b4ee1782SOmar Sandoval 	 */
2811b4ee1782SOmar Sandoval 	ASSERT(!bio->bi_iter.bi_size);
2812b4ee1782SOmar Sandoval 
2813b4ee1782SOmar Sandoval 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2814b4ee1782SOmar Sandoval 	if (rbio->faila == -1) {
2815b4ee1782SOmar Sandoval 		BUG();
2816b4ee1782SOmar Sandoval 		kfree(rbio);
2817b4ee1782SOmar Sandoval 		return NULL;
2818b4ee1782SOmar Sandoval 	}
2819b4ee1782SOmar Sandoval 
2820ae6529c3SQu Wenruo 	/*
28214c664611SQu Wenruo 	 * When we got the bioc, we had already increased bio_counter; record
2822ae6529c3SQu Wenruo 	 * it so we can free it at rbio_orig_end_io().
2823ae6529c3SQu Wenruo 	 */
2824ae6529c3SQu Wenruo 	rbio->generic_bio_cnt = 1;
2825ae6529c3SQu Wenruo 
2826b4ee1782SOmar Sandoval 	return rbio;
2827b4ee1782SOmar Sandoval }
2828b4ee1782SOmar Sandoval 
2829b4ee1782SOmar Sandoval void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2830b4ee1782SOmar Sandoval {
2831b4ee1782SOmar Sandoval 	if (!lock_stripe_add(rbio))
2832e66d8d5aSDavid Sterba 		start_async_work(rbio, read_rebuild_work);
2833b4ee1782SOmar Sandoval }
2834
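/*
 * Editor's illustration (not part of the kernel source): a minimal,
 * self-contained userspace sketch of the RAID5 parity invariant that the
 * raid5 branch of finish_parity_scrub() relies on (the memcpy() plus
 * run_xor() pair): P = D0 ^ D1 ^ ... ^ Dn-1, so any single lost data
 * sector can be rebuilt by XOR-ing P with the surviving sectors. All
 * names and sizes below are hypothetical.
 */
#include <assert.h>
#include <string.h>

#define NR_DATA    4
#define SECTORSIZE 16

/* Compute P the way the raid5 branch does: copy D0, then XOR the rest in. */
static void gen_p(unsigned char p[SECTORSIZE],
		  unsigned char d[NR_DATA][SECTORSIZE])
{
	memcpy(p, d[0], SECTORSIZE);
	for (int i = 1; i < NR_DATA; i++)
		for (int j = 0; j < SECTORSIZE; j++)
			p[j] ^= d[i][j];
}

int main(void)
{
	unsigned char d[NR_DATA][SECTORSIZE] = { "data0", "data1", "data2", "data3" };
	unsigned char p[SECTORSIZE], rebuilt[SECTORSIZE];
	int lost = 2;	/* pretend data stripe 2 failed */

	gen_p(p, d);

	/* Rebuild the lost sector: D2 = P ^ D0 ^ D1 ^ D3 */
	memcpy(rebuilt, p, SECTORSIZE);
	for (int i = 0; i < NR_DATA; i++) {
		if (i == lost)
			continue;
		for (int j = 0; j < SECTORSIZE; j++)
			rebuilt[j] ^= d[i][j];
	}
	assert(memcmp(rebuilt, d[lost], SECTORSIZE) == 0);
	return 0;
}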