153b381b3SDavid Woodhouse /* 253b381b3SDavid Woodhouse * Copyright (C) 2012 Fusion-io All rights reserved. 353b381b3SDavid Woodhouse * Copyright (C) 2012 Intel Corp. All rights reserved. 453b381b3SDavid Woodhouse * 553b381b3SDavid Woodhouse * This program is free software; you can redistribute it and/or 653b381b3SDavid Woodhouse * modify it under the terms of the GNU General Public 753b381b3SDavid Woodhouse * License v2 as published by the Free Software Foundation. 853b381b3SDavid Woodhouse * 953b381b3SDavid Woodhouse * This program is distributed in the hope that it will be useful, 1053b381b3SDavid Woodhouse * but WITHOUT ANY WARRANTY; without even the implied warranty of 1153b381b3SDavid Woodhouse * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1253b381b3SDavid Woodhouse * General Public License for more details. 1353b381b3SDavid Woodhouse * 1453b381b3SDavid Woodhouse * You should have received a copy of the GNU General Public 1553b381b3SDavid Woodhouse * License along with this program; if not, write to the 1653b381b3SDavid Woodhouse * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 1753b381b3SDavid Woodhouse * Boston, MA 021110-1307, USA. 1853b381b3SDavid Woodhouse */ 1953b381b3SDavid Woodhouse #include <linux/sched.h> 2053b381b3SDavid Woodhouse #include <linux/wait.h> 2153b381b3SDavid Woodhouse #include <linux/bio.h> 2253b381b3SDavid Woodhouse #include <linux/slab.h> 2353b381b3SDavid Woodhouse #include <linux/buffer_head.h> 2453b381b3SDavid Woodhouse #include <linux/blkdev.h> 2553b381b3SDavid Woodhouse #include <linux/random.h> 2653b381b3SDavid Woodhouse #include <linux/iocontext.h> 2753b381b3SDavid Woodhouse #include <linux/capability.h> 2853b381b3SDavid Woodhouse #include <linux/ratelimit.h> 2953b381b3SDavid Woodhouse #include <linux/kthread.h> 3053b381b3SDavid Woodhouse #include <linux/raid/pq.h> 3153b381b3SDavid Woodhouse #include <linux/hash.h> 3253b381b3SDavid Woodhouse #include <linux/list_sort.h> 3353b381b3SDavid Woodhouse #include <linux/raid/xor.h> 34818e010bSDavid Sterba #include <linux/mm.h> 3553b381b3SDavid Woodhouse #include <asm/div64.h> 3653b381b3SDavid Woodhouse #include "ctree.h" 3753b381b3SDavid Woodhouse #include "extent_map.h" 3853b381b3SDavid Woodhouse #include "disk-io.h" 3953b381b3SDavid Woodhouse #include "transaction.h" 4053b381b3SDavid Woodhouse #include "print-tree.h" 4153b381b3SDavid Woodhouse #include "volumes.h" 4253b381b3SDavid Woodhouse #include "raid56.h" 4353b381b3SDavid Woodhouse #include "async-thread.h" 4453b381b3SDavid Woodhouse #include "check-integrity.h" 4553b381b3SDavid Woodhouse #include "rcu-string.h" 4653b381b3SDavid Woodhouse 4753b381b3SDavid Woodhouse /* set when additional merges to this rbio are not allowed */ 4853b381b3SDavid Woodhouse #define RBIO_RMW_LOCKED_BIT 1 4953b381b3SDavid Woodhouse 504ae10b3aSChris Mason /* 514ae10b3aSChris Mason * set when this rbio is sitting in the hash, but it is just a cache 524ae10b3aSChris Mason * of past RMW 534ae10b3aSChris Mason */ 544ae10b3aSChris Mason #define RBIO_CACHE_BIT 2 554ae10b3aSChris Mason 564ae10b3aSChris Mason /* 574ae10b3aSChris Mason * set when it is safe to trust the stripe_pages for caching 584ae10b3aSChris Mason */ 594ae10b3aSChris Mason #define RBIO_CACHE_READY_BIT 3 604ae10b3aSChris Mason 614ae10b3aSChris Mason #define RBIO_CACHE_SIZE 1024 624ae10b3aSChris Mason 631b94b556SMiao Xie enum btrfs_rbio_ops { 64b4ee1782SOmar Sandoval BTRFS_RBIO_WRITE, 65b4ee1782SOmar Sandoval BTRFS_RBIO_READ_REBUILD, 66b4ee1782SOmar Sandoval BTRFS_RBIO_PARITY_SCRUB, 
67b4ee1782SOmar Sandoval BTRFS_RBIO_REBUILD_MISSING, 681b94b556SMiao Xie }; 691b94b556SMiao Xie 7053b381b3SDavid Woodhouse struct btrfs_raid_bio { 7153b381b3SDavid Woodhouse struct btrfs_fs_info *fs_info; 7253b381b3SDavid Woodhouse struct btrfs_bio *bbio; 7353b381b3SDavid Woodhouse 7453b381b3SDavid Woodhouse /* while we're doing rmw on a stripe 7553b381b3SDavid Woodhouse * we put it into a hash table so we can 7653b381b3SDavid Woodhouse * lock the stripe and merge more rbios 7753b381b3SDavid Woodhouse * into it. 7853b381b3SDavid Woodhouse */ 7953b381b3SDavid Woodhouse struct list_head hash_list; 8053b381b3SDavid Woodhouse 8153b381b3SDavid Woodhouse /* 824ae10b3aSChris Mason * LRU list for the stripe cache 834ae10b3aSChris Mason */ 844ae10b3aSChris Mason struct list_head stripe_cache; 854ae10b3aSChris Mason 864ae10b3aSChris Mason /* 8753b381b3SDavid Woodhouse * for scheduling work in the helper threads 8853b381b3SDavid Woodhouse */ 8953b381b3SDavid Woodhouse struct btrfs_work work; 9053b381b3SDavid Woodhouse 9153b381b3SDavid Woodhouse /* 9253b381b3SDavid Woodhouse * bio list and bio_list_lock are used 9353b381b3SDavid Woodhouse * to add more bios into the stripe 9453b381b3SDavid Woodhouse * in hopes of avoiding the full rmw 9553b381b3SDavid Woodhouse */ 9653b381b3SDavid Woodhouse struct bio_list bio_list; 9753b381b3SDavid Woodhouse spinlock_t bio_list_lock; 9853b381b3SDavid Woodhouse 996ac0f488SChris Mason /* also protected by the bio_list_lock, the 1006ac0f488SChris Mason * plug list is used by the plugging code 1016ac0f488SChris Mason * to collect partial bios while plugged. The 1026ac0f488SChris Mason * stripe locking code also uses it to hand off 10353b381b3SDavid Woodhouse * the stripe lock to the next pending IO 10453b381b3SDavid Woodhouse */ 10553b381b3SDavid Woodhouse struct list_head plug_list; 10653b381b3SDavid Woodhouse 10753b381b3SDavid Woodhouse /* 10853b381b3SDavid Woodhouse * flags that tell us if it is safe to 10953b381b3SDavid Woodhouse * merge with this bio 11053b381b3SDavid Woodhouse */ 11153b381b3SDavid Woodhouse unsigned long flags; 11253b381b3SDavid Woodhouse 11353b381b3SDavid Woodhouse /* size of each individual stripe on disk */ 11453b381b3SDavid Woodhouse int stripe_len; 11553b381b3SDavid Woodhouse 11653b381b3SDavid Woodhouse /* number of data stripes (no p/q) */ 11753b381b3SDavid Woodhouse int nr_data; 11853b381b3SDavid Woodhouse 1192c8cdd6eSMiao Xie int real_stripes; 1202c8cdd6eSMiao Xie 1215a6ac9eaSMiao Xie int stripe_npages; 12253b381b3SDavid Woodhouse /* 12353b381b3SDavid Woodhouse * set if we're doing a parity rebuild 12453b381b3SDavid Woodhouse * for a read from higher up, which is handled 12553b381b3SDavid Woodhouse * differently from a parity rebuild as part of 12653b381b3SDavid Woodhouse * rmw 12753b381b3SDavid Woodhouse */ 1281b94b556SMiao Xie enum btrfs_rbio_ops operation; 12953b381b3SDavid Woodhouse 13053b381b3SDavid Woodhouse /* first bad stripe */ 13153b381b3SDavid Woodhouse int faila; 13253b381b3SDavid Woodhouse 13353b381b3SDavid Woodhouse /* second bad stripe (for raid6 use) */ 13453b381b3SDavid Woodhouse int failb; 13553b381b3SDavid Woodhouse 1365a6ac9eaSMiao Xie int scrubp; 13753b381b3SDavid Woodhouse /* 13853b381b3SDavid Woodhouse * number of pages needed to represent the full 13953b381b3SDavid Woodhouse * stripe 14053b381b3SDavid Woodhouse */ 14153b381b3SDavid Woodhouse int nr_pages; 14253b381b3SDavid Woodhouse 14353b381b3SDavid Woodhouse /* 14453b381b3SDavid Woodhouse * size of all the bios in the bio_list. 
This 14553b381b3SDavid Woodhouse * helps us decide if the rbio maps to a full 14653b381b3SDavid Woodhouse * stripe or not 14753b381b3SDavid Woodhouse */ 14853b381b3SDavid Woodhouse int bio_list_bytes; 14953b381b3SDavid Woodhouse 1504245215dSMiao Xie int generic_bio_cnt; 1514245215dSMiao Xie 152dec95574SElena Reshetova refcount_t refs; 15353b381b3SDavid Woodhouse 154b89e1b01SMiao Xie atomic_t stripes_pending; 155b89e1b01SMiao Xie 156b89e1b01SMiao Xie atomic_t error; 15753b381b3SDavid Woodhouse /* 15853b381b3SDavid Woodhouse * these are two arrays of pointers. We allocate the 15953b381b3SDavid Woodhouse * rbio big enough to hold them both and setup their 16053b381b3SDavid Woodhouse * locations when the rbio is allocated 16153b381b3SDavid Woodhouse */ 16253b381b3SDavid Woodhouse 16353b381b3SDavid Woodhouse /* pointers to pages that we allocated for 16453b381b3SDavid Woodhouse * reading/writing stripes directly from the disk (including P/Q) 16553b381b3SDavid Woodhouse */ 16653b381b3SDavid Woodhouse struct page **stripe_pages; 16753b381b3SDavid Woodhouse 16853b381b3SDavid Woodhouse /* 16953b381b3SDavid Woodhouse * pointers to the pages in the bio_list. Stored 17053b381b3SDavid Woodhouse * here for faster lookup 17153b381b3SDavid Woodhouse */ 17253b381b3SDavid Woodhouse struct page **bio_pages; 1735a6ac9eaSMiao Xie 1745a6ac9eaSMiao Xie /* 1755a6ac9eaSMiao Xie * bitmap to record which horizontal stripe has data 1765a6ac9eaSMiao Xie */ 1775a6ac9eaSMiao Xie unsigned long *dbitmap; 17853b381b3SDavid Woodhouse }; 17953b381b3SDavid Woodhouse 18053b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio); 18153b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio); 18253b381b3SDavid Woodhouse static void rmw_work(struct btrfs_work *work); 18353b381b3SDavid Woodhouse static void read_rebuild_work(struct btrfs_work *work); 18453b381b3SDavid Woodhouse static void async_rmw_stripe(struct btrfs_raid_bio *rbio); 18553b381b3SDavid Woodhouse static void async_read_rebuild(struct btrfs_raid_bio *rbio); 18653b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio); 18753b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed); 18853b381b3SDavid Woodhouse static void __free_raid_bio(struct btrfs_raid_bio *rbio); 18953b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio); 19053b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio); 19153b381b3SDavid Woodhouse 1925a6ac9eaSMiao Xie static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, 1935a6ac9eaSMiao Xie int need_check); 1945a6ac9eaSMiao Xie static void async_scrub_parity(struct btrfs_raid_bio *rbio); 1955a6ac9eaSMiao Xie 19653b381b3SDavid Woodhouse /* 19753b381b3SDavid Woodhouse * the stripe hash table is used for locking, and to collect 19853b381b3SDavid Woodhouse * bios in hopes of making a full stripe 19953b381b3SDavid Woodhouse */ 20053b381b3SDavid Woodhouse int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) 20153b381b3SDavid Woodhouse { 20253b381b3SDavid Woodhouse struct btrfs_stripe_hash_table *table; 20353b381b3SDavid Woodhouse struct btrfs_stripe_hash_table *x; 20453b381b3SDavid Woodhouse struct btrfs_stripe_hash *cur; 20553b381b3SDavid Woodhouse struct btrfs_stripe_hash *h; 20653b381b3SDavid Woodhouse int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS; 20753b381b3SDavid Woodhouse int i; 20883c8266aSDavid Sterba int table_size; 20953b381b3SDavid 
Woodhouse 21053b381b3SDavid Woodhouse if (info->stripe_hash_table) 21153b381b3SDavid Woodhouse return 0; 21253b381b3SDavid Woodhouse 21383c8266aSDavid Sterba /* 21483c8266aSDavid Sterba * The table is large, starting with order 4 and can go as high as 21583c8266aSDavid Sterba * order 7 in case lock debugging is turned on. 21683c8266aSDavid Sterba * 21783c8266aSDavid Sterba * Try harder to allocate and fallback to vmalloc to lower the chance 21883c8266aSDavid Sterba * of a failing mount. 21983c8266aSDavid Sterba */ 22083c8266aSDavid Sterba table_size = sizeof(*table) + sizeof(*h) * num_entries; 221818e010bSDavid Sterba table = kvzalloc(table_size, GFP_KERNEL); 22253b381b3SDavid Woodhouse if (!table) 22353b381b3SDavid Woodhouse return -ENOMEM; 22453b381b3SDavid Woodhouse 2254ae10b3aSChris Mason spin_lock_init(&table->cache_lock); 2264ae10b3aSChris Mason INIT_LIST_HEAD(&table->stripe_cache); 2274ae10b3aSChris Mason 22853b381b3SDavid Woodhouse h = table->table; 22953b381b3SDavid Woodhouse 23053b381b3SDavid Woodhouse for (i = 0; i < num_entries; i++) { 23153b381b3SDavid Woodhouse cur = h + i; 23253b381b3SDavid Woodhouse INIT_LIST_HEAD(&cur->hash_list); 23353b381b3SDavid Woodhouse spin_lock_init(&cur->lock); 23453b381b3SDavid Woodhouse init_waitqueue_head(&cur->wait); 23553b381b3SDavid Woodhouse } 23653b381b3SDavid Woodhouse 23753b381b3SDavid Woodhouse x = cmpxchg(&info->stripe_hash_table, NULL, table); 238f749303bSWang Shilong if (x) 239f749303bSWang Shilong kvfree(x); 24053b381b3SDavid Woodhouse return 0; 24153b381b3SDavid Woodhouse } 24253b381b3SDavid Woodhouse 24353b381b3SDavid Woodhouse /* 2444ae10b3aSChris Mason * caching an rbio means to copy anything from the 2454ae10b3aSChris Mason * bio_pages array into the stripe_pages array. We 2464ae10b3aSChris Mason * use the page uptodate bit in the stripe cache array 2474ae10b3aSChris Mason * to indicate if it has valid data 2484ae10b3aSChris Mason * 2494ae10b3aSChris Mason * once the caching is done, we set the cache ready 2504ae10b3aSChris Mason * bit. 2514ae10b3aSChris Mason */ 2524ae10b3aSChris Mason static void cache_rbio_pages(struct btrfs_raid_bio *rbio) 2534ae10b3aSChris Mason { 2544ae10b3aSChris Mason int i; 2554ae10b3aSChris Mason char *s; 2564ae10b3aSChris Mason char *d; 2574ae10b3aSChris Mason int ret; 2584ae10b3aSChris Mason 2594ae10b3aSChris Mason ret = alloc_rbio_pages(rbio); 2604ae10b3aSChris Mason if (ret) 2614ae10b3aSChris Mason return; 2624ae10b3aSChris Mason 2634ae10b3aSChris Mason for (i = 0; i < rbio->nr_pages; i++) { 2644ae10b3aSChris Mason if (!rbio->bio_pages[i]) 2654ae10b3aSChris Mason continue; 2664ae10b3aSChris Mason 2674ae10b3aSChris Mason s = kmap(rbio->bio_pages[i]); 2684ae10b3aSChris Mason d = kmap(rbio->stripe_pages[i]); 2694ae10b3aSChris Mason 27009cbfeafSKirill A. 
Shutemov memcpy(d, s, PAGE_SIZE); 2714ae10b3aSChris Mason 2724ae10b3aSChris Mason kunmap(rbio->bio_pages[i]); 2734ae10b3aSChris Mason kunmap(rbio->stripe_pages[i]); 2744ae10b3aSChris Mason SetPageUptodate(rbio->stripe_pages[i]); 2754ae10b3aSChris Mason } 2764ae10b3aSChris Mason set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 2774ae10b3aSChris Mason } 2784ae10b3aSChris Mason 2794ae10b3aSChris Mason /* 28053b381b3SDavid Woodhouse * we hash on the first logical address of the stripe 28153b381b3SDavid Woodhouse */ 28253b381b3SDavid Woodhouse static int rbio_bucket(struct btrfs_raid_bio *rbio) 28353b381b3SDavid Woodhouse { 2848e5cfb55SZhao Lei u64 num = rbio->bbio->raid_map[0]; 28553b381b3SDavid Woodhouse 28653b381b3SDavid Woodhouse /* 28753b381b3SDavid Woodhouse * we shift down quite a bit. We're using byte 28853b381b3SDavid Woodhouse * addressing, and most of the lower bits are zeros. 28953b381b3SDavid Woodhouse * This tends to upset hash_64, and it consistently 29053b381b3SDavid Woodhouse * returns just one or two different values. 29153b381b3SDavid Woodhouse * 29253b381b3SDavid Woodhouse * shifting off the lower bits fixes things. 29353b381b3SDavid Woodhouse */ 29453b381b3SDavid Woodhouse return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS); 29553b381b3SDavid Woodhouse } 29653b381b3SDavid Woodhouse 29753b381b3SDavid Woodhouse /* 2984ae10b3aSChris Mason * stealing an rbio means taking all the uptodate pages from the stripe 2994ae10b3aSChris Mason * array in the source rbio and putting them into the destination rbio 3004ae10b3aSChris Mason */ 3014ae10b3aSChris Mason static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest) 3024ae10b3aSChris Mason { 3034ae10b3aSChris Mason int i; 3044ae10b3aSChris Mason struct page *s; 3054ae10b3aSChris Mason struct page *d; 3064ae10b3aSChris Mason 3074ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags)) 3084ae10b3aSChris Mason return; 3094ae10b3aSChris Mason 3104ae10b3aSChris Mason for (i = 0; i < dest->nr_pages; i++) { 3114ae10b3aSChris Mason s = src->stripe_pages[i]; 3124ae10b3aSChris Mason if (!s || !PageUptodate(s)) { 3134ae10b3aSChris Mason continue; 3144ae10b3aSChris Mason } 3154ae10b3aSChris Mason 3164ae10b3aSChris Mason d = dest->stripe_pages[i]; 3174ae10b3aSChris Mason if (d) 3184ae10b3aSChris Mason __free_page(d); 3194ae10b3aSChris Mason 3204ae10b3aSChris Mason dest->stripe_pages[i] = s; 3214ae10b3aSChris Mason src->stripe_pages[i] = NULL; 3224ae10b3aSChris Mason } 3234ae10b3aSChris Mason } 3244ae10b3aSChris Mason 3254ae10b3aSChris Mason /* 32653b381b3SDavid Woodhouse * merging means we take the bio_list from the victim and 32753b381b3SDavid Woodhouse * splice it into the destination. The victim should 32853b381b3SDavid Woodhouse * be discarded afterwards. 32953b381b3SDavid Woodhouse * 33053b381b3SDavid Woodhouse * must be called with dest->rbio_list_lock held 33153b381b3SDavid Woodhouse */ 33253b381b3SDavid Woodhouse static void merge_rbio(struct btrfs_raid_bio *dest, 33353b381b3SDavid Woodhouse struct btrfs_raid_bio *victim) 33453b381b3SDavid Woodhouse { 33553b381b3SDavid Woodhouse bio_list_merge(&dest->bio_list, &victim->bio_list); 33653b381b3SDavid Woodhouse dest->bio_list_bytes += victim->bio_list_bytes; 3374245215dSMiao Xie dest->generic_bio_cnt += victim->generic_bio_cnt; 33853b381b3SDavid Woodhouse bio_list_init(&victim->bio_list); 33953b381b3SDavid Woodhouse } 34053b381b3SDavid Woodhouse 34153b381b3SDavid Woodhouse /* 3424ae10b3aSChris Mason * used to prune items that are in the cache. 
The caller 3434ae10b3aSChris Mason * must hold the hash table lock. 3444ae10b3aSChris Mason */ 3454ae10b3aSChris Mason static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 3464ae10b3aSChris Mason { 3474ae10b3aSChris Mason int bucket = rbio_bucket(rbio); 3484ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 3494ae10b3aSChris Mason struct btrfs_stripe_hash *h; 3504ae10b3aSChris Mason int freeit = 0; 3514ae10b3aSChris Mason 3524ae10b3aSChris Mason /* 3534ae10b3aSChris Mason * check the bit again under the hash table lock. 3544ae10b3aSChris Mason */ 3554ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 3564ae10b3aSChris Mason return; 3574ae10b3aSChris Mason 3584ae10b3aSChris Mason table = rbio->fs_info->stripe_hash_table; 3594ae10b3aSChris Mason h = table->table + bucket; 3604ae10b3aSChris Mason 3614ae10b3aSChris Mason /* hold the lock for the bucket because we may be 3624ae10b3aSChris Mason * removing it from the hash table 3634ae10b3aSChris Mason */ 3644ae10b3aSChris Mason spin_lock(&h->lock); 3654ae10b3aSChris Mason 3664ae10b3aSChris Mason /* 3674ae10b3aSChris Mason * hold the lock for the bio list because we need 3684ae10b3aSChris Mason * to make sure the bio list is empty 3694ae10b3aSChris Mason */ 3704ae10b3aSChris Mason spin_lock(&rbio->bio_list_lock); 3714ae10b3aSChris Mason 3724ae10b3aSChris Mason if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { 3734ae10b3aSChris Mason list_del_init(&rbio->stripe_cache); 3744ae10b3aSChris Mason table->cache_size -= 1; 3754ae10b3aSChris Mason freeit = 1; 3764ae10b3aSChris Mason 3774ae10b3aSChris Mason /* if the bio list isn't empty, this rbio is 3784ae10b3aSChris Mason * still involved in an IO. We take it out 3794ae10b3aSChris Mason * of the cache list, and drop the ref that 3804ae10b3aSChris Mason * was held for the list. 
3814ae10b3aSChris Mason * 3824ae10b3aSChris Mason * If the bio_list was empty, we also remove 3834ae10b3aSChris Mason * the rbio from the hash_table, and drop 3844ae10b3aSChris Mason * the corresponding ref 3854ae10b3aSChris Mason */ 3864ae10b3aSChris Mason if (bio_list_empty(&rbio->bio_list)) { 3874ae10b3aSChris Mason if (!list_empty(&rbio->hash_list)) { 3884ae10b3aSChris Mason list_del_init(&rbio->hash_list); 389dec95574SElena Reshetova refcount_dec(&rbio->refs); 3904ae10b3aSChris Mason BUG_ON(!list_empty(&rbio->plug_list)); 3914ae10b3aSChris Mason } 3924ae10b3aSChris Mason } 3934ae10b3aSChris Mason } 3944ae10b3aSChris Mason 3954ae10b3aSChris Mason spin_unlock(&rbio->bio_list_lock); 3964ae10b3aSChris Mason spin_unlock(&h->lock); 3974ae10b3aSChris Mason 3984ae10b3aSChris Mason if (freeit) 3994ae10b3aSChris Mason __free_raid_bio(rbio); 4004ae10b3aSChris Mason } 4014ae10b3aSChris Mason 4024ae10b3aSChris Mason /* 4034ae10b3aSChris Mason * prune a given rbio from the cache 4044ae10b3aSChris Mason */ 4054ae10b3aSChris Mason static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 4064ae10b3aSChris Mason { 4074ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 4084ae10b3aSChris Mason unsigned long flags; 4094ae10b3aSChris Mason 4104ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 4114ae10b3aSChris Mason return; 4124ae10b3aSChris Mason 4134ae10b3aSChris Mason table = rbio->fs_info->stripe_hash_table; 4144ae10b3aSChris Mason 4154ae10b3aSChris Mason spin_lock_irqsave(&table->cache_lock, flags); 4164ae10b3aSChris Mason __remove_rbio_from_cache(rbio); 4174ae10b3aSChris Mason spin_unlock_irqrestore(&table->cache_lock, flags); 4184ae10b3aSChris Mason } 4194ae10b3aSChris Mason 4204ae10b3aSChris Mason /* 4214ae10b3aSChris Mason * remove everything in the cache 4224ae10b3aSChris Mason */ 42348a3b636SEric Sandeen static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info) 4244ae10b3aSChris Mason { 4254ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 4264ae10b3aSChris Mason unsigned long flags; 4274ae10b3aSChris Mason struct btrfs_raid_bio *rbio; 4284ae10b3aSChris Mason 4294ae10b3aSChris Mason table = info->stripe_hash_table; 4304ae10b3aSChris Mason 4314ae10b3aSChris Mason spin_lock_irqsave(&table->cache_lock, flags); 4324ae10b3aSChris Mason while (!list_empty(&table->stripe_cache)) { 4334ae10b3aSChris Mason rbio = list_entry(table->stripe_cache.next, 4344ae10b3aSChris Mason struct btrfs_raid_bio, 4354ae10b3aSChris Mason stripe_cache); 4364ae10b3aSChris Mason __remove_rbio_from_cache(rbio); 4374ae10b3aSChris Mason } 4384ae10b3aSChris Mason spin_unlock_irqrestore(&table->cache_lock, flags); 4394ae10b3aSChris Mason } 4404ae10b3aSChris Mason 4414ae10b3aSChris Mason /* 4424ae10b3aSChris Mason * remove all cached entries and free the hash table 4434ae10b3aSChris Mason * used by unmount 44453b381b3SDavid Woodhouse */ 44553b381b3SDavid Woodhouse void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info) 44653b381b3SDavid Woodhouse { 44753b381b3SDavid Woodhouse if (!info->stripe_hash_table) 44853b381b3SDavid Woodhouse return; 4494ae10b3aSChris Mason btrfs_clear_rbio_cache(info); 450f749303bSWang Shilong kvfree(info->stripe_hash_table); 45153b381b3SDavid Woodhouse info->stripe_hash_table = NULL; 45253b381b3SDavid Woodhouse } 45353b381b3SDavid Woodhouse 45453b381b3SDavid Woodhouse /* 4554ae10b3aSChris Mason * insert an rbio into the stripe cache. 
It 4564ae10b3aSChris Mason * must have already been prepared by calling 4574ae10b3aSChris Mason * cache_rbio_pages 4584ae10b3aSChris Mason * 4594ae10b3aSChris Mason * If this rbio was already cached, it gets 4604ae10b3aSChris Mason * moved to the front of the lru. 4614ae10b3aSChris Mason * 4624ae10b3aSChris Mason * If the size of the rbio cache is too big, we 4634ae10b3aSChris Mason * prune an item. 4644ae10b3aSChris Mason */ 4654ae10b3aSChris Mason static void cache_rbio(struct btrfs_raid_bio *rbio) 4664ae10b3aSChris Mason { 4674ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 4684ae10b3aSChris Mason unsigned long flags; 4694ae10b3aSChris Mason 4704ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) 4714ae10b3aSChris Mason return; 4724ae10b3aSChris Mason 4734ae10b3aSChris Mason table = rbio->fs_info->stripe_hash_table; 4744ae10b3aSChris Mason 4754ae10b3aSChris Mason spin_lock_irqsave(&table->cache_lock, flags); 4764ae10b3aSChris Mason spin_lock(&rbio->bio_list_lock); 4774ae10b3aSChris Mason 4784ae10b3aSChris Mason /* bump our ref if we were not in the list before */ 4794ae10b3aSChris Mason if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) 480dec95574SElena Reshetova refcount_inc(&rbio->refs); 4814ae10b3aSChris Mason 4824ae10b3aSChris Mason if (!list_empty(&rbio->stripe_cache)){ 4834ae10b3aSChris Mason list_move(&rbio->stripe_cache, &table->stripe_cache); 4844ae10b3aSChris Mason } else { 4854ae10b3aSChris Mason list_add(&rbio->stripe_cache, &table->stripe_cache); 4864ae10b3aSChris Mason table->cache_size += 1; 4874ae10b3aSChris Mason } 4884ae10b3aSChris Mason 4894ae10b3aSChris Mason spin_unlock(&rbio->bio_list_lock); 4904ae10b3aSChris Mason 4914ae10b3aSChris Mason if (table->cache_size > RBIO_CACHE_SIZE) { 4924ae10b3aSChris Mason struct btrfs_raid_bio *found; 4934ae10b3aSChris Mason 4944ae10b3aSChris Mason found = list_entry(table->stripe_cache.prev, 4954ae10b3aSChris Mason struct btrfs_raid_bio, 4964ae10b3aSChris Mason stripe_cache); 4974ae10b3aSChris Mason 4984ae10b3aSChris Mason if (found != rbio) 4994ae10b3aSChris Mason __remove_rbio_from_cache(found); 5004ae10b3aSChris Mason } 5014ae10b3aSChris Mason 5024ae10b3aSChris Mason spin_unlock_irqrestore(&table->cache_lock, flags); 5034ae10b3aSChris Mason } 5044ae10b3aSChris Mason 5054ae10b3aSChris Mason /* 50653b381b3SDavid Woodhouse * helper function to run the xor_blocks api. It is only 50753b381b3SDavid Woodhouse * able to do MAX_XOR_BLOCKS at a time, so we need to 50853b381b3SDavid Woodhouse * loop through. 50953b381b3SDavid Woodhouse */ 51053b381b3SDavid Woodhouse static void run_xor(void **pages, int src_cnt, ssize_t len) 51153b381b3SDavid Woodhouse { 51253b381b3SDavid Woodhouse int src_off = 0; 51353b381b3SDavid Woodhouse int xor_src_cnt = 0; 51453b381b3SDavid Woodhouse void *dest = pages[src_cnt]; 51553b381b3SDavid Woodhouse 51653b381b3SDavid Woodhouse while(src_cnt > 0) { 51753b381b3SDavid Woodhouse xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS); 51853b381b3SDavid Woodhouse xor_blocks(xor_src_cnt, len, dest, pages + src_off); 51953b381b3SDavid Woodhouse 52053b381b3SDavid Woodhouse src_cnt -= xor_src_cnt; 52153b381b3SDavid Woodhouse src_off += xor_src_cnt; 52253b381b3SDavid Woodhouse } 52353b381b3SDavid Woodhouse } 52453b381b3SDavid Woodhouse 52553b381b3SDavid Woodhouse /* 52653b381b3SDavid Woodhouse * returns true if the bio list inside this rbio 52753b381b3SDavid Woodhouse * covers an entire stripe (no rmw required). 
52853b381b3SDavid Woodhouse * Must be called with the bio list lock held, or 52953b381b3SDavid Woodhouse * at a time when you know it is impossible to add 53053b381b3SDavid Woodhouse * new bios into the list 53153b381b3SDavid Woodhouse */ 53253b381b3SDavid Woodhouse static int __rbio_is_full(struct btrfs_raid_bio *rbio) 53353b381b3SDavid Woodhouse { 53453b381b3SDavid Woodhouse unsigned long size = rbio->bio_list_bytes; 53553b381b3SDavid Woodhouse int ret = 1; 53653b381b3SDavid Woodhouse 53753b381b3SDavid Woodhouse if (size != rbio->nr_data * rbio->stripe_len) 53853b381b3SDavid Woodhouse ret = 0; 53953b381b3SDavid Woodhouse 54053b381b3SDavid Woodhouse BUG_ON(size > rbio->nr_data * rbio->stripe_len); 54153b381b3SDavid Woodhouse return ret; 54253b381b3SDavid Woodhouse } 54353b381b3SDavid Woodhouse 54453b381b3SDavid Woodhouse static int rbio_is_full(struct btrfs_raid_bio *rbio) 54553b381b3SDavid Woodhouse { 54653b381b3SDavid Woodhouse unsigned long flags; 54753b381b3SDavid Woodhouse int ret; 54853b381b3SDavid Woodhouse 54953b381b3SDavid Woodhouse spin_lock_irqsave(&rbio->bio_list_lock, flags); 55053b381b3SDavid Woodhouse ret = __rbio_is_full(rbio); 55153b381b3SDavid Woodhouse spin_unlock_irqrestore(&rbio->bio_list_lock, flags); 55253b381b3SDavid Woodhouse return ret; 55353b381b3SDavid Woodhouse } 55453b381b3SDavid Woodhouse 55553b381b3SDavid Woodhouse /* 55653b381b3SDavid Woodhouse * returns 1 if it is safe to merge two rbios together. 55753b381b3SDavid Woodhouse * The merging is safe if the two rbios correspond to 55853b381b3SDavid Woodhouse * the same stripe and if they are both going in the same 55953b381b3SDavid Woodhouse * direction (read vs write), and if neither one is 56053b381b3SDavid Woodhouse * locked for final IO 56153b381b3SDavid Woodhouse * 56253b381b3SDavid Woodhouse * The caller is responsible for locking such that 56353b381b3SDavid Woodhouse * rmw_locked is safe to test 56453b381b3SDavid Woodhouse */ 56553b381b3SDavid Woodhouse static int rbio_can_merge(struct btrfs_raid_bio *last, 56653b381b3SDavid Woodhouse struct btrfs_raid_bio *cur) 56753b381b3SDavid Woodhouse { 56853b381b3SDavid Woodhouse if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) || 56953b381b3SDavid Woodhouse test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) 57053b381b3SDavid Woodhouse return 0; 57153b381b3SDavid Woodhouse 5724ae10b3aSChris Mason /* 5734ae10b3aSChris Mason * we can't merge with cached rbios, since the 5744ae10b3aSChris Mason * idea is that when we merge the destination 5754ae10b3aSChris Mason * rbio is going to run our IO for us. We can 57601327610SNicholas D Steeves * steal from cached rbios though, other functions 5774ae10b3aSChris Mason * handle that. 5784ae10b3aSChris Mason */ 5794ae10b3aSChris Mason if (test_bit(RBIO_CACHE_BIT, &last->flags) || 5804ae10b3aSChris Mason test_bit(RBIO_CACHE_BIT, &cur->flags)) 5814ae10b3aSChris Mason return 0; 5824ae10b3aSChris Mason 5838e5cfb55SZhao Lei if (last->bbio->raid_map[0] != 5848e5cfb55SZhao Lei cur->bbio->raid_map[0]) 58553b381b3SDavid Woodhouse return 0; 58653b381b3SDavid Woodhouse 5875a6ac9eaSMiao Xie /* we can't merge with different operations */ 5885a6ac9eaSMiao Xie if (last->operation != cur->operation) 58953b381b3SDavid Woodhouse return 0; 5905a6ac9eaSMiao Xie /* 5915a6ac9eaSMiao Xie * We have to read the full stripe from the drive, then 5925a6ac9eaSMiao Xie * check and repair the parity and write the new results.
5935a6ac9eaSMiao Xie * 5945a6ac9eaSMiao Xie * We're not allowed to add any new bios to the 5955a6ac9eaSMiao Xie * bio list here, anyone else that wants to 5965a6ac9eaSMiao Xie * change this stripe needs to do their own rmw. 5975a6ac9eaSMiao Xie */ 5985a6ac9eaSMiao Xie if (last->operation == BTRFS_RBIO_PARITY_SCRUB || 5995a6ac9eaSMiao Xie cur->operation == BTRFS_RBIO_PARITY_SCRUB) 6005a6ac9eaSMiao Xie return 0; 60153b381b3SDavid Woodhouse 602b4ee1782SOmar Sandoval if (last->operation == BTRFS_RBIO_REBUILD_MISSING || 603b4ee1782SOmar Sandoval cur->operation == BTRFS_RBIO_REBUILD_MISSING) 604b4ee1782SOmar Sandoval return 0; 605b4ee1782SOmar Sandoval 60653b381b3SDavid Woodhouse return 1; 60753b381b3SDavid Woodhouse } 60853b381b3SDavid Woodhouse 609b7178a5fSZhao Lei static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe, 610b7178a5fSZhao Lei int index) 611b7178a5fSZhao Lei { 612b7178a5fSZhao Lei return stripe * rbio->stripe_npages + index; 613b7178a5fSZhao Lei } 614b7178a5fSZhao Lei 615b7178a5fSZhao Lei /* 616b7178a5fSZhao Lei * these are just the pages from the rbio array, not from anything 617b7178a5fSZhao Lei * the FS sent down to us 618b7178a5fSZhao Lei */ 619b7178a5fSZhao Lei static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, 620b7178a5fSZhao Lei int index) 621b7178a5fSZhao Lei { 622b7178a5fSZhao Lei return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)]; 623b7178a5fSZhao Lei } 624b7178a5fSZhao Lei 62553b381b3SDavid Woodhouse /* 62653b381b3SDavid Woodhouse * helper to index into the pstripe 62753b381b3SDavid Woodhouse */ 62853b381b3SDavid Woodhouse static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) 62953b381b3SDavid Woodhouse { 630b7178a5fSZhao Lei return rbio_stripe_page(rbio, rbio->nr_data, index); 63153b381b3SDavid Woodhouse } 63253b381b3SDavid Woodhouse 63353b381b3SDavid Woodhouse /* 63453b381b3SDavid Woodhouse * helper to index into the qstripe, returns null 63553b381b3SDavid Woodhouse * if there is no qstripe 63653b381b3SDavid Woodhouse */ 63753b381b3SDavid Woodhouse static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) 63853b381b3SDavid Woodhouse { 6392c8cdd6eSMiao Xie if (rbio->nr_data + 1 == rbio->real_stripes) 64053b381b3SDavid Woodhouse return NULL; 641b7178a5fSZhao Lei return rbio_stripe_page(rbio, rbio->nr_data + 1, index); 64253b381b3SDavid Woodhouse } 64353b381b3SDavid Woodhouse 64453b381b3SDavid Woodhouse /* 64553b381b3SDavid Woodhouse * The first stripe in the table for a logical address 64653b381b3SDavid Woodhouse * has the lock. rbios are added in one of three ways: 64753b381b3SDavid Woodhouse * 64853b381b3SDavid Woodhouse * 1) Nobody has the stripe locked yet. The rbio is given 64953b381b3SDavid Woodhouse * the lock and 0 is returned. The caller must start the IO 65053b381b3SDavid Woodhouse * themselves. 65153b381b3SDavid Woodhouse * 65253b381b3SDavid Woodhouse * 2) Someone has the stripe locked, but we're able to merge 65353b381b3SDavid Woodhouse * with the lock owner. The rbio is freed and the IO will 65453b381b3SDavid Woodhouse * start automatically along with the existing rbio. 1 is returned. 65553b381b3SDavid Woodhouse * 65653b381b3SDavid Woodhouse * 3) Someone has the stripe locked, but we're not able to merge. 65753b381b3SDavid Woodhouse * The rbio is added to the lock owner's plug list, or merged into 65853b381b3SDavid Woodhouse * an rbio already on the plug list. 
When the lock owner unlocks, 65953b381b3SDavid Woodhouse * the next rbio on the list is run and the IO is started automatically. 66053b381b3SDavid Woodhouse * 1 is returned 66153b381b3SDavid Woodhouse * 66253b381b3SDavid Woodhouse * If we return 0, the caller still owns the rbio and must continue with 66353b381b3SDavid Woodhouse * IO submission. If we return 1, the caller must assume the rbio has 66453b381b3SDavid Woodhouse * already been freed. 66553b381b3SDavid Woodhouse */ 66653b381b3SDavid Woodhouse static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) 66753b381b3SDavid Woodhouse { 66853b381b3SDavid Woodhouse int bucket = rbio_bucket(rbio); 66953b381b3SDavid Woodhouse struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; 67053b381b3SDavid Woodhouse struct btrfs_raid_bio *cur; 67153b381b3SDavid Woodhouse struct btrfs_raid_bio *pending; 67253b381b3SDavid Woodhouse unsigned long flags; 67353b381b3SDavid Woodhouse DEFINE_WAIT(wait); 67453b381b3SDavid Woodhouse struct btrfs_raid_bio *freeit = NULL; 6754ae10b3aSChris Mason struct btrfs_raid_bio *cache_drop = NULL; 67653b381b3SDavid Woodhouse int ret = 0; 67753b381b3SDavid Woodhouse 67853b381b3SDavid Woodhouse spin_lock_irqsave(&h->lock, flags); 67953b381b3SDavid Woodhouse list_for_each_entry(cur, &h->hash_list, hash_list) { 6808e5cfb55SZhao Lei if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { 68153b381b3SDavid Woodhouse spin_lock(&cur->bio_list_lock); 68253b381b3SDavid Woodhouse 6834ae10b3aSChris Mason /* can we steal this cached rbio's pages? */ 6844ae10b3aSChris Mason if (bio_list_empty(&cur->bio_list) && 6854ae10b3aSChris Mason list_empty(&cur->plug_list) && 6864ae10b3aSChris Mason test_bit(RBIO_CACHE_BIT, &cur->flags) && 6874ae10b3aSChris Mason !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { 6884ae10b3aSChris Mason list_del_init(&cur->hash_list); 689dec95574SElena Reshetova refcount_dec(&cur->refs); 6904ae10b3aSChris Mason 6914ae10b3aSChris Mason steal_rbio(cur, rbio); 6924ae10b3aSChris Mason cache_drop = cur; 6934ae10b3aSChris Mason spin_unlock(&cur->bio_list_lock); 6944ae10b3aSChris Mason 6954ae10b3aSChris Mason goto lockit; 6964ae10b3aSChris Mason } 6974ae10b3aSChris Mason 69853b381b3SDavid Woodhouse /* can we merge into the lock owner? */ 69953b381b3SDavid Woodhouse if (rbio_can_merge(cur, rbio)) { 70053b381b3SDavid Woodhouse merge_rbio(cur, rbio); 70153b381b3SDavid Woodhouse spin_unlock(&cur->bio_list_lock); 70253b381b3SDavid Woodhouse freeit = rbio; 70353b381b3SDavid Woodhouse ret = 1; 70453b381b3SDavid Woodhouse goto out; 70553b381b3SDavid Woodhouse } 70653b381b3SDavid Woodhouse 7074ae10b3aSChris Mason 70853b381b3SDavid Woodhouse /* 70953b381b3SDavid Woodhouse * we couldn't merge with the running 71053b381b3SDavid Woodhouse * rbio, see if we can merge with the 71153b381b3SDavid Woodhouse * pending ones. 
We don't have to 71253b381b3SDavid Woodhouse * check for rmw_locked because there 71353b381b3SDavid Woodhouse * is no way they are inside finish_rmw 71453b381b3SDavid Woodhouse * right now 71553b381b3SDavid Woodhouse */ 71653b381b3SDavid Woodhouse list_for_each_entry(pending, &cur->plug_list, 71753b381b3SDavid Woodhouse plug_list) { 71853b381b3SDavid Woodhouse if (rbio_can_merge(pending, rbio)) { 71953b381b3SDavid Woodhouse merge_rbio(pending, rbio); 72053b381b3SDavid Woodhouse spin_unlock(&cur->bio_list_lock); 72153b381b3SDavid Woodhouse freeit = rbio; 72253b381b3SDavid Woodhouse ret = 1; 72353b381b3SDavid Woodhouse goto out; 72453b381b3SDavid Woodhouse } 72553b381b3SDavid Woodhouse } 72653b381b3SDavid Woodhouse 72753b381b3SDavid Woodhouse /* no merging, put us on the tail of the plug list, 72853b381b3SDavid Woodhouse * our rbio will be started with the currently 72953b381b3SDavid Woodhouse * running rbio unlocks 73053b381b3SDavid Woodhouse */ 73153b381b3SDavid Woodhouse list_add_tail(&rbio->plug_list, &cur->plug_list); 73253b381b3SDavid Woodhouse spin_unlock(&cur->bio_list_lock); 73353b381b3SDavid Woodhouse ret = 1; 73453b381b3SDavid Woodhouse goto out; 73553b381b3SDavid Woodhouse } 73653b381b3SDavid Woodhouse } 7374ae10b3aSChris Mason lockit: 738dec95574SElena Reshetova refcount_inc(&rbio->refs); 73953b381b3SDavid Woodhouse list_add(&rbio->hash_list, &h->hash_list); 74053b381b3SDavid Woodhouse out: 74153b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 7424ae10b3aSChris Mason if (cache_drop) 7434ae10b3aSChris Mason remove_rbio_from_cache(cache_drop); 74453b381b3SDavid Woodhouse if (freeit) 74553b381b3SDavid Woodhouse __free_raid_bio(freeit); 74653b381b3SDavid Woodhouse return ret; 74753b381b3SDavid Woodhouse } 74853b381b3SDavid Woodhouse 74953b381b3SDavid Woodhouse /* 75053b381b3SDavid Woodhouse * called as rmw or parity rebuild is completed. 
If the plug list has more 75153b381b3SDavid Woodhouse * rbios waiting for this stripe, the next one on the list will be started 75253b381b3SDavid Woodhouse */ 75353b381b3SDavid Woodhouse static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) 75453b381b3SDavid Woodhouse { 75553b381b3SDavid Woodhouse int bucket; 75653b381b3SDavid Woodhouse struct btrfs_stripe_hash *h; 75753b381b3SDavid Woodhouse unsigned long flags; 7584ae10b3aSChris Mason int keep_cache = 0; 75953b381b3SDavid Woodhouse 76053b381b3SDavid Woodhouse bucket = rbio_bucket(rbio); 76153b381b3SDavid Woodhouse h = rbio->fs_info->stripe_hash_table->table + bucket; 76253b381b3SDavid Woodhouse 7634ae10b3aSChris Mason if (list_empty(&rbio->plug_list)) 7644ae10b3aSChris Mason cache_rbio(rbio); 7654ae10b3aSChris Mason 76653b381b3SDavid Woodhouse spin_lock_irqsave(&h->lock, flags); 76753b381b3SDavid Woodhouse spin_lock(&rbio->bio_list_lock); 76853b381b3SDavid Woodhouse 76953b381b3SDavid Woodhouse if (!list_empty(&rbio->hash_list)) { 7704ae10b3aSChris Mason /* 7714ae10b3aSChris Mason * if we're still cached and there is no other IO 7724ae10b3aSChris Mason * to perform, just leave this rbio here for others 7734ae10b3aSChris Mason * to steal from later 7744ae10b3aSChris Mason */ 7754ae10b3aSChris Mason if (list_empty(&rbio->plug_list) && 7764ae10b3aSChris Mason test_bit(RBIO_CACHE_BIT, &rbio->flags)) { 7774ae10b3aSChris Mason keep_cache = 1; 7784ae10b3aSChris Mason clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 7794ae10b3aSChris Mason BUG_ON(!bio_list_empty(&rbio->bio_list)); 7804ae10b3aSChris Mason goto done; 7814ae10b3aSChris Mason } 78253b381b3SDavid Woodhouse 78353b381b3SDavid Woodhouse list_del_init(&rbio->hash_list); 784dec95574SElena Reshetova refcount_dec(&rbio->refs); 78553b381b3SDavid Woodhouse 78653b381b3SDavid Woodhouse /* 78753b381b3SDavid Woodhouse * we use the plug list to hold all the rbios 78853b381b3SDavid Woodhouse * waiting for the chance to lock this stripe. 78953b381b3SDavid Woodhouse * hand the lock over to one of them. 
79053b381b3SDavid Woodhouse */ 79153b381b3SDavid Woodhouse if (!list_empty(&rbio->plug_list)) { 79253b381b3SDavid Woodhouse struct btrfs_raid_bio *next; 79353b381b3SDavid Woodhouse struct list_head *head = rbio->plug_list.next; 79453b381b3SDavid Woodhouse 79553b381b3SDavid Woodhouse next = list_entry(head, struct btrfs_raid_bio, 79653b381b3SDavid Woodhouse plug_list); 79753b381b3SDavid Woodhouse 79853b381b3SDavid Woodhouse list_del_init(&rbio->plug_list); 79953b381b3SDavid Woodhouse 80053b381b3SDavid Woodhouse list_add(&next->hash_list, &h->hash_list); 801dec95574SElena Reshetova refcount_inc(&next->refs); 80253b381b3SDavid Woodhouse spin_unlock(&rbio->bio_list_lock); 80353b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 80453b381b3SDavid Woodhouse 8051b94b556SMiao Xie if (next->operation == BTRFS_RBIO_READ_REBUILD) 80653b381b3SDavid Woodhouse async_read_rebuild(next); 807b4ee1782SOmar Sandoval else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) { 808b4ee1782SOmar Sandoval steal_rbio(rbio, next); 809b4ee1782SOmar Sandoval async_read_rebuild(next); 810b4ee1782SOmar Sandoval } else if (next->operation == BTRFS_RBIO_WRITE) { 8114ae10b3aSChris Mason steal_rbio(rbio, next); 81253b381b3SDavid Woodhouse async_rmw_stripe(next); 8135a6ac9eaSMiao Xie } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) { 8145a6ac9eaSMiao Xie steal_rbio(rbio, next); 8155a6ac9eaSMiao Xie async_scrub_parity(next); 8164ae10b3aSChris Mason } 81753b381b3SDavid Woodhouse 81853b381b3SDavid Woodhouse goto done_nolock; 81933a9eca7SDavid Sterba /* 82033a9eca7SDavid Sterba * The barrier for this waitqueue_active is not needed, 82133a9eca7SDavid Sterba * we're protected by h->lock and can't miss a wakeup. 82233a9eca7SDavid Sterba */ 82353b381b3SDavid Woodhouse } else if (waitqueue_active(&h->wait)) { 82453b381b3SDavid Woodhouse spin_unlock(&rbio->bio_list_lock); 82553b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 82653b381b3SDavid Woodhouse wake_up(&h->wait); 82753b381b3SDavid Woodhouse goto done_nolock; 82853b381b3SDavid Woodhouse } 82953b381b3SDavid Woodhouse } 8304ae10b3aSChris Mason done: 83153b381b3SDavid Woodhouse spin_unlock(&rbio->bio_list_lock); 83253b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 83353b381b3SDavid Woodhouse 83453b381b3SDavid Woodhouse done_nolock: 8354ae10b3aSChris Mason if (!keep_cache) 8364ae10b3aSChris Mason remove_rbio_from_cache(rbio); 83753b381b3SDavid Woodhouse } 83853b381b3SDavid Woodhouse 83953b381b3SDavid Woodhouse static void __free_raid_bio(struct btrfs_raid_bio *rbio) 84053b381b3SDavid Woodhouse { 84153b381b3SDavid Woodhouse int i; 84253b381b3SDavid Woodhouse 843dec95574SElena Reshetova if (!refcount_dec_and_test(&rbio->refs)) 84453b381b3SDavid Woodhouse return; 84553b381b3SDavid Woodhouse 8464ae10b3aSChris Mason WARN_ON(!list_empty(&rbio->stripe_cache)); 84753b381b3SDavid Woodhouse WARN_ON(!list_empty(&rbio->hash_list)); 84853b381b3SDavid Woodhouse WARN_ON(!bio_list_empty(&rbio->bio_list)); 84953b381b3SDavid Woodhouse 85053b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_pages; i++) { 85153b381b3SDavid Woodhouse if (rbio->stripe_pages[i]) { 85253b381b3SDavid Woodhouse __free_page(rbio->stripe_pages[i]); 85353b381b3SDavid Woodhouse rbio->stripe_pages[i] = NULL; 85453b381b3SDavid Woodhouse } 85553b381b3SDavid Woodhouse } 856af8e2d1dSMiao Xie 8576e9606d2SZhao Lei btrfs_put_bbio(rbio->bbio); 85853b381b3SDavid Woodhouse kfree(rbio); 85953b381b3SDavid Woodhouse } 86053b381b3SDavid Woodhouse 86153b381b3SDavid Woodhouse static void 
free_raid_bio(struct btrfs_raid_bio *rbio) 86253b381b3SDavid Woodhouse { 86353b381b3SDavid Woodhouse unlock_stripe(rbio); 86453b381b3SDavid Woodhouse __free_raid_bio(rbio); 86553b381b3SDavid Woodhouse } 86653b381b3SDavid Woodhouse 86753b381b3SDavid Woodhouse /* 86853b381b3SDavid Woodhouse * this frees the rbio and runs through all the bios in the 86953b381b3SDavid Woodhouse * bio_list and calls end_io on them 87053b381b3SDavid Woodhouse */ 8714246a0b6SChristoph Hellwig static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err) 87253b381b3SDavid Woodhouse { 87353b381b3SDavid Woodhouse struct bio *cur = bio_list_get(&rbio->bio_list); 87453b381b3SDavid Woodhouse struct bio *next; 8754245215dSMiao Xie 8764245215dSMiao Xie if (rbio->generic_bio_cnt) 8774245215dSMiao Xie btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); 8784245215dSMiao Xie 87953b381b3SDavid Woodhouse free_raid_bio(rbio); 88053b381b3SDavid Woodhouse 88153b381b3SDavid Woodhouse while (cur) { 88253b381b3SDavid Woodhouse next = cur->bi_next; 88353b381b3SDavid Woodhouse cur->bi_next = NULL; 8844246a0b6SChristoph Hellwig cur->bi_error = err; 8854246a0b6SChristoph Hellwig bio_endio(cur); 88653b381b3SDavid Woodhouse cur = next; 88753b381b3SDavid Woodhouse } 88853b381b3SDavid Woodhouse } 88953b381b3SDavid Woodhouse 89053b381b3SDavid Woodhouse /* 89153b381b3SDavid Woodhouse * end io function used by finish_rmw. When we finally 89253b381b3SDavid Woodhouse * get here, we've written a full stripe 89353b381b3SDavid Woodhouse */ 8944246a0b6SChristoph Hellwig static void raid_write_end_io(struct bio *bio) 89553b381b3SDavid Woodhouse { 89653b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 8974246a0b6SChristoph Hellwig int err = bio->bi_error; 898a6111d11SZhao Lei int max_errors; 89953b381b3SDavid Woodhouse 90053b381b3SDavid Woodhouse if (err) 90153b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 90253b381b3SDavid Woodhouse 90353b381b3SDavid Woodhouse bio_put(bio); 90453b381b3SDavid Woodhouse 905b89e1b01SMiao Xie if (!atomic_dec_and_test(&rbio->stripes_pending)) 90653b381b3SDavid Woodhouse return; 90753b381b3SDavid Woodhouse 90853b381b3SDavid Woodhouse err = 0; 90953b381b3SDavid Woodhouse 91053b381b3SDavid Woodhouse /* OK, we have written all the stripes we need to. */ 911a6111d11SZhao Lei max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? 912a6111d11SZhao Lei 0 : rbio->bbio->max_errors; 913a6111d11SZhao Lei if (atomic_read(&rbio->error) > max_errors) 91453b381b3SDavid Woodhouse err = -EIO; 91553b381b3SDavid Woodhouse 9164246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, err); 91753b381b3SDavid Woodhouse } 91853b381b3SDavid Woodhouse 91953b381b3SDavid Woodhouse /* 92053b381b3SDavid Woodhouse * the read/modify/write code wants to use the original bio for 92153b381b3SDavid Woodhouse * any pages it included, and then use the rbio for everything 92253b381b3SDavid Woodhouse * else. This function decides if a given index (stripe number) 92353b381b3SDavid Woodhouse * and page number in that stripe fall inside the original bio 92453b381b3SDavid Woodhouse * or the rbio. 92553b381b3SDavid Woodhouse * 92653b381b3SDavid Woodhouse * if you set bio_list_only, you'll get a NULL back for any ranges 92753b381b3SDavid Woodhouse * that are outside the bio_list 92853b381b3SDavid Woodhouse * 92953b381b3SDavid Woodhouse * This doesn't take any refs on anything, you get a bare page pointer 93053b381b3SDavid Woodhouse * and the caller must bump refs as required.
93153b381b3SDavid Woodhouse * 93253b381b3SDavid Woodhouse * You must call index_rbio_pages once before you can trust 93353b381b3SDavid Woodhouse * the answers from this function. 93453b381b3SDavid Woodhouse */ 93553b381b3SDavid Woodhouse static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, 93653b381b3SDavid Woodhouse int index, int pagenr, int bio_list_only) 93753b381b3SDavid Woodhouse { 93853b381b3SDavid Woodhouse int chunk_page; 93953b381b3SDavid Woodhouse struct page *p = NULL; 94053b381b3SDavid Woodhouse 94153b381b3SDavid Woodhouse chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr; 94253b381b3SDavid Woodhouse 94353b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 94453b381b3SDavid Woodhouse p = rbio->bio_pages[chunk_page]; 94553b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 94653b381b3SDavid Woodhouse 94753b381b3SDavid Woodhouse if (p || bio_list_only) 94853b381b3SDavid Woodhouse return p; 94953b381b3SDavid Woodhouse 95053b381b3SDavid Woodhouse return rbio->stripe_pages[chunk_page]; 95153b381b3SDavid Woodhouse } 95253b381b3SDavid Woodhouse 95353b381b3SDavid Woodhouse /* 95453b381b3SDavid Woodhouse * number of pages we need for the entire stripe across all the 95553b381b3SDavid Woodhouse * drives 95653b381b3SDavid Woodhouse */ 95753b381b3SDavid Woodhouse static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) 95853b381b3SDavid Woodhouse { 95909cbfeafSKirill A. Shutemov return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes; 96053b381b3SDavid Woodhouse } 96153b381b3SDavid Woodhouse 96253b381b3SDavid Woodhouse /* 96353b381b3SDavid Woodhouse * allocation and initial setup for the btrfs_raid_bio. Note that 96453b381b3SDavid Woodhouse * this does not allocate any pages for rbio->stripe_pages. 96553b381b3SDavid Woodhouse */ 9662ff7e61eSJeff Mahoney static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info, 9672ff7e61eSJeff Mahoney struct btrfs_bio *bbio, 9682ff7e61eSJeff Mahoney u64 stripe_len) 96953b381b3SDavid Woodhouse { 97053b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 97153b381b3SDavid Woodhouse int nr_data = 0; 9722c8cdd6eSMiao Xie int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; 9732c8cdd6eSMiao Xie int num_pages = rbio_nr_pages(stripe_len, real_stripes); 9745a6ac9eaSMiao Xie int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE); 97553b381b3SDavid Woodhouse void *p; 97653b381b3SDavid Woodhouse 9775a6ac9eaSMiao Xie rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 + 978bfca9a6dSZhao Lei DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) * 979bfca9a6dSZhao Lei sizeof(long), GFP_NOFS); 980af8e2d1dSMiao Xie if (!rbio) 98153b381b3SDavid Woodhouse return ERR_PTR(-ENOMEM); 98253b381b3SDavid Woodhouse 98353b381b3SDavid Woodhouse bio_list_init(&rbio->bio_list); 98453b381b3SDavid Woodhouse INIT_LIST_HEAD(&rbio->plug_list); 98553b381b3SDavid Woodhouse spin_lock_init(&rbio->bio_list_lock); 9864ae10b3aSChris Mason INIT_LIST_HEAD(&rbio->stripe_cache); 98753b381b3SDavid Woodhouse INIT_LIST_HEAD(&rbio->hash_list); 98853b381b3SDavid Woodhouse rbio->bbio = bbio; 9892ff7e61eSJeff Mahoney rbio->fs_info = fs_info; 99053b381b3SDavid Woodhouse rbio->stripe_len = stripe_len; 99153b381b3SDavid Woodhouse rbio->nr_pages = num_pages; 9922c8cdd6eSMiao Xie rbio->real_stripes = real_stripes; 9935a6ac9eaSMiao Xie rbio->stripe_npages = stripe_npages; 99453b381b3SDavid Woodhouse rbio->faila = -1; 99553b381b3SDavid Woodhouse rbio->failb = -1; 996dec95574SElena Reshetova refcount_set(&rbio->refs, 1); 997b89e1b01SMiao Xie
atomic_set(&rbio->error, 0); 998b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, 0); 99953b381b3SDavid Woodhouse 100053b381b3SDavid Woodhouse /* 100153b381b3SDavid Woodhouse * the stripe_pages and bio_pages array point to the extra 100253b381b3SDavid Woodhouse * memory we allocated past the end of the rbio 100353b381b3SDavid Woodhouse */ 100453b381b3SDavid Woodhouse p = rbio + 1; 100553b381b3SDavid Woodhouse rbio->stripe_pages = p; 100653b381b3SDavid Woodhouse rbio->bio_pages = p + sizeof(struct page *) * num_pages; 10075a6ac9eaSMiao Xie rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; 100853b381b3SDavid Woodhouse 100910f11900SZhao Lei if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) 101010f11900SZhao Lei nr_data = real_stripes - 1; 101110f11900SZhao Lei else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) 10122c8cdd6eSMiao Xie nr_data = real_stripes - 2; 101353b381b3SDavid Woodhouse else 101410f11900SZhao Lei BUG(); 101553b381b3SDavid Woodhouse 101653b381b3SDavid Woodhouse rbio->nr_data = nr_data; 101753b381b3SDavid Woodhouse return rbio; 101853b381b3SDavid Woodhouse } 101953b381b3SDavid Woodhouse 102053b381b3SDavid Woodhouse /* allocate pages for all the stripes in the bio, including parity */ 102153b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) 102253b381b3SDavid Woodhouse { 102353b381b3SDavid Woodhouse int i; 102453b381b3SDavid Woodhouse struct page *page; 102553b381b3SDavid Woodhouse 102653b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_pages; i++) { 102753b381b3SDavid Woodhouse if (rbio->stripe_pages[i]) 102853b381b3SDavid Woodhouse continue; 102953b381b3SDavid Woodhouse page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 103053b381b3SDavid Woodhouse if (!page) 103153b381b3SDavid Woodhouse return -ENOMEM; 103253b381b3SDavid Woodhouse rbio->stripe_pages[i] = page; 103353b381b3SDavid Woodhouse } 103453b381b3SDavid Woodhouse return 0; 103553b381b3SDavid Woodhouse } 103653b381b3SDavid Woodhouse 1037b7178a5fSZhao Lei /* only allocate pages for p/q stripes */ 103853b381b3SDavid Woodhouse static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) 103953b381b3SDavid Woodhouse { 104053b381b3SDavid Woodhouse int i; 104153b381b3SDavid Woodhouse struct page *page; 104253b381b3SDavid Woodhouse 1043b7178a5fSZhao Lei i = rbio_stripe_page_index(rbio, rbio->nr_data, 0); 104453b381b3SDavid Woodhouse 104553b381b3SDavid Woodhouse for (; i < rbio->nr_pages; i++) { 104653b381b3SDavid Woodhouse if (rbio->stripe_pages[i]) 104753b381b3SDavid Woodhouse continue; 104853b381b3SDavid Woodhouse page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 104953b381b3SDavid Woodhouse if (!page) 105053b381b3SDavid Woodhouse return -ENOMEM; 105153b381b3SDavid Woodhouse rbio->stripe_pages[i] = page; 105253b381b3SDavid Woodhouse } 105353b381b3SDavid Woodhouse return 0; 105453b381b3SDavid Woodhouse } 105553b381b3SDavid Woodhouse 105653b381b3SDavid Woodhouse /* 105753b381b3SDavid Woodhouse * add a single page from a specific stripe into our list of bios for IO 105853b381b3SDavid Woodhouse * this will try to merge into existing bios if possible, and returns 105953b381b3SDavid Woodhouse * zero if all went well. 
106053b381b3SDavid Woodhouse */ 106148a3b636SEric Sandeen static int rbio_add_io_page(struct btrfs_raid_bio *rbio, 106253b381b3SDavid Woodhouse struct bio_list *bio_list, 106353b381b3SDavid Woodhouse struct page *page, 106453b381b3SDavid Woodhouse int stripe_nr, 106553b381b3SDavid Woodhouse unsigned long page_index, 106653b381b3SDavid Woodhouse unsigned long bio_max_len) 106753b381b3SDavid Woodhouse { 106853b381b3SDavid Woodhouse struct bio *last = bio_list->tail; 106953b381b3SDavid Woodhouse u64 last_end = 0; 107053b381b3SDavid Woodhouse int ret; 107153b381b3SDavid Woodhouse struct bio *bio; 107253b381b3SDavid Woodhouse struct btrfs_bio_stripe *stripe; 107353b381b3SDavid Woodhouse u64 disk_start; 107453b381b3SDavid Woodhouse 107553b381b3SDavid Woodhouse stripe = &rbio->bbio->stripes[stripe_nr]; 107609cbfeafSKirill A. Shutemov disk_start = stripe->physical + (page_index << PAGE_SHIFT); 107753b381b3SDavid Woodhouse 107853b381b3SDavid Woodhouse /* if the device is missing, just fail this stripe */ 107953b381b3SDavid Woodhouse if (!stripe->dev->bdev) 108053b381b3SDavid Woodhouse return fail_rbio_index(rbio, stripe_nr); 108153b381b3SDavid Woodhouse 108253b381b3SDavid Woodhouse /* see if we can add this page onto our existing bio */ 108353b381b3SDavid Woodhouse if (last) { 10844f024f37SKent Overstreet last_end = (u64)last->bi_iter.bi_sector << 9; 10854f024f37SKent Overstreet last_end += last->bi_iter.bi_size; 108653b381b3SDavid Woodhouse 108753b381b3SDavid Woodhouse /* 108853b381b3SDavid Woodhouse * we can't merge these if they are from different 108953b381b3SDavid Woodhouse * devices or if they are not contiguous 109053b381b3SDavid Woodhouse */ 109153b381b3SDavid Woodhouse if (last_end == disk_start && stripe->dev->bdev && 10924246a0b6SChristoph Hellwig !last->bi_error && 109353b381b3SDavid Woodhouse last->bi_bdev == stripe->dev->bdev) { 109409cbfeafSKirill A. Shutemov ret = bio_add_page(last, page, PAGE_SIZE, 0); 109509cbfeafSKirill A. Shutemov if (ret == PAGE_SIZE) 109653b381b3SDavid Woodhouse return 0; 109753b381b3SDavid Woodhouse } 109853b381b3SDavid Woodhouse } 109953b381b3SDavid Woodhouse 110053b381b3SDavid Woodhouse /* put a new bio on the list */ 1101c5e4c3d7SDavid Sterba bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1); 11024f024f37SKent Overstreet bio->bi_iter.bi_size = 0; 110353b381b3SDavid Woodhouse bio->bi_bdev = stripe->dev->bdev; 11044f024f37SKent Overstreet bio->bi_iter.bi_sector = disk_start >> 9; 110553b381b3SDavid Woodhouse 110609cbfeafSKirill A. Shutemov bio_add_page(bio, page, PAGE_SIZE, 0); 110753b381b3SDavid Woodhouse bio_list_add(bio_list, bio); 110853b381b3SDavid Woodhouse return 0; 110953b381b3SDavid Woodhouse } 111053b381b3SDavid Woodhouse 111153b381b3SDavid Woodhouse /* 111253b381b3SDavid Woodhouse * while we're doing the read/modify/write cycle, we could 111353b381b3SDavid Woodhouse * have errors in reading pages off the disk. This checks 111453b381b3SDavid Woodhouse * for errors and if we're not able to read the page it'll 111553b381b3SDavid Woodhouse * trigger parity reconstruction. 
The rmw will be finished 111653b381b3SDavid Woodhouse * after we've reconstructed the failed stripes 111753b381b3SDavid Woodhouse */ 111853b381b3SDavid Woodhouse static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) 111953b381b3SDavid Woodhouse { 112053b381b3SDavid Woodhouse if (rbio->faila >= 0 || rbio->failb >= 0) { 11212c8cdd6eSMiao Xie BUG_ON(rbio->faila == rbio->real_stripes - 1); 112253b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 112353b381b3SDavid Woodhouse } else { 112453b381b3SDavid Woodhouse finish_rmw(rbio); 112553b381b3SDavid Woodhouse } 112653b381b3SDavid Woodhouse } 112753b381b3SDavid Woodhouse 112853b381b3SDavid Woodhouse /* 112953b381b3SDavid Woodhouse * helper function to walk our bio list and populate the bio_pages array with 113053b381b3SDavid Woodhouse * the result. This seems expensive, but it is faster than constantly 113153b381b3SDavid Woodhouse * searching through the bio list as we setup the IO in finish_rmw or stripe 113253b381b3SDavid Woodhouse * reconstruction. 113353b381b3SDavid Woodhouse * 113453b381b3SDavid Woodhouse * This must be called before you trust the answers from page_in_rbio 113553b381b3SDavid Woodhouse */ 113653b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio) 113753b381b3SDavid Woodhouse { 113853b381b3SDavid Woodhouse struct bio *bio; 113953b381b3SDavid Woodhouse u64 start; 114053b381b3SDavid Woodhouse unsigned long stripe_offset; 114153b381b3SDavid Woodhouse unsigned long page_index; 114253b381b3SDavid Woodhouse 114353b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 114453b381b3SDavid Woodhouse bio_list_for_each(bio, &rbio->bio_list) { 1145*6592e58cSFilipe Manana struct bio_vec bvec; 1146*6592e58cSFilipe Manana struct bvec_iter iter; 1147*6592e58cSFilipe Manana int i = 0; 1148*6592e58cSFilipe Manana 11494f024f37SKent Overstreet start = (u64)bio->bi_iter.bi_sector << 9; 11508e5cfb55SZhao Lei stripe_offset = start - rbio->bbio->raid_map[0]; 115109cbfeafSKirill A. Shutemov page_index = stripe_offset >> PAGE_SHIFT; 115253b381b3SDavid Woodhouse 1153*6592e58cSFilipe Manana if (bio_flagged(bio, BIO_CLONED)) 1154*6592e58cSFilipe Manana bio->bi_iter = btrfs_io_bio(bio)->iter; 1155*6592e58cSFilipe Manana 1156*6592e58cSFilipe Manana bio_for_each_segment(bvec, bio, iter) { 1157*6592e58cSFilipe Manana rbio->bio_pages[page_index + i] = bvec.bv_page; 1158*6592e58cSFilipe Manana i++; 1159*6592e58cSFilipe Manana } 116053b381b3SDavid Woodhouse } 116153b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 116253b381b3SDavid Woodhouse } 116353b381b3SDavid Woodhouse 116453b381b3SDavid Woodhouse /* 116553b381b3SDavid Woodhouse * this is called from one of two situations. We either 116653b381b3SDavid Woodhouse * have a full stripe from the higher layers, or we've read all 116753b381b3SDavid Woodhouse * the missing bits off disk. 116853b381b3SDavid Woodhouse * 116953b381b3SDavid Woodhouse * This will calculate the parity and then send down any 117053b381b3SDavid Woodhouse * changed blocks. 
117153b381b3SDavid Woodhouse */ 117253b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio) 117353b381b3SDavid Woodhouse { 117453b381b3SDavid Woodhouse struct btrfs_bio *bbio = rbio->bbio; 11752c8cdd6eSMiao Xie void *pointers[rbio->real_stripes]; 117653b381b3SDavid Woodhouse int nr_data = rbio->nr_data; 117753b381b3SDavid Woodhouse int stripe; 117853b381b3SDavid Woodhouse int pagenr; 117953b381b3SDavid Woodhouse int p_stripe = -1; 118053b381b3SDavid Woodhouse int q_stripe = -1; 118153b381b3SDavid Woodhouse struct bio_list bio_list; 118253b381b3SDavid Woodhouse struct bio *bio; 118353b381b3SDavid Woodhouse int ret; 118453b381b3SDavid Woodhouse 118553b381b3SDavid Woodhouse bio_list_init(&bio_list); 118653b381b3SDavid Woodhouse 11872c8cdd6eSMiao Xie if (rbio->real_stripes - rbio->nr_data == 1) { 11882c8cdd6eSMiao Xie p_stripe = rbio->real_stripes - 1; 11892c8cdd6eSMiao Xie } else if (rbio->real_stripes - rbio->nr_data == 2) { 11902c8cdd6eSMiao Xie p_stripe = rbio->real_stripes - 2; 11912c8cdd6eSMiao Xie q_stripe = rbio->real_stripes - 1; 119253b381b3SDavid Woodhouse } else { 119353b381b3SDavid Woodhouse BUG(); 119453b381b3SDavid Woodhouse } 119553b381b3SDavid Woodhouse 119653b381b3SDavid Woodhouse /* at this point we either have a full stripe, 119753b381b3SDavid Woodhouse * or we've read the full stripe from the drive. 119853b381b3SDavid Woodhouse * recalculate the parity and write the new results. 119953b381b3SDavid Woodhouse * 120053b381b3SDavid Woodhouse * We're not allowed to add any new bios to the 120153b381b3SDavid Woodhouse * bio list here, anyone else that wants to 120253b381b3SDavid Woodhouse * change this stripe needs to do their own rmw. 120353b381b3SDavid Woodhouse */ 120453b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 120553b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 120653b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 120753b381b3SDavid Woodhouse 1208b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 120953b381b3SDavid Woodhouse 121053b381b3SDavid Woodhouse /* 121153b381b3SDavid Woodhouse * now that we've set rmw_locked, run through the 121253b381b3SDavid Woodhouse * bio list one last time and map the page pointers 12134ae10b3aSChris Mason * 12144ae10b3aSChris Mason * We don't cache full rbios because we're assuming 12154ae10b3aSChris Mason * the higher layers are unlikely to use this area of 12164ae10b3aSChris Mason * the disk again soon. If they do use it again, 12174ae10b3aSChris Mason * hopefully they will send another full bio. 
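 *
 * Partial rbios are worth caching, though: if another partial write
 * lands on this stripe shortly afterwards, the cached pages let it
 * skip the read half of the read/modify/write cycle.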
121853b381b3SDavid Woodhouse */ 121953b381b3SDavid Woodhouse index_rbio_pages(rbio); 12204ae10b3aSChris Mason if (!rbio_is_full(rbio)) 12214ae10b3aSChris Mason cache_rbio_pages(rbio); 12224ae10b3aSChris Mason else 12234ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 122453b381b3SDavid Woodhouse 1225915e2290SZhao Lei for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 122653b381b3SDavid Woodhouse struct page *p; 122753b381b3SDavid Woodhouse /* first collect one page from each data stripe */ 122853b381b3SDavid Woodhouse for (stripe = 0; stripe < nr_data; stripe++) { 122953b381b3SDavid Woodhouse p = page_in_rbio(rbio, stripe, pagenr, 0); 123053b381b3SDavid Woodhouse pointers[stripe] = kmap(p); 123153b381b3SDavid Woodhouse } 123253b381b3SDavid Woodhouse 123353b381b3SDavid Woodhouse /* then add the parity stripe */ 123453b381b3SDavid Woodhouse p = rbio_pstripe_page(rbio, pagenr); 123553b381b3SDavid Woodhouse SetPageUptodate(p); 123653b381b3SDavid Woodhouse pointers[stripe++] = kmap(p); 123753b381b3SDavid Woodhouse 123853b381b3SDavid Woodhouse if (q_stripe != -1) { 123953b381b3SDavid Woodhouse 124053b381b3SDavid Woodhouse /* 124153b381b3SDavid Woodhouse * raid6, add the qstripe and call the 124253b381b3SDavid Woodhouse * library function to fill in our p/q 124353b381b3SDavid Woodhouse */ 124453b381b3SDavid Woodhouse p = rbio_qstripe_page(rbio, pagenr); 124553b381b3SDavid Woodhouse SetPageUptodate(p); 124653b381b3SDavid Woodhouse pointers[stripe++] = kmap(p); 124753b381b3SDavid Woodhouse 12482c8cdd6eSMiao Xie raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, 124953b381b3SDavid Woodhouse pointers); 125053b381b3SDavid Woodhouse } else { 125153b381b3SDavid Woodhouse /* raid5 */ 125253b381b3SDavid Woodhouse memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); 125309cbfeafSKirill A. Shutemov run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); 125453b381b3SDavid Woodhouse } 125553b381b3SDavid Woodhouse 125653b381b3SDavid Woodhouse 12572c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) 125853b381b3SDavid Woodhouse kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); 125953b381b3SDavid Woodhouse } 126053b381b3SDavid Woodhouse 126153b381b3SDavid Woodhouse /* 126253b381b3SDavid Woodhouse * time to start writing. Make bios for everything from the 126353b381b3SDavid Woodhouse * higher layers (the bio_list in our rbio) and our p/q. Ignore 126453b381b3SDavid Woodhouse * everything else. 
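 *
 * For the data stripes we only queue pages that actually arrived via
 * the bio_list (page_in_rbio with bio_list_only set); anything else is
 * skipped.  The P/Q pages always come from our private stripe_pages,
 * and if a dev-replace target exists the same pages are queued a
 * second time for the replacement device further down.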
126553b381b3SDavid Woodhouse */ 12662c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 1267915e2290SZhao Lei for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 126853b381b3SDavid Woodhouse struct page *page; 126953b381b3SDavid Woodhouse if (stripe < rbio->nr_data) { 127053b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 1); 127153b381b3SDavid Woodhouse if (!page) 127253b381b3SDavid Woodhouse continue; 127353b381b3SDavid Woodhouse } else { 127453b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 127553b381b3SDavid Woodhouse } 127653b381b3SDavid Woodhouse 127753b381b3SDavid Woodhouse ret = rbio_add_io_page(rbio, &bio_list, 127853b381b3SDavid Woodhouse page, stripe, pagenr, rbio->stripe_len); 127953b381b3SDavid Woodhouse if (ret) 128053b381b3SDavid Woodhouse goto cleanup; 128153b381b3SDavid Woodhouse } 128253b381b3SDavid Woodhouse } 128353b381b3SDavid Woodhouse 12842c8cdd6eSMiao Xie if (likely(!bbio->num_tgtdevs)) 12852c8cdd6eSMiao Xie goto write_data; 12862c8cdd6eSMiao Xie 12872c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 12882c8cdd6eSMiao Xie if (!bbio->tgtdev_map[stripe]) 12892c8cdd6eSMiao Xie continue; 12902c8cdd6eSMiao Xie 1291915e2290SZhao Lei for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 12922c8cdd6eSMiao Xie struct page *page; 12932c8cdd6eSMiao Xie if (stripe < rbio->nr_data) { 12942c8cdd6eSMiao Xie page = page_in_rbio(rbio, stripe, pagenr, 1); 12952c8cdd6eSMiao Xie if (!page) 12962c8cdd6eSMiao Xie continue; 12972c8cdd6eSMiao Xie } else { 12982c8cdd6eSMiao Xie page = rbio_stripe_page(rbio, stripe, pagenr); 12992c8cdd6eSMiao Xie } 13002c8cdd6eSMiao Xie 13012c8cdd6eSMiao Xie ret = rbio_add_io_page(rbio, &bio_list, page, 13022c8cdd6eSMiao Xie rbio->bbio->tgtdev_map[stripe], 13032c8cdd6eSMiao Xie pagenr, rbio->stripe_len); 13042c8cdd6eSMiao Xie if (ret) 13052c8cdd6eSMiao Xie goto cleanup; 13062c8cdd6eSMiao Xie } 13072c8cdd6eSMiao Xie } 13082c8cdd6eSMiao Xie 13092c8cdd6eSMiao Xie write_data: 1310b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); 1311b89e1b01SMiao Xie BUG_ON(atomic_read(&rbio->stripes_pending) == 0); 131253b381b3SDavid Woodhouse 131353b381b3SDavid Woodhouse while (1) { 131453b381b3SDavid Woodhouse bio = bio_list_pop(&bio_list); 131553b381b3SDavid Woodhouse if (!bio) 131653b381b3SDavid Woodhouse break; 131753b381b3SDavid Woodhouse 131853b381b3SDavid Woodhouse bio->bi_private = rbio; 131953b381b3SDavid Woodhouse bio->bi_end_io = raid_write_end_io; 132037226b21SMike Christie bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 13214e49ea4aSMike Christie 13224e49ea4aSMike Christie submit_bio(bio); 132353b381b3SDavid Woodhouse } 132453b381b3SDavid Woodhouse return; 132553b381b3SDavid Woodhouse 132653b381b3SDavid Woodhouse cleanup: 13274246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, -EIO); 132853b381b3SDavid Woodhouse } 132953b381b3SDavid Woodhouse 133053b381b3SDavid Woodhouse /* 133153b381b3SDavid Woodhouse * helper to find the stripe number for a given bio. Used to figure out which 133253b381b3SDavid Woodhouse * stripe has failed. This expects the bio to correspond to a physical disk, 133353b381b3SDavid Woodhouse * so it looks up based on physical sector numbers. 
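 *
 * bi_sector counts 512 byte sectors, so it is shifted left by 9 below
 * to get a byte offset.  A stripe matches when the bio points at the
 * same block device and that byte offset falls inside
 * [stripe->physical, stripe->physical + stripe_len).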
133453b381b3SDavid Woodhouse */ 133553b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio, 133653b381b3SDavid Woodhouse struct bio *bio) 133753b381b3SDavid Woodhouse { 13384f024f37SKent Overstreet u64 physical = bio->bi_iter.bi_sector; 133953b381b3SDavid Woodhouse u64 stripe_start; 134053b381b3SDavid Woodhouse int i; 134153b381b3SDavid Woodhouse struct btrfs_bio_stripe *stripe; 134253b381b3SDavid Woodhouse 134353b381b3SDavid Woodhouse physical <<= 9; 134453b381b3SDavid Woodhouse 134553b381b3SDavid Woodhouse for (i = 0; i < rbio->bbio->num_stripes; i++) { 134653b381b3SDavid Woodhouse stripe = &rbio->bbio->stripes[i]; 134753b381b3SDavid Woodhouse stripe_start = stripe->physical; 134853b381b3SDavid Woodhouse if (physical >= stripe_start && 13492c8cdd6eSMiao Xie physical < stripe_start + rbio->stripe_len && 13502c8cdd6eSMiao Xie bio->bi_bdev == stripe->dev->bdev) { 135153b381b3SDavid Woodhouse return i; 135253b381b3SDavid Woodhouse } 135353b381b3SDavid Woodhouse } 135453b381b3SDavid Woodhouse return -1; 135553b381b3SDavid Woodhouse } 135653b381b3SDavid Woodhouse 135753b381b3SDavid Woodhouse /* 135853b381b3SDavid Woodhouse * helper to find the stripe number for a given 135953b381b3SDavid Woodhouse * bio (before mapping). Used to figure out which stripe has 136053b381b3SDavid Woodhouse * failed. This looks up based on logical block numbers. 136153b381b3SDavid Woodhouse */ 136253b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, 136353b381b3SDavid Woodhouse struct bio *bio) 136453b381b3SDavid Woodhouse { 13654f024f37SKent Overstreet u64 logical = bio->bi_iter.bi_sector; 136653b381b3SDavid Woodhouse u64 stripe_start; 136753b381b3SDavid Woodhouse int i; 136853b381b3SDavid Woodhouse 136953b381b3SDavid Woodhouse logical <<= 9; 137053b381b3SDavid Woodhouse 137153b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_data; i++) { 13728e5cfb55SZhao Lei stripe_start = rbio->bbio->raid_map[i]; 137353b381b3SDavid Woodhouse if (logical >= stripe_start && 137453b381b3SDavid Woodhouse logical < stripe_start + rbio->stripe_len) { 137553b381b3SDavid Woodhouse return i; 137653b381b3SDavid Woodhouse } 137753b381b3SDavid Woodhouse } 137853b381b3SDavid Woodhouse return -1; 137953b381b3SDavid Woodhouse } 138053b381b3SDavid Woodhouse 138153b381b3SDavid Woodhouse /* 138253b381b3SDavid Woodhouse * returns -EIO if we had too many failures 138353b381b3SDavid Woodhouse */ 138453b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) 138553b381b3SDavid Woodhouse { 138653b381b3SDavid Woodhouse unsigned long flags; 138753b381b3SDavid Woodhouse int ret = 0; 138853b381b3SDavid Woodhouse 138953b381b3SDavid Woodhouse spin_lock_irqsave(&rbio->bio_list_lock, flags); 139053b381b3SDavid Woodhouse 139153b381b3SDavid Woodhouse /* we already know this stripe is bad, move on */ 139253b381b3SDavid Woodhouse if (rbio->faila == failed || rbio->failb == failed) 139353b381b3SDavid Woodhouse goto out; 139453b381b3SDavid Woodhouse 139553b381b3SDavid Woodhouse if (rbio->faila == -1) { 139653b381b3SDavid Woodhouse /* first failure on this rbio */ 139753b381b3SDavid Woodhouse rbio->faila = failed; 1398b89e1b01SMiao Xie atomic_inc(&rbio->error); 139953b381b3SDavid Woodhouse } else if (rbio->failb == -1) { 140053b381b3SDavid Woodhouse /* second failure on this rbio */ 140153b381b3SDavid Woodhouse rbio->failb = failed; 1402b89e1b01SMiao Xie atomic_inc(&rbio->error); 140353b381b3SDavid Woodhouse } else { 140453b381b3SDavid Woodhouse ret = -EIO; 
140553b381b3SDavid Woodhouse } 140653b381b3SDavid Woodhouse out: 140753b381b3SDavid Woodhouse spin_unlock_irqrestore(&rbio->bio_list_lock, flags); 140853b381b3SDavid Woodhouse 140953b381b3SDavid Woodhouse return ret; 141053b381b3SDavid Woodhouse } 141153b381b3SDavid Woodhouse 141253b381b3SDavid Woodhouse /* 141353b381b3SDavid Woodhouse * helper to fail a stripe based on a physical disk 141453b381b3SDavid Woodhouse * bio. 141553b381b3SDavid Woodhouse */ 141653b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, 141753b381b3SDavid Woodhouse struct bio *bio) 141853b381b3SDavid Woodhouse { 141953b381b3SDavid Woodhouse int failed = find_bio_stripe(rbio, bio); 142053b381b3SDavid Woodhouse 142153b381b3SDavid Woodhouse if (failed < 0) 142253b381b3SDavid Woodhouse return -EIO; 142353b381b3SDavid Woodhouse 142453b381b3SDavid Woodhouse return fail_rbio_index(rbio, failed); 142553b381b3SDavid Woodhouse } 142653b381b3SDavid Woodhouse 142753b381b3SDavid Woodhouse /* 142853b381b3SDavid Woodhouse * this sets each page in the bio uptodate. It should only be used on private 142953b381b3SDavid Woodhouse * rbio pages, nothing that comes in from the higher layers 143053b381b3SDavid Woodhouse */ 143153b381b3SDavid Woodhouse static void set_bio_pages_uptodate(struct bio *bio) 143253b381b3SDavid Woodhouse { 1433*6592e58cSFilipe Manana struct bio_vec bvec; 1434*6592e58cSFilipe Manana struct bvec_iter iter; 143553b381b3SDavid Woodhouse 1436*6592e58cSFilipe Manana if (bio_flagged(bio, BIO_CLONED)) 1437*6592e58cSFilipe Manana bio->bi_iter = btrfs_io_bio(bio)->iter; 1438*6592e58cSFilipe Manana 1439*6592e58cSFilipe Manana bio_for_each_segment(bvec, bio, iter) 1440*6592e58cSFilipe Manana SetPageUptodate(bvec.bv_page); 144153b381b3SDavid Woodhouse } 144253b381b3SDavid Woodhouse 144353b381b3SDavid Woodhouse /* 144453b381b3SDavid Woodhouse * end io for the read phase of the rmw cycle. All the bios here are physical 144553b381b3SDavid Woodhouse * stripe bios we've read from the disk so we can recalculate the parity of the 144653b381b3SDavid Woodhouse * stripe. 
144753b381b3SDavid Woodhouse * 144853b381b3SDavid Woodhouse * This will usually kick off finish_rmw once all the bios are read in, but it 144953b381b3SDavid Woodhouse * may trigger parity reconstruction if we had any errors along the way 145053b381b3SDavid Woodhouse */ 14514246a0b6SChristoph Hellwig static void raid_rmw_end_io(struct bio *bio) 145253b381b3SDavid Woodhouse { 145353b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 145453b381b3SDavid Woodhouse 14554246a0b6SChristoph Hellwig if (bio->bi_error) 145653b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 145753b381b3SDavid Woodhouse else 145853b381b3SDavid Woodhouse set_bio_pages_uptodate(bio); 145953b381b3SDavid Woodhouse 146053b381b3SDavid Woodhouse bio_put(bio); 146153b381b3SDavid Woodhouse 1462b89e1b01SMiao Xie if (!atomic_dec_and_test(&rbio->stripes_pending)) 146353b381b3SDavid Woodhouse return; 146453b381b3SDavid Woodhouse 1465b89e1b01SMiao Xie if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 146653b381b3SDavid Woodhouse goto cleanup; 146753b381b3SDavid Woodhouse 146853b381b3SDavid Woodhouse /* 146953b381b3SDavid Woodhouse * this will normally call finish_rmw to start our write 147053b381b3SDavid Woodhouse * but if there are any failed stripes we'll reconstruct 147153b381b3SDavid Woodhouse * from parity first 147253b381b3SDavid Woodhouse */ 147353b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 147453b381b3SDavid Woodhouse return; 147553b381b3SDavid Woodhouse 147653b381b3SDavid Woodhouse cleanup: 147753b381b3SDavid Woodhouse 14784246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, -EIO); 147953b381b3SDavid Woodhouse } 148053b381b3SDavid Woodhouse 148153b381b3SDavid Woodhouse static void async_rmw_stripe(struct btrfs_raid_bio *rbio) 148253b381b3SDavid Woodhouse { 14830b246afaSJeff Mahoney btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL); 14840b246afaSJeff Mahoney btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); 148553b381b3SDavid Woodhouse } 148653b381b3SDavid Woodhouse 148753b381b3SDavid Woodhouse static void async_read_rebuild(struct btrfs_raid_bio *rbio) 148853b381b3SDavid Woodhouse { 14899e0af237SLiu Bo btrfs_init_work(&rbio->work, btrfs_rmw_helper, 14909e0af237SLiu Bo read_rebuild_work, NULL, NULL); 149153b381b3SDavid Woodhouse 14920b246afaSJeff Mahoney btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); 149353b381b3SDavid Woodhouse } 149453b381b3SDavid Woodhouse 149553b381b3SDavid Woodhouse /* 149653b381b3SDavid Woodhouse * the stripe must be locked by the caller. 
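 * The usual sequence is: allocate the private stripe pages, index the
 * pages already carried by the bio_list, queue reads for any data
 * pages that are still missing, and let raid_rmw_end_io kick off
 * finish_rmw (or parity reconstruction if a read failed).  The stripe
 * lock is held for the whole cycle.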
It will 149753b381b3SDavid Woodhouse * unlock after all the writes are done 149853b381b3SDavid Woodhouse */ 149953b381b3SDavid Woodhouse static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) 150053b381b3SDavid Woodhouse { 150153b381b3SDavid Woodhouse int bios_to_read = 0; 150253b381b3SDavid Woodhouse struct bio_list bio_list; 150353b381b3SDavid Woodhouse int ret; 150453b381b3SDavid Woodhouse int pagenr; 150553b381b3SDavid Woodhouse int stripe; 150653b381b3SDavid Woodhouse struct bio *bio; 150753b381b3SDavid Woodhouse 150853b381b3SDavid Woodhouse bio_list_init(&bio_list); 150953b381b3SDavid Woodhouse 151053b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 151153b381b3SDavid Woodhouse if (ret) 151253b381b3SDavid Woodhouse goto cleanup; 151353b381b3SDavid Woodhouse 151453b381b3SDavid Woodhouse index_rbio_pages(rbio); 151553b381b3SDavid Woodhouse 1516b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 151753b381b3SDavid Woodhouse /* 151853b381b3SDavid Woodhouse * build a list of bios to read all the missing parts of this 151953b381b3SDavid Woodhouse * stripe 152053b381b3SDavid Woodhouse */ 152153b381b3SDavid Woodhouse for (stripe = 0; stripe < rbio->nr_data; stripe++) { 1522915e2290SZhao Lei for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 152353b381b3SDavid Woodhouse struct page *page; 152453b381b3SDavid Woodhouse /* 152553b381b3SDavid Woodhouse * we want to find all the pages missing from 152653b381b3SDavid Woodhouse * the rbio and read them from the disk. If 152753b381b3SDavid Woodhouse * page_in_rbio finds a page in the bio list 152853b381b3SDavid Woodhouse * we don't need to read it off the stripe. 152953b381b3SDavid Woodhouse */ 153053b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 1); 153153b381b3SDavid Woodhouse if (page) 153253b381b3SDavid Woodhouse continue; 153353b381b3SDavid Woodhouse 153453b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 15354ae10b3aSChris Mason /* 15364ae10b3aSChris Mason * the bio cache may have handed us an uptodate 15374ae10b3aSChris Mason * page. If so, be happy and use it 15384ae10b3aSChris Mason */ 15394ae10b3aSChris Mason if (PageUptodate(page)) 15404ae10b3aSChris Mason continue; 15414ae10b3aSChris Mason 154253b381b3SDavid Woodhouse ret = rbio_add_io_page(rbio, &bio_list, page, 154353b381b3SDavid Woodhouse stripe, pagenr, rbio->stripe_len); 154453b381b3SDavid Woodhouse if (ret) 154553b381b3SDavid Woodhouse goto cleanup; 154653b381b3SDavid Woodhouse } 154753b381b3SDavid Woodhouse } 154853b381b3SDavid Woodhouse 154953b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 155053b381b3SDavid Woodhouse if (!bios_to_read) { 155153b381b3SDavid Woodhouse /* 155253b381b3SDavid Woodhouse * this can happen if others have merged with 155353b381b3SDavid Woodhouse * us, it means there is nothing left to read. 155453b381b3SDavid Woodhouse * But if there are missing devices it may not be 155553b381b3SDavid Woodhouse * safe to do the full stripe write yet. 155653b381b3SDavid Woodhouse */ 155753b381b3SDavid Woodhouse goto finish; 155853b381b3SDavid Woodhouse } 155953b381b3SDavid Woodhouse 156053b381b3SDavid Woodhouse /* 156153b381b3SDavid Woodhouse * the bbio may be freed once we submit the last bio. 
Make sure 156253b381b3SDavid Woodhouse * not to touch it after that 156353b381b3SDavid Woodhouse */ 1564b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bios_to_read); 156553b381b3SDavid Woodhouse while (1) { 156653b381b3SDavid Woodhouse bio = bio_list_pop(&bio_list); 156753b381b3SDavid Woodhouse if (!bio) 156853b381b3SDavid Woodhouse break; 156953b381b3SDavid Woodhouse 157053b381b3SDavid Woodhouse bio->bi_private = rbio; 157153b381b3SDavid Woodhouse bio->bi_end_io = raid_rmw_end_io; 157237226b21SMike Christie bio_set_op_attrs(bio, REQ_OP_READ, 0); 157353b381b3SDavid Woodhouse 15740b246afaSJeff Mahoney btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); 157553b381b3SDavid Woodhouse 15764e49ea4aSMike Christie submit_bio(bio); 157753b381b3SDavid Woodhouse } 157853b381b3SDavid Woodhouse /* the actual write will happen once the reads are done */ 157953b381b3SDavid Woodhouse return 0; 158053b381b3SDavid Woodhouse 158153b381b3SDavid Woodhouse cleanup: 15824246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, -EIO); 158353b381b3SDavid Woodhouse return -EIO; 158453b381b3SDavid Woodhouse 158553b381b3SDavid Woodhouse finish: 158653b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 158753b381b3SDavid Woodhouse return 0; 158853b381b3SDavid Woodhouse } 158953b381b3SDavid Woodhouse 159053b381b3SDavid Woodhouse /* 159153b381b3SDavid Woodhouse * if the upper layers pass in a full stripe, we thank them by only allocating 159253b381b3SDavid Woodhouse * enough pages to hold the parity, and sending it all down quickly. 159353b381b3SDavid Woodhouse */ 159453b381b3SDavid Woodhouse static int full_stripe_write(struct btrfs_raid_bio *rbio) 159553b381b3SDavid Woodhouse { 159653b381b3SDavid Woodhouse int ret; 159753b381b3SDavid Woodhouse 159853b381b3SDavid Woodhouse ret = alloc_rbio_parity_pages(rbio); 15993cd846d1SMiao Xie if (ret) { 16003cd846d1SMiao Xie __free_raid_bio(rbio); 160153b381b3SDavid Woodhouse return ret; 16023cd846d1SMiao Xie } 160353b381b3SDavid Woodhouse 160453b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 160553b381b3SDavid Woodhouse if (ret == 0) 160653b381b3SDavid Woodhouse finish_rmw(rbio); 160753b381b3SDavid Woodhouse return 0; 160853b381b3SDavid Woodhouse } 160953b381b3SDavid Woodhouse 161053b381b3SDavid Woodhouse /* 161153b381b3SDavid Woodhouse * partial stripe writes get handed over to async helpers. 161253b381b3SDavid Woodhouse * We're really hoping to merge a few more writes into this 161353b381b3SDavid Woodhouse * rbio before calculating new parity 161453b381b3SDavid Woodhouse */ 161553b381b3SDavid Woodhouse static int partial_stripe_write(struct btrfs_raid_bio *rbio) 161653b381b3SDavid Woodhouse { 161753b381b3SDavid Woodhouse int ret; 161853b381b3SDavid Woodhouse 161953b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 162053b381b3SDavid Woodhouse if (ret == 0) 162153b381b3SDavid Woodhouse async_rmw_stripe(rbio); 162253b381b3SDavid Woodhouse return 0; 162353b381b3SDavid Woodhouse } 162453b381b3SDavid Woodhouse 162553b381b3SDavid Woodhouse /* 162653b381b3SDavid Woodhouse * sometimes while we were reading from the drive to 162753b381b3SDavid Woodhouse * recalculate parity, enough new bios come into create 162853b381b3SDavid Woodhouse * a full stripe. 
So we do a check here to see if we can 162953b381b3SDavid Woodhouse * go directly to finish_rmw 163053b381b3SDavid Woodhouse */ 163153b381b3SDavid Woodhouse static int __raid56_parity_write(struct btrfs_raid_bio *rbio) 163253b381b3SDavid Woodhouse { 163353b381b3SDavid Woodhouse /* head off into rmw land if we don't have a full stripe */ 163453b381b3SDavid Woodhouse if (!rbio_is_full(rbio)) 163553b381b3SDavid Woodhouse return partial_stripe_write(rbio); 163653b381b3SDavid Woodhouse return full_stripe_write(rbio); 163753b381b3SDavid Woodhouse } 163853b381b3SDavid Woodhouse 163953b381b3SDavid Woodhouse /* 16406ac0f488SChris Mason * We use plugging call backs to collect full stripes. 16416ac0f488SChris Mason * Any time we get a partial stripe write while plugged 16426ac0f488SChris Mason * we collect it into a list. When the unplug comes down, 16436ac0f488SChris Mason * we sort the list by logical block number and merge 16446ac0f488SChris Mason * everything we can into the same rbios 16456ac0f488SChris Mason */ 16466ac0f488SChris Mason struct btrfs_plug_cb { 16476ac0f488SChris Mason struct blk_plug_cb cb; 16486ac0f488SChris Mason struct btrfs_fs_info *info; 16496ac0f488SChris Mason struct list_head rbio_list; 16506ac0f488SChris Mason struct btrfs_work work; 16516ac0f488SChris Mason }; 16526ac0f488SChris Mason 16536ac0f488SChris Mason /* 16546ac0f488SChris Mason * rbios on the plug list are sorted for easier merging. 16556ac0f488SChris Mason */ 16566ac0f488SChris Mason static int plug_cmp(void *priv, struct list_head *a, struct list_head *b) 16576ac0f488SChris Mason { 16586ac0f488SChris Mason struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio, 16596ac0f488SChris Mason plug_list); 16606ac0f488SChris Mason struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, 16616ac0f488SChris Mason plug_list); 16624f024f37SKent Overstreet u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; 16634f024f37SKent Overstreet u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; 16646ac0f488SChris Mason 16656ac0f488SChris Mason if (a_sector < b_sector) 16666ac0f488SChris Mason return -1; 16676ac0f488SChris Mason if (a_sector > b_sector) 16686ac0f488SChris Mason return 1; 16696ac0f488SChris Mason return 0; 16706ac0f488SChris Mason } 16716ac0f488SChris Mason 16726ac0f488SChris Mason static void run_plug(struct btrfs_plug_cb *plug) 16736ac0f488SChris Mason { 16746ac0f488SChris Mason struct btrfs_raid_bio *cur; 16756ac0f488SChris Mason struct btrfs_raid_bio *last = NULL; 16766ac0f488SChris Mason 16776ac0f488SChris Mason /* 16786ac0f488SChris Mason * sort our plug list then try to merge 16796ac0f488SChris Mason * everything we can in hopes of creating full 16806ac0f488SChris Mason * stripes. 
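 *
 * For each rbio on the sorted list: a full stripe is sent straight to
 * full_stripe_write(), an rbio that can merge with the previous one is
 * folded into it and freed, and whatever is left over goes down the
 * normal __raid56_parity_write() path.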
16816ac0f488SChris Mason */ 16826ac0f488SChris Mason list_sort(NULL, &plug->rbio_list, plug_cmp); 16836ac0f488SChris Mason while (!list_empty(&plug->rbio_list)) { 16846ac0f488SChris Mason cur = list_entry(plug->rbio_list.next, 16856ac0f488SChris Mason struct btrfs_raid_bio, plug_list); 16866ac0f488SChris Mason list_del_init(&cur->plug_list); 16876ac0f488SChris Mason 16886ac0f488SChris Mason if (rbio_is_full(cur)) { 16896ac0f488SChris Mason /* we have a full stripe, send it down */ 16906ac0f488SChris Mason full_stripe_write(cur); 16916ac0f488SChris Mason continue; 16926ac0f488SChris Mason } 16936ac0f488SChris Mason if (last) { 16946ac0f488SChris Mason if (rbio_can_merge(last, cur)) { 16956ac0f488SChris Mason merge_rbio(last, cur); 16966ac0f488SChris Mason __free_raid_bio(cur); 16976ac0f488SChris Mason continue; 16986ac0f488SChris Mason 16996ac0f488SChris Mason } 17006ac0f488SChris Mason __raid56_parity_write(last); 17016ac0f488SChris Mason } 17026ac0f488SChris Mason last = cur; 17036ac0f488SChris Mason } 17046ac0f488SChris Mason if (last) { 17056ac0f488SChris Mason __raid56_parity_write(last); 17066ac0f488SChris Mason } 17076ac0f488SChris Mason kfree(plug); 17086ac0f488SChris Mason } 17096ac0f488SChris Mason 17106ac0f488SChris Mason /* 17116ac0f488SChris Mason * if the unplug comes from schedule, we have to push the 17126ac0f488SChris Mason * work off to a helper thread 17136ac0f488SChris Mason */ 17146ac0f488SChris Mason static void unplug_work(struct btrfs_work *work) 17156ac0f488SChris Mason { 17166ac0f488SChris Mason struct btrfs_plug_cb *plug; 17176ac0f488SChris Mason plug = container_of(work, struct btrfs_plug_cb, work); 17186ac0f488SChris Mason run_plug(plug); 17196ac0f488SChris Mason } 17206ac0f488SChris Mason 17216ac0f488SChris Mason static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) 17226ac0f488SChris Mason { 17236ac0f488SChris Mason struct btrfs_plug_cb *plug; 17246ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 17256ac0f488SChris Mason 17266ac0f488SChris Mason if (from_schedule) { 17279e0af237SLiu Bo btrfs_init_work(&plug->work, btrfs_rmw_helper, 17289e0af237SLiu Bo unplug_work, NULL, NULL); 1729d05a33acSQu Wenruo btrfs_queue_work(plug->info->rmw_workers, 17306ac0f488SChris Mason &plug->work); 17316ac0f488SChris Mason return; 17326ac0f488SChris Mason } 17336ac0f488SChris Mason run_plug(plug); 17346ac0f488SChris Mason } 17356ac0f488SChris Mason 17366ac0f488SChris Mason /* 173753b381b3SDavid Woodhouse * our main entry point for writes from the rest of the FS. 
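 *
 * Full stripes are written out immediately.  Partial stripes are
 * either parked on the current plug's rbio_list so that run_plug() can
 * merge them later, or, when no plug is active, handed to the async
 * rmw path.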
173853b381b3SDavid Woodhouse */ 17392ff7e61eSJeff Mahoney int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio, 17408e5cfb55SZhao Lei struct btrfs_bio *bbio, u64 stripe_len) 174153b381b3SDavid Woodhouse { 174253b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 17436ac0f488SChris Mason struct btrfs_plug_cb *plug = NULL; 17446ac0f488SChris Mason struct blk_plug_cb *cb; 17454245215dSMiao Xie int ret; 174653b381b3SDavid Woodhouse 17472ff7e61eSJeff Mahoney rbio = alloc_rbio(fs_info, bbio, stripe_len); 1748af8e2d1dSMiao Xie if (IS_ERR(rbio)) { 17496e9606d2SZhao Lei btrfs_put_bbio(bbio); 175053b381b3SDavid Woodhouse return PTR_ERR(rbio); 1751af8e2d1dSMiao Xie } 175253b381b3SDavid Woodhouse bio_list_add(&rbio->bio_list, bio); 17534f024f37SKent Overstreet rbio->bio_list_bytes = bio->bi_iter.bi_size; 17541b94b556SMiao Xie rbio->operation = BTRFS_RBIO_WRITE; 17556ac0f488SChris Mason 17560b246afaSJeff Mahoney btrfs_bio_counter_inc_noblocked(fs_info); 17574245215dSMiao Xie rbio->generic_bio_cnt = 1; 17584245215dSMiao Xie 17596ac0f488SChris Mason /* 17606ac0f488SChris Mason * don't plug on full rbios, just get them out the door 17616ac0f488SChris Mason * as quickly as we can 17626ac0f488SChris Mason */ 17634245215dSMiao Xie if (rbio_is_full(rbio)) { 17644245215dSMiao Xie ret = full_stripe_write(rbio); 17654245215dSMiao Xie if (ret) 17660b246afaSJeff Mahoney btrfs_bio_counter_dec(fs_info); 17674245215dSMiao Xie return ret; 17684245215dSMiao Xie } 17696ac0f488SChris Mason 17700b246afaSJeff Mahoney cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug)); 17716ac0f488SChris Mason if (cb) { 17726ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 17736ac0f488SChris Mason if (!plug->info) { 17740b246afaSJeff Mahoney plug->info = fs_info; 17756ac0f488SChris Mason INIT_LIST_HEAD(&plug->rbio_list); 17766ac0f488SChris Mason } 17776ac0f488SChris Mason list_add_tail(&rbio->plug_list, &plug->rbio_list); 17784245215dSMiao Xie ret = 0; 17796ac0f488SChris Mason } else { 17804245215dSMiao Xie ret = __raid56_parity_write(rbio); 17814245215dSMiao Xie if (ret) 17820b246afaSJeff Mahoney btrfs_bio_counter_dec(fs_info); 178353b381b3SDavid Woodhouse } 17844245215dSMiao Xie return ret; 17856ac0f488SChris Mason } 178653b381b3SDavid Woodhouse 178753b381b3SDavid Woodhouse /* 178853b381b3SDavid Woodhouse * all parity reconstruction happens here. We've read in everything 178953b381b3SDavid Woodhouse * we can find from the drives and this does the heavy lifting of 179053b381b3SDavid Woodhouse * sorting the good from the bad. 
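 *
 * A rough map of the cases handled below (per page, simplified):
 *
 *	one data stripe lost (raid5, or raid6 taking the pstripe path,
 *	    including data + Q lost): copy P into the missing slot and
 *	    xor in the surviving data stripes
 *	raid6, data stripe + P lost:   raid6_datap_recov()
 *	raid6, two data stripes lost:  raid6_2data_recov()
 *	raid6, only P lost:            currently -EIO (see the TODO below)
 *	raid6, P + Q both lost:        -EIO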
179153b381b3SDavid Woodhouse */ 179253b381b3SDavid Woodhouse static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) 179353b381b3SDavid Woodhouse { 179453b381b3SDavid Woodhouse int pagenr, stripe; 179553b381b3SDavid Woodhouse void **pointers; 179653b381b3SDavid Woodhouse int faila = -1, failb = -1; 179753b381b3SDavid Woodhouse struct page *page; 179853b381b3SDavid Woodhouse int err; 179953b381b3SDavid Woodhouse int i; 180053b381b3SDavid Woodhouse 180131e818feSDavid Sterba pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); 180253b381b3SDavid Woodhouse if (!pointers) { 180353b381b3SDavid Woodhouse err = -ENOMEM; 180453b381b3SDavid Woodhouse goto cleanup_io; 180553b381b3SDavid Woodhouse } 180653b381b3SDavid Woodhouse 180753b381b3SDavid Woodhouse faila = rbio->faila; 180853b381b3SDavid Woodhouse failb = rbio->failb; 180953b381b3SDavid Woodhouse 1810b4ee1782SOmar Sandoval if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 1811b4ee1782SOmar Sandoval rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { 181253b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 181353b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 181453b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 181553b381b3SDavid Woodhouse } 181653b381b3SDavid Woodhouse 181753b381b3SDavid Woodhouse index_rbio_pages(rbio); 181853b381b3SDavid Woodhouse 1819915e2290SZhao Lei for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 18205a6ac9eaSMiao Xie /* 18215a6ac9eaSMiao Xie * Now we just use bitmap to mark the horizontal stripes in 18225a6ac9eaSMiao Xie * which we have data when doing parity scrub. 18235a6ac9eaSMiao Xie */ 18245a6ac9eaSMiao Xie if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && 18255a6ac9eaSMiao Xie !test_bit(pagenr, rbio->dbitmap)) 18265a6ac9eaSMiao Xie continue; 18275a6ac9eaSMiao Xie 182853b381b3SDavid Woodhouse /* setup our array of pointers with pages 182953b381b3SDavid Woodhouse * from each stripe 183053b381b3SDavid Woodhouse */ 18312c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 183253b381b3SDavid Woodhouse /* 183353b381b3SDavid Woodhouse * if we're rebuilding a read, we have to use 183453b381b3SDavid Woodhouse * pages from the bio list 183553b381b3SDavid Woodhouse */ 1836b4ee1782SOmar Sandoval if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || 1837b4ee1782SOmar Sandoval rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && 183853b381b3SDavid Woodhouse (stripe == faila || stripe == failb)) { 183953b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 0); 184053b381b3SDavid Woodhouse } else { 184153b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 184253b381b3SDavid Woodhouse } 184353b381b3SDavid Woodhouse pointers[stripe] = kmap(page); 184453b381b3SDavid Woodhouse } 184553b381b3SDavid Woodhouse 184653b381b3SDavid Woodhouse /* all raid6 handling here */ 184710f11900SZhao Lei if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { 184853b381b3SDavid Woodhouse /* 184953b381b3SDavid Woodhouse * single failure, rebuild from parity raid5 185053b381b3SDavid Woodhouse * style 185153b381b3SDavid Woodhouse */ 185253b381b3SDavid Woodhouse if (failb < 0) { 185353b381b3SDavid Woodhouse if (faila == rbio->nr_data) { 185453b381b3SDavid Woodhouse /* 185553b381b3SDavid Woodhouse * Just the P stripe has failed, without 185653b381b3SDavid Woodhouse * a bad data or Q stripe. 185753b381b3SDavid Woodhouse * TODO, we should redo the xor here. 
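 * (Redoing the xor would mean regenerating P from the surviving data
 * stripes here instead of returning an error.)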
185853b381b3SDavid Woodhouse */ 185953b381b3SDavid Woodhouse err = -EIO; 186053b381b3SDavid Woodhouse goto cleanup; 186153b381b3SDavid Woodhouse } 186253b381b3SDavid Woodhouse /* 186353b381b3SDavid Woodhouse * a single failure in raid6 is rebuilt 186453b381b3SDavid Woodhouse * in the pstripe code below 186553b381b3SDavid Woodhouse */ 186653b381b3SDavid Woodhouse goto pstripe; 186753b381b3SDavid Woodhouse } 186853b381b3SDavid Woodhouse 186953b381b3SDavid Woodhouse /* make sure our ps and qs are in order */ 187053b381b3SDavid Woodhouse if (faila > failb) { 187153b381b3SDavid Woodhouse int tmp = failb; 187253b381b3SDavid Woodhouse failb = faila; 187353b381b3SDavid Woodhouse faila = tmp; 187453b381b3SDavid Woodhouse } 187553b381b3SDavid Woodhouse 187653b381b3SDavid Woodhouse /* if the q stripe is failed, do a pstripe reconstruction 187753b381b3SDavid Woodhouse * from the xors. 187853b381b3SDavid Woodhouse * If both the q stripe and the P stripe are failed, we're 187953b381b3SDavid Woodhouse * here due to a crc mismatch and we can't give them the 188053b381b3SDavid Woodhouse * data they want 188153b381b3SDavid Woodhouse */ 18828e5cfb55SZhao Lei if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { 18838e5cfb55SZhao Lei if (rbio->bbio->raid_map[faila] == 18848e5cfb55SZhao Lei RAID5_P_STRIPE) { 188553b381b3SDavid Woodhouse err = -EIO; 188653b381b3SDavid Woodhouse goto cleanup; 188753b381b3SDavid Woodhouse } 188853b381b3SDavid Woodhouse /* 188953b381b3SDavid Woodhouse * otherwise we have one bad data stripe and 189053b381b3SDavid Woodhouse * a good P stripe. raid5! 189153b381b3SDavid Woodhouse */ 189253b381b3SDavid Woodhouse goto pstripe; 189353b381b3SDavid Woodhouse } 189453b381b3SDavid Woodhouse 18958e5cfb55SZhao Lei if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { 18962c8cdd6eSMiao Xie raid6_datap_recov(rbio->real_stripes, 189753b381b3SDavid Woodhouse PAGE_SIZE, faila, pointers); 189853b381b3SDavid Woodhouse } else { 18992c8cdd6eSMiao Xie raid6_2data_recov(rbio->real_stripes, 190053b381b3SDavid Woodhouse PAGE_SIZE, faila, failb, 190153b381b3SDavid Woodhouse pointers); 190253b381b3SDavid Woodhouse } 190353b381b3SDavid Woodhouse } else { 190453b381b3SDavid Woodhouse void *p; 190553b381b3SDavid Woodhouse 190653b381b3SDavid Woodhouse /* rebuild from P stripe here (raid5 or raid6) */ 190753b381b3SDavid Woodhouse BUG_ON(failb != -1); 190853b381b3SDavid Woodhouse pstripe: 190953b381b3SDavid Woodhouse /* Copy parity block into failed block to start with */ 191053b381b3SDavid Woodhouse memcpy(pointers[faila], 191153b381b3SDavid Woodhouse pointers[rbio->nr_data], 191209cbfeafSKirill A. Shutemov PAGE_SIZE); 191353b381b3SDavid Woodhouse 191453b381b3SDavid Woodhouse /* rearrange the pointer array */ 191553b381b3SDavid Woodhouse p = pointers[faila]; 191653b381b3SDavid Woodhouse for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) 191753b381b3SDavid Woodhouse pointers[stripe] = pointers[stripe + 1]; 191853b381b3SDavid Woodhouse pointers[rbio->nr_data - 1] = p; 191953b381b3SDavid Woodhouse 192053b381b3SDavid Woodhouse /* xor in the rest */ 192109cbfeafSKirill A. Shutemov run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE); 192253b381b3SDavid Woodhouse } 192353b381b3SDavid Woodhouse /* if we're doing this rebuild as part of an rmw, go through 192453b381b3SDavid Woodhouse * and set all of our private rbio pages in the 192553b381b3SDavid Woodhouse * failed stripes as uptodate. This way finish_rmw will 192653b381b3SDavid Woodhouse * know they can be trusted. 
If this was a read reconstruction, 192753b381b3SDavid Woodhouse * other endio functions will fiddle the uptodate bits 192853b381b3SDavid Woodhouse */ 19291b94b556SMiao Xie if (rbio->operation == BTRFS_RBIO_WRITE) { 1930915e2290SZhao Lei for (i = 0; i < rbio->stripe_npages; i++) { 193153b381b3SDavid Woodhouse if (faila != -1) { 193253b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, faila, i); 193353b381b3SDavid Woodhouse SetPageUptodate(page); 193453b381b3SDavid Woodhouse } 193553b381b3SDavid Woodhouse if (failb != -1) { 193653b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, failb, i); 193753b381b3SDavid Woodhouse SetPageUptodate(page); 193853b381b3SDavid Woodhouse } 193953b381b3SDavid Woodhouse } 194053b381b3SDavid Woodhouse } 19412c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 194253b381b3SDavid Woodhouse /* 194353b381b3SDavid Woodhouse * if we're rebuilding a read, we have to use 194453b381b3SDavid Woodhouse * pages from the bio list 194553b381b3SDavid Woodhouse */ 1946b4ee1782SOmar Sandoval if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || 1947b4ee1782SOmar Sandoval rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && 194853b381b3SDavid Woodhouse (stripe == faila || stripe == failb)) { 194953b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 0); 195053b381b3SDavid Woodhouse } else { 195153b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 195253b381b3SDavid Woodhouse } 195353b381b3SDavid Woodhouse kunmap(page); 195453b381b3SDavid Woodhouse } 195553b381b3SDavid Woodhouse } 195653b381b3SDavid Woodhouse 195753b381b3SDavid Woodhouse err = 0; 195853b381b3SDavid Woodhouse cleanup: 195953b381b3SDavid Woodhouse kfree(pointers); 196053b381b3SDavid Woodhouse 196153b381b3SDavid Woodhouse cleanup_io: 19621b94b556SMiao Xie if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { 19636e9606d2SZhao Lei if (err == 0) 19644ae10b3aSChris Mason cache_rbio_pages(rbio); 19654ae10b3aSChris Mason else 19664ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 19674ae10b3aSChris Mason 19684246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, err); 1969b4ee1782SOmar Sandoval } else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { 197022365979SLinus Torvalds rbio_orig_end_io(rbio, err); 197153b381b3SDavid Woodhouse } else if (err == 0) { 197253b381b3SDavid Woodhouse rbio->faila = -1; 197353b381b3SDavid Woodhouse rbio->failb = -1; 19745a6ac9eaSMiao Xie 19755a6ac9eaSMiao Xie if (rbio->operation == BTRFS_RBIO_WRITE) 197653b381b3SDavid Woodhouse finish_rmw(rbio); 19775a6ac9eaSMiao Xie else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) 19785a6ac9eaSMiao Xie finish_parity_scrub(rbio, 0); 19795a6ac9eaSMiao Xie else 19805a6ac9eaSMiao Xie BUG(); 198153b381b3SDavid Woodhouse } else { 19824246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, err); 198353b381b3SDavid Woodhouse } 198453b381b3SDavid Woodhouse } 198553b381b3SDavid Woodhouse 198653b381b3SDavid Woodhouse /* 198753b381b3SDavid Woodhouse * This is called only for stripes we've read from disk to 198853b381b3SDavid Woodhouse * reconstruct the parity. 
198953b381b3SDavid Woodhouse */ 19904246a0b6SChristoph Hellwig static void raid_recover_end_io(struct bio *bio) 199153b381b3SDavid Woodhouse { 199253b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 199353b381b3SDavid Woodhouse 199453b381b3SDavid Woodhouse /* 199553b381b3SDavid Woodhouse * we only read stripe pages off the disk, set them 199653b381b3SDavid Woodhouse * up to date if there were no errors 199753b381b3SDavid Woodhouse */ 19984246a0b6SChristoph Hellwig if (bio->bi_error) 199953b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 200053b381b3SDavid Woodhouse else 200153b381b3SDavid Woodhouse set_bio_pages_uptodate(bio); 200253b381b3SDavid Woodhouse bio_put(bio); 200353b381b3SDavid Woodhouse 2004b89e1b01SMiao Xie if (!atomic_dec_and_test(&rbio->stripes_pending)) 200553b381b3SDavid Woodhouse return; 200653b381b3SDavid Woodhouse 2007b89e1b01SMiao Xie if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 20084246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, -EIO); 200953b381b3SDavid Woodhouse else 201053b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 201153b381b3SDavid Woodhouse } 201253b381b3SDavid Woodhouse 201353b381b3SDavid Woodhouse /* 201453b381b3SDavid Woodhouse * reads everything we need off the disk to reconstruct 201553b381b3SDavid Woodhouse * the parity. endio handlers trigger final reconstruction 201653b381b3SDavid Woodhouse * when the IO is done. 201753b381b3SDavid Woodhouse * 201853b381b3SDavid Woodhouse * This is used both for reads from the higher layers and for 201953b381b3SDavid Woodhouse * parity construction required to finish a rmw cycle. 202053b381b3SDavid Woodhouse */ 202153b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) 202253b381b3SDavid Woodhouse { 202353b381b3SDavid Woodhouse int bios_to_read = 0; 202453b381b3SDavid Woodhouse struct bio_list bio_list; 202553b381b3SDavid Woodhouse int ret; 202653b381b3SDavid Woodhouse int pagenr; 202753b381b3SDavid Woodhouse int stripe; 202853b381b3SDavid Woodhouse struct bio *bio; 202953b381b3SDavid Woodhouse 203053b381b3SDavid Woodhouse bio_list_init(&bio_list); 203153b381b3SDavid Woodhouse 203253b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 203353b381b3SDavid Woodhouse if (ret) 203453b381b3SDavid Woodhouse goto cleanup; 203553b381b3SDavid Woodhouse 2036b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 203753b381b3SDavid Woodhouse 203853b381b3SDavid Woodhouse /* 20394ae10b3aSChris Mason * read everything that hasn't failed. Thanks to the 20404ae10b3aSChris Mason * stripe cache, it is possible that some or all of these 20414ae10b3aSChris Mason * pages are going to be uptodate. 
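 *
 * Stripes already marked as failed are skipped (and counted against
 * rbio->error up front), and pages that are already PageUptodate are
 * skipped as well, so in the best case no read is issued at all and
 * reconstruction can run immediately.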
204253b381b3SDavid Woodhouse */ 20432c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 20445588383eSLiu Bo if (rbio->faila == stripe || rbio->failb == stripe) { 2045b89e1b01SMiao Xie atomic_inc(&rbio->error); 204653b381b3SDavid Woodhouse continue; 20475588383eSLiu Bo } 204853b381b3SDavid Woodhouse 2049915e2290SZhao Lei for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { 205053b381b3SDavid Woodhouse struct page *p; 205153b381b3SDavid Woodhouse 205253b381b3SDavid Woodhouse /* 205353b381b3SDavid Woodhouse * the rmw code may have already read this 205453b381b3SDavid Woodhouse * page in 205553b381b3SDavid Woodhouse */ 205653b381b3SDavid Woodhouse p = rbio_stripe_page(rbio, stripe, pagenr); 205753b381b3SDavid Woodhouse if (PageUptodate(p)) 205853b381b3SDavid Woodhouse continue; 205953b381b3SDavid Woodhouse 206053b381b3SDavid Woodhouse ret = rbio_add_io_page(rbio, &bio_list, 206153b381b3SDavid Woodhouse rbio_stripe_page(rbio, stripe, pagenr), 206253b381b3SDavid Woodhouse stripe, pagenr, rbio->stripe_len); 206353b381b3SDavid Woodhouse if (ret < 0) 206453b381b3SDavid Woodhouse goto cleanup; 206553b381b3SDavid Woodhouse } 206653b381b3SDavid Woodhouse } 206753b381b3SDavid Woodhouse 206853b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 206953b381b3SDavid Woodhouse if (!bios_to_read) { 207053b381b3SDavid Woodhouse /* 207153b381b3SDavid Woodhouse * we might have no bios to read just because the pages 207253b381b3SDavid Woodhouse * were up to date, or we might have no bios to read because 207353b381b3SDavid Woodhouse * the devices were gone. 207453b381b3SDavid Woodhouse */ 2075b89e1b01SMiao Xie if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { 207653b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 207753b381b3SDavid Woodhouse goto out; 207853b381b3SDavid Woodhouse } else { 207953b381b3SDavid Woodhouse goto cleanup; 208053b381b3SDavid Woodhouse } 208153b381b3SDavid Woodhouse } 208253b381b3SDavid Woodhouse 208353b381b3SDavid Woodhouse /* 208453b381b3SDavid Woodhouse * the bbio may be freed once we submit the last bio. Make sure 208553b381b3SDavid Woodhouse * not to touch it after that 208653b381b3SDavid Woodhouse */ 2087b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bios_to_read); 208853b381b3SDavid Woodhouse while (1) { 208953b381b3SDavid Woodhouse bio = bio_list_pop(&bio_list); 209053b381b3SDavid Woodhouse if (!bio) 209153b381b3SDavid Woodhouse break; 209253b381b3SDavid Woodhouse 209353b381b3SDavid Woodhouse bio->bi_private = rbio; 209453b381b3SDavid Woodhouse bio->bi_end_io = raid_recover_end_io; 209537226b21SMike Christie bio_set_op_attrs(bio, REQ_OP_READ, 0); 209653b381b3SDavid Woodhouse 20970b246afaSJeff Mahoney btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); 209853b381b3SDavid Woodhouse 20994e49ea4aSMike Christie submit_bio(bio); 210053b381b3SDavid Woodhouse } 210153b381b3SDavid Woodhouse out: 210253b381b3SDavid Woodhouse return 0; 210353b381b3SDavid Woodhouse 210453b381b3SDavid Woodhouse cleanup: 2105b4ee1782SOmar Sandoval if (rbio->operation == BTRFS_RBIO_READ_REBUILD || 2106b4ee1782SOmar Sandoval rbio->operation == BTRFS_RBIO_REBUILD_MISSING) 21074246a0b6SChristoph Hellwig rbio_orig_end_io(rbio, -EIO); 210853b381b3SDavid Woodhouse return -EIO; 210953b381b3SDavid Woodhouse } 211053b381b3SDavid Woodhouse 211153b381b3SDavid Woodhouse /* 211253b381b3SDavid Woodhouse * the main entry point for reads from the higher layers. 
This 211353b381b3SDavid Woodhouse * is really only called when the normal read path had a failure, 211453b381b3SDavid Woodhouse * so we assume the bio they send down corresponds to a failed part 211553b381b3SDavid Woodhouse * of the drive. 211653b381b3SDavid Woodhouse */ 21172ff7e61eSJeff Mahoney int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio, 21188e5cfb55SZhao Lei struct btrfs_bio *bbio, u64 stripe_len, 21198e5cfb55SZhao Lei int mirror_num, int generic_io) 212053b381b3SDavid Woodhouse { 212153b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 212253b381b3SDavid Woodhouse int ret; 212353b381b3SDavid Woodhouse 2124abad60c6SLiu Bo if (generic_io) { 2125abad60c6SLiu Bo ASSERT(bbio->mirror_num == mirror_num); 2126abad60c6SLiu Bo btrfs_io_bio(bio)->mirror_num = mirror_num; 2127abad60c6SLiu Bo } 2128abad60c6SLiu Bo 21292ff7e61eSJeff Mahoney rbio = alloc_rbio(fs_info, bbio, stripe_len); 2130af8e2d1dSMiao Xie if (IS_ERR(rbio)) { 21316e9606d2SZhao Lei if (generic_io) 21326e9606d2SZhao Lei btrfs_put_bbio(bbio); 213353b381b3SDavid Woodhouse return PTR_ERR(rbio); 2134af8e2d1dSMiao Xie } 213553b381b3SDavid Woodhouse 21361b94b556SMiao Xie rbio->operation = BTRFS_RBIO_READ_REBUILD; 213753b381b3SDavid Woodhouse bio_list_add(&rbio->bio_list, bio); 21384f024f37SKent Overstreet rbio->bio_list_bytes = bio->bi_iter.bi_size; 213953b381b3SDavid Woodhouse 214053b381b3SDavid Woodhouse rbio->faila = find_logical_bio_stripe(rbio, bio); 214153b381b3SDavid Woodhouse if (rbio->faila == -1) { 21420b246afaSJeff Mahoney btrfs_warn(fs_info, 2143e46a28caSLiu Bo "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)", 2144e46a28caSLiu Bo __func__, (u64)bio->bi_iter.bi_sector << 9, 2145e46a28caSLiu Bo (u64)bio->bi_iter.bi_size, bbio->map_type); 21466e9606d2SZhao Lei if (generic_io) 21476e9606d2SZhao Lei btrfs_put_bbio(bbio); 214853b381b3SDavid Woodhouse kfree(rbio); 214953b381b3SDavid Woodhouse return -EIO; 215053b381b3SDavid Woodhouse } 215153b381b3SDavid Woodhouse 21524245215dSMiao Xie if (generic_io) { 21530b246afaSJeff Mahoney btrfs_bio_counter_inc_noblocked(fs_info); 21544245215dSMiao Xie rbio->generic_bio_cnt = 1; 21554245215dSMiao Xie } else { 21566e9606d2SZhao Lei btrfs_get_bbio(bbio); 21574245215dSMiao Xie } 21584245215dSMiao Xie 215953b381b3SDavid Woodhouse /* 216053b381b3SDavid Woodhouse * reconstruct from the q stripe if they are 216153b381b3SDavid Woodhouse * asking for mirror 3 216253b381b3SDavid Woodhouse */ 216353b381b3SDavid Woodhouse if (mirror_num == 3) 21642c8cdd6eSMiao Xie rbio->failb = rbio->real_stripes - 2; 216553b381b3SDavid Woodhouse 216653b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 216753b381b3SDavid Woodhouse 216853b381b3SDavid Woodhouse /* 216953b381b3SDavid Woodhouse * __raid56_parity_recover will end the bio with 217053b381b3SDavid Woodhouse * any errors it hits. 
We don't want to return 217153b381b3SDavid Woodhouse * its error value up the stack because our caller 217253b381b3SDavid Woodhouse * will end up calling bio_endio with any nonzero 217353b381b3SDavid Woodhouse * return 217453b381b3SDavid Woodhouse */ 217553b381b3SDavid Woodhouse if (ret == 0) 217653b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 217753b381b3SDavid Woodhouse /* 217853b381b3SDavid Woodhouse * our rbio has been added to the list of 217953b381b3SDavid Woodhouse * rbios that will be handled after the 218053b381b3SDavid Woodhouse * currently lock owner is done 218153b381b3SDavid Woodhouse */ 218253b381b3SDavid Woodhouse return 0; 218353b381b3SDavid Woodhouse 218453b381b3SDavid Woodhouse } 218553b381b3SDavid Woodhouse 218653b381b3SDavid Woodhouse static void rmw_work(struct btrfs_work *work) 218753b381b3SDavid Woodhouse { 218853b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 218953b381b3SDavid Woodhouse 219053b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 219153b381b3SDavid Woodhouse raid56_rmw_stripe(rbio); 219253b381b3SDavid Woodhouse } 219353b381b3SDavid Woodhouse 219453b381b3SDavid Woodhouse static void read_rebuild_work(struct btrfs_work *work) 219553b381b3SDavid Woodhouse { 219653b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 219753b381b3SDavid Woodhouse 219853b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 219953b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 220053b381b3SDavid Woodhouse } 22015a6ac9eaSMiao Xie 22025a6ac9eaSMiao Xie /* 22035a6ac9eaSMiao Xie * The following code is used to scrub/replace the parity stripe 22045a6ac9eaSMiao Xie * 2205ae6529c3SQu Wenruo * Caller must have already increased bio_counter for getting @bbio. 2206ae6529c3SQu Wenruo * 22075a6ac9eaSMiao Xie * Note: We need make sure all the pages that add into the scrub/replace 22085a6ac9eaSMiao Xie * raid bio are correct and not be changed during the scrub/replace. That 22095a6ac9eaSMiao Xie * is those pages just hold metadata or file data with checksum. 
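 *
 * The expected calling pattern is roughly: allocate the rbio with
 * raid56_parity_alloc_scrub_rbio(), attach each already-verified data
 * page with raid56_add_scrub_pages(), and then submit the rbio so the
 * parity can be recomputed and compared against what is on disk.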
22105a6ac9eaSMiao Xie */ 22115a6ac9eaSMiao Xie 22125a6ac9eaSMiao Xie struct btrfs_raid_bio * 22132ff7e61eSJeff Mahoney raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio, 22148e5cfb55SZhao Lei struct btrfs_bio *bbio, u64 stripe_len, 22158e5cfb55SZhao Lei struct btrfs_device *scrub_dev, 22165a6ac9eaSMiao Xie unsigned long *dbitmap, int stripe_nsectors) 22175a6ac9eaSMiao Xie { 22185a6ac9eaSMiao Xie struct btrfs_raid_bio *rbio; 22195a6ac9eaSMiao Xie int i; 22205a6ac9eaSMiao Xie 22212ff7e61eSJeff Mahoney rbio = alloc_rbio(fs_info, bbio, stripe_len); 22225a6ac9eaSMiao Xie if (IS_ERR(rbio)) 22235a6ac9eaSMiao Xie return NULL; 22245a6ac9eaSMiao Xie bio_list_add(&rbio->bio_list, bio); 22255a6ac9eaSMiao Xie /* 22265a6ac9eaSMiao Xie * This is a special bio which is used to hold the completion handler 22275a6ac9eaSMiao Xie * and make the scrub rbio is similar to the other types 22285a6ac9eaSMiao Xie */ 22295a6ac9eaSMiao Xie ASSERT(!bio->bi_iter.bi_size); 22305a6ac9eaSMiao Xie rbio->operation = BTRFS_RBIO_PARITY_SCRUB; 22315a6ac9eaSMiao Xie 22322c8cdd6eSMiao Xie for (i = 0; i < rbio->real_stripes; i++) { 22335a6ac9eaSMiao Xie if (bbio->stripes[i].dev == scrub_dev) { 22345a6ac9eaSMiao Xie rbio->scrubp = i; 22355a6ac9eaSMiao Xie break; 22365a6ac9eaSMiao Xie } 22375a6ac9eaSMiao Xie } 22385a6ac9eaSMiao Xie 22395a6ac9eaSMiao Xie /* Now we just support the sectorsize equals to page size */ 22400b246afaSJeff Mahoney ASSERT(fs_info->sectorsize == PAGE_SIZE); 22415a6ac9eaSMiao Xie ASSERT(rbio->stripe_npages == stripe_nsectors); 22425a6ac9eaSMiao Xie bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); 22435a6ac9eaSMiao Xie 2244ae6529c3SQu Wenruo /* 2245ae6529c3SQu Wenruo * We have already increased bio_counter when getting bbio, record it 2246ae6529c3SQu Wenruo * so we can free it at rbio_orig_end_io(). 2247ae6529c3SQu Wenruo */ 2248ae6529c3SQu Wenruo rbio->generic_bio_cnt = 1; 2249ae6529c3SQu Wenruo 22505a6ac9eaSMiao Xie return rbio; 22515a6ac9eaSMiao Xie } 22525a6ac9eaSMiao Xie 2253b4ee1782SOmar Sandoval /* Used for both parity scrub and missing. */ 2254b4ee1782SOmar Sandoval void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, 2255b4ee1782SOmar Sandoval u64 logical) 22565a6ac9eaSMiao Xie { 22575a6ac9eaSMiao Xie int stripe_offset; 22585a6ac9eaSMiao Xie int index; 22595a6ac9eaSMiao Xie 22608e5cfb55SZhao Lei ASSERT(logical >= rbio->bbio->raid_map[0]); 22618e5cfb55SZhao Lei ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + 22625a6ac9eaSMiao Xie rbio->stripe_len * rbio->nr_data); 22638e5cfb55SZhao Lei stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); 226409cbfeafSKirill A. Shutemov index = stripe_offset >> PAGE_SHIFT; 22655a6ac9eaSMiao Xie rbio->bio_pages[index] = page; 22665a6ac9eaSMiao Xie } 22675a6ac9eaSMiao Xie 22685a6ac9eaSMiao Xie /* 22695a6ac9eaSMiao Xie * We just scrub the parity that we have correct data on the same horizontal, 22705a6ac9eaSMiao Xie * so we needn't allocate all pages for all the stripes. 
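 *
 * stripe_pages is a flat array, so the page for sector 'bit' of stripe
 * 'i' lives at index i * rbio->stripe_npages + bit; only the sectors
 * set in dbitmap get a page allocated here.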
22715a6ac9eaSMiao Xie */ 22725a6ac9eaSMiao Xie static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) 22735a6ac9eaSMiao Xie { 22745a6ac9eaSMiao Xie int i; 22755a6ac9eaSMiao Xie int bit; 22765a6ac9eaSMiao Xie int index; 22775a6ac9eaSMiao Xie struct page *page; 22785a6ac9eaSMiao Xie 22795a6ac9eaSMiao Xie for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { 22802c8cdd6eSMiao Xie for (i = 0; i < rbio->real_stripes; i++) { 22815a6ac9eaSMiao Xie index = i * rbio->stripe_npages + bit; 22825a6ac9eaSMiao Xie if (rbio->stripe_pages[index]) 22835a6ac9eaSMiao Xie continue; 22845a6ac9eaSMiao Xie 22855a6ac9eaSMiao Xie page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 22865a6ac9eaSMiao Xie if (!page) 22875a6ac9eaSMiao Xie return -ENOMEM; 22885a6ac9eaSMiao Xie rbio->stripe_pages[index] = page; 22895a6ac9eaSMiao Xie } 22905a6ac9eaSMiao Xie } 22915a6ac9eaSMiao Xie return 0; 22925a6ac9eaSMiao Xie } 22935a6ac9eaSMiao Xie 22945a6ac9eaSMiao Xie static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, 22955a6ac9eaSMiao Xie int need_check) 22965a6ac9eaSMiao Xie { 229776035976SMiao Xie struct btrfs_bio *bbio = rbio->bbio; 22982c8cdd6eSMiao Xie void *pointers[rbio->real_stripes]; 229976035976SMiao Xie DECLARE_BITMAP(pbitmap, rbio->stripe_npages); 23005a6ac9eaSMiao Xie int nr_data = rbio->nr_data; 23015a6ac9eaSMiao Xie int stripe; 23025a6ac9eaSMiao Xie int pagenr; 23035a6ac9eaSMiao Xie int p_stripe = -1; 23045a6ac9eaSMiao Xie int q_stripe = -1; 23055a6ac9eaSMiao Xie struct page *p_page = NULL; 23065a6ac9eaSMiao Xie struct page *q_page = NULL; 23075a6ac9eaSMiao Xie struct bio_list bio_list; 23085a6ac9eaSMiao Xie struct bio *bio; 230976035976SMiao Xie int is_replace = 0; 23105a6ac9eaSMiao Xie int ret; 23115a6ac9eaSMiao Xie 23125a6ac9eaSMiao Xie bio_list_init(&bio_list); 23135a6ac9eaSMiao Xie 23142c8cdd6eSMiao Xie if (rbio->real_stripes - rbio->nr_data == 1) { 23152c8cdd6eSMiao Xie p_stripe = rbio->real_stripes - 1; 23162c8cdd6eSMiao Xie } else if (rbio->real_stripes - rbio->nr_data == 2) { 23172c8cdd6eSMiao Xie p_stripe = rbio->real_stripes - 2; 23182c8cdd6eSMiao Xie q_stripe = rbio->real_stripes - 1; 23195a6ac9eaSMiao Xie } else { 23205a6ac9eaSMiao Xie BUG(); 23215a6ac9eaSMiao Xie } 23225a6ac9eaSMiao Xie 232376035976SMiao Xie if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { 232476035976SMiao Xie is_replace = 1; 232576035976SMiao Xie bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages); 232676035976SMiao Xie } 232776035976SMiao Xie 23285a6ac9eaSMiao Xie /* 23295a6ac9eaSMiao Xie * Because the higher layers(scrubber) are unlikely to 23305a6ac9eaSMiao Xie * use this area of the disk again soon, so don't cache 23315a6ac9eaSMiao Xie * it. 
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * RAID6: add the qstripe and call the library
			 * function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* RAID5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* compare the freshly computed parity with the scrubbed one */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* the parity is correct, no need to write it back */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for the parity pages we are
	 * scrubbing (and, for dev-replace, for their copies on the
	 * replacement target).  The data pages are left alone.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* every parity page was already correct, nothing to write */
		rbio_orig_end_io(rbio, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
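/*
 * Illustrative sketch of the RAID5 check done in finish_parity_scrub()
 * above (hypothetical helper, not used by the driver): the expected parity
 * for one horizontal position is the byte-wise XOR of the same page in every
 * data stripe.  finish_parity_scrub() builds it with memcpy() + run_xor()
 * (or raid6_call.gen_syndrome() for RAID6) into a scratch page and only
 * queues a write when the result differs from what is on disk.
 */
static inline bool scrub_raid5_parity_matches_sketch(void **data, int nr_data,
						     const u8 *on_disk_parity,
						     size_t len)
{
	u8 expected[64];	/* a real check would cover a whole page */
	int d;
	size_t i;

	if (len > sizeof(expected))
		len = sizeof(expected);

	/* start with the first data stripe, then XOR in the rest */
	memcpy(expected, data[0], len);
	for (d = 1; d < nr_data; d++)
		for (i = 0; i < len; i++)
			expected[i] ^= ((const u8 *)data[d])[i];

	return memcmp(expected, on_disk_parity, len) == 0;
}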
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes.
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * We cannot use the parity stripe we are scrubbing to repair
		 * data, so our tolerance for failed data stripes drops by
		 * one.  (For RAID5 that means we cannot repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is wrong, just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one being
		 * scrubbed, we can still use the other parity to repair the
		 * data; otherwise the data stripe cannot be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
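/*
 * Illustrative sketch of the decision validate_rbio_for_parity_scrub() makes
 * above (hypothetical helper, not used by the driver): because the parity
 * stripe being scrubbed cannot be trusted for reconstruction, we can tolerate
 * one less failed data stripe than usual, and a failed parity stripe is only
 * harmless when it happens to be the one we are rewriting anyway.
 */
static inline bool scrub_failures_recoverable_sketch(int dfail, int failp,
						     int scrubp, int max_errors)
{
	if (dfail > max_errors - 1)
		return false;	/* too many failed data stripes to rebuild */
	if (dfail == 0)
		return true;	/* data is fine, just rewrite the parity */
	/*
	 * A data stripe failed as well: only recoverable when the failed
	 * parity is the scrubbed one, because then the surviving parity is
	 * still trustworthy for reconstruction (the RAID6 case).
	 */
	return failp == scrubp;
}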
/*
 * end io for the read phase of the parity scrub.  All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate the
 * parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way.
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write,
	 * but if there are any failed stripes we'll reconstruct from parity
	 * first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}
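/*
 * Illustrative sketch (hypothetical helper) of the read-phase decision in
 * raid56_parity_scrub_stripe() above: a page is only read from disk when the
 * higher layer did not already hand it to us in the bio list and the cached
 * stripe page is not still uptodate from an earlier RMW.
 */
static inline bool scrub_page_needs_read_sketch(struct page *page_from_bio_list,
						struct page *stripe_page)
{
	if (page_from_bio_list)
		return false;	/* data arrived with the original bios */
	if (PageUptodate(stripe_page))
		return false;	/* still valid from the stripe cache */
	return true;
}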
/*
 * The following code is used for dev replace of a missing RAID 5/6 device.
 */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the rebuild rbio look like the other rbio types.
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io()
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}
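/*
 * Rough usage outline for the missing-device path above (hedged sketch; the
 * real caller lives in the scrub code and the surrounding calls are
 * paraphrased, not exact):
 *
 *	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
 *	if (!rbio)
 *		... drop the bbio reference and bail out ...
 *	for each page of good data already read:
 *		raid56_add_scrub_pages(rbio, page, logical);
 *	raid56_submit_missing_rbio(rbio);
 *
 * raid56_submit_missing_rbio() either queues the rebuild on the rmw workers
 * or, if another rbio already holds the stripe lock, parks this rbio until
 * the lock holder hands the stripe over.
 */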