153b381b3SDavid Woodhouse /* 253b381b3SDavid Woodhouse * Copyright (C) 2012 Fusion-io All rights reserved. 353b381b3SDavid Woodhouse * Copyright (C) 2012 Intel Corp. All rights reserved. 453b381b3SDavid Woodhouse * 553b381b3SDavid Woodhouse * This program is free software; you can redistribute it and/or 653b381b3SDavid Woodhouse * modify it under the terms of the GNU General Public 753b381b3SDavid Woodhouse * License v2 as published by the Free Software Foundation. 853b381b3SDavid Woodhouse * 953b381b3SDavid Woodhouse * This program is distributed in the hope that it will be useful, 1053b381b3SDavid Woodhouse * but WITHOUT ANY WARRANTY; without even the implied warranty of 1153b381b3SDavid Woodhouse * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1253b381b3SDavid Woodhouse * General Public License for more details. 1353b381b3SDavid Woodhouse * 1453b381b3SDavid Woodhouse * You should have received a copy of the GNU General Public 1553b381b3SDavid Woodhouse * License along with this program; if not, write to the 1653b381b3SDavid Woodhouse * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 1753b381b3SDavid Woodhouse * Boston, MA 021110-1307, USA. 1853b381b3SDavid Woodhouse */ 1953b381b3SDavid Woodhouse #include <linux/sched.h> 2053b381b3SDavid Woodhouse #include <linux/wait.h> 2153b381b3SDavid Woodhouse #include <linux/bio.h> 2253b381b3SDavid Woodhouse #include <linux/slab.h> 2353b381b3SDavid Woodhouse #include <linux/buffer_head.h> 2453b381b3SDavid Woodhouse #include <linux/blkdev.h> 2553b381b3SDavid Woodhouse #include <linux/random.h> 2653b381b3SDavid Woodhouse #include <linux/iocontext.h> 2753b381b3SDavid Woodhouse #include <linux/capability.h> 2853b381b3SDavid Woodhouse #include <linux/ratelimit.h> 2953b381b3SDavid Woodhouse #include <linux/kthread.h> 3053b381b3SDavid Woodhouse #include <linux/raid/pq.h> 3153b381b3SDavid Woodhouse #include <linux/hash.h> 3253b381b3SDavid Woodhouse #include <linux/list_sort.h> 3353b381b3SDavid Woodhouse #include <linux/raid/xor.h> 34d7011f5bSGeert Uytterhoeven #include <linux/vmalloc.h> 3553b381b3SDavid Woodhouse #include <asm/div64.h> 3653b381b3SDavid Woodhouse #include "ctree.h" 3753b381b3SDavid Woodhouse #include "extent_map.h" 3853b381b3SDavid Woodhouse #include "disk-io.h" 3953b381b3SDavid Woodhouse #include "transaction.h" 4053b381b3SDavid Woodhouse #include "print-tree.h" 4153b381b3SDavid Woodhouse #include "volumes.h" 4253b381b3SDavid Woodhouse #include "raid56.h" 4353b381b3SDavid Woodhouse #include "async-thread.h" 4453b381b3SDavid Woodhouse #include "check-integrity.h" 4553b381b3SDavid Woodhouse #include "rcu-string.h" 4653b381b3SDavid Woodhouse 4753b381b3SDavid Woodhouse /* set when additional merges to this rbio are not allowed */ 4853b381b3SDavid Woodhouse #define RBIO_RMW_LOCKED_BIT 1 4953b381b3SDavid Woodhouse 504ae10b3aSChris Mason /* 514ae10b3aSChris Mason * set when this rbio is sitting in the hash, but it is just a cache 524ae10b3aSChris Mason * of past RMW 534ae10b3aSChris Mason */ 544ae10b3aSChris Mason #define RBIO_CACHE_BIT 2 554ae10b3aSChris Mason 564ae10b3aSChris Mason /* 574ae10b3aSChris Mason * set when it is safe to trust the stripe_pages for caching 584ae10b3aSChris Mason */ 594ae10b3aSChris Mason #define RBIO_CACHE_READY_BIT 3 604ae10b3aSChris Mason 61af8e2d1dSMiao Xie /* 62af8e2d1dSMiao Xie * bbio and raid_map is managed by the caller, so we shouldn't free 63af8e2d1dSMiao Xie * them here. 
And besides that, all rbios with this flag should not
64af8e2d1dSMiao Xie  * be cached, because we need the raid_map to check whether two rbios'
65af8e2d1dSMiao Xie  * stripes are the same, but it is very likely that the caller has
66af8e2d1dSMiao Xie  * already freed the raid_map, so don't cache those rbios.
67af8e2d1dSMiao Xie  */
68af8e2d1dSMiao Xie #define RBIO_HOLD_BBIO_MAP_BIT 4
69af8e2d1dSMiao Xie 
704ae10b3aSChris Mason #define RBIO_CACHE_SIZE 1024
714ae10b3aSChris Mason 
721b94b556SMiao Xie enum btrfs_rbio_ops {
731b94b556SMiao Xie 	BTRFS_RBIO_WRITE = 0,
741b94b556SMiao Xie 	BTRFS_RBIO_READ_REBUILD = 1,
755a6ac9eaSMiao Xie 	BTRFS_RBIO_PARITY_SCRUB = 2,
761b94b556SMiao Xie };
771b94b556SMiao Xie 
7853b381b3SDavid Woodhouse struct btrfs_raid_bio {
7953b381b3SDavid Woodhouse 	struct btrfs_fs_info *fs_info;
8053b381b3SDavid Woodhouse 	struct btrfs_bio *bbio;
8153b381b3SDavid Woodhouse 
8253b381b3SDavid Woodhouse 	/*
8353b381b3SDavid Woodhouse 	 * logical block numbers for the start of each stripe
8453b381b3SDavid Woodhouse 	 * The last one or two are p/q. These are sorted,
8553b381b3SDavid Woodhouse 	 * so raid_map[0] is the start of our full stripe
8653b381b3SDavid Woodhouse 	 */
8753b381b3SDavid Woodhouse 	u64 *raid_map;
8853b381b3SDavid Woodhouse 
8953b381b3SDavid Woodhouse 	/* while we're doing rmw on a stripe
9053b381b3SDavid Woodhouse 	 * we put it into a hash table so we can
9153b381b3SDavid Woodhouse 	 * lock the stripe and merge more rbios
9253b381b3SDavid Woodhouse 	 * into it.
9353b381b3SDavid Woodhouse 	 */
9453b381b3SDavid Woodhouse 	struct list_head hash_list;
9553b381b3SDavid Woodhouse 
9653b381b3SDavid Woodhouse 	/*
974ae10b3aSChris Mason 	 * LRU list for the stripe cache
984ae10b3aSChris Mason 	 */
994ae10b3aSChris Mason 	struct list_head stripe_cache;
1004ae10b3aSChris Mason 
1014ae10b3aSChris Mason 	/*
10253b381b3SDavid Woodhouse 	 * for scheduling work in the helper threads
10353b381b3SDavid Woodhouse 	 */
10453b381b3SDavid Woodhouse 	struct btrfs_work work;
10553b381b3SDavid Woodhouse 
10653b381b3SDavid Woodhouse 	/*
10753b381b3SDavid Woodhouse 	 * bio list and bio_list_lock are used
10853b381b3SDavid Woodhouse 	 * to add more bios into the stripe
10953b381b3SDavid Woodhouse 	 * in hopes of avoiding the full rmw
11053b381b3SDavid Woodhouse 	 */
11153b381b3SDavid Woodhouse 	struct bio_list bio_list;
11253b381b3SDavid Woodhouse 	spinlock_t bio_list_lock;
11353b381b3SDavid Woodhouse 
1146ac0f488SChris Mason 	/* also protected by the bio_list_lock, the
1156ac0f488SChris Mason 	 * plug list is used by the plugging code
1166ac0f488SChris Mason 	 * to collect partial bios while plugged.
The 1176ac0f488SChris Mason * stripe locking code also uses it to hand off 11853b381b3SDavid Woodhouse * the stripe lock to the next pending IO 11953b381b3SDavid Woodhouse */ 12053b381b3SDavid Woodhouse struct list_head plug_list; 12153b381b3SDavid Woodhouse 12253b381b3SDavid Woodhouse /* 12353b381b3SDavid Woodhouse * flags that tell us if it is safe to 12453b381b3SDavid Woodhouse * merge with this bio 12553b381b3SDavid Woodhouse */ 12653b381b3SDavid Woodhouse unsigned long flags; 12753b381b3SDavid Woodhouse 12853b381b3SDavid Woodhouse /* size of each individual stripe on disk */ 12953b381b3SDavid Woodhouse int stripe_len; 13053b381b3SDavid Woodhouse 13153b381b3SDavid Woodhouse /* number of data stripes (no p/q) */ 13253b381b3SDavid Woodhouse int nr_data; 13353b381b3SDavid Woodhouse 134*2c8cdd6eSMiao Xie int real_stripes; 135*2c8cdd6eSMiao Xie 1365a6ac9eaSMiao Xie int stripe_npages; 13753b381b3SDavid Woodhouse /* 13853b381b3SDavid Woodhouse * set if we're doing a parity rebuild 13953b381b3SDavid Woodhouse * for a read from higher up, which is handled 14053b381b3SDavid Woodhouse * differently from a parity rebuild as part of 14153b381b3SDavid Woodhouse * rmw 14253b381b3SDavid Woodhouse */ 1431b94b556SMiao Xie enum btrfs_rbio_ops operation; 14453b381b3SDavid Woodhouse 14553b381b3SDavid Woodhouse /* first bad stripe */ 14653b381b3SDavid Woodhouse int faila; 14753b381b3SDavid Woodhouse 14853b381b3SDavid Woodhouse /* second bad stripe (for raid6 use) */ 14953b381b3SDavid Woodhouse int failb; 15053b381b3SDavid Woodhouse 1515a6ac9eaSMiao Xie int scrubp; 15253b381b3SDavid Woodhouse /* 15353b381b3SDavid Woodhouse * number of pages needed to represent the full 15453b381b3SDavid Woodhouse * stripe 15553b381b3SDavid Woodhouse */ 15653b381b3SDavid Woodhouse int nr_pages; 15753b381b3SDavid Woodhouse 15853b381b3SDavid Woodhouse /* 15953b381b3SDavid Woodhouse * size of all the bios in the bio_list. This 16053b381b3SDavid Woodhouse * helps us decide if the rbio maps to a full 16153b381b3SDavid Woodhouse * stripe or not 16253b381b3SDavid Woodhouse */ 16353b381b3SDavid Woodhouse int bio_list_bytes; 16453b381b3SDavid Woodhouse 16553b381b3SDavid Woodhouse atomic_t refs; 16653b381b3SDavid Woodhouse 167b89e1b01SMiao Xie atomic_t stripes_pending; 168b89e1b01SMiao Xie 169b89e1b01SMiao Xie atomic_t error; 17053b381b3SDavid Woodhouse /* 17153b381b3SDavid Woodhouse * these are two arrays of pointers. We allocate the 17253b381b3SDavid Woodhouse * rbio big enough to hold them both and setup their 17353b381b3SDavid Woodhouse * locations when the rbio is allocated 17453b381b3SDavid Woodhouse */ 17553b381b3SDavid Woodhouse 17653b381b3SDavid Woodhouse /* pointers to pages that we allocated for 17753b381b3SDavid Woodhouse * reading/writing stripes directly from the disk (including P/Q) 17853b381b3SDavid Woodhouse */ 17953b381b3SDavid Woodhouse struct page **stripe_pages; 18053b381b3SDavid Woodhouse 18153b381b3SDavid Woodhouse /* 18253b381b3SDavid Woodhouse * pointers to the pages in the bio_list. 
Stored 18353b381b3SDavid Woodhouse * here for faster lookup 18453b381b3SDavid Woodhouse */ 18553b381b3SDavid Woodhouse struct page **bio_pages; 1865a6ac9eaSMiao Xie 1875a6ac9eaSMiao Xie /* 1885a6ac9eaSMiao Xie * bitmap to record which horizontal stripe has data 1895a6ac9eaSMiao Xie */ 1905a6ac9eaSMiao Xie unsigned long *dbitmap; 19153b381b3SDavid Woodhouse }; 19253b381b3SDavid Woodhouse 19353b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio); 19453b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio); 19553b381b3SDavid Woodhouse static void rmw_work(struct btrfs_work *work); 19653b381b3SDavid Woodhouse static void read_rebuild_work(struct btrfs_work *work); 19753b381b3SDavid Woodhouse static void async_rmw_stripe(struct btrfs_raid_bio *rbio); 19853b381b3SDavid Woodhouse static void async_read_rebuild(struct btrfs_raid_bio *rbio); 19953b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio); 20053b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed); 20153b381b3SDavid Woodhouse static void __free_raid_bio(struct btrfs_raid_bio *rbio); 20253b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio); 20353b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio); 20453b381b3SDavid Woodhouse 2055a6ac9eaSMiao Xie static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, 2065a6ac9eaSMiao Xie int need_check); 2075a6ac9eaSMiao Xie static void async_scrub_parity(struct btrfs_raid_bio *rbio); 2085a6ac9eaSMiao Xie 20953b381b3SDavid Woodhouse /* 21053b381b3SDavid Woodhouse * the stripe hash table is used for locking, and to collect 21153b381b3SDavid Woodhouse * bios in hopes of making a full stripe 21253b381b3SDavid Woodhouse */ 21353b381b3SDavid Woodhouse int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) 21453b381b3SDavid Woodhouse { 21553b381b3SDavid Woodhouse struct btrfs_stripe_hash_table *table; 21653b381b3SDavid Woodhouse struct btrfs_stripe_hash_table *x; 21753b381b3SDavid Woodhouse struct btrfs_stripe_hash *cur; 21853b381b3SDavid Woodhouse struct btrfs_stripe_hash *h; 21953b381b3SDavid Woodhouse int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS; 22053b381b3SDavid Woodhouse int i; 22183c8266aSDavid Sterba int table_size; 22253b381b3SDavid Woodhouse 22353b381b3SDavid Woodhouse if (info->stripe_hash_table) 22453b381b3SDavid Woodhouse return 0; 22553b381b3SDavid Woodhouse 22683c8266aSDavid Sterba /* 22783c8266aSDavid Sterba * The table is large, starting with order 4 and can go as high as 22883c8266aSDavid Sterba * order 7 in case lock debugging is turned on. 22983c8266aSDavid Sterba * 23083c8266aSDavid Sterba * Try harder to allocate and fallback to vmalloc to lower the chance 23183c8266aSDavid Sterba * of a failing mount. 
23283c8266aSDavid Sterba */ 23383c8266aSDavid Sterba table_size = sizeof(*table) + sizeof(*h) * num_entries; 23483c8266aSDavid Sterba table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 23583c8266aSDavid Sterba if (!table) { 23683c8266aSDavid Sterba table = vzalloc(table_size); 23753b381b3SDavid Woodhouse if (!table) 23853b381b3SDavid Woodhouse return -ENOMEM; 23983c8266aSDavid Sterba } 24053b381b3SDavid Woodhouse 2414ae10b3aSChris Mason spin_lock_init(&table->cache_lock); 2424ae10b3aSChris Mason INIT_LIST_HEAD(&table->stripe_cache); 2434ae10b3aSChris Mason 24453b381b3SDavid Woodhouse h = table->table; 24553b381b3SDavid Woodhouse 24653b381b3SDavid Woodhouse for (i = 0; i < num_entries; i++) { 24753b381b3SDavid Woodhouse cur = h + i; 24853b381b3SDavid Woodhouse INIT_LIST_HEAD(&cur->hash_list); 24953b381b3SDavid Woodhouse spin_lock_init(&cur->lock); 25053b381b3SDavid Woodhouse init_waitqueue_head(&cur->wait); 25153b381b3SDavid Woodhouse } 25253b381b3SDavid Woodhouse 25353b381b3SDavid Woodhouse x = cmpxchg(&info->stripe_hash_table, NULL, table); 25483c8266aSDavid Sterba if (x) { 25583c8266aSDavid Sterba if (is_vmalloc_addr(x)) 25683c8266aSDavid Sterba vfree(x); 25783c8266aSDavid Sterba else 25853b381b3SDavid Woodhouse kfree(x); 25983c8266aSDavid Sterba } 26053b381b3SDavid Woodhouse return 0; 26153b381b3SDavid Woodhouse } 26253b381b3SDavid Woodhouse 26353b381b3SDavid Woodhouse /* 2644ae10b3aSChris Mason * caching an rbio means to copy anything from the 2654ae10b3aSChris Mason * bio_pages array into the stripe_pages array. We 2664ae10b3aSChris Mason * use the page uptodate bit in the stripe cache array 2674ae10b3aSChris Mason * to indicate if it has valid data 2684ae10b3aSChris Mason * 2694ae10b3aSChris Mason * once the caching is done, we set the cache ready 2704ae10b3aSChris Mason * bit. 2714ae10b3aSChris Mason */ 2724ae10b3aSChris Mason static void cache_rbio_pages(struct btrfs_raid_bio *rbio) 2734ae10b3aSChris Mason { 2744ae10b3aSChris Mason int i; 2754ae10b3aSChris Mason char *s; 2764ae10b3aSChris Mason char *d; 2774ae10b3aSChris Mason int ret; 2784ae10b3aSChris Mason 2794ae10b3aSChris Mason ret = alloc_rbio_pages(rbio); 2804ae10b3aSChris Mason if (ret) 2814ae10b3aSChris Mason return; 2824ae10b3aSChris Mason 2834ae10b3aSChris Mason for (i = 0; i < rbio->nr_pages; i++) { 2844ae10b3aSChris Mason if (!rbio->bio_pages[i]) 2854ae10b3aSChris Mason continue; 2864ae10b3aSChris Mason 2874ae10b3aSChris Mason s = kmap(rbio->bio_pages[i]); 2884ae10b3aSChris Mason d = kmap(rbio->stripe_pages[i]); 2894ae10b3aSChris Mason 2904ae10b3aSChris Mason memcpy(d, s, PAGE_CACHE_SIZE); 2914ae10b3aSChris Mason 2924ae10b3aSChris Mason kunmap(rbio->bio_pages[i]); 2934ae10b3aSChris Mason kunmap(rbio->stripe_pages[i]); 2944ae10b3aSChris Mason SetPageUptodate(rbio->stripe_pages[i]); 2954ae10b3aSChris Mason } 2964ae10b3aSChris Mason set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 2974ae10b3aSChris Mason } 2984ae10b3aSChris Mason 2994ae10b3aSChris Mason /* 30053b381b3SDavid Woodhouse * we hash on the first logical address of the stripe 30153b381b3SDavid Woodhouse */ 30253b381b3SDavid Woodhouse static int rbio_bucket(struct btrfs_raid_bio *rbio) 30353b381b3SDavid Woodhouse { 30453b381b3SDavid Woodhouse u64 num = rbio->raid_map[0]; 30553b381b3SDavid Woodhouse 30653b381b3SDavid Woodhouse /* 30753b381b3SDavid Woodhouse * we shift down quite a bit. We're using byte 30853b381b3SDavid Woodhouse * addressing, and most of the lower bits are zeros. 
30953b381b3SDavid Woodhouse * This tends to upset hash_64, and it consistently 31053b381b3SDavid Woodhouse * returns just one or two different values. 31153b381b3SDavid Woodhouse * 31253b381b3SDavid Woodhouse * shifting off the lower bits fixes things. 31353b381b3SDavid Woodhouse */ 31453b381b3SDavid Woodhouse return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS); 31553b381b3SDavid Woodhouse } 31653b381b3SDavid Woodhouse 31753b381b3SDavid Woodhouse /* 3184ae10b3aSChris Mason * stealing an rbio means taking all the uptodate pages from the stripe 3194ae10b3aSChris Mason * array in the source rbio and putting them into the destination rbio 3204ae10b3aSChris Mason */ 3214ae10b3aSChris Mason static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest) 3224ae10b3aSChris Mason { 3234ae10b3aSChris Mason int i; 3244ae10b3aSChris Mason struct page *s; 3254ae10b3aSChris Mason struct page *d; 3264ae10b3aSChris Mason 3274ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags)) 3284ae10b3aSChris Mason return; 3294ae10b3aSChris Mason 3304ae10b3aSChris Mason for (i = 0; i < dest->nr_pages; i++) { 3314ae10b3aSChris Mason s = src->stripe_pages[i]; 3324ae10b3aSChris Mason if (!s || !PageUptodate(s)) { 3334ae10b3aSChris Mason continue; 3344ae10b3aSChris Mason } 3354ae10b3aSChris Mason 3364ae10b3aSChris Mason d = dest->stripe_pages[i]; 3374ae10b3aSChris Mason if (d) 3384ae10b3aSChris Mason __free_page(d); 3394ae10b3aSChris Mason 3404ae10b3aSChris Mason dest->stripe_pages[i] = s; 3414ae10b3aSChris Mason src->stripe_pages[i] = NULL; 3424ae10b3aSChris Mason } 3434ae10b3aSChris Mason } 3444ae10b3aSChris Mason 3454ae10b3aSChris Mason /* 34653b381b3SDavid Woodhouse * merging means we take the bio_list from the victim and 34753b381b3SDavid Woodhouse * splice it into the destination. The victim should 34853b381b3SDavid Woodhouse * be discarded afterwards. 34953b381b3SDavid Woodhouse * 35053b381b3SDavid Woodhouse * must be called with dest->rbio_list_lock held 35153b381b3SDavid Woodhouse */ 35253b381b3SDavid Woodhouse static void merge_rbio(struct btrfs_raid_bio *dest, 35353b381b3SDavid Woodhouse struct btrfs_raid_bio *victim) 35453b381b3SDavid Woodhouse { 35553b381b3SDavid Woodhouse bio_list_merge(&dest->bio_list, &victim->bio_list); 35653b381b3SDavid Woodhouse dest->bio_list_bytes += victim->bio_list_bytes; 35753b381b3SDavid Woodhouse bio_list_init(&victim->bio_list); 35853b381b3SDavid Woodhouse } 35953b381b3SDavid Woodhouse 36053b381b3SDavid Woodhouse /* 3614ae10b3aSChris Mason * used to prune items that are in the cache. The caller 3624ae10b3aSChris Mason * must hold the hash table lock. 3634ae10b3aSChris Mason */ 3644ae10b3aSChris Mason static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 3654ae10b3aSChris Mason { 3664ae10b3aSChris Mason int bucket = rbio_bucket(rbio); 3674ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 3684ae10b3aSChris Mason struct btrfs_stripe_hash *h; 3694ae10b3aSChris Mason int freeit = 0; 3704ae10b3aSChris Mason 3714ae10b3aSChris Mason /* 3724ae10b3aSChris Mason * check the bit again under the hash table lock. 
3734ae10b3aSChris Mason */ 3744ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 3754ae10b3aSChris Mason return; 3764ae10b3aSChris Mason 3774ae10b3aSChris Mason table = rbio->fs_info->stripe_hash_table; 3784ae10b3aSChris Mason h = table->table + bucket; 3794ae10b3aSChris Mason 3804ae10b3aSChris Mason /* hold the lock for the bucket because we may be 3814ae10b3aSChris Mason * removing it from the hash table 3824ae10b3aSChris Mason */ 3834ae10b3aSChris Mason spin_lock(&h->lock); 3844ae10b3aSChris Mason 3854ae10b3aSChris Mason /* 3864ae10b3aSChris Mason * hold the lock for the bio list because we need 3874ae10b3aSChris Mason * to make sure the bio list is empty 3884ae10b3aSChris Mason */ 3894ae10b3aSChris Mason spin_lock(&rbio->bio_list_lock); 3904ae10b3aSChris Mason 3914ae10b3aSChris Mason if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { 3924ae10b3aSChris Mason list_del_init(&rbio->stripe_cache); 3934ae10b3aSChris Mason table->cache_size -= 1; 3944ae10b3aSChris Mason freeit = 1; 3954ae10b3aSChris Mason 3964ae10b3aSChris Mason /* if the bio list isn't empty, this rbio is 3974ae10b3aSChris Mason * still involved in an IO. We take it out 3984ae10b3aSChris Mason * of the cache list, and drop the ref that 3994ae10b3aSChris Mason * was held for the list. 4004ae10b3aSChris Mason * 4014ae10b3aSChris Mason * If the bio_list was empty, we also remove 4024ae10b3aSChris Mason * the rbio from the hash_table, and drop 4034ae10b3aSChris Mason * the corresponding ref 4044ae10b3aSChris Mason */ 4054ae10b3aSChris Mason if (bio_list_empty(&rbio->bio_list)) { 4064ae10b3aSChris Mason if (!list_empty(&rbio->hash_list)) { 4074ae10b3aSChris Mason list_del_init(&rbio->hash_list); 4084ae10b3aSChris Mason atomic_dec(&rbio->refs); 4094ae10b3aSChris Mason BUG_ON(!list_empty(&rbio->plug_list)); 4104ae10b3aSChris Mason } 4114ae10b3aSChris Mason } 4124ae10b3aSChris Mason } 4134ae10b3aSChris Mason 4144ae10b3aSChris Mason spin_unlock(&rbio->bio_list_lock); 4154ae10b3aSChris Mason spin_unlock(&h->lock); 4164ae10b3aSChris Mason 4174ae10b3aSChris Mason if (freeit) 4184ae10b3aSChris Mason __free_raid_bio(rbio); 4194ae10b3aSChris Mason } 4204ae10b3aSChris Mason 4214ae10b3aSChris Mason /* 4224ae10b3aSChris Mason * prune a given rbio from the cache 4234ae10b3aSChris Mason */ 4244ae10b3aSChris Mason static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 4254ae10b3aSChris Mason { 4264ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 4274ae10b3aSChris Mason unsigned long flags; 4284ae10b3aSChris Mason 4294ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 4304ae10b3aSChris Mason return; 4314ae10b3aSChris Mason 4324ae10b3aSChris Mason table = rbio->fs_info->stripe_hash_table; 4334ae10b3aSChris Mason 4344ae10b3aSChris Mason spin_lock_irqsave(&table->cache_lock, flags); 4354ae10b3aSChris Mason __remove_rbio_from_cache(rbio); 4364ae10b3aSChris Mason spin_unlock_irqrestore(&table->cache_lock, flags); 4374ae10b3aSChris Mason } 4384ae10b3aSChris Mason 4394ae10b3aSChris Mason /* 4404ae10b3aSChris Mason * remove everything in the cache 4414ae10b3aSChris Mason */ 44248a3b636SEric Sandeen static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info) 4434ae10b3aSChris Mason { 4444ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 4454ae10b3aSChris Mason unsigned long flags; 4464ae10b3aSChris Mason struct btrfs_raid_bio *rbio; 4474ae10b3aSChris Mason 4484ae10b3aSChris Mason table = info->stripe_hash_table; 4494ae10b3aSChris Mason 4504ae10b3aSChris Mason 
spin_lock_irqsave(&table->cache_lock, flags); 4514ae10b3aSChris Mason while (!list_empty(&table->stripe_cache)) { 4524ae10b3aSChris Mason rbio = list_entry(table->stripe_cache.next, 4534ae10b3aSChris Mason struct btrfs_raid_bio, 4544ae10b3aSChris Mason stripe_cache); 4554ae10b3aSChris Mason __remove_rbio_from_cache(rbio); 4564ae10b3aSChris Mason } 4574ae10b3aSChris Mason spin_unlock_irqrestore(&table->cache_lock, flags); 4584ae10b3aSChris Mason } 4594ae10b3aSChris Mason 4604ae10b3aSChris Mason /* 4614ae10b3aSChris Mason * remove all cached entries and free the hash table 4624ae10b3aSChris Mason * used by unmount 46353b381b3SDavid Woodhouse */ 46453b381b3SDavid Woodhouse void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info) 46553b381b3SDavid Woodhouse { 46653b381b3SDavid Woodhouse if (!info->stripe_hash_table) 46753b381b3SDavid Woodhouse return; 4684ae10b3aSChris Mason btrfs_clear_rbio_cache(info); 46983c8266aSDavid Sterba if (is_vmalloc_addr(info->stripe_hash_table)) 47083c8266aSDavid Sterba vfree(info->stripe_hash_table); 47183c8266aSDavid Sterba else 47253b381b3SDavid Woodhouse kfree(info->stripe_hash_table); 47353b381b3SDavid Woodhouse info->stripe_hash_table = NULL; 47453b381b3SDavid Woodhouse } 47553b381b3SDavid Woodhouse 47653b381b3SDavid Woodhouse /* 4774ae10b3aSChris Mason * insert an rbio into the stripe cache. It 4784ae10b3aSChris Mason * must have already been prepared by calling 4794ae10b3aSChris Mason * cache_rbio_pages 4804ae10b3aSChris Mason * 4814ae10b3aSChris Mason * If this rbio was already cached, it gets 4824ae10b3aSChris Mason * moved to the front of the lru. 4834ae10b3aSChris Mason * 4844ae10b3aSChris Mason * If the size of the rbio cache is too big, we 4854ae10b3aSChris Mason * prune an item. 4864ae10b3aSChris Mason */ 4874ae10b3aSChris Mason static void cache_rbio(struct btrfs_raid_bio *rbio) 4884ae10b3aSChris Mason { 4894ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 4904ae10b3aSChris Mason unsigned long flags; 4914ae10b3aSChris Mason 4924ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) 4934ae10b3aSChris Mason return; 4944ae10b3aSChris Mason 4954ae10b3aSChris Mason table = rbio->fs_info->stripe_hash_table; 4964ae10b3aSChris Mason 4974ae10b3aSChris Mason spin_lock_irqsave(&table->cache_lock, flags); 4984ae10b3aSChris Mason spin_lock(&rbio->bio_list_lock); 4994ae10b3aSChris Mason 5004ae10b3aSChris Mason /* bump our ref if we were not in the list before */ 5014ae10b3aSChris Mason if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) 5024ae10b3aSChris Mason atomic_inc(&rbio->refs); 5034ae10b3aSChris Mason 5044ae10b3aSChris Mason if (!list_empty(&rbio->stripe_cache)){ 5054ae10b3aSChris Mason list_move(&rbio->stripe_cache, &table->stripe_cache); 5064ae10b3aSChris Mason } else { 5074ae10b3aSChris Mason list_add(&rbio->stripe_cache, &table->stripe_cache); 5084ae10b3aSChris Mason table->cache_size += 1; 5094ae10b3aSChris Mason } 5104ae10b3aSChris Mason 5114ae10b3aSChris Mason spin_unlock(&rbio->bio_list_lock); 5124ae10b3aSChris Mason 5134ae10b3aSChris Mason if (table->cache_size > RBIO_CACHE_SIZE) { 5144ae10b3aSChris Mason struct btrfs_raid_bio *found; 5154ae10b3aSChris Mason 5164ae10b3aSChris Mason found = list_entry(table->stripe_cache.prev, 5174ae10b3aSChris Mason struct btrfs_raid_bio, 5184ae10b3aSChris Mason stripe_cache); 5194ae10b3aSChris Mason 5204ae10b3aSChris Mason if (found != rbio) 5214ae10b3aSChris Mason __remove_rbio_from_cache(found); 5224ae10b3aSChris Mason } 5234ae10b3aSChris Mason 5244ae10b3aSChris Mason 
spin_unlock_irqrestore(&table->cache_lock, flags); 5254ae10b3aSChris Mason return; 5264ae10b3aSChris Mason } 5274ae10b3aSChris Mason 5284ae10b3aSChris Mason /* 52953b381b3SDavid Woodhouse * helper function to run the xor_blocks api. It is only 53053b381b3SDavid Woodhouse * able to do MAX_XOR_BLOCKS at a time, so we need to 53153b381b3SDavid Woodhouse * loop through. 53253b381b3SDavid Woodhouse */ 53353b381b3SDavid Woodhouse static void run_xor(void **pages, int src_cnt, ssize_t len) 53453b381b3SDavid Woodhouse { 53553b381b3SDavid Woodhouse int src_off = 0; 53653b381b3SDavid Woodhouse int xor_src_cnt = 0; 53753b381b3SDavid Woodhouse void *dest = pages[src_cnt]; 53853b381b3SDavid Woodhouse 53953b381b3SDavid Woodhouse while(src_cnt > 0) { 54053b381b3SDavid Woodhouse xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS); 54153b381b3SDavid Woodhouse xor_blocks(xor_src_cnt, len, dest, pages + src_off); 54253b381b3SDavid Woodhouse 54353b381b3SDavid Woodhouse src_cnt -= xor_src_cnt; 54453b381b3SDavid Woodhouse src_off += xor_src_cnt; 54553b381b3SDavid Woodhouse } 54653b381b3SDavid Woodhouse } 54753b381b3SDavid Woodhouse 54853b381b3SDavid Woodhouse /* 54953b381b3SDavid Woodhouse * returns true if the bio list inside this rbio 55053b381b3SDavid Woodhouse * covers an entire stripe (no rmw required). 55153b381b3SDavid Woodhouse * Must be called with the bio list lock held, or 55253b381b3SDavid Woodhouse * at a time when you know it is impossible to add 55353b381b3SDavid Woodhouse * new bios into the list 55453b381b3SDavid Woodhouse */ 55553b381b3SDavid Woodhouse static int __rbio_is_full(struct btrfs_raid_bio *rbio) 55653b381b3SDavid Woodhouse { 55753b381b3SDavid Woodhouse unsigned long size = rbio->bio_list_bytes; 55853b381b3SDavid Woodhouse int ret = 1; 55953b381b3SDavid Woodhouse 56053b381b3SDavid Woodhouse if (size != rbio->nr_data * rbio->stripe_len) 56153b381b3SDavid Woodhouse ret = 0; 56253b381b3SDavid Woodhouse 56353b381b3SDavid Woodhouse BUG_ON(size > rbio->nr_data * rbio->stripe_len); 56453b381b3SDavid Woodhouse return ret; 56553b381b3SDavid Woodhouse } 56653b381b3SDavid Woodhouse 56753b381b3SDavid Woodhouse static int rbio_is_full(struct btrfs_raid_bio *rbio) 56853b381b3SDavid Woodhouse { 56953b381b3SDavid Woodhouse unsigned long flags; 57053b381b3SDavid Woodhouse int ret; 57153b381b3SDavid Woodhouse 57253b381b3SDavid Woodhouse spin_lock_irqsave(&rbio->bio_list_lock, flags); 57353b381b3SDavid Woodhouse ret = __rbio_is_full(rbio); 57453b381b3SDavid Woodhouse spin_unlock_irqrestore(&rbio->bio_list_lock, flags); 57553b381b3SDavid Woodhouse return ret; 57653b381b3SDavid Woodhouse } 57753b381b3SDavid Woodhouse 57853b381b3SDavid Woodhouse /* 57953b381b3SDavid Woodhouse * returns 1 if it is safe to merge two rbios together. 
58053b381b3SDavid Woodhouse  * The merging is safe if the two rbios correspond to
58153b381b3SDavid Woodhouse  * the same stripe and if they are both going in the same
58253b381b3SDavid Woodhouse  * direction (read vs write), and if neither one is
58353b381b3SDavid Woodhouse  * locked for final IO
58453b381b3SDavid Woodhouse  *
58553b381b3SDavid Woodhouse  * The caller is responsible for locking such that
58653b381b3SDavid Woodhouse  * rmw_locked is safe to test
58753b381b3SDavid Woodhouse  */
58853b381b3SDavid Woodhouse static int rbio_can_merge(struct btrfs_raid_bio *last,
58953b381b3SDavid Woodhouse 			  struct btrfs_raid_bio *cur)
59053b381b3SDavid Woodhouse {
59153b381b3SDavid Woodhouse 	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
59253b381b3SDavid Woodhouse 	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
59353b381b3SDavid Woodhouse 		return 0;
59453b381b3SDavid Woodhouse 
5954ae10b3aSChris Mason 	/*
5964ae10b3aSChris Mason 	 * we can't merge with cached rbios, since the
5974ae10b3aSChris Mason 	 * idea is that when we merge the destination
5984ae10b3aSChris Mason 	 * rbio is going to run our IO for us. We can
5994ae10b3aSChris Mason 	 * steal from cached rbio's though, other functions
6004ae10b3aSChris Mason 	 * handle that.
6014ae10b3aSChris Mason 	 */
6024ae10b3aSChris Mason 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
6034ae10b3aSChris Mason 	    test_bit(RBIO_CACHE_BIT, &cur->flags))
6044ae10b3aSChris Mason 		return 0;
6054ae10b3aSChris Mason 
60653b381b3SDavid Woodhouse 	if (last->raid_map[0] !=
60753b381b3SDavid Woodhouse 	    cur->raid_map[0])
60853b381b3SDavid Woodhouse 		return 0;
60953b381b3SDavid Woodhouse 
6105a6ac9eaSMiao Xie 	/* we can't merge with different operations */
6115a6ac9eaSMiao Xie 	if (last->operation != cur->operation)
61253b381b3SDavid Woodhouse 		return 0;
6135a6ac9eaSMiao Xie 	/*
6145a6ac9eaSMiao Xie 	 * a parity scrub reads the full stripe from the drive, then
6155a6ac9eaSMiao Xie 	 * checks and repairs the parity and writes the new results.
6165a6ac9eaSMiao Xie 	 *
6175a6ac9eaSMiao Xie 	 * We're not allowed to add any new bios to its
6185a6ac9eaSMiao Xie 	 * bio list here, so anyone else that wants to
6195a6ac9eaSMiao Xie 	 * change this stripe needs to do their own rmw.
6205a6ac9eaSMiao Xie */ 6215a6ac9eaSMiao Xie if (last->operation == BTRFS_RBIO_PARITY_SCRUB || 6225a6ac9eaSMiao Xie cur->operation == BTRFS_RBIO_PARITY_SCRUB) 6235a6ac9eaSMiao Xie return 0; 62453b381b3SDavid Woodhouse 62553b381b3SDavid Woodhouse return 1; 62653b381b3SDavid Woodhouse } 62753b381b3SDavid Woodhouse 62853b381b3SDavid Woodhouse /* 62953b381b3SDavid Woodhouse * helper to index into the pstripe 63053b381b3SDavid Woodhouse */ 63153b381b3SDavid Woodhouse static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) 63253b381b3SDavid Woodhouse { 63353b381b3SDavid Woodhouse index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT; 63453b381b3SDavid Woodhouse return rbio->stripe_pages[index]; 63553b381b3SDavid Woodhouse } 63653b381b3SDavid Woodhouse 63753b381b3SDavid Woodhouse /* 63853b381b3SDavid Woodhouse * helper to index into the qstripe, returns null 63953b381b3SDavid Woodhouse * if there is no qstripe 64053b381b3SDavid Woodhouse */ 64153b381b3SDavid Woodhouse static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) 64253b381b3SDavid Woodhouse { 643*2c8cdd6eSMiao Xie if (rbio->nr_data + 1 == rbio->real_stripes) 64453b381b3SDavid Woodhouse return NULL; 64553b381b3SDavid Woodhouse 64653b381b3SDavid Woodhouse index += ((rbio->nr_data + 1) * rbio->stripe_len) >> 64753b381b3SDavid Woodhouse PAGE_CACHE_SHIFT; 64853b381b3SDavid Woodhouse return rbio->stripe_pages[index]; 64953b381b3SDavid Woodhouse } 65053b381b3SDavid Woodhouse 65153b381b3SDavid Woodhouse /* 65253b381b3SDavid Woodhouse * The first stripe in the table for a logical address 65353b381b3SDavid Woodhouse * has the lock. rbios are added in one of three ways: 65453b381b3SDavid Woodhouse * 65553b381b3SDavid Woodhouse * 1) Nobody has the stripe locked yet. The rbio is given 65653b381b3SDavid Woodhouse * the lock and 0 is returned. The caller must start the IO 65753b381b3SDavid Woodhouse * themselves. 65853b381b3SDavid Woodhouse * 65953b381b3SDavid Woodhouse * 2) Someone has the stripe locked, but we're able to merge 66053b381b3SDavid Woodhouse * with the lock owner. The rbio is freed and the IO will 66153b381b3SDavid Woodhouse * start automatically along with the existing rbio. 1 is returned. 66253b381b3SDavid Woodhouse * 66353b381b3SDavid Woodhouse * 3) Someone has the stripe locked, but we're not able to merge. 66453b381b3SDavid Woodhouse * The rbio is added to the lock owner's plug list, or merged into 66553b381b3SDavid Woodhouse * an rbio already on the plug list. When the lock owner unlocks, 66653b381b3SDavid Woodhouse * the next rbio on the list is run and the IO is started automatically. 66753b381b3SDavid Woodhouse * 1 is returned 66853b381b3SDavid Woodhouse * 66953b381b3SDavid Woodhouse * If we return 0, the caller still owns the rbio and must continue with 67053b381b3SDavid Woodhouse * IO submission. If we return 1, the caller must assume the rbio has 67153b381b3SDavid Woodhouse * already been freed. 
67253b381b3SDavid Woodhouse */ 67353b381b3SDavid Woodhouse static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) 67453b381b3SDavid Woodhouse { 67553b381b3SDavid Woodhouse int bucket = rbio_bucket(rbio); 67653b381b3SDavid Woodhouse struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; 67753b381b3SDavid Woodhouse struct btrfs_raid_bio *cur; 67853b381b3SDavid Woodhouse struct btrfs_raid_bio *pending; 67953b381b3SDavid Woodhouse unsigned long flags; 68053b381b3SDavid Woodhouse DEFINE_WAIT(wait); 68153b381b3SDavid Woodhouse struct btrfs_raid_bio *freeit = NULL; 6824ae10b3aSChris Mason struct btrfs_raid_bio *cache_drop = NULL; 68353b381b3SDavid Woodhouse int ret = 0; 68453b381b3SDavid Woodhouse int walk = 0; 68553b381b3SDavid Woodhouse 68653b381b3SDavid Woodhouse spin_lock_irqsave(&h->lock, flags); 68753b381b3SDavid Woodhouse list_for_each_entry(cur, &h->hash_list, hash_list) { 68853b381b3SDavid Woodhouse walk++; 68953b381b3SDavid Woodhouse if (cur->raid_map[0] == rbio->raid_map[0]) { 69053b381b3SDavid Woodhouse spin_lock(&cur->bio_list_lock); 69153b381b3SDavid Woodhouse 6924ae10b3aSChris Mason /* can we steal this cached rbio's pages? */ 6934ae10b3aSChris Mason if (bio_list_empty(&cur->bio_list) && 6944ae10b3aSChris Mason list_empty(&cur->plug_list) && 6954ae10b3aSChris Mason test_bit(RBIO_CACHE_BIT, &cur->flags) && 6964ae10b3aSChris Mason !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { 6974ae10b3aSChris Mason list_del_init(&cur->hash_list); 6984ae10b3aSChris Mason atomic_dec(&cur->refs); 6994ae10b3aSChris Mason 7004ae10b3aSChris Mason steal_rbio(cur, rbio); 7014ae10b3aSChris Mason cache_drop = cur; 7024ae10b3aSChris Mason spin_unlock(&cur->bio_list_lock); 7034ae10b3aSChris Mason 7044ae10b3aSChris Mason goto lockit; 7054ae10b3aSChris Mason } 7064ae10b3aSChris Mason 70753b381b3SDavid Woodhouse /* can we merge into the lock owner? */ 70853b381b3SDavid Woodhouse if (rbio_can_merge(cur, rbio)) { 70953b381b3SDavid Woodhouse merge_rbio(cur, rbio); 71053b381b3SDavid Woodhouse spin_unlock(&cur->bio_list_lock); 71153b381b3SDavid Woodhouse freeit = rbio; 71253b381b3SDavid Woodhouse ret = 1; 71353b381b3SDavid Woodhouse goto out; 71453b381b3SDavid Woodhouse } 71553b381b3SDavid Woodhouse 7164ae10b3aSChris Mason 71753b381b3SDavid Woodhouse /* 71853b381b3SDavid Woodhouse * we couldn't merge with the running 71953b381b3SDavid Woodhouse * rbio, see if we can merge with the 72053b381b3SDavid Woodhouse * pending ones. 
We don't have to 72153b381b3SDavid Woodhouse * check for rmw_locked because there 72253b381b3SDavid Woodhouse * is no way they are inside finish_rmw 72353b381b3SDavid Woodhouse * right now 72453b381b3SDavid Woodhouse */ 72553b381b3SDavid Woodhouse list_for_each_entry(pending, &cur->plug_list, 72653b381b3SDavid Woodhouse plug_list) { 72753b381b3SDavid Woodhouse if (rbio_can_merge(pending, rbio)) { 72853b381b3SDavid Woodhouse merge_rbio(pending, rbio); 72953b381b3SDavid Woodhouse spin_unlock(&cur->bio_list_lock); 73053b381b3SDavid Woodhouse freeit = rbio; 73153b381b3SDavid Woodhouse ret = 1; 73253b381b3SDavid Woodhouse goto out; 73353b381b3SDavid Woodhouse } 73453b381b3SDavid Woodhouse } 73553b381b3SDavid Woodhouse 73653b381b3SDavid Woodhouse /* no merging, put us on the tail of the plug list, 73753b381b3SDavid Woodhouse * our rbio will be started with the currently 73853b381b3SDavid Woodhouse * running rbio unlocks 73953b381b3SDavid Woodhouse */ 74053b381b3SDavid Woodhouse list_add_tail(&rbio->plug_list, &cur->plug_list); 74153b381b3SDavid Woodhouse spin_unlock(&cur->bio_list_lock); 74253b381b3SDavid Woodhouse ret = 1; 74353b381b3SDavid Woodhouse goto out; 74453b381b3SDavid Woodhouse } 74553b381b3SDavid Woodhouse } 7464ae10b3aSChris Mason lockit: 74753b381b3SDavid Woodhouse atomic_inc(&rbio->refs); 74853b381b3SDavid Woodhouse list_add(&rbio->hash_list, &h->hash_list); 74953b381b3SDavid Woodhouse out: 75053b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 7514ae10b3aSChris Mason if (cache_drop) 7524ae10b3aSChris Mason remove_rbio_from_cache(cache_drop); 75353b381b3SDavid Woodhouse if (freeit) 75453b381b3SDavid Woodhouse __free_raid_bio(freeit); 75553b381b3SDavid Woodhouse return ret; 75653b381b3SDavid Woodhouse } 75753b381b3SDavid Woodhouse 75853b381b3SDavid Woodhouse /* 75953b381b3SDavid Woodhouse * called as rmw or parity rebuild is completed. 
If the plug list has more 76053b381b3SDavid Woodhouse * rbios waiting for this stripe, the next one on the list will be started 76153b381b3SDavid Woodhouse */ 76253b381b3SDavid Woodhouse static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) 76353b381b3SDavid Woodhouse { 76453b381b3SDavid Woodhouse int bucket; 76553b381b3SDavid Woodhouse struct btrfs_stripe_hash *h; 76653b381b3SDavid Woodhouse unsigned long flags; 7674ae10b3aSChris Mason int keep_cache = 0; 76853b381b3SDavid Woodhouse 76953b381b3SDavid Woodhouse bucket = rbio_bucket(rbio); 77053b381b3SDavid Woodhouse h = rbio->fs_info->stripe_hash_table->table + bucket; 77153b381b3SDavid Woodhouse 7724ae10b3aSChris Mason if (list_empty(&rbio->plug_list)) 7734ae10b3aSChris Mason cache_rbio(rbio); 7744ae10b3aSChris Mason 77553b381b3SDavid Woodhouse spin_lock_irqsave(&h->lock, flags); 77653b381b3SDavid Woodhouse spin_lock(&rbio->bio_list_lock); 77753b381b3SDavid Woodhouse 77853b381b3SDavid Woodhouse if (!list_empty(&rbio->hash_list)) { 7794ae10b3aSChris Mason /* 7804ae10b3aSChris Mason * if we're still cached and there is no other IO 7814ae10b3aSChris Mason * to perform, just leave this rbio here for others 7824ae10b3aSChris Mason * to steal from later 7834ae10b3aSChris Mason */ 7844ae10b3aSChris Mason if (list_empty(&rbio->plug_list) && 7854ae10b3aSChris Mason test_bit(RBIO_CACHE_BIT, &rbio->flags)) { 7864ae10b3aSChris Mason keep_cache = 1; 7874ae10b3aSChris Mason clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 7884ae10b3aSChris Mason BUG_ON(!bio_list_empty(&rbio->bio_list)); 7894ae10b3aSChris Mason goto done; 7904ae10b3aSChris Mason } 79153b381b3SDavid Woodhouse 79253b381b3SDavid Woodhouse list_del_init(&rbio->hash_list); 79353b381b3SDavid Woodhouse atomic_dec(&rbio->refs); 79453b381b3SDavid Woodhouse 79553b381b3SDavid Woodhouse /* 79653b381b3SDavid Woodhouse * we use the plug list to hold all the rbios 79753b381b3SDavid Woodhouse * waiting for the chance to lock this stripe. 79853b381b3SDavid Woodhouse * hand the lock over to one of them. 
79953b381b3SDavid Woodhouse */ 80053b381b3SDavid Woodhouse if (!list_empty(&rbio->plug_list)) { 80153b381b3SDavid Woodhouse struct btrfs_raid_bio *next; 80253b381b3SDavid Woodhouse struct list_head *head = rbio->plug_list.next; 80353b381b3SDavid Woodhouse 80453b381b3SDavid Woodhouse next = list_entry(head, struct btrfs_raid_bio, 80553b381b3SDavid Woodhouse plug_list); 80653b381b3SDavid Woodhouse 80753b381b3SDavid Woodhouse list_del_init(&rbio->plug_list); 80853b381b3SDavid Woodhouse 80953b381b3SDavid Woodhouse list_add(&next->hash_list, &h->hash_list); 81053b381b3SDavid Woodhouse atomic_inc(&next->refs); 81153b381b3SDavid Woodhouse spin_unlock(&rbio->bio_list_lock); 81253b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 81353b381b3SDavid Woodhouse 8141b94b556SMiao Xie if (next->operation == BTRFS_RBIO_READ_REBUILD) 81553b381b3SDavid Woodhouse async_read_rebuild(next); 8161b94b556SMiao Xie else if (next->operation == BTRFS_RBIO_WRITE) { 8174ae10b3aSChris Mason steal_rbio(rbio, next); 81853b381b3SDavid Woodhouse async_rmw_stripe(next); 8195a6ac9eaSMiao Xie } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) { 8205a6ac9eaSMiao Xie steal_rbio(rbio, next); 8215a6ac9eaSMiao Xie async_scrub_parity(next); 8224ae10b3aSChris Mason } 82353b381b3SDavid Woodhouse 82453b381b3SDavid Woodhouse goto done_nolock; 82553b381b3SDavid Woodhouse } else if (waitqueue_active(&h->wait)) { 82653b381b3SDavid Woodhouse spin_unlock(&rbio->bio_list_lock); 82753b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 82853b381b3SDavid Woodhouse wake_up(&h->wait); 82953b381b3SDavid Woodhouse goto done_nolock; 83053b381b3SDavid Woodhouse } 83153b381b3SDavid Woodhouse } 8324ae10b3aSChris Mason done: 83353b381b3SDavid Woodhouse spin_unlock(&rbio->bio_list_lock); 83453b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 83553b381b3SDavid Woodhouse 83653b381b3SDavid Woodhouse done_nolock: 8374ae10b3aSChris Mason if (!keep_cache) 8384ae10b3aSChris Mason remove_rbio_from_cache(rbio); 83953b381b3SDavid Woodhouse } 84053b381b3SDavid Woodhouse 841af8e2d1dSMiao Xie static inline void 842af8e2d1dSMiao Xie __free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need) 843af8e2d1dSMiao Xie { 844af8e2d1dSMiao Xie if (need) { 845af8e2d1dSMiao Xie kfree(raid_map); 846af8e2d1dSMiao Xie kfree(bbio); 847af8e2d1dSMiao Xie } 848af8e2d1dSMiao Xie } 849af8e2d1dSMiao Xie 850af8e2d1dSMiao Xie static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio) 851af8e2d1dSMiao Xie { 852af8e2d1dSMiao Xie __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map, 853af8e2d1dSMiao Xie !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); 854af8e2d1dSMiao Xie } 855af8e2d1dSMiao Xie 85653b381b3SDavid Woodhouse static void __free_raid_bio(struct btrfs_raid_bio *rbio) 85753b381b3SDavid Woodhouse { 85853b381b3SDavid Woodhouse int i; 85953b381b3SDavid Woodhouse 86053b381b3SDavid Woodhouse WARN_ON(atomic_read(&rbio->refs) < 0); 86153b381b3SDavid Woodhouse if (!atomic_dec_and_test(&rbio->refs)) 86253b381b3SDavid Woodhouse return; 86353b381b3SDavid Woodhouse 8644ae10b3aSChris Mason WARN_ON(!list_empty(&rbio->stripe_cache)); 86553b381b3SDavid Woodhouse WARN_ON(!list_empty(&rbio->hash_list)); 86653b381b3SDavid Woodhouse WARN_ON(!bio_list_empty(&rbio->bio_list)); 86753b381b3SDavid Woodhouse 86853b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_pages; i++) { 86953b381b3SDavid Woodhouse if (rbio->stripe_pages[i]) { 87053b381b3SDavid Woodhouse __free_page(rbio->stripe_pages[i]); 87153b381b3SDavid Woodhouse rbio->stripe_pages[i] 
= NULL; 87253b381b3SDavid Woodhouse } 87353b381b3SDavid Woodhouse } 874af8e2d1dSMiao Xie 875af8e2d1dSMiao Xie free_bbio_and_raid_map(rbio); 876af8e2d1dSMiao Xie 87753b381b3SDavid Woodhouse kfree(rbio); 87853b381b3SDavid Woodhouse } 87953b381b3SDavid Woodhouse 88053b381b3SDavid Woodhouse static void free_raid_bio(struct btrfs_raid_bio *rbio) 88153b381b3SDavid Woodhouse { 88253b381b3SDavid Woodhouse unlock_stripe(rbio); 88353b381b3SDavid Woodhouse __free_raid_bio(rbio); 88453b381b3SDavid Woodhouse } 88553b381b3SDavid Woodhouse 88653b381b3SDavid Woodhouse /* 88753b381b3SDavid Woodhouse * this frees the rbio and runs through all the bios in the 88853b381b3SDavid Woodhouse * bio_list and calls end_io on them 88953b381b3SDavid Woodhouse */ 89053b381b3SDavid Woodhouse static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate) 89153b381b3SDavid Woodhouse { 89253b381b3SDavid Woodhouse struct bio *cur = bio_list_get(&rbio->bio_list); 89353b381b3SDavid Woodhouse struct bio *next; 89453b381b3SDavid Woodhouse free_raid_bio(rbio); 89553b381b3SDavid Woodhouse 89653b381b3SDavid Woodhouse while (cur) { 89753b381b3SDavid Woodhouse next = cur->bi_next; 89853b381b3SDavid Woodhouse cur->bi_next = NULL; 89953b381b3SDavid Woodhouse if (uptodate) 90053b381b3SDavid Woodhouse set_bit(BIO_UPTODATE, &cur->bi_flags); 90153b381b3SDavid Woodhouse bio_endio(cur, err); 90253b381b3SDavid Woodhouse cur = next; 90353b381b3SDavid Woodhouse } 90453b381b3SDavid Woodhouse } 90553b381b3SDavid Woodhouse 90653b381b3SDavid Woodhouse /* 90753b381b3SDavid Woodhouse * end io function used by finish_rmw. When we finally 90853b381b3SDavid Woodhouse * get here, we've written a full stripe 90953b381b3SDavid Woodhouse */ 91053b381b3SDavid Woodhouse static void raid_write_end_io(struct bio *bio, int err) 91153b381b3SDavid Woodhouse { 91253b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 91353b381b3SDavid Woodhouse 91453b381b3SDavid Woodhouse if (err) 91553b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 91653b381b3SDavid Woodhouse 91753b381b3SDavid Woodhouse bio_put(bio); 91853b381b3SDavid Woodhouse 919b89e1b01SMiao Xie if (!atomic_dec_and_test(&rbio->stripes_pending)) 92053b381b3SDavid Woodhouse return; 92153b381b3SDavid Woodhouse 92253b381b3SDavid Woodhouse err = 0; 92353b381b3SDavid Woodhouse 92453b381b3SDavid Woodhouse /* OK, we have read all the stripes we need to. */ 925b89e1b01SMiao Xie if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 92653b381b3SDavid Woodhouse err = -EIO; 92753b381b3SDavid Woodhouse 92853b381b3SDavid Woodhouse rbio_orig_end_io(rbio, err, 0); 92953b381b3SDavid Woodhouse return; 93053b381b3SDavid Woodhouse } 93153b381b3SDavid Woodhouse 93253b381b3SDavid Woodhouse /* 93353b381b3SDavid Woodhouse * the read/modify/write code wants to use the original bio for 93453b381b3SDavid Woodhouse * any pages it included, and then use the rbio for everything 93553b381b3SDavid Woodhouse * else. This function decides if a given index (stripe number) 93653b381b3SDavid Woodhouse * and page number in that stripe fall inside the original bio 93753b381b3SDavid Woodhouse * or the rbio. 93853b381b3SDavid Woodhouse * 93953b381b3SDavid Woodhouse * if you set bio_list_only, you'll get a NULL back for any ranges 94053b381b3SDavid Woodhouse * that are outside the bio_list 94153b381b3SDavid Woodhouse * 94253b381b3SDavid Woodhouse * This doesn't take any refs on anything, you get a bare page pointer 94353b381b3SDavid Woodhouse * and the caller must bump refs as required. 
94453b381b3SDavid Woodhouse  *
94553b381b3SDavid Woodhouse  * You must call index_rbio_pages once before you can trust
94653b381b3SDavid Woodhouse  * the answers from this function.
94753b381b3SDavid Woodhouse  */
94853b381b3SDavid Woodhouse static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
94953b381b3SDavid Woodhouse 				 int index, int pagenr, int bio_list_only)
95053b381b3SDavid Woodhouse {
95153b381b3SDavid Woodhouse 	int chunk_page;
95253b381b3SDavid Woodhouse 	struct page *p = NULL;
95353b381b3SDavid Woodhouse 
95453b381b3SDavid Woodhouse 	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
95553b381b3SDavid Woodhouse 
95653b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
95753b381b3SDavid Woodhouse 	p = rbio->bio_pages[chunk_page];
95853b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
95953b381b3SDavid Woodhouse 
96053b381b3SDavid Woodhouse 	if (p || bio_list_only)
96153b381b3SDavid Woodhouse 		return p;
96253b381b3SDavid Woodhouse 
96353b381b3SDavid Woodhouse 	return rbio->stripe_pages[chunk_page];
96453b381b3SDavid Woodhouse }
96553b381b3SDavid Woodhouse 
96653b381b3SDavid Woodhouse /*
96753b381b3SDavid Woodhouse  * number of pages we need for the entire stripe across all the
96853b381b3SDavid Woodhouse  * drives
96953b381b3SDavid Woodhouse  */
97053b381b3SDavid Woodhouse static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
97153b381b3SDavid Woodhouse {
97253b381b3SDavid Woodhouse 	unsigned long nr = stripe_len * nr_stripes;
973ed6078f7SDavid Sterba 	return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
97453b381b3SDavid Woodhouse }
97553b381b3SDavid Woodhouse 
97653b381b3SDavid Woodhouse /*
97753b381b3SDavid Woodhouse  * allocation and initial setup for the btrfs_raid_bio. Note that
97853b381b3SDavid Woodhouse  * this does not allocate any pages for rbio->pages.
97953b381b3SDavid Woodhouse */ 98053b381b3SDavid Woodhouse static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, 98153b381b3SDavid Woodhouse struct btrfs_bio *bbio, u64 *raid_map, 98253b381b3SDavid Woodhouse u64 stripe_len) 98353b381b3SDavid Woodhouse { 98453b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 98553b381b3SDavid Woodhouse int nr_data = 0; 986*2c8cdd6eSMiao Xie int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; 987*2c8cdd6eSMiao Xie int num_pages = rbio_nr_pages(stripe_len, real_stripes); 9885a6ac9eaSMiao Xie int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE); 98953b381b3SDavid Woodhouse void *p; 99053b381b3SDavid Woodhouse 9915a6ac9eaSMiao Xie rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 + 9925a6ac9eaSMiao Xie DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8), 99353b381b3SDavid Woodhouse GFP_NOFS); 994af8e2d1dSMiao Xie if (!rbio) 99553b381b3SDavid Woodhouse return ERR_PTR(-ENOMEM); 99653b381b3SDavid Woodhouse 99753b381b3SDavid Woodhouse bio_list_init(&rbio->bio_list); 99853b381b3SDavid Woodhouse INIT_LIST_HEAD(&rbio->plug_list); 99953b381b3SDavid Woodhouse spin_lock_init(&rbio->bio_list_lock); 10004ae10b3aSChris Mason INIT_LIST_HEAD(&rbio->stripe_cache); 100153b381b3SDavid Woodhouse INIT_LIST_HEAD(&rbio->hash_list); 100253b381b3SDavid Woodhouse rbio->bbio = bbio; 100353b381b3SDavid Woodhouse rbio->raid_map = raid_map; 100453b381b3SDavid Woodhouse rbio->fs_info = root->fs_info; 100553b381b3SDavid Woodhouse rbio->stripe_len = stripe_len; 100653b381b3SDavid Woodhouse rbio->nr_pages = num_pages; 1007*2c8cdd6eSMiao Xie rbio->real_stripes = real_stripes; 10085a6ac9eaSMiao Xie rbio->stripe_npages = stripe_npages; 100953b381b3SDavid Woodhouse rbio->faila = -1; 101053b381b3SDavid Woodhouse rbio->failb = -1; 101153b381b3SDavid Woodhouse atomic_set(&rbio->refs, 1); 1012b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 1013b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, 0); 101453b381b3SDavid Woodhouse 101553b381b3SDavid Woodhouse /* 101653b381b3SDavid Woodhouse * the stripe_pages and bio_pages array point to the extra 101753b381b3SDavid Woodhouse * memory we allocated past the end of the rbio 101853b381b3SDavid Woodhouse */ 101953b381b3SDavid Woodhouse p = rbio + 1; 102053b381b3SDavid Woodhouse rbio->stripe_pages = p; 102153b381b3SDavid Woodhouse rbio->bio_pages = p + sizeof(struct page *) * num_pages; 10225a6ac9eaSMiao Xie rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; 102353b381b3SDavid Woodhouse 1024*2c8cdd6eSMiao Xie if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) 1025*2c8cdd6eSMiao Xie nr_data = real_stripes - 2; 102653b381b3SDavid Woodhouse else 1027*2c8cdd6eSMiao Xie nr_data = real_stripes - 1; 102853b381b3SDavid Woodhouse 102953b381b3SDavid Woodhouse rbio->nr_data = nr_data; 103053b381b3SDavid Woodhouse return rbio; 103153b381b3SDavid Woodhouse } 103253b381b3SDavid Woodhouse 103353b381b3SDavid Woodhouse /* allocate pages for all the stripes in the bio, including parity */ 103453b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) 103553b381b3SDavid Woodhouse { 103653b381b3SDavid Woodhouse int i; 103753b381b3SDavid Woodhouse struct page *page; 103853b381b3SDavid Woodhouse 103953b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_pages; i++) { 104053b381b3SDavid Woodhouse if (rbio->stripe_pages[i]) 104153b381b3SDavid Woodhouse continue; 104253b381b3SDavid Woodhouse page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 104353b381b3SDavid Woodhouse if (!page) 104453b381b3SDavid Woodhouse return -ENOMEM; 
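		/* stash the new page in stripe_pages; it stays !Uptodate until real stripe data is read or written into it */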
104553b381b3SDavid Woodhouse rbio->stripe_pages[i] = page; 104653b381b3SDavid Woodhouse ClearPageUptodate(page); 104753b381b3SDavid Woodhouse } 104853b381b3SDavid Woodhouse return 0; 104953b381b3SDavid Woodhouse } 105053b381b3SDavid Woodhouse 105153b381b3SDavid Woodhouse /* allocate pages for just the p/q stripes */ 105253b381b3SDavid Woodhouse static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) 105353b381b3SDavid Woodhouse { 105453b381b3SDavid Woodhouse int i; 105553b381b3SDavid Woodhouse struct page *page; 105653b381b3SDavid Woodhouse 105753b381b3SDavid Woodhouse i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT; 105853b381b3SDavid Woodhouse 105953b381b3SDavid Woodhouse for (; i < rbio->nr_pages; i++) { 106053b381b3SDavid Woodhouse if (rbio->stripe_pages[i]) 106153b381b3SDavid Woodhouse continue; 106253b381b3SDavid Woodhouse page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 106353b381b3SDavid Woodhouse if (!page) 106453b381b3SDavid Woodhouse return -ENOMEM; 106553b381b3SDavid Woodhouse rbio->stripe_pages[i] = page; 106653b381b3SDavid Woodhouse } 106753b381b3SDavid Woodhouse return 0; 106853b381b3SDavid Woodhouse } 106953b381b3SDavid Woodhouse 107053b381b3SDavid Woodhouse /* 107153b381b3SDavid Woodhouse * add a single page from a specific stripe into our list of bios for IO 107253b381b3SDavid Woodhouse * this will try to merge into existing bios if possible, and returns 107353b381b3SDavid Woodhouse * zero if all went well. 107453b381b3SDavid Woodhouse */ 107548a3b636SEric Sandeen static int rbio_add_io_page(struct btrfs_raid_bio *rbio, 107653b381b3SDavid Woodhouse struct bio_list *bio_list, 107753b381b3SDavid Woodhouse struct page *page, 107853b381b3SDavid Woodhouse int stripe_nr, 107953b381b3SDavid Woodhouse unsigned long page_index, 108053b381b3SDavid Woodhouse unsigned long bio_max_len) 108153b381b3SDavid Woodhouse { 108253b381b3SDavid Woodhouse struct bio *last = bio_list->tail; 108353b381b3SDavid Woodhouse u64 last_end = 0; 108453b381b3SDavid Woodhouse int ret; 108553b381b3SDavid Woodhouse struct bio *bio; 108653b381b3SDavid Woodhouse struct btrfs_bio_stripe *stripe; 108753b381b3SDavid Woodhouse u64 disk_start; 108853b381b3SDavid Woodhouse 108953b381b3SDavid Woodhouse stripe = &rbio->bbio->stripes[stripe_nr]; 109053b381b3SDavid Woodhouse disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT); 109153b381b3SDavid Woodhouse 109253b381b3SDavid Woodhouse /* if the device is missing, just fail this stripe */ 109353b381b3SDavid Woodhouse if (!stripe->dev->bdev) 109453b381b3SDavid Woodhouse return fail_rbio_index(rbio, stripe_nr); 109553b381b3SDavid Woodhouse 109653b381b3SDavid Woodhouse /* see if we can add this page onto our existing bio */ 109753b381b3SDavid Woodhouse if (last) { 10984f024f37SKent Overstreet last_end = (u64)last->bi_iter.bi_sector << 9; 10994f024f37SKent Overstreet last_end += last->bi_iter.bi_size; 110053b381b3SDavid Woodhouse 110153b381b3SDavid Woodhouse /* 110253b381b3SDavid Woodhouse * we can't merge these if they are from different 110353b381b3SDavid Woodhouse * devices or if they are not contiguous 110453b381b3SDavid Woodhouse */ 110553b381b3SDavid Woodhouse if (last_end == disk_start && stripe->dev->bdev && 110653b381b3SDavid Woodhouse test_bit(BIO_UPTODATE, &last->bi_flags) && 110753b381b3SDavid Woodhouse last->bi_bdev == stripe->dev->bdev) { 110853b381b3SDavid Woodhouse ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); 110953b381b3SDavid Woodhouse if (ret == PAGE_CACHE_SIZE) 111053b381b3SDavid Woodhouse return 0; 111153b381b3SDavid 
Woodhouse } 111253b381b3SDavid Woodhouse } 111353b381b3SDavid Woodhouse 111453b381b3SDavid Woodhouse /* put a new bio on the list */ 11159be3395bSChris Mason bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); 111653b381b3SDavid Woodhouse if (!bio) 111753b381b3SDavid Woodhouse return -ENOMEM; 111853b381b3SDavid Woodhouse 11194f024f37SKent Overstreet bio->bi_iter.bi_size = 0; 112053b381b3SDavid Woodhouse bio->bi_bdev = stripe->dev->bdev; 11214f024f37SKent Overstreet bio->bi_iter.bi_sector = disk_start >> 9; 112253b381b3SDavid Woodhouse set_bit(BIO_UPTODATE, &bio->bi_flags); 112353b381b3SDavid Woodhouse 112453b381b3SDavid Woodhouse bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 112553b381b3SDavid Woodhouse bio_list_add(bio_list, bio); 112653b381b3SDavid Woodhouse return 0; 112753b381b3SDavid Woodhouse } 112853b381b3SDavid Woodhouse 112953b381b3SDavid Woodhouse /* 113053b381b3SDavid Woodhouse * while we're doing the read/modify/write cycle, we could 113153b381b3SDavid Woodhouse * have errors in reading pages off the disk. This checks 113253b381b3SDavid Woodhouse * for errors and if we're not able to read the page it'll 113353b381b3SDavid Woodhouse * trigger parity reconstruction. The rmw will be finished 113453b381b3SDavid Woodhouse * after we've reconstructed the failed stripes 113553b381b3SDavid Woodhouse */ 113653b381b3SDavid Woodhouse static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) 113753b381b3SDavid Woodhouse { 113853b381b3SDavid Woodhouse if (rbio->faila >= 0 || rbio->failb >= 0) { 1139*2c8cdd6eSMiao Xie BUG_ON(rbio->faila == rbio->real_stripes - 1); 114053b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 114153b381b3SDavid Woodhouse } else { 114253b381b3SDavid Woodhouse finish_rmw(rbio); 114353b381b3SDavid Woodhouse } 114453b381b3SDavid Woodhouse } 114553b381b3SDavid Woodhouse 114653b381b3SDavid Woodhouse /* 114753b381b3SDavid Woodhouse * these are just the pages from the rbio array, not from anything 114853b381b3SDavid Woodhouse * the FS sent down to us 114953b381b3SDavid Woodhouse */ 115053b381b3SDavid Woodhouse static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page) 115153b381b3SDavid Woodhouse { 115253b381b3SDavid Woodhouse int index; 115353b381b3SDavid Woodhouse index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT); 115453b381b3SDavid Woodhouse index += page; 115553b381b3SDavid Woodhouse return rbio->stripe_pages[index]; 115653b381b3SDavid Woodhouse } 115753b381b3SDavid Woodhouse 115853b381b3SDavid Woodhouse /* 115953b381b3SDavid Woodhouse * helper function to walk our bio list and populate the bio_pages array with 116053b381b3SDavid Woodhouse * the result. This seems expensive, but it is faster than constantly 116153b381b3SDavid Woodhouse * searching through the bio list as we setup the IO in finish_rmw or stripe 116253b381b3SDavid Woodhouse * reconstruction. 
116353b381b3SDavid Woodhouse * 116453b381b3SDavid Woodhouse * This must be called before you trust the answers from page_in_rbio 116553b381b3SDavid Woodhouse */ 116653b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio) 116753b381b3SDavid Woodhouse { 116853b381b3SDavid Woodhouse struct bio *bio; 116953b381b3SDavid Woodhouse u64 start; 117053b381b3SDavid Woodhouse unsigned long stripe_offset; 117153b381b3SDavid Woodhouse unsigned long page_index; 117253b381b3SDavid Woodhouse struct page *p; 117353b381b3SDavid Woodhouse int i; 117453b381b3SDavid Woodhouse 117553b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 117653b381b3SDavid Woodhouse bio_list_for_each(bio, &rbio->bio_list) { 11774f024f37SKent Overstreet start = (u64)bio->bi_iter.bi_sector << 9; 117853b381b3SDavid Woodhouse stripe_offset = start - rbio->raid_map[0]; 117953b381b3SDavid Woodhouse page_index = stripe_offset >> PAGE_CACHE_SHIFT; 118053b381b3SDavid Woodhouse 118153b381b3SDavid Woodhouse for (i = 0; i < bio->bi_vcnt; i++) { 118253b381b3SDavid Woodhouse p = bio->bi_io_vec[i].bv_page; 118353b381b3SDavid Woodhouse rbio->bio_pages[page_index + i] = p; 118453b381b3SDavid Woodhouse } 118553b381b3SDavid Woodhouse } 118653b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 118753b381b3SDavid Woodhouse } 118853b381b3SDavid Woodhouse 118953b381b3SDavid Woodhouse /* 119053b381b3SDavid Woodhouse * this is called from one of two situations. We either 119153b381b3SDavid Woodhouse * have a full stripe from the higher layers, or we've read all 119253b381b3SDavid Woodhouse * the missing bits off disk. 119353b381b3SDavid Woodhouse * 119453b381b3SDavid Woodhouse * This will calculate the parity and then send down any 119553b381b3SDavid Woodhouse * changed blocks. 119653b381b3SDavid Woodhouse */ 119753b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio) 119853b381b3SDavid Woodhouse { 119953b381b3SDavid Woodhouse struct btrfs_bio *bbio = rbio->bbio; 1200*2c8cdd6eSMiao Xie void *pointers[rbio->real_stripes]; 120153b381b3SDavid Woodhouse int stripe_len = rbio->stripe_len; 120253b381b3SDavid Woodhouse int nr_data = rbio->nr_data; 120353b381b3SDavid Woodhouse int stripe; 120453b381b3SDavid Woodhouse int pagenr; 120553b381b3SDavid Woodhouse int p_stripe = -1; 120653b381b3SDavid Woodhouse int q_stripe = -1; 120753b381b3SDavid Woodhouse struct bio_list bio_list; 120853b381b3SDavid Woodhouse struct bio *bio; 120953b381b3SDavid Woodhouse int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT; 121053b381b3SDavid Woodhouse int ret; 121153b381b3SDavid Woodhouse 121253b381b3SDavid Woodhouse bio_list_init(&bio_list); 121353b381b3SDavid Woodhouse 1214*2c8cdd6eSMiao Xie if (rbio->real_stripes - rbio->nr_data == 1) { 1215*2c8cdd6eSMiao Xie p_stripe = rbio->real_stripes - 1; 1216*2c8cdd6eSMiao Xie } else if (rbio->real_stripes - rbio->nr_data == 2) { 1217*2c8cdd6eSMiao Xie p_stripe = rbio->real_stripes - 2; 1218*2c8cdd6eSMiao Xie q_stripe = rbio->real_stripes - 1; 121953b381b3SDavid Woodhouse } else { 122053b381b3SDavid Woodhouse BUG(); 122153b381b3SDavid Woodhouse } 122253b381b3SDavid Woodhouse 122353b381b3SDavid Woodhouse /* at this point we either have a full stripe, 122453b381b3SDavid Woodhouse * or we've read the full stripe from the drive. 122553b381b3SDavid Woodhouse * recalculate the parity and write the new results. 
122653b381b3SDavid Woodhouse * 122753b381b3SDavid Woodhouse * We're not allowed to add any new bios to the 122853b381b3SDavid Woodhouse * bio list here, anyone else that wants to 122953b381b3SDavid Woodhouse * change this stripe needs to do their own rmw. 123053b381b3SDavid Woodhouse */ 123153b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 123253b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 123353b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 123453b381b3SDavid Woodhouse 1235b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 123653b381b3SDavid Woodhouse 123753b381b3SDavid Woodhouse /* 123853b381b3SDavid Woodhouse * now that we've set rmw_locked, run through the 123953b381b3SDavid Woodhouse * bio list one last time and map the page pointers 12404ae10b3aSChris Mason * 12414ae10b3aSChris Mason * We don't cache full rbios because we're assuming 12424ae10b3aSChris Mason * the higher layers are unlikely to use this area of 12434ae10b3aSChris Mason * the disk again soon. If they do use it again, 12444ae10b3aSChris Mason * hopefully they will send another full bio. 124553b381b3SDavid Woodhouse */ 124653b381b3SDavid Woodhouse index_rbio_pages(rbio); 12474ae10b3aSChris Mason if (!rbio_is_full(rbio)) 12484ae10b3aSChris Mason cache_rbio_pages(rbio); 12494ae10b3aSChris Mason else 12504ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 125153b381b3SDavid Woodhouse 125253b381b3SDavid Woodhouse for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { 125353b381b3SDavid Woodhouse struct page *p; 125453b381b3SDavid Woodhouse /* first collect one page from each data stripe */ 125553b381b3SDavid Woodhouse for (stripe = 0; stripe < nr_data; stripe++) { 125653b381b3SDavid Woodhouse p = page_in_rbio(rbio, stripe, pagenr, 0); 125753b381b3SDavid Woodhouse pointers[stripe] = kmap(p); 125853b381b3SDavid Woodhouse } 125953b381b3SDavid Woodhouse 126053b381b3SDavid Woodhouse /* then add the parity stripe */ 126153b381b3SDavid Woodhouse p = rbio_pstripe_page(rbio, pagenr); 126253b381b3SDavid Woodhouse SetPageUptodate(p); 126353b381b3SDavid Woodhouse pointers[stripe++] = kmap(p); 126453b381b3SDavid Woodhouse 126553b381b3SDavid Woodhouse if (q_stripe != -1) { 126653b381b3SDavid Woodhouse 126753b381b3SDavid Woodhouse /* 126853b381b3SDavid Woodhouse * raid6, add the qstripe and call the 126953b381b3SDavid Woodhouse * library function to fill in our p/q 127053b381b3SDavid Woodhouse */ 127153b381b3SDavid Woodhouse p = rbio_qstripe_page(rbio, pagenr); 127253b381b3SDavid Woodhouse SetPageUptodate(p); 127353b381b3SDavid Woodhouse pointers[stripe++] = kmap(p); 127453b381b3SDavid Woodhouse 1275*2c8cdd6eSMiao Xie raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, 127653b381b3SDavid Woodhouse pointers); 127753b381b3SDavid Woodhouse } else { 127853b381b3SDavid Woodhouse /* raid5 */ 127953b381b3SDavid Woodhouse memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); 128053b381b3SDavid Woodhouse run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); 128153b381b3SDavid Woodhouse } 128253b381b3SDavid Woodhouse 128353b381b3SDavid Woodhouse 1284*2c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) 128553b381b3SDavid Woodhouse kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); 128653b381b3SDavid Woodhouse } 128753b381b3SDavid Woodhouse 128853b381b3SDavid Woodhouse /* 128953b381b3SDavid Woodhouse * time to start writing. Make bios for everything from the 129053b381b3SDavid Woodhouse * higher layers (the bio_list in our rbio) and our p/q. 
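 * If a device replace is running, the same pages are also queued a second
 * time for the replace target found in bbio->tgtdev_map.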
Ignore 129153b381b3SDavid Woodhouse * everything else. 129253b381b3SDavid Woodhouse */ 1293*2c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 129453b381b3SDavid Woodhouse for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { 129553b381b3SDavid Woodhouse struct page *page; 129653b381b3SDavid Woodhouse if (stripe < rbio->nr_data) { 129753b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 1); 129853b381b3SDavid Woodhouse if (!page) 129953b381b3SDavid Woodhouse continue; 130053b381b3SDavid Woodhouse } else { 130153b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 130253b381b3SDavid Woodhouse } 130353b381b3SDavid Woodhouse 130453b381b3SDavid Woodhouse ret = rbio_add_io_page(rbio, &bio_list, 130553b381b3SDavid Woodhouse page, stripe, pagenr, rbio->stripe_len); 130653b381b3SDavid Woodhouse if (ret) 130753b381b3SDavid Woodhouse goto cleanup; 130853b381b3SDavid Woodhouse } 130953b381b3SDavid Woodhouse } 131053b381b3SDavid Woodhouse 1311*2c8cdd6eSMiao Xie if (likely(!bbio->num_tgtdevs)) 1312*2c8cdd6eSMiao Xie goto write_data; 1313*2c8cdd6eSMiao Xie 1314*2c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 1315*2c8cdd6eSMiao Xie if (!bbio->tgtdev_map[stripe]) 1316*2c8cdd6eSMiao Xie continue; 1317*2c8cdd6eSMiao Xie 1318*2c8cdd6eSMiao Xie for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { 1319*2c8cdd6eSMiao Xie struct page *page; 1320*2c8cdd6eSMiao Xie if (stripe < rbio->nr_data) { 1321*2c8cdd6eSMiao Xie page = page_in_rbio(rbio, stripe, pagenr, 1); 1322*2c8cdd6eSMiao Xie if (!page) 1323*2c8cdd6eSMiao Xie continue; 1324*2c8cdd6eSMiao Xie } else { 1325*2c8cdd6eSMiao Xie page = rbio_stripe_page(rbio, stripe, pagenr); 1326*2c8cdd6eSMiao Xie } 1327*2c8cdd6eSMiao Xie 1328*2c8cdd6eSMiao Xie ret = rbio_add_io_page(rbio, &bio_list, page, 1329*2c8cdd6eSMiao Xie rbio->bbio->tgtdev_map[stripe], 1330*2c8cdd6eSMiao Xie pagenr, rbio->stripe_len); 1331*2c8cdd6eSMiao Xie if (ret) 1332*2c8cdd6eSMiao Xie goto cleanup; 1333*2c8cdd6eSMiao Xie } 1334*2c8cdd6eSMiao Xie } 1335*2c8cdd6eSMiao Xie 1336*2c8cdd6eSMiao Xie write_data: 1337b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); 1338b89e1b01SMiao Xie BUG_ON(atomic_read(&rbio->stripes_pending) == 0); 133953b381b3SDavid Woodhouse 134053b381b3SDavid Woodhouse while (1) { 134153b381b3SDavid Woodhouse bio = bio_list_pop(&bio_list); 134253b381b3SDavid Woodhouse if (!bio) 134353b381b3SDavid Woodhouse break; 134453b381b3SDavid Woodhouse 134553b381b3SDavid Woodhouse bio->bi_private = rbio; 134653b381b3SDavid Woodhouse bio->bi_end_io = raid_write_end_io; 134753b381b3SDavid Woodhouse BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); 134853b381b3SDavid Woodhouse submit_bio(WRITE, bio); 134953b381b3SDavid Woodhouse } 135053b381b3SDavid Woodhouse return; 135153b381b3SDavid Woodhouse 135253b381b3SDavid Woodhouse cleanup: 135353b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 135453b381b3SDavid Woodhouse } 135553b381b3SDavid Woodhouse 135653b381b3SDavid Woodhouse /* 135753b381b3SDavid Woodhouse * helper to find the stripe number for a given bio. Used to figure out which 135853b381b3SDavid Woodhouse * stripe has failed. This expects the bio to correspond to a physical disk, 135953b381b3SDavid Woodhouse * so it looks up based on physical sector numbers. 
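 * A stripe matches when the bio's device and starting sector fall inside
 * that stripe's physical range.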
136053b381b3SDavid Woodhouse */ 136153b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio, 136253b381b3SDavid Woodhouse struct bio *bio) 136353b381b3SDavid Woodhouse { 13644f024f37SKent Overstreet u64 physical = bio->bi_iter.bi_sector; 136553b381b3SDavid Woodhouse u64 stripe_start; 136653b381b3SDavid Woodhouse int i; 136753b381b3SDavid Woodhouse struct btrfs_bio_stripe *stripe; 136853b381b3SDavid Woodhouse 136953b381b3SDavid Woodhouse physical <<= 9; 137053b381b3SDavid Woodhouse 137153b381b3SDavid Woodhouse for (i = 0; i < rbio->bbio->num_stripes; i++) { 137253b381b3SDavid Woodhouse stripe = &rbio->bbio->stripes[i]; 137353b381b3SDavid Woodhouse stripe_start = stripe->physical; 137453b381b3SDavid Woodhouse if (physical >= stripe_start && 1375*2c8cdd6eSMiao Xie physical < stripe_start + rbio->stripe_len && 1376*2c8cdd6eSMiao Xie bio->bi_bdev == stripe->dev->bdev) { 137753b381b3SDavid Woodhouse return i; 137853b381b3SDavid Woodhouse } 137953b381b3SDavid Woodhouse } 138053b381b3SDavid Woodhouse return -1; 138153b381b3SDavid Woodhouse } 138253b381b3SDavid Woodhouse 138353b381b3SDavid Woodhouse /* 138453b381b3SDavid Woodhouse * helper to find the stripe number for a given 138553b381b3SDavid Woodhouse * bio (before mapping). Used to figure out which stripe has 138653b381b3SDavid Woodhouse * failed. This looks up based on logical block numbers. 138753b381b3SDavid Woodhouse */ 138853b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, 138953b381b3SDavid Woodhouse struct bio *bio) 139053b381b3SDavid Woodhouse { 13914f024f37SKent Overstreet u64 logical = bio->bi_iter.bi_sector; 139253b381b3SDavid Woodhouse u64 stripe_start; 139353b381b3SDavid Woodhouse int i; 139453b381b3SDavid Woodhouse 139553b381b3SDavid Woodhouse logical <<= 9; 139653b381b3SDavid Woodhouse 139753b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_data; i++) { 139853b381b3SDavid Woodhouse stripe_start = rbio->raid_map[i]; 139953b381b3SDavid Woodhouse if (logical >= stripe_start && 140053b381b3SDavid Woodhouse logical < stripe_start + rbio->stripe_len) { 140153b381b3SDavid Woodhouse return i; 140253b381b3SDavid Woodhouse } 140353b381b3SDavid Woodhouse } 140453b381b3SDavid Woodhouse return -1; 140553b381b3SDavid Woodhouse } 140653b381b3SDavid Woodhouse 140753b381b3SDavid Woodhouse /* 140853b381b3SDavid Woodhouse * returns -EIO if we had too many failures 140953b381b3SDavid Woodhouse */ 141053b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) 141153b381b3SDavid Woodhouse { 141253b381b3SDavid Woodhouse unsigned long flags; 141353b381b3SDavid Woodhouse int ret = 0; 141453b381b3SDavid Woodhouse 141553b381b3SDavid Woodhouse spin_lock_irqsave(&rbio->bio_list_lock, flags); 141653b381b3SDavid Woodhouse 141753b381b3SDavid Woodhouse /* we already know this stripe is bad, move on */ 141853b381b3SDavid Woodhouse if (rbio->faila == failed || rbio->failb == failed) 141953b381b3SDavid Woodhouse goto out; 142053b381b3SDavid Woodhouse 142153b381b3SDavid Woodhouse if (rbio->faila == -1) { 142253b381b3SDavid Woodhouse /* first failure on this rbio */ 142353b381b3SDavid Woodhouse rbio->faila = failed; 1424b89e1b01SMiao Xie atomic_inc(&rbio->error); 142553b381b3SDavid Woodhouse } else if (rbio->failb == -1) { 142653b381b3SDavid Woodhouse /* second failure on this rbio */ 142753b381b3SDavid Woodhouse rbio->failb = failed; 1428b89e1b01SMiao Xie atomic_inc(&rbio->error); 142953b381b3SDavid Woodhouse } else { 143053b381b3SDavid Woodhouse ret = -EIO; 
143153b381b3SDavid Woodhouse } 143253b381b3SDavid Woodhouse out: 143353b381b3SDavid Woodhouse spin_unlock_irqrestore(&rbio->bio_list_lock, flags); 143453b381b3SDavid Woodhouse 143553b381b3SDavid Woodhouse return ret; 143653b381b3SDavid Woodhouse } 143753b381b3SDavid Woodhouse 143853b381b3SDavid Woodhouse /* 143953b381b3SDavid Woodhouse * helper to fail a stripe based on a physical disk 144053b381b3SDavid Woodhouse * bio. 144153b381b3SDavid Woodhouse */ 144253b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, 144353b381b3SDavid Woodhouse struct bio *bio) 144453b381b3SDavid Woodhouse { 144553b381b3SDavid Woodhouse int failed = find_bio_stripe(rbio, bio); 144653b381b3SDavid Woodhouse 144753b381b3SDavid Woodhouse if (failed < 0) 144853b381b3SDavid Woodhouse return -EIO; 144953b381b3SDavid Woodhouse 145053b381b3SDavid Woodhouse return fail_rbio_index(rbio, failed); 145153b381b3SDavid Woodhouse } 145253b381b3SDavid Woodhouse 145353b381b3SDavid Woodhouse /* 145453b381b3SDavid Woodhouse * this sets each page in the bio uptodate. It should only be used on private 145553b381b3SDavid Woodhouse * rbio pages, nothing that comes in from the higher layers 145653b381b3SDavid Woodhouse */ 145753b381b3SDavid Woodhouse static void set_bio_pages_uptodate(struct bio *bio) 145853b381b3SDavid Woodhouse { 145953b381b3SDavid Woodhouse int i; 146053b381b3SDavid Woodhouse struct page *p; 146153b381b3SDavid Woodhouse 146253b381b3SDavid Woodhouse for (i = 0; i < bio->bi_vcnt; i++) { 146353b381b3SDavid Woodhouse p = bio->bi_io_vec[i].bv_page; 146453b381b3SDavid Woodhouse SetPageUptodate(p); 146553b381b3SDavid Woodhouse } 146653b381b3SDavid Woodhouse } 146753b381b3SDavid Woodhouse 146853b381b3SDavid Woodhouse /* 146953b381b3SDavid Woodhouse * end io for the read phase of the rmw cycle. All the bios here are physical 147053b381b3SDavid Woodhouse * stripe bios we've read from the disk so we can recalculate the parity of the 147153b381b3SDavid Woodhouse * stripe. 
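 * A successful read marks its pages uptodate; a failed read marks the
 * owning stripe bad via fail_bio_stripe().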
147253b381b3SDavid Woodhouse * 147353b381b3SDavid Woodhouse * This will usually kick off finish_rmw once all the bios are read in, but it 147453b381b3SDavid Woodhouse * may trigger parity reconstruction if we had any errors along the way 147553b381b3SDavid Woodhouse */ 147653b381b3SDavid Woodhouse static void raid_rmw_end_io(struct bio *bio, int err) 147753b381b3SDavid Woodhouse { 147853b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 147953b381b3SDavid Woodhouse 148053b381b3SDavid Woodhouse if (err) 148153b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 148253b381b3SDavid Woodhouse else 148353b381b3SDavid Woodhouse set_bio_pages_uptodate(bio); 148453b381b3SDavid Woodhouse 148553b381b3SDavid Woodhouse bio_put(bio); 148653b381b3SDavid Woodhouse 1487b89e1b01SMiao Xie if (!atomic_dec_and_test(&rbio->stripes_pending)) 148853b381b3SDavid Woodhouse return; 148953b381b3SDavid Woodhouse 149053b381b3SDavid Woodhouse err = 0; 1491b89e1b01SMiao Xie if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 149253b381b3SDavid Woodhouse goto cleanup; 149353b381b3SDavid Woodhouse 149453b381b3SDavid Woodhouse /* 149553b381b3SDavid Woodhouse * this will normally call finish_rmw to start our write 149653b381b3SDavid Woodhouse * but if there are any failed stripes we'll reconstruct 149753b381b3SDavid Woodhouse * from parity first 149853b381b3SDavid Woodhouse */ 149953b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 150053b381b3SDavid Woodhouse return; 150153b381b3SDavid Woodhouse 150253b381b3SDavid Woodhouse cleanup: 150353b381b3SDavid Woodhouse 150453b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 150553b381b3SDavid Woodhouse } 150653b381b3SDavid Woodhouse 150753b381b3SDavid Woodhouse static void async_rmw_stripe(struct btrfs_raid_bio *rbio) 150853b381b3SDavid Woodhouse { 15099e0af237SLiu Bo btrfs_init_work(&rbio->work, btrfs_rmw_helper, 15109e0af237SLiu Bo rmw_work, NULL, NULL); 151153b381b3SDavid Woodhouse 1512d05a33acSQu Wenruo btrfs_queue_work(rbio->fs_info->rmw_workers, 151353b381b3SDavid Woodhouse &rbio->work); 151453b381b3SDavid Woodhouse } 151553b381b3SDavid Woodhouse 151653b381b3SDavid Woodhouse static void async_read_rebuild(struct btrfs_raid_bio *rbio) 151753b381b3SDavid Woodhouse { 15189e0af237SLiu Bo btrfs_init_work(&rbio->work, btrfs_rmw_helper, 15199e0af237SLiu Bo read_rebuild_work, NULL, NULL); 152053b381b3SDavid Woodhouse 1521d05a33acSQu Wenruo btrfs_queue_work(rbio->fs_info->rmw_workers, 152253b381b3SDavid Woodhouse &rbio->work); 152353b381b3SDavid Woodhouse } 152453b381b3SDavid Woodhouse 152553b381b3SDavid Woodhouse /* 152653b381b3SDavid Woodhouse * the stripe must be locked by the caller. 
It will 152753b381b3SDavid Woodhouse * unlock after all the writes are done 152853b381b3SDavid Woodhouse */ 152953b381b3SDavid Woodhouse static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) 153053b381b3SDavid Woodhouse { 153153b381b3SDavid Woodhouse int bios_to_read = 0; 153253b381b3SDavid Woodhouse struct bio_list bio_list; 153353b381b3SDavid Woodhouse int ret; 1534ed6078f7SDavid Sterba int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); 153553b381b3SDavid Woodhouse int pagenr; 153653b381b3SDavid Woodhouse int stripe; 153753b381b3SDavid Woodhouse struct bio *bio; 153853b381b3SDavid Woodhouse 153953b381b3SDavid Woodhouse bio_list_init(&bio_list); 154053b381b3SDavid Woodhouse 154153b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 154253b381b3SDavid Woodhouse if (ret) 154353b381b3SDavid Woodhouse goto cleanup; 154453b381b3SDavid Woodhouse 154553b381b3SDavid Woodhouse index_rbio_pages(rbio); 154653b381b3SDavid Woodhouse 1547b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 154853b381b3SDavid Woodhouse /* 154953b381b3SDavid Woodhouse * build a list of bios to read all the missing parts of this 155053b381b3SDavid Woodhouse * stripe 155153b381b3SDavid Woodhouse */ 155253b381b3SDavid Woodhouse for (stripe = 0; stripe < rbio->nr_data; stripe++) { 155353b381b3SDavid Woodhouse for (pagenr = 0; pagenr < nr_pages; pagenr++) { 155453b381b3SDavid Woodhouse struct page *page; 155553b381b3SDavid Woodhouse /* 155653b381b3SDavid Woodhouse * we want to find all the pages missing from 155753b381b3SDavid Woodhouse * the rbio and read them from the disk. If 155853b381b3SDavid Woodhouse * page_in_rbio finds a page in the bio list 155953b381b3SDavid Woodhouse * we don't need to read it off the stripe. 156053b381b3SDavid Woodhouse */ 156153b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 1); 156253b381b3SDavid Woodhouse if (page) 156353b381b3SDavid Woodhouse continue; 156453b381b3SDavid Woodhouse 156553b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 15664ae10b3aSChris Mason /* 15674ae10b3aSChris Mason * the bio cache may have handed us an uptodate 15684ae10b3aSChris Mason * page. If so, be happy and use it 15694ae10b3aSChris Mason */ 15704ae10b3aSChris Mason if (PageUptodate(page)) 15714ae10b3aSChris Mason continue; 15724ae10b3aSChris Mason 157353b381b3SDavid Woodhouse ret = rbio_add_io_page(rbio, &bio_list, page, 157453b381b3SDavid Woodhouse stripe, pagenr, rbio->stripe_len); 157553b381b3SDavid Woodhouse if (ret) 157653b381b3SDavid Woodhouse goto cleanup; 157753b381b3SDavid Woodhouse } 157853b381b3SDavid Woodhouse } 157953b381b3SDavid Woodhouse 158053b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 158153b381b3SDavid Woodhouse if (!bios_to_read) { 158253b381b3SDavid Woodhouse /* 158353b381b3SDavid Woodhouse * this can happen if others have merged with 158453b381b3SDavid Woodhouse * us, it means there is nothing left to read. 158553b381b3SDavid Woodhouse * But if there are missing devices it may not be 158653b381b3SDavid Woodhouse * safe to do the full stripe write yet. 158753b381b3SDavid Woodhouse */ 158853b381b3SDavid Woodhouse goto finish; 158953b381b3SDavid Woodhouse } 159053b381b3SDavid Woodhouse 159153b381b3SDavid Woodhouse /* 159253b381b3SDavid Woodhouse * the bbio may be freed once we submit the last bio. 
Make sure 159353b381b3SDavid Woodhouse * not to touch it after that 159453b381b3SDavid Woodhouse */ 1595b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bios_to_read); 159653b381b3SDavid Woodhouse while (1) { 159753b381b3SDavid Woodhouse bio = bio_list_pop(&bio_list); 159853b381b3SDavid Woodhouse if (!bio) 159953b381b3SDavid Woodhouse break; 160053b381b3SDavid Woodhouse 160153b381b3SDavid Woodhouse bio->bi_private = rbio; 160253b381b3SDavid Woodhouse bio->bi_end_io = raid_rmw_end_io; 160353b381b3SDavid Woodhouse 160453b381b3SDavid Woodhouse btrfs_bio_wq_end_io(rbio->fs_info, bio, 160553b381b3SDavid Woodhouse BTRFS_WQ_ENDIO_RAID56); 160653b381b3SDavid Woodhouse 160753b381b3SDavid Woodhouse BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); 160853b381b3SDavid Woodhouse submit_bio(READ, bio); 160953b381b3SDavid Woodhouse } 161053b381b3SDavid Woodhouse /* the actual write will happen once the reads are done */ 161153b381b3SDavid Woodhouse return 0; 161253b381b3SDavid Woodhouse 161353b381b3SDavid Woodhouse cleanup: 161453b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 161553b381b3SDavid Woodhouse return -EIO; 161653b381b3SDavid Woodhouse 161753b381b3SDavid Woodhouse finish: 161853b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 161953b381b3SDavid Woodhouse return 0; 162053b381b3SDavid Woodhouse } 162153b381b3SDavid Woodhouse 162253b381b3SDavid Woodhouse /* 162353b381b3SDavid Woodhouse * if the upper layers pass in a full stripe, we thank them by only allocating 162453b381b3SDavid Woodhouse * enough pages to hold the parity, and sending it all down quickly. 162553b381b3SDavid Woodhouse */ 162653b381b3SDavid Woodhouse static int full_stripe_write(struct btrfs_raid_bio *rbio) 162753b381b3SDavid Woodhouse { 162853b381b3SDavid Woodhouse int ret; 162953b381b3SDavid Woodhouse 163053b381b3SDavid Woodhouse ret = alloc_rbio_parity_pages(rbio); 16313cd846d1SMiao Xie if (ret) { 16323cd846d1SMiao Xie __free_raid_bio(rbio); 163353b381b3SDavid Woodhouse return ret; 16343cd846d1SMiao Xie } 163553b381b3SDavid Woodhouse 163653b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 163753b381b3SDavid Woodhouse if (ret == 0) 163853b381b3SDavid Woodhouse finish_rmw(rbio); 163953b381b3SDavid Woodhouse return 0; 164053b381b3SDavid Woodhouse } 164153b381b3SDavid Woodhouse 164253b381b3SDavid Woodhouse /* 164353b381b3SDavid Woodhouse * partial stripe writes get handed over to async helpers. 164453b381b3SDavid Woodhouse * We're really hoping to merge a few more writes into this 164553b381b3SDavid Woodhouse * rbio before calculating new parity 164653b381b3SDavid Woodhouse */ 164753b381b3SDavid Woodhouse static int partial_stripe_write(struct btrfs_raid_bio *rbio) 164853b381b3SDavid Woodhouse { 164953b381b3SDavid Woodhouse int ret; 165053b381b3SDavid Woodhouse 165153b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 165253b381b3SDavid Woodhouse if (ret == 0) 165353b381b3SDavid Woodhouse async_rmw_stripe(rbio); 165453b381b3SDavid Woodhouse return 0; 165553b381b3SDavid Woodhouse } 165653b381b3SDavid Woodhouse 165753b381b3SDavid Woodhouse /* 165853b381b3SDavid Woodhouse * sometimes while we were reading from the drive to 165953b381b3SDavid Woodhouse * recalculate parity, enough new bios come into create 166053b381b3SDavid Woodhouse * a full stripe. 
So we do a check here to see if we can 166153b381b3SDavid Woodhouse * go directly to finish_rmw 166253b381b3SDavid Woodhouse */ 166353b381b3SDavid Woodhouse static int __raid56_parity_write(struct btrfs_raid_bio *rbio) 166453b381b3SDavid Woodhouse { 166553b381b3SDavid Woodhouse /* head off into rmw land if we don't have a full stripe */ 166653b381b3SDavid Woodhouse if (!rbio_is_full(rbio)) 166753b381b3SDavid Woodhouse return partial_stripe_write(rbio); 166853b381b3SDavid Woodhouse return full_stripe_write(rbio); 166953b381b3SDavid Woodhouse } 167053b381b3SDavid Woodhouse 167153b381b3SDavid Woodhouse /* 16726ac0f488SChris Mason * We use plugging call backs to collect full stripes. 16736ac0f488SChris Mason * Any time we get a partial stripe write while plugged 16746ac0f488SChris Mason * we collect it into a list. When the unplug comes down, 16756ac0f488SChris Mason * we sort the list by logical block number and merge 16766ac0f488SChris Mason * everything we can into the same rbios 16776ac0f488SChris Mason */ 16786ac0f488SChris Mason struct btrfs_plug_cb { 16796ac0f488SChris Mason struct blk_plug_cb cb; 16806ac0f488SChris Mason struct btrfs_fs_info *info; 16816ac0f488SChris Mason struct list_head rbio_list; 16826ac0f488SChris Mason struct btrfs_work work; 16836ac0f488SChris Mason }; 16846ac0f488SChris Mason 16856ac0f488SChris Mason /* 16866ac0f488SChris Mason * rbios on the plug list are sorted for easier merging. 16876ac0f488SChris Mason */ 16886ac0f488SChris Mason static int plug_cmp(void *priv, struct list_head *a, struct list_head *b) 16896ac0f488SChris Mason { 16906ac0f488SChris Mason struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio, 16916ac0f488SChris Mason plug_list); 16926ac0f488SChris Mason struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, 16936ac0f488SChris Mason plug_list); 16944f024f37SKent Overstreet u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; 16954f024f37SKent Overstreet u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; 16966ac0f488SChris Mason 16976ac0f488SChris Mason if (a_sector < b_sector) 16986ac0f488SChris Mason return -1; 16996ac0f488SChris Mason if (a_sector > b_sector) 17006ac0f488SChris Mason return 1; 17016ac0f488SChris Mason return 0; 17026ac0f488SChris Mason } 17036ac0f488SChris Mason 17046ac0f488SChris Mason static void run_plug(struct btrfs_plug_cb *plug) 17056ac0f488SChris Mason { 17066ac0f488SChris Mason struct btrfs_raid_bio *cur; 17076ac0f488SChris Mason struct btrfs_raid_bio *last = NULL; 17086ac0f488SChris Mason 17096ac0f488SChris Mason /* 17106ac0f488SChris Mason * sort our plug list then try to merge 17116ac0f488SChris Mason * everything we can in hopes of creating full 17126ac0f488SChris Mason * stripes. 
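 * Because the list is ordered by starting sector, only neighbouring
 * entries ever need to be considered for merging.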
17136ac0f488SChris Mason */ 17146ac0f488SChris Mason list_sort(NULL, &plug->rbio_list, plug_cmp); 17156ac0f488SChris Mason while (!list_empty(&plug->rbio_list)) { 17166ac0f488SChris Mason cur = list_entry(plug->rbio_list.next, 17176ac0f488SChris Mason struct btrfs_raid_bio, plug_list); 17186ac0f488SChris Mason list_del_init(&cur->plug_list); 17196ac0f488SChris Mason 17206ac0f488SChris Mason if (rbio_is_full(cur)) { 17216ac0f488SChris Mason /* we have a full stripe, send it down */ 17226ac0f488SChris Mason full_stripe_write(cur); 17236ac0f488SChris Mason continue; 17246ac0f488SChris Mason } 17256ac0f488SChris Mason if (last) { 17266ac0f488SChris Mason if (rbio_can_merge(last, cur)) { 17276ac0f488SChris Mason merge_rbio(last, cur); 17286ac0f488SChris Mason __free_raid_bio(cur); 17296ac0f488SChris Mason continue; 17306ac0f488SChris Mason 17316ac0f488SChris Mason } 17326ac0f488SChris Mason __raid56_parity_write(last); 17336ac0f488SChris Mason } 17346ac0f488SChris Mason last = cur; 17356ac0f488SChris Mason } 17366ac0f488SChris Mason if (last) { 17376ac0f488SChris Mason __raid56_parity_write(last); 17386ac0f488SChris Mason } 17396ac0f488SChris Mason kfree(plug); 17406ac0f488SChris Mason } 17416ac0f488SChris Mason 17426ac0f488SChris Mason /* 17436ac0f488SChris Mason * if the unplug comes from schedule, we have to push the 17446ac0f488SChris Mason * work off to a helper thread 17456ac0f488SChris Mason */ 17466ac0f488SChris Mason static void unplug_work(struct btrfs_work *work) 17476ac0f488SChris Mason { 17486ac0f488SChris Mason struct btrfs_plug_cb *plug; 17496ac0f488SChris Mason plug = container_of(work, struct btrfs_plug_cb, work); 17506ac0f488SChris Mason run_plug(plug); 17516ac0f488SChris Mason } 17526ac0f488SChris Mason 17536ac0f488SChris Mason static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) 17546ac0f488SChris Mason { 17556ac0f488SChris Mason struct btrfs_plug_cb *plug; 17566ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 17576ac0f488SChris Mason 17586ac0f488SChris Mason if (from_schedule) { 17599e0af237SLiu Bo btrfs_init_work(&plug->work, btrfs_rmw_helper, 17609e0af237SLiu Bo unplug_work, NULL, NULL); 1761d05a33acSQu Wenruo btrfs_queue_work(plug->info->rmw_workers, 17626ac0f488SChris Mason &plug->work); 17636ac0f488SChris Mason return; 17646ac0f488SChris Mason } 17656ac0f488SChris Mason run_plug(plug); 17666ac0f488SChris Mason } 17676ac0f488SChris Mason 17686ac0f488SChris Mason /* 176953b381b3SDavid Woodhouse * our main entry point for writes from the rest of the FS. 
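 * Full stripes are written immediately; partial stripes are parked on the
 * current plug (when one exists) so later writes can be merged in before
 * the rmw starts.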
177053b381b3SDavid Woodhouse */ 177153b381b3SDavid Woodhouse int raid56_parity_write(struct btrfs_root *root, struct bio *bio, 177253b381b3SDavid Woodhouse struct btrfs_bio *bbio, u64 *raid_map, 177353b381b3SDavid Woodhouse u64 stripe_len) 177453b381b3SDavid Woodhouse { 177553b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 17766ac0f488SChris Mason struct btrfs_plug_cb *plug = NULL; 17776ac0f488SChris Mason struct blk_plug_cb *cb; 177853b381b3SDavid Woodhouse 177953b381b3SDavid Woodhouse rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 1780af8e2d1dSMiao Xie if (IS_ERR(rbio)) { 1781af8e2d1dSMiao Xie __free_bbio_and_raid_map(bbio, raid_map, 1); 178253b381b3SDavid Woodhouse return PTR_ERR(rbio); 1783af8e2d1dSMiao Xie } 178453b381b3SDavid Woodhouse bio_list_add(&rbio->bio_list, bio); 17854f024f37SKent Overstreet rbio->bio_list_bytes = bio->bi_iter.bi_size; 17861b94b556SMiao Xie rbio->operation = BTRFS_RBIO_WRITE; 17876ac0f488SChris Mason 17886ac0f488SChris Mason /* 17896ac0f488SChris Mason * don't plug on full rbios, just get them out the door 17906ac0f488SChris Mason * as quickly as we can 17916ac0f488SChris Mason */ 17926ac0f488SChris Mason if (rbio_is_full(rbio)) 17936ac0f488SChris Mason return full_stripe_write(rbio); 17946ac0f488SChris Mason 17956ac0f488SChris Mason cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info, 17966ac0f488SChris Mason sizeof(*plug)); 17976ac0f488SChris Mason if (cb) { 17986ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 17996ac0f488SChris Mason if (!plug->info) { 18006ac0f488SChris Mason plug->info = root->fs_info; 18016ac0f488SChris Mason INIT_LIST_HEAD(&plug->rbio_list); 18026ac0f488SChris Mason } 18036ac0f488SChris Mason list_add_tail(&rbio->plug_list, &plug->rbio_list); 18046ac0f488SChris Mason } else { 180553b381b3SDavid Woodhouse return __raid56_parity_write(rbio); 180653b381b3SDavid Woodhouse } 18076ac0f488SChris Mason return 0; 18086ac0f488SChris Mason } 180953b381b3SDavid Woodhouse 181053b381b3SDavid Woodhouse /* 181153b381b3SDavid Woodhouse * all parity reconstruction happens here. We've read in everything 181253b381b3SDavid Woodhouse * we can find from the drives and this does the heavy lifting of 181353b381b3SDavid Woodhouse * sorting the good from the bad. 
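 * On RAID6, a data+P or double data failure goes through
 * raid6_datap_recov()/raid6_2data_recov(); everything else that is
 * recoverable is rebuilt by copying the P stripe over the missing block
 * and xoring in the surviving data stripes.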
181453b381b3SDavid Woodhouse */ 181553b381b3SDavid Woodhouse static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) 181653b381b3SDavid Woodhouse { 181753b381b3SDavid Woodhouse int pagenr, stripe; 181853b381b3SDavid Woodhouse void **pointers; 181953b381b3SDavid Woodhouse int faila = -1, failb = -1; 1820ed6078f7SDavid Sterba int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); 182153b381b3SDavid Woodhouse struct page *page; 182253b381b3SDavid Woodhouse int err; 182353b381b3SDavid Woodhouse int i; 182453b381b3SDavid Woodhouse 1825*2c8cdd6eSMiao Xie pointers = kzalloc(rbio->real_stripes * sizeof(void *), 182653b381b3SDavid Woodhouse GFP_NOFS); 182753b381b3SDavid Woodhouse if (!pointers) { 182853b381b3SDavid Woodhouse err = -ENOMEM; 182953b381b3SDavid Woodhouse goto cleanup_io; 183053b381b3SDavid Woodhouse } 183153b381b3SDavid Woodhouse 183253b381b3SDavid Woodhouse faila = rbio->faila; 183353b381b3SDavid Woodhouse failb = rbio->failb; 183453b381b3SDavid Woodhouse 18351b94b556SMiao Xie if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { 183653b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 183753b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 183853b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 183953b381b3SDavid Woodhouse } 184053b381b3SDavid Woodhouse 184153b381b3SDavid Woodhouse index_rbio_pages(rbio); 184253b381b3SDavid Woodhouse 184353b381b3SDavid Woodhouse for (pagenr = 0; pagenr < nr_pages; pagenr++) { 18445a6ac9eaSMiao Xie /* 18455a6ac9eaSMiao Xie * Now we just use bitmap to mark the horizontal stripes in 18465a6ac9eaSMiao Xie * which we have data when doing parity scrub. 18475a6ac9eaSMiao Xie */ 18485a6ac9eaSMiao Xie if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && 18495a6ac9eaSMiao Xie !test_bit(pagenr, rbio->dbitmap)) 18505a6ac9eaSMiao Xie continue; 18515a6ac9eaSMiao Xie 185253b381b3SDavid Woodhouse /* setup our array of pointers with pages 185353b381b3SDavid Woodhouse * from each stripe 185453b381b3SDavid Woodhouse */ 1855*2c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 185653b381b3SDavid Woodhouse /* 185753b381b3SDavid Woodhouse * if we're rebuilding a read, we have to use 185853b381b3SDavid Woodhouse * pages from the bio list 185953b381b3SDavid Woodhouse */ 18601b94b556SMiao Xie if (rbio->operation == BTRFS_RBIO_READ_REBUILD && 186153b381b3SDavid Woodhouse (stripe == faila || stripe == failb)) { 186253b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 0); 186353b381b3SDavid Woodhouse } else { 186453b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 186553b381b3SDavid Woodhouse } 186653b381b3SDavid Woodhouse pointers[stripe] = kmap(page); 186753b381b3SDavid Woodhouse } 186853b381b3SDavid Woodhouse 186953b381b3SDavid Woodhouse /* all raid6 handling here */ 1870*2c8cdd6eSMiao Xie if (rbio->raid_map[rbio->real_stripes - 1] == 187153b381b3SDavid Woodhouse RAID6_Q_STRIPE) { 187253b381b3SDavid Woodhouse 187353b381b3SDavid Woodhouse /* 187453b381b3SDavid Woodhouse * single failure, rebuild from parity raid5 187553b381b3SDavid Woodhouse * style 187653b381b3SDavid Woodhouse */ 187753b381b3SDavid Woodhouse if (failb < 0) { 187853b381b3SDavid Woodhouse if (faila == rbio->nr_data) { 187953b381b3SDavid Woodhouse /* 188053b381b3SDavid Woodhouse * Just the P stripe has failed, without 188153b381b3SDavid Woodhouse * a bad data or Q stripe. 188253b381b3SDavid Woodhouse * TODO, we should redo the xor here. 
188353b381b3SDavid Woodhouse */ 188453b381b3SDavid Woodhouse err = -EIO; 188553b381b3SDavid Woodhouse goto cleanup; 188653b381b3SDavid Woodhouse } 188753b381b3SDavid Woodhouse /* 188853b381b3SDavid Woodhouse * a single failure in raid6 is rebuilt 188953b381b3SDavid Woodhouse * in the pstripe code below 189053b381b3SDavid Woodhouse */ 189153b381b3SDavid Woodhouse goto pstripe; 189253b381b3SDavid Woodhouse } 189353b381b3SDavid Woodhouse 189453b381b3SDavid Woodhouse /* make sure our ps and qs are in order */ 189553b381b3SDavid Woodhouse if (faila > failb) { 189653b381b3SDavid Woodhouse int tmp = failb; 189753b381b3SDavid Woodhouse failb = faila; 189853b381b3SDavid Woodhouse faila = tmp; 189953b381b3SDavid Woodhouse } 190053b381b3SDavid Woodhouse 190153b381b3SDavid Woodhouse /* if the q stripe is failed, do a pstripe reconstruction 190253b381b3SDavid Woodhouse * from the xors. 190353b381b3SDavid Woodhouse * If both the q stripe and the P stripe are failed, we're 190453b381b3SDavid Woodhouse * here due to a crc mismatch and we can't give them the 190553b381b3SDavid Woodhouse * data they want 190653b381b3SDavid Woodhouse */ 190753b381b3SDavid Woodhouse if (rbio->raid_map[failb] == RAID6_Q_STRIPE) { 190853b381b3SDavid Woodhouse if (rbio->raid_map[faila] == RAID5_P_STRIPE) { 190953b381b3SDavid Woodhouse err = -EIO; 191053b381b3SDavid Woodhouse goto cleanup; 191153b381b3SDavid Woodhouse } 191253b381b3SDavid Woodhouse /* 191353b381b3SDavid Woodhouse * otherwise we have one bad data stripe and 191453b381b3SDavid Woodhouse * a good P stripe. raid5! 191553b381b3SDavid Woodhouse */ 191653b381b3SDavid Woodhouse goto pstripe; 191753b381b3SDavid Woodhouse } 191853b381b3SDavid Woodhouse 191953b381b3SDavid Woodhouse if (rbio->raid_map[failb] == RAID5_P_STRIPE) { 1920*2c8cdd6eSMiao Xie raid6_datap_recov(rbio->real_stripes, 192153b381b3SDavid Woodhouse PAGE_SIZE, faila, pointers); 192253b381b3SDavid Woodhouse } else { 1923*2c8cdd6eSMiao Xie raid6_2data_recov(rbio->real_stripes, 192453b381b3SDavid Woodhouse PAGE_SIZE, faila, failb, 192553b381b3SDavid Woodhouse pointers); 192653b381b3SDavid Woodhouse } 192753b381b3SDavid Woodhouse } else { 192853b381b3SDavid Woodhouse void *p; 192953b381b3SDavid Woodhouse 193053b381b3SDavid Woodhouse /* rebuild from P stripe here (raid5 or raid6) */ 193153b381b3SDavid Woodhouse BUG_ON(failb != -1); 193253b381b3SDavid Woodhouse pstripe: 193353b381b3SDavid Woodhouse /* Copy parity block into failed block to start with */ 193453b381b3SDavid Woodhouse memcpy(pointers[faila], 193553b381b3SDavid Woodhouse pointers[rbio->nr_data], 193653b381b3SDavid Woodhouse PAGE_CACHE_SIZE); 193753b381b3SDavid Woodhouse 193853b381b3SDavid Woodhouse /* rearrange the pointer array */ 193953b381b3SDavid Woodhouse p = pointers[faila]; 194053b381b3SDavid Woodhouse for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) 194153b381b3SDavid Woodhouse pointers[stripe] = pointers[stripe + 1]; 194253b381b3SDavid Woodhouse pointers[rbio->nr_data - 1] = p; 194353b381b3SDavid Woodhouse 194453b381b3SDavid Woodhouse /* xor in the rest */ 194553b381b3SDavid Woodhouse run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); 194653b381b3SDavid Woodhouse } 194753b381b3SDavid Woodhouse /* if we're doing this rebuild as part of an rmw, go through 194853b381b3SDavid Woodhouse * and set all of our private rbio pages in the 194953b381b3SDavid Woodhouse * failed stripes as uptodate. This way finish_rmw will 195053b381b3SDavid Woodhouse * know they can be trusted. 
If this was a read reconstruction, 195153b381b3SDavid Woodhouse * other endio functions will fiddle the uptodate bits 195253b381b3SDavid Woodhouse */ 19531b94b556SMiao Xie if (rbio->operation == BTRFS_RBIO_WRITE) { 195453b381b3SDavid Woodhouse for (i = 0; i < nr_pages; i++) { 195553b381b3SDavid Woodhouse if (faila != -1) { 195653b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, faila, i); 195753b381b3SDavid Woodhouse SetPageUptodate(page); 195853b381b3SDavid Woodhouse } 195953b381b3SDavid Woodhouse if (failb != -1) { 196053b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, failb, i); 196153b381b3SDavid Woodhouse SetPageUptodate(page); 196253b381b3SDavid Woodhouse } 196353b381b3SDavid Woodhouse } 196453b381b3SDavid Woodhouse } 1965*2c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 196653b381b3SDavid Woodhouse /* 196753b381b3SDavid Woodhouse * if we're rebuilding a read, we have to use 196853b381b3SDavid Woodhouse * pages from the bio list 196953b381b3SDavid Woodhouse */ 19701b94b556SMiao Xie if (rbio->operation == BTRFS_RBIO_READ_REBUILD && 197153b381b3SDavid Woodhouse (stripe == faila || stripe == failb)) { 197253b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 0); 197353b381b3SDavid Woodhouse } else { 197453b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 197553b381b3SDavid Woodhouse } 197653b381b3SDavid Woodhouse kunmap(page); 197753b381b3SDavid Woodhouse } 197853b381b3SDavid Woodhouse } 197953b381b3SDavid Woodhouse 198053b381b3SDavid Woodhouse err = 0; 198153b381b3SDavid Woodhouse cleanup: 198253b381b3SDavid Woodhouse kfree(pointers); 198353b381b3SDavid Woodhouse 198453b381b3SDavid Woodhouse cleanup_io: 19851b94b556SMiao Xie if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { 1986af8e2d1dSMiao Xie if (err == 0 && 1987af8e2d1dSMiao Xie !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)) 19884ae10b3aSChris Mason cache_rbio_pages(rbio); 19894ae10b3aSChris Mason else 19904ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 19914ae10b3aSChris Mason 199253b381b3SDavid Woodhouse rbio_orig_end_io(rbio, err, err == 0); 199353b381b3SDavid Woodhouse } else if (err == 0) { 199453b381b3SDavid Woodhouse rbio->faila = -1; 199553b381b3SDavid Woodhouse rbio->failb = -1; 19965a6ac9eaSMiao Xie 19975a6ac9eaSMiao Xie if (rbio->operation == BTRFS_RBIO_WRITE) 199853b381b3SDavid Woodhouse finish_rmw(rbio); 19995a6ac9eaSMiao Xie else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) 20005a6ac9eaSMiao Xie finish_parity_scrub(rbio, 0); 20015a6ac9eaSMiao Xie else 20025a6ac9eaSMiao Xie BUG(); 200353b381b3SDavid Woodhouse } else { 200453b381b3SDavid Woodhouse rbio_orig_end_io(rbio, err, 0); 200553b381b3SDavid Woodhouse } 200653b381b3SDavid Woodhouse } 200753b381b3SDavid Woodhouse 200853b381b3SDavid Woodhouse /* 200953b381b3SDavid Woodhouse * This is called only for stripes we've read from disk to 201053b381b3SDavid Woodhouse * reconstruct the parity. 
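 * The last bio to complete either starts the rebuild or fails the whole
 * rbio when more stripes errored than the raid level can tolerate.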
201153b381b3SDavid Woodhouse */ 201253b381b3SDavid Woodhouse static void raid_recover_end_io(struct bio *bio, int err) 201353b381b3SDavid Woodhouse { 201453b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 201553b381b3SDavid Woodhouse 201653b381b3SDavid Woodhouse /* 201753b381b3SDavid Woodhouse * we only read stripe pages off the disk, set them 201853b381b3SDavid Woodhouse * up to date if there were no errors 201953b381b3SDavid Woodhouse */ 202053b381b3SDavid Woodhouse if (err) 202153b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 202253b381b3SDavid Woodhouse else 202353b381b3SDavid Woodhouse set_bio_pages_uptodate(bio); 202453b381b3SDavid Woodhouse bio_put(bio); 202553b381b3SDavid Woodhouse 2026b89e1b01SMiao Xie if (!atomic_dec_and_test(&rbio->stripes_pending)) 202753b381b3SDavid Woodhouse return; 202853b381b3SDavid Woodhouse 2029b89e1b01SMiao Xie if (atomic_read(&rbio->error) > rbio->bbio->max_errors) 203053b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 203153b381b3SDavid Woodhouse else 203253b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 203353b381b3SDavid Woodhouse } 203453b381b3SDavid Woodhouse 203553b381b3SDavid Woodhouse /* 203653b381b3SDavid Woodhouse * reads everything we need off the disk to reconstruct 203753b381b3SDavid Woodhouse * the parity. endio handlers trigger final reconstruction 203853b381b3SDavid Woodhouse * when the IO is done. 203953b381b3SDavid Woodhouse * 204053b381b3SDavid Woodhouse * This is used both for reads from the higher layers and for 204153b381b3SDavid Woodhouse * parity construction required to finish a rmw cycle. 204253b381b3SDavid Woodhouse */ 204353b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) 204453b381b3SDavid Woodhouse { 204553b381b3SDavid Woodhouse int bios_to_read = 0; 204653b381b3SDavid Woodhouse struct bio_list bio_list; 204753b381b3SDavid Woodhouse int ret; 2048ed6078f7SDavid Sterba int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); 204953b381b3SDavid Woodhouse int pagenr; 205053b381b3SDavid Woodhouse int stripe; 205153b381b3SDavid Woodhouse struct bio *bio; 205253b381b3SDavid Woodhouse 205353b381b3SDavid Woodhouse bio_list_init(&bio_list); 205453b381b3SDavid Woodhouse 205553b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 205653b381b3SDavid Woodhouse if (ret) 205753b381b3SDavid Woodhouse goto cleanup; 205853b381b3SDavid Woodhouse 2059b89e1b01SMiao Xie atomic_set(&rbio->error, 0); 206053b381b3SDavid Woodhouse 206153b381b3SDavid Woodhouse /* 20624ae10b3aSChris Mason * read everything that hasn't failed. Thanks to the 20634ae10b3aSChris Mason * stripe cache, it is possible that some or all of these 20644ae10b3aSChris Mason * pages are going to be uptodate. 
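 * Stripes already known to be bad just bump the error count and are
 * skipped.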
206553b381b3SDavid Woodhouse */ 2066*2c8cdd6eSMiao Xie for (stripe = 0; stripe < rbio->real_stripes; stripe++) { 20675588383eSLiu Bo if (rbio->faila == stripe || rbio->failb == stripe) { 2068b89e1b01SMiao Xie atomic_inc(&rbio->error); 206953b381b3SDavid Woodhouse continue; 20705588383eSLiu Bo } 207153b381b3SDavid Woodhouse 207253b381b3SDavid Woodhouse for (pagenr = 0; pagenr < nr_pages; pagenr++) { 207353b381b3SDavid Woodhouse struct page *p; 207453b381b3SDavid Woodhouse 207553b381b3SDavid Woodhouse /* 207653b381b3SDavid Woodhouse * the rmw code may have already read this 207753b381b3SDavid Woodhouse * page in 207853b381b3SDavid Woodhouse */ 207953b381b3SDavid Woodhouse p = rbio_stripe_page(rbio, stripe, pagenr); 208053b381b3SDavid Woodhouse if (PageUptodate(p)) 208153b381b3SDavid Woodhouse continue; 208253b381b3SDavid Woodhouse 208353b381b3SDavid Woodhouse ret = rbio_add_io_page(rbio, &bio_list, 208453b381b3SDavid Woodhouse rbio_stripe_page(rbio, stripe, pagenr), 208553b381b3SDavid Woodhouse stripe, pagenr, rbio->stripe_len); 208653b381b3SDavid Woodhouse if (ret < 0) 208753b381b3SDavid Woodhouse goto cleanup; 208853b381b3SDavid Woodhouse } 208953b381b3SDavid Woodhouse } 209053b381b3SDavid Woodhouse 209153b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 209253b381b3SDavid Woodhouse if (!bios_to_read) { 209353b381b3SDavid Woodhouse /* 209453b381b3SDavid Woodhouse * we might have no bios to read just because the pages 209553b381b3SDavid Woodhouse * were up to date, or we might have no bios to read because 209653b381b3SDavid Woodhouse * the devices were gone. 209753b381b3SDavid Woodhouse */ 2098b89e1b01SMiao Xie if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { 209953b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 210053b381b3SDavid Woodhouse goto out; 210153b381b3SDavid Woodhouse } else { 210253b381b3SDavid Woodhouse goto cleanup; 210353b381b3SDavid Woodhouse } 210453b381b3SDavid Woodhouse } 210553b381b3SDavid Woodhouse 210653b381b3SDavid Woodhouse /* 210753b381b3SDavid Woodhouse * the bbio may be freed once we submit the last bio. Make sure 210853b381b3SDavid Woodhouse * not to touch it after that 210953b381b3SDavid Woodhouse */ 2110b89e1b01SMiao Xie atomic_set(&rbio->stripes_pending, bios_to_read); 211153b381b3SDavid Woodhouse while (1) { 211253b381b3SDavid Woodhouse bio = bio_list_pop(&bio_list); 211353b381b3SDavid Woodhouse if (!bio) 211453b381b3SDavid Woodhouse break; 211553b381b3SDavid Woodhouse 211653b381b3SDavid Woodhouse bio->bi_private = rbio; 211753b381b3SDavid Woodhouse bio->bi_end_io = raid_recover_end_io; 211853b381b3SDavid Woodhouse 211953b381b3SDavid Woodhouse btrfs_bio_wq_end_io(rbio->fs_info, bio, 212053b381b3SDavid Woodhouse BTRFS_WQ_ENDIO_RAID56); 212153b381b3SDavid Woodhouse 212253b381b3SDavid Woodhouse BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); 212353b381b3SDavid Woodhouse submit_bio(READ, bio); 212453b381b3SDavid Woodhouse } 212553b381b3SDavid Woodhouse out: 212653b381b3SDavid Woodhouse return 0; 212753b381b3SDavid Woodhouse 212853b381b3SDavid Woodhouse cleanup: 21291b94b556SMiao Xie if (rbio->operation == BTRFS_RBIO_READ_REBUILD) 213053b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 213153b381b3SDavid Woodhouse return -EIO; 213253b381b3SDavid Woodhouse } 213353b381b3SDavid Woodhouse 213453b381b3SDavid Woodhouse /* 213553b381b3SDavid Woodhouse * the main entry point for reads from the higher layers. 
This 213653b381b3SDavid Woodhouse * is really only called when the normal read path had a failure, 213753b381b3SDavid Woodhouse * so we assume the bio they send down corresponds to a failed part 213853b381b3SDavid Woodhouse * of the drive. 213953b381b3SDavid Woodhouse */ 214053b381b3SDavid Woodhouse int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, 214153b381b3SDavid Woodhouse struct btrfs_bio *bbio, u64 *raid_map, 2142af8e2d1dSMiao Xie u64 stripe_len, int mirror_num, int hold_bbio) 214353b381b3SDavid Woodhouse { 214453b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 214553b381b3SDavid Woodhouse int ret; 214653b381b3SDavid Woodhouse 214753b381b3SDavid Woodhouse rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 2148af8e2d1dSMiao Xie if (IS_ERR(rbio)) { 2149af8e2d1dSMiao Xie __free_bbio_and_raid_map(bbio, raid_map, !hold_bbio); 215053b381b3SDavid Woodhouse return PTR_ERR(rbio); 2151af8e2d1dSMiao Xie } 215253b381b3SDavid Woodhouse 2153af8e2d1dSMiao Xie if (hold_bbio) 2154af8e2d1dSMiao Xie set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags); 21551b94b556SMiao Xie rbio->operation = BTRFS_RBIO_READ_REBUILD; 215653b381b3SDavid Woodhouse bio_list_add(&rbio->bio_list, bio); 21574f024f37SKent Overstreet rbio->bio_list_bytes = bio->bi_iter.bi_size; 215853b381b3SDavid Woodhouse 215953b381b3SDavid Woodhouse rbio->faila = find_logical_bio_stripe(rbio, bio); 216053b381b3SDavid Woodhouse if (rbio->faila == -1) { 216153b381b3SDavid Woodhouse BUG(); 2162af8e2d1dSMiao Xie __free_bbio_and_raid_map(bbio, raid_map, !hold_bbio); 216353b381b3SDavid Woodhouse kfree(rbio); 216453b381b3SDavid Woodhouse return -EIO; 216553b381b3SDavid Woodhouse } 216653b381b3SDavid Woodhouse 216753b381b3SDavid Woodhouse /* 216853b381b3SDavid Woodhouse * reconstruct from the q stripe if they are 216953b381b3SDavid Woodhouse * asking for mirror 3 217053b381b3SDavid Woodhouse */ 217153b381b3SDavid Woodhouse if (mirror_num == 3) 2172*2c8cdd6eSMiao Xie rbio->failb = rbio->real_stripes - 2; 217353b381b3SDavid Woodhouse 217453b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 217553b381b3SDavid Woodhouse 217653b381b3SDavid Woodhouse /* 217753b381b3SDavid Woodhouse * __raid56_parity_recover will end the bio with 217853b381b3SDavid Woodhouse * any errors it hits. 
We don't want to return 217953b381b3SDavid Woodhouse * its error value up the stack because our caller 218053b381b3SDavid Woodhouse * will end up calling bio_endio with any nonzero 218153b381b3SDavid Woodhouse * return 218253b381b3SDavid Woodhouse */ 218353b381b3SDavid Woodhouse if (ret == 0) 218453b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 218553b381b3SDavid Woodhouse /* 218653b381b3SDavid Woodhouse * our rbio has been added to the list of 218753b381b3SDavid Woodhouse * rbios that will be handled after the 218853b381b3SDavid Woodhouse * currently lock owner is done 218953b381b3SDavid Woodhouse */ 219053b381b3SDavid Woodhouse return 0; 219153b381b3SDavid Woodhouse 219253b381b3SDavid Woodhouse } 219353b381b3SDavid Woodhouse 219453b381b3SDavid Woodhouse static void rmw_work(struct btrfs_work *work) 219553b381b3SDavid Woodhouse { 219653b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 219753b381b3SDavid Woodhouse 219853b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 219953b381b3SDavid Woodhouse raid56_rmw_stripe(rbio); 220053b381b3SDavid Woodhouse } 220153b381b3SDavid Woodhouse 220253b381b3SDavid Woodhouse static void read_rebuild_work(struct btrfs_work *work) 220353b381b3SDavid Woodhouse { 220453b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 220553b381b3SDavid Woodhouse 220653b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 220753b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 220853b381b3SDavid Woodhouse } 22095a6ac9eaSMiao Xie 22105a6ac9eaSMiao Xie /* 22115a6ac9eaSMiao Xie * The following code is used to scrub/replace the parity stripe 22125a6ac9eaSMiao Xie * 22135a6ac9eaSMiao Xie * Note: We need make sure all the pages that add into the scrub/replace 22145a6ac9eaSMiao Xie * raid bio are correct and not be changed during the scrub/replace. That 22155a6ac9eaSMiao Xie * is those pages just hold metadata or file data with checksum. 
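 * The scrub rbio remembers which stripe lives on the device being scrubbed
 * (scrubp) and a bitmap (dbitmap) of the sectors whose parity must be
 * checked.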
22165a6ac9eaSMiao Xie */ 22175a6ac9eaSMiao Xie 22185a6ac9eaSMiao Xie struct btrfs_raid_bio * 22195a6ac9eaSMiao Xie raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, 22205a6ac9eaSMiao Xie struct btrfs_bio *bbio, u64 *raid_map, 22215a6ac9eaSMiao Xie u64 stripe_len, struct btrfs_device *scrub_dev, 22225a6ac9eaSMiao Xie unsigned long *dbitmap, int stripe_nsectors) 22235a6ac9eaSMiao Xie { 22245a6ac9eaSMiao Xie struct btrfs_raid_bio *rbio; 22255a6ac9eaSMiao Xie int i; 22265a6ac9eaSMiao Xie 22275a6ac9eaSMiao Xie rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 22285a6ac9eaSMiao Xie if (IS_ERR(rbio)) 22295a6ac9eaSMiao Xie return NULL; 22305a6ac9eaSMiao Xie bio_list_add(&rbio->bio_list, bio); 22315a6ac9eaSMiao Xie /* 22325a6ac9eaSMiao Xie * This is a special bio which is used to hold the completion handler 22335a6ac9eaSMiao Xie * and make the scrub rbio is similar to the other types 22345a6ac9eaSMiao Xie */ 22355a6ac9eaSMiao Xie ASSERT(!bio->bi_iter.bi_size); 22365a6ac9eaSMiao Xie rbio->operation = BTRFS_RBIO_PARITY_SCRUB; 22375a6ac9eaSMiao Xie 2238*2c8cdd6eSMiao Xie for (i = 0; i < rbio->real_stripes; i++) { 22395a6ac9eaSMiao Xie if (bbio->stripes[i].dev == scrub_dev) { 22405a6ac9eaSMiao Xie rbio->scrubp = i; 22415a6ac9eaSMiao Xie break; 22425a6ac9eaSMiao Xie } 22435a6ac9eaSMiao Xie } 22445a6ac9eaSMiao Xie 22455a6ac9eaSMiao Xie /* Now we just support the sectorsize equals to page size */ 22465a6ac9eaSMiao Xie ASSERT(root->sectorsize == PAGE_SIZE); 22475a6ac9eaSMiao Xie ASSERT(rbio->stripe_npages == stripe_nsectors); 22485a6ac9eaSMiao Xie bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); 22495a6ac9eaSMiao Xie 22505a6ac9eaSMiao Xie return rbio; 22515a6ac9eaSMiao Xie } 22525a6ac9eaSMiao Xie 22535a6ac9eaSMiao Xie void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, 22545a6ac9eaSMiao Xie struct page *page, u64 logical) 22555a6ac9eaSMiao Xie { 22565a6ac9eaSMiao Xie int stripe_offset; 22575a6ac9eaSMiao Xie int index; 22585a6ac9eaSMiao Xie 22595a6ac9eaSMiao Xie ASSERT(logical >= rbio->raid_map[0]); 22605a6ac9eaSMiao Xie ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] + 22615a6ac9eaSMiao Xie rbio->stripe_len * rbio->nr_data); 22625a6ac9eaSMiao Xie stripe_offset = (int)(logical - rbio->raid_map[0]); 22635a6ac9eaSMiao Xie index = stripe_offset >> PAGE_CACHE_SHIFT; 22645a6ac9eaSMiao Xie rbio->bio_pages[index] = page; 22655a6ac9eaSMiao Xie } 22665a6ac9eaSMiao Xie 22675a6ac9eaSMiao Xie /* 22685a6ac9eaSMiao Xie * We just scrub the parity that we have correct data on the same horizontal, 22695a6ac9eaSMiao Xie * so we needn't allocate all pages for all the stripes. 
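 * Only the horizontal stripes with a bit set in dbitmap get pages
 * allocated here.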
22705a6ac9eaSMiao Xie */ 22715a6ac9eaSMiao Xie static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) 22725a6ac9eaSMiao Xie { 22735a6ac9eaSMiao Xie int i; 22745a6ac9eaSMiao Xie int bit; 22755a6ac9eaSMiao Xie int index; 22765a6ac9eaSMiao Xie struct page *page; 22775a6ac9eaSMiao Xie 22785a6ac9eaSMiao Xie for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { 2279*2c8cdd6eSMiao Xie for (i = 0; i < rbio->real_stripes; i++) { 22805a6ac9eaSMiao Xie index = i * rbio->stripe_npages + bit; 22815a6ac9eaSMiao Xie if (rbio->stripe_pages[index]) 22825a6ac9eaSMiao Xie continue; 22835a6ac9eaSMiao Xie 22845a6ac9eaSMiao Xie page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 22855a6ac9eaSMiao Xie if (!page) 22865a6ac9eaSMiao Xie return -ENOMEM; 22875a6ac9eaSMiao Xie rbio->stripe_pages[index] = page; 22885a6ac9eaSMiao Xie ClearPageUptodate(page); 22895a6ac9eaSMiao Xie } 22905a6ac9eaSMiao Xie } 22915a6ac9eaSMiao Xie return 0; 22925a6ac9eaSMiao Xie } 22935a6ac9eaSMiao Xie 22945a6ac9eaSMiao Xie /* 22955a6ac9eaSMiao Xie * end io function used by finish_rmw. When we finally 22965a6ac9eaSMiao Xie * get here, we've written a full stripe 22975a6ac9eaSMiao Xie */ 22985a6ac9eaSMiao Xie static void raid_write_parity_end_io(struct bio *bio, int err) 22995a6ac9eaSMiao Xie { 23005a6ac9eaSMiao Xie struct btrfs_raid_bio *rbio = bio->bi_private; 23015a6ac9eaSMiao Xie 23025a6ac9eaSMiao Xie if (err) 23035a6ac9eaSMiao Xie fail_bio_stripe(rbio, bio); 23045a6ac9eaSMiao Xie 23055a6ac9eaSMiao Xie bio_put(bio); 23065a6ac9eaSMiao Xie 23075a6ac9eaSMiao Xie if (!atomic_dec_and_test(&rbio->stripes_pending)) 23085a6ac9eaSMiao Xie return; 23095a6ac9eaSMiao Xie 23105a6ac9eaSMiao Xie err = 0; 23115a6ac9eaSMiao Xie 23125a6ac9eaSMiao Xie if (atomic_read(&rbio->error)) 23135a6ac9eaSMiao Xie err = -EIO; 23145a6ac9eaSMiao Xie 23155a6ac9eaSMiao Xie rbio_orig_end_io(rbio, err, 0); 23165a6ac9eaSMiao Xie } 23175a6ac9eaSMiao Xie 23185a6ac9eaSMiao Xie static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, 23195a6ac9eaSMiao Xie int need_check) 23205a6ac9eaSMiao Xie { 2321*2c8cdd6eSMiao Xie void *pointers[rbio->real_stripes]; 23225a6ac9eaSMiao Xie int nr_data = rbio->nr_data; 23235a6ac9eaSMiao Xie int stripe; 23245a6ac9eaSMiao Xie int pagenr; 23255a6ac9eaSMiao Xie int p_stripe = -1; 23265a6ac9eaSMiao Xie int q_stripe = -1; 23275a6ac9eaSMiao Xie struct page *p_page = NULL; 23285a6ac9eaSMiao Xie struct page *q_page = NULL; 23295a6ac9eaSMiao Xie struct bio_list bio_list; 23305a6ac9eaSMiao Xie struct bio *bio; 23315a6ac9eaSMiao Xie int ret; 23325a6ac9eaSMiao Xie 23335a6ac9eaSMiao Xie bio_list_init(&bio_list); 23345a6ac9eaSMiao Xie 2335*2c8cdd6eSMiao Xie if (rbio->real_stripes - rbio->nr_data == 1) { 2336*2c8cdd6eSMiao Xie p_stripe = rbio->real_stripes - 1; 2337*2c8cdd6eSMiao Xie } else if (rbio->real_stripes - rbio->nr_data == 2) { 2338*2c8cdd6eSMiao Xie p_stripe = rbio->real_stripes - 2; 2339*2c8cdd6eSMiao Xie q_stripe = rbio->real_stripes - 1; 23405a6ac9eaSMiao Xie } else { 23415a6ac9eaSMiao Xie BUG(); 23425a6ac9eaSMiao Xie } 23435a6ac9eaSMiao Xie 23445a6ac9eaSMiao Xie /* 23455a6ac9eaSMiao Xie * Because the higher layers(scrubber) are unlikely to 23465a6ac9eaSMiao Xie * use this area of the disk again soon, so don't cache 23475a6ac9eaSMiao Xie * it. 
23485a6ac9eaSMiao Xie 	 */
23495a6ac9eaSMiao Xie 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
23505a6ac9eaSMiao Xie 
23515a6ac9eaSMiao Xie 	if (!need_check)
23525a6ac9eaSMiao Xie 		goto writeback;
23535a6ac9eaSMiao Xie 
23545a6ac9eaSMiao Xie 	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
23555a6ac9eaSMiao Xie 	if (!p_page)
23565a6ac9eaSMiao Xie 		goto cleanup;
23575a6ac9eaSMiao Xie 	SetPageUptodate(p_page);
23585a6ac9eaSMiao Xie 
23595a6ac9eaSMiao Xie 	if (q_stripe != -1) {
23605a6ac9eaSMiao Xie 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
23615a6ac9eaSMiao Xie 		if (!q_page) {
23625a6ac9eaSMiao Xie 			__free_page(p_page);
23635a6ac9eaSMiao Xie 			goto cleanup;
23645a6ac9eaSMiao Xie 		}
23655a6ac9eaSMiao Xie 		SetPageUptodate(q_page);
23665a6ac9eaSMiao Xie 	}
23675a6ac9eaSMiao Xie 
23685a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
23695a6ac9eaSMiao Xie 
23705a6ac9eaSMiao Xie 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
23715a6ac9eaSMiao Xie 		struct page *p;
23725a6ac9eaSMiao Xie 		void *parity;
23735a6ac9eaSMiao Xie 		/* first collect one page from each data stripe */
23745a6ac9eaSMiao Xie 		for (stripe = 0; stripe < nr_data; stripe++) {
23755a6ac9eaSMiao Xie 			p = page_in_rbio(rbio, stripe, pagenr, 0);
23765a6ac9eaSMiao Xie 			pointers[stripe] = kmap(p);
23775a6ac9eaSMiao Xie 		}
23785a6ac9eaSMiao Xie 
23795a6ac9eaSMiao Xie 		/* then add the parity stripe */
23805a6ac9eaSMiao Xie 		pointers[stripe++] = kmap(p_page);
23815a6ac9eaSMiao Xie 
23825a6ac9eaSMiao Xie 		if (q_stripe != -1) {
23835a6ac9eaSMiao Xie 
23845a6ac9eaSMiao Xie 			/*
23855a6ac9eaSMiao Xie 			 * raid6, add the qstripe and call the
23865a6ac9eaSMiao Xie 			 * library function to fill in our p/q
23875a6ac9eaSMiao Xie 			 */
23885a6ac9eaSMiao Xie 			pointers[stripe++] = kmap(q_page);
23895a6ac9eaSMiao Xie 
2390*2c8cdd6eSMiao Xie 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
23915a6ac9eaSMiao Xie 						pointers);
23925a6ac9eaSMiao Xie 		} else {
23935a6ac9eaSMiao Xie 			/* raid5 */
23945a6ac9eaSMiao Xie 			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
23955a6ac9eaSMiao Xie 			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
23965a6ac9eaSMiao Xie 		}
23975a6ac9eaSMiao Xie 
23985a6ac9eaSMiao Xie 		/* Check the parity being scrubbed and repair it */
23995a6ac9eaSMiao Xie 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
24005a6ac9eaSMiao Xie 		parity = kmap(p);
24015a6ac9eaSMiao Xie 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
24025a6ac9eaSMiao Xie 			memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
24035a6ac9eaSMiao Xie 		else
24045a6ac9eaSMiao Xie 			/* Parity is right, no need to write it back */
24055a6ac9eaSMiao Xie 			bitmap_clear(rbio->dbitmap, pagenr, 1);
24065a6ac9eaSMiao Xie 		kunmap(p);
24075a6ac9eaSMiao Xie 
2408*2c8cdd6eSMiao Xie 		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
24095a6ac9eaSMiao Xie 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
24105a6ac9eaSMiao Xie 	}
24115a6ac9eaSMiao Xie 
24125a6ac9eaSMiao Xie 	__free_page(p_page);
24135a6ac9eaSMiao Xie 	if (q_page)
24145a6ac9eaSMiao Xie 		__free_page(q_page);
24155a6ac9eaSMiao Xie 
24165a6ac9eaSMiao Xie writeback:
24175a6ac9eaSMiao Xie 	/*
24185a6ac9eaSMiao Xie 	 * time to start writing back the repaired parity. Make bios only for
24195a6ac9eaSMiao Xie 	 * the parity pages that are still marked in dbitmap, on the stripe we
24205a6ac9eaSMiao Xie 	 * are scrubbing; everything else is left alone.
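	 * If every page matched, dbitmap ends up empty, no bios are built and
	 * the rbio is completed below without issuing any writes.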
24215a6ac9eaSMiao Xie 	 */
24225a6ac9eaSMiao Xie 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
24235a6ac9eaSMiao Xie 		struct page *page;
24245a6ac9eaSMiao Xie 
24255a6ac9eaSMiao Xie 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
24265a6ac9eaSMiao Xie 		ret = rbio_add_io_page(rbio, &bio_list,
24275a6ac9eaSMiao Xie 			       page, rbio->scrubp, pagenr, rbio->stripe_len);
24285a6ac9eaSMiao Xie 		if (ret)
24295a6ac9eaSMiao Xie 			goto cleanup;
24305a6ac9eaSMiao Xie 	}
24315a6ac9eaSMiao Xie 
24325a6ac9eaSMiao Xie 	nr_data = bio_list_size(&bio_list);
24335a6ac9eaSMiao Xie 	if (!nr_data) {
24345a6ac9eaSMiao Xie 		/* Every parity is right */
24355a6ac9eaSMiao Xie 		rbio_orig_end_io(rbio, 0, 0);
24365a6ac9eaSMiao Xie 		return;
24375a6ac9eaSMiao Xie 	}
24385a6ac9eaSMiao Xie 
24395a6ac9eaSMiao Xie 	atomic_set(&rbio->stripes_pending, nr_data);
24405a6ac9eaSMiao Xie 
24415a6ac9eaSMiao Xie 	while (1) {
24425a6ac9eaSMiao Xie 		bio = bio_list_pop(&bio_list);
24435a6ac9eaSMiao Xie 		if (!bio)
24445a6ac9eaSMiao Xie 			break;
24455a6ac9eaSMiao Xie 
24465a6ac9eaSMiao Xie 		bio->bi_private = rbio;
24475a6ac9eaSMiao Xie 		bio->bi_end_io = raid_write_parity_end_io;
24485a6ac9eaSMiao Xie 		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
24495a6ac9eaSMiao Xie 		submit_bio(WRITE, bio);
24505a6ac9eaSMiao Xie 	}
24515a6ac9eaSMiao Xie 	return;
24525a6ac9eaSMiao Xie 
24535a6ac9eaSMiao Xie cleanup:
24545a6ac9eaSMiao Xie 	rbio_orig_end_io(rbio, -EIO, 0);
24555a6ac9eaSMiao Xie }
24565a6ac9eaSMiao Xie 
24575a6ac9eaSMiao Xie static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
24585a6ac9eaSMiao Xie {
24595a6ac9eaSMiao Xie 	if (stripe >= 0 && stripe < rbio->nr_data)
24605a6ac9eaSMiao Xie 		return 1;
24615a6ac9eaSMiao Xie 	return 0;
24625a6ac9eaSMiao Xie }
24635a6ac9eaSMiao Xie 
24645a6ac9eaSMiao Xie /*
24655a6ac9eaSMiao Xie  * While we're doing the parity check and repair, we could have errors
24665a6ac9eaSMiao Xie  * in reading pages off the disk. This checks for errors and, if we were
24675a6ac9eaSMiao Xie  * not able to read a page, triggers parity reconstruction. The
24685a6ac9eaSMiao Xie  * parity scrub will be finished after we've reconstructed the failed
24695a6ac9eaSMiao Xie  * stripes.
24705a6ac9eaSMiao Xie  */
24715a6ac9eaSMiao Xie static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
24725a6ac9eaSMiao Xie {
24735a6ac9eaSMiao Xie 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
24745a6ac9eaSMiao Xie 		goto cleanup;
24755a6ac9eaSMiao Xie 
24765a6ac9eaSMiao Xie 	if (rbio->faila >= 0 || rbio->failb >= 0) {
24775a6ac9eaSMiao Xie 		int dfail = 0, failp = -1;
24785a6ac9eaSMiao Xie 
24795a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->faila))
24805a6ac9eaSMiao Xie 			dfail++;
24815a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->faila))
24825a6ac9eaSMiao Xie 			failp = rbio->faila;
24835a6ac9eaSMiao Xie 
24845a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->failb))
24855a6ac9eaSMiao Xie 			dfail++;
24865a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->failb))
24875a6ac9eaSMiao Xie 			failp = rbio->failb;
24885a6ac9eaSMiao Xie 
24895a6ac9eaSMiao Xie 		/*
24905a6ac9eaSMiao Xie 		 * Because we cannot use the parity that is being scrubbed to
24915a6ac9eaSMiao Xie 		 * repair the data, our ability to repair is reduced.
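		 * In effect, while scrubbing one parity we can tolerate at
		 * most max_errors - 1 failed data stripes.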
24925a6ac9eaSMiao Xie 		 * (In the case of RAID5, we cannot repair any data at all.)
24935a6ac9eaSMiao Xie 		 */
24945a6ac9eaSMiao Xie 		if (dfail > rbio->bbio->max_errors - 1)
24955a6ac9eaSMiao Xie 			goto cleanup;
24965a6ac9eaSMiao Xie 
24975a6ac9eaSMiao Xie 		/*
24985a6ac9eaSMiao Xie 		 * If all the data is good and only the parity is wrong, just
24995a6ac9eaSMiao Xie 		 * repair the parity.
25005a6ac9eaSMiao Xie 		 */
25015a6ac9eaSMiao Xie 		if (dfail == 0) {
25025a6ac9eaSMiao Xie 			finish_parity_scrub(rbio, 0);
25035a6ac9eaSMiao Xie 			return;
25045a6ac9eaSMiao Xie 		}
25055a6ac9eaSMiao Xie 
25065a6ac9eaSMiao Xie 		/*
25075a6ac9eaSMiao Xie 		 * Here we have one corrupted data stripe and one corrupted
25085a6ac9eaSMiao Xie 		 * parity on RAID6. If the corrupted parity is the one being
25095a6ac9eaSMiao Xie 		 * scrubbed, we can luckily use the other parity to repair the
25105a6ac9eaSMiao Xie 		 * data; otherwise we cannot repair the data stripe.
25115a6ac9eaSMiao Xie 		 */
25125a6ac9eaSMiao Xie 		if (failp != rbio->scrubp)
25135a6ac9eaSMiao Xie 			goto cleanup;
25145a6ac9eaSMiao Xie 
25155a6ac9eaSMiao Xie 		__raid_recover_end_io(rbio);
25165a6ac9eaSMiao Xie 	} else {
25175a6ac9eaSMiao Xie 		finish_parity_scrub(rbio, 1);
25185a6ac9eaSMiao Xie 	}
25195a6ac9eaSMiao Xie 	return;
25205a6ac9eaSMiao Xie 
25215a6ac9eaSMiao Xie cleanup:
25225a6ac9eaSMiao Xie 	rbio_orig_end_io(rbio, -EIO, 0);
25235a6ac9eaSMiao Xie }
25245a6ac9eaSMiao Xie 
25255a6ac9eaSMiao Xie /*
25265a6ac9eaSMiao Xie  * end io for the read phase of the parity scrub. All the bios here are
25275a6ac9eaSMiao Xie  * physical stripe bios we've read from the disk so we can recalculate the
25285a6ac9eaSMiao Xie  * parity of the stripe.
25295a6ac9eaSMiao Xie  *
25305a6ac9eaSMiao Xie  * This will usually kick off finish_parity_scrub once all the bios are read
25315a6ac9eaSMiao Xie  * in, but it may trigger parity reconstruction if we had any errors along the way
25325a6ac9eaSMiao Xie  */
25335a6ac9eaSMiao Xie static void raid56_parity_scrub_end_io(struct bio *bio, int err)
25345a6ac9eaSMiao Xie {
25355a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio = bio->bi_private;
25365a6ac9eaSMiao Xie 
25375a6ac9eaSMiao Xie 	if (err)
25385a6ac9eaSMiao Xie 		fail_bio_stripe(rbio, bio);
25395a6ac9eaSMiao Xie 	else
25405a6ac9eaSMiao Xie 		set_bio_pages_uptodate(bio);
25415a6ac9eaSMiao Xie 
25425a6ac9eaSMiao Xie 	bio_put(bio);
25435a6ac9eaSMiao Xie 
25445a6ac9eaSMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
25455a6ac9eaSMiao Xie 		return;
25465a6ac9eaSMiao Xie 
25475a6ac9eaSMiao Xie 	/*
25485a6ac9eaSMiao Xie 	 * this will normally call finish_parity_scrub to start our write,
25495a6ac9eaSMiao Xie 	 * but if there are any failed stripes we'll reconstruct
25505a6ac9eaSMiao Xie 	 * from parity first
25515a6ac9eaSMiao Xie 	 */
25525a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
25535a6ac9eaSMiao Xie }
25545a6ac9eaSMiao Xie 
25555a6ac9eaSMiao Xie static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
25565a6ac9eaSMiao Xie {
25575a6ac9eaSMiao Xie 	int bios_to_read = 0;
25585a6ac9eaSMiao Xie 	struct bio_list bio_list;
25595a6ac9eaSMiao Xie 	int ret;
25605a6ac9eaSMiao Xie 	int pagenr;
25615a6ac9eaSMiao Xie 	int stripe;
25625a6ac9eaSMiao Xie 	struct bio *bio;
25635a6ac9eaSMiao Xie 
25645a6ac9eaSMiao Xie 	ret = alloc_rbio_essential_pages(rbio);
25655a6ac9eaSMiao Xie 	if (ret)
25665a6ac9eaSMiao Xie 		goto cleanup;
25675a6ac9eaSMiao Xie 
25685a6ac9eaSMiao Xie 	bio_list_init(&bio_list);
25695a6ac9eaSMiao Xie 
25705a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
25715a6ac9eaSMiao Xie 	/*
25725a6ac9eaSMiao Xie 	 * build a list of bios to read all the missing parts of this
25735a6ac9eaSMiao Xie 	 * stripe
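	 * (only the rows marked in dbitmap, since those are the only pages
	 * we need in order to recheck the parity)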
25745a6ac9eaSMiao Xie 	 */
2575*2c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
25765a6ac9eaSMiao Xie 		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
25775a6ac9eaSMiao Xie 			struct page *page;
25785a6ac9eaSMiao Xie 			/*
25795a6ac9eaSMiao Xie 			 * we want to find all the pages missing from
25805a6ac9eaSMiao Xie 			 * the rbio and read them from the disk. If
25815a6ac9eaSMiao Xie 			 * page_in_rbio finds a page in the bio list
25825a6ac9eaSMiao Xie 			 * we don't need to read it off the stripe.
25835a6ac9eaSMiao Xie 			 */
25845a6ac9eaSMiao Xie 			page = page_in_rbio(rbio, stripe, pagenr, 1);
25855a6ac9eaSMiao Xie 			if (page)
25865a6ac9eaSMiao Xie 				continue;
25875a6ac9eaSMiao Xie 
25885a6ac9eaSMiao Xie 			page = rbio_stripe_page(rbio, stripe, pagenr);
25895a6ac9eaSMiao Xie 			/*
25905a6ac9eaSMiao Xie 			 * the bio cache may have handed us an uptodate
25915a6ac9eaSMiao Xie 			 * page. If so, be happy and use it
25925a6ac9eaSMiao Xie 			 */
25935a6ac9eaSMiao Xie 			if (PageUptodate(page))
25945a6ac9eaSMiao Xie 				continue;
25955a6ac9eaSMiao Xie 
25965a6ac9eaSMiao Xie 			ret = rbio_add_io_page(rbio, &bio_list, page,
25975a6ac9eaSMiao Xie 				       stripe, pagenr, rbio->stripe_len);
25985a6ac9eaSMiao Xie 			if (ret)
25995a6ac9eaSMiao Xie 				goto cleanup;
26005a6ac9eaSMiao Xie 		}
26015a6ac9eaSMiao Xie 	}
26025a6ac9eaSMiao Xie 
26035a6ac9eaSMiao Xie 	bios_to_read = bio_list_size(&bio_list);
26045a6ac9eaSMiao Xie 	if (!bios_to_read) {
26055a6ac9eaSMiao Xie 		/*
26065a6ac9eaSMiao Xie 		 * this can happen if others have merged with
26075a6ac9eaSMiao Xie 		 * us, it means there is nothing left to read.
26085a6ac9eaSMiao Xie 		 * But if there are missing devices it may not be
26095a6ac9eaSMiao Xie 		 * safe to do the full stripe write yet.
26105a6ac9eaSMiao Xie 		 */
26115a6ac9eaSMiao Xie 		goto finish;
26125a6ac9eaSMiao Xie 	}
26135a6ac9eaSMiao Xie 
26145a6ac9eaSMiao Xie 	/*
26155a6ac9eaSMiao Xie 	 * the bbio may be freed once we submit the last bio. Make sure
26165a6ac9eaSMiao Xie 	 * not to touch it after that
26175a6ac9eaSMiao Xie 	 */
26185a6ac9eaSMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
26195a6ac9eaSMiao Xie 	while (1) {
26205a6ac9eaSMiao Xie 		bio = bio_list_pop(&bio_list);
26215a6ac9eaSMiao Xie 		if (!bio)
26225a6ac9eaSMiao Xie 			break;
26235a6ac9eaSMiao Xie 
26245a6ac9eaSMiao Xie 		bio->bi_private = rbio;
26255a6ac9eaSMiao Xie 		bio->bi_end_io = raid56_parity_scrub_end_io;
26265a6ac9eaSMiao Xie 
26275a6ac9eaSMiao Xie 		btrfs_bio_wq_end_io(rbio->fs_info, bio,
26285a6ac9eaSMiao Xie 				    BTRFS_WQ_ENDIO_RAID56);
26295a6ac9eaSMiao Xie 
26305a6ac9eaSMiao Xie 		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
26315a6ac9eaSMiao Xie 		submit_bio(READ, bio);
26325a6ac9eaSMiao Xie 	}
26335a6ac9eaSMiao Xie 	/* the actual write will happen once the reads are done */
26345a6ac9eaSMiao Xie 	return;
26355a6ac9eaSMiao Xie 
26365a6ac9eaSMiao Xie cleanup:
26375a6ac9eaSMiao Xie 	rbio_orig_end_io(rbio, -EIO, 0);
26385a6ac9eaSMiao Xie 	return;
26395a6ac9eaSMiao Xie 
26405a6ac9eaSMiao Xie finish:
26415a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
26425a6ac9eaSMiao Xie }
26435a6ac9eaSMiao Xie 
26445a6ac9eaSMiao Xie static void scrub_parity_work(struct btrfs_work *work)
26455a6ac9eaSMiao Xie {
26465a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
26475a6ac9eaSMiao Xie 
26485a6ac9eaSMiao Xie 	rbio = container_of(work, struct btrfs_raid_bio, work);
26495a6ac9eaSMiao Xie 	raid56_parity_scrub_stripe(rbio);
26505a6ac9eaSMiao Xie }
26515a6ac9eaSMiao Xie 
26525a6ac9eaSMiao Xie static void async_scrub_parity(struct btrfs_raid_bio *rbio)
26535a6ac9eaSMiao Xie {
26545a6ac9eaSMiao Xie 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
26555a6ac9eaSMiao Xie 			scrub_parity_work, NULL, NULL);
26565a6ac9eaSMiao Xie 
26575a6ac9eaSMiao Xie 	btrfs_queue_work(rbio->fs_info->rmw_workers,
26585a6ac9eaSMiao Xie 			 &rbio->work);
26595a6ac9eaSMiao Xie }
26605a6ac9eaSMiao Xie 
26615a6ac9eaSMiao Xie void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
26625a6ac9eaSMiao Xie {
26635a6ac9eaSMiao Xie 	if (!lock_stripe_add(rbio))
26645a6ac9eaSMiao Xie 		async_scrub_parity(rbio);
26655a6ac9eaSMiao Xie }
2666