153b381b3SDavid Woodhouse /* 253b381b3SDavid Woodhouse * Copyright (C) 2012 Fusion-io All rights reserved. 353b381b3SDavid Woodhouse * Copyright (C) 2012 Intel Corp. All rights reserved. 453b381b3SDavid Woodhouse * 553b381b3SDavid Woodhouse * This program is free software; you can redistribute it and/or 653b381b3SDavid Woodhouse * modify it under the terms of the GNU General Public 753b381b3SDavid Woodhouse * License v2 as published by the Free Software Foundation. 853b381b3SDavid Woodhouse * 953b381b3SDavid Woodhouse * This program is distributed in the hope that it will be useful, 1053b381b3SDavid Woodhouse * but WITHOUT ANY WARRANTY; without even the implied warranty of 1153b381b3SDavid Woodhouse * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1253b381b3SDavid Woodhouse * General Public License for more details. 1353b381b3SDavid Woodhouse * 1453b381b3SDavid Woodhouse * You should have received a copy of the GNU General Public 1553b381b3SDavid Woodhouse * License along with this program; if not, write to the 1653b381b3SDavid Woodhouse * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 1753b381b3SDavid Woodhouse * Boston, MA 021110-1307, USA. 1853b381b3SDavid Woodhouse */ 1953b381b3SDavid Woodhouse #include <linux/sched.h> 2053b381b3SDavid Woodhouse #include <linux/wait.h> 2153b381b3SDavid Woodhouse #include <linux/bio.h> 2253b381b3SDavid Woodhouse #include <linux/slab.h> 2353b381b3SDavid Woodhouse #include <linux/buffer_head.h> 2453b381b3SDavid Woodhouse #include <linux/blkdev.h> 2553b381b3SDavid Woodhouse #include <linux/random.h> 2653b381b3SDavid Woodhouse #include <linux/iocontext.h> 2753b381b3SDavid Woodhouse #include <linux/capability.h> 2853b381b3SDavid Woodhouse #include <linux/ratelimit.h> 2953b381b3SDavid Woodhouse #include <linux/kthread.h> 3053b381b3SDavid Woodhouse #include <linux/raid/pq.h> 3153b381b3SDavid Woodhouse #include <linux/hash.h> 3253b381b3SDavid Woodhouse #include <linux/list_sort.h> 3353b381b3SDavid Woodhouse #include <linux/raid/xor.h> 34d7011f5bSGeert Uytterhoeven #include <linux/vmalloc.h> 3553b381b3SDavid Woodhouse #include <asm/div64.h> 3653b381b3SDavid Woodhouse #include "compat.h" 3753b381b3SDavid Woodhouse #include "ctree.h" 3853b381b3SDavid Woodhouse #include "extent_map.h" 3953b381b3SDavid Woodhouse #include "disk-io.h" 4053b381b3SDavid Woodhouse #include "transaction.h" 4153b381b3SDavid Woodhouse #include "print-tree.h" 4253b381b3SDavid Woodhouse #include "volumes.h" 4353b381b3SDavid Woodhouse #include "raid56.h" 4453b381b3SDavid Woodhouse #include "async-thread.h" 4553b381b3SDavid Woodhouse #include "check-integrity.h" 4653b381b3SDavid Woodhouse #include "rcu-string.h" 4753b381b3SDavid Woodhouse 4853b381b3SDavid Woodhouse /* set when additional merges to this rbio are not allowed */ 4953b381b3SDavid Woodhouse #define RBIO_RMW_LOCKED_BIT 1 5053b381b3SDavid Woodhouse 514ae10b3aSChris Mason /* 524ae10b3aSChris Mason * set when this rbio is sitting in the hash, but it is just a cache 534ae10b3aSChris Mason * of past RMW 544ae10b3aSChris Mason */ 554ae10b3aSChris Mason #define RBIO_CACHE_BIT 2 564ae10b3aSChris Mason 574ae10b3aSChris Mason /* 584ae10b3aSChris Mason * set when it is safe to trust the stripe_pages for caching 594ae10b3aSChris Mason */ 604ae10b3aSChris Mason #define RBIO_CACHE_READY_BIT 3 614ae10b3aSChris Mason 624ae10b3aSChris Mason 634ae10b3aSChris Mason #define RBIO_CACHE_SIZE 1024 644ae10b3aSChris Mason 6553b381b3SDavid Woodhouse struct btrfs_raid_bio { 6653b381b3SDavid Woodhouse struct btrfs_fs_info *fs_info; 6753b381b3SDavid Woodhouse struct btrfs_bio *bbio; 6853b381b3SDavid Woodhouse 6953b381b3SDavid Woodhouse /* 7053b381b3SDavid Woodhouse * logical block numbers for the start of each stripe 7153b381b3SDavid Woodhouse * The last one or two are p/q. These are sorted, 7253b381b3SDavid Woodhouse * so raid_map[0] is the start of our full stripe 7353b381b3SDavid Woodhouse */ 7453b381b3SDavid Woodhouse u64 *raid_map; 7553b381b3SDavid Woodhouse 7653b381b3SDavid Woodhouse /* while we're doing rmw on a stripe 7753b381b3SDavid Woodhouse * we put it into a hash table so we can 7853b381b3SDavid Woodhouse * lock the stripe and merge more rbios 7953b381b3SDavid Woodhouse * into it. 8053b381b3SDavid Woodhouse */ 8153b381b3SDavid Woodhouse struct list_head hash_list; 8253b381b3SDavid Woodhouse 8353b381b3SDavid Woodhouse /* 844ae10b3aSChris Mason * LRU list for the stripe cache 854ae10b3aSChris Mason */ 864ae10b3aSChris Mason struct list_head stripe_cache; 874ae10b3aSChris Mason 884ae10b3aSChris Mason /* 8953b381b3SDavid Woodhouse * for scheduling work in the helper threads 9053b381b3SDavid Woodhouse */ 9153b381b3SDavid Woodhouse struct btrfs_work work; 9253b381b3SDavid Woodhouse 9353b381b3SDavid Woodhouse /* 9453b381b3SDavid Woodhouse * bio list and bio_list_lock are used 9553b381b3SDavid Woodhouse * to add more bios into the stripe 9653b381b3SDavid Woodhouse * in hopes of avoiding the full rmw 9753b381b3SDavid Woodhouse */ 9853b381b3SDavid Woodhouse struct bio_list bio_list; 9953b381b3SDavid Woodhouse spinlock_t bio_list_lock; 10053b381b3SDavid Woodhouse 1016ac0f488SChris Mason /* also protected by the bio_list_lock, the 1026ac0f488SChris Mason * plug list is used by the plugging code 1036ac0f488SChris Mason * to collect partial bios while plugged. The 1046ac0f488SChris Mason * stripe locking code also uses it to hand off 10553b381b3SDavid Woodhouse * the stripe lock to the next pending IO 10653b381b3SDavid Woodhouse */ 10753b381b3SDavid Woodhouse struct list_head plug_list; 10853b381b3SDavid Woodhouse 10953b381b3SDavid Woodhouse /* 11053b381b3SDavid Woodhouse * flags that tell us if it is safe to 11153b381b3SDavid Woodhouse * merge with this bio 11253b381b3SDavid Woodhouse */ 11353b381b3SDavid Woodhouse unsigned long flags; 11453b381b3SDavid Woodhouse 11553b381b3SDavid Woodhouse /* size of each individual stripe on disk */ 11653b381b3SDavid Woodhouse int stripe_len; 11753b381b3SDavid Woodhouse 11853b381b3SDavid Woodhouse /* number of data stripes (no p/q) */ 11953b381b3SDavid Woodhouse int nr_data; 12053b381b3SDavid Woodhouse 12153b381b3SDavid Woodhouse /* 12253b381b3SDavid Woodhouse * set if we're doing a parity rebuild 12353b381b3SDavid Woodhouse * for a read from higher up, which is handled 12453b381b3SDavid Woodhouse * differently from a parity rebuild as part of 12553b381b3SDavid Woodhouse * rmw 12653b381b3SDavid Woodhouse */ 12753b381b3SDavid Woodhouse int read_rebuild; 12853b381b3SDavid Woodhouse 12953b381b3SDavid Woodhouse /* first bad stripe */ 13053b381b3SDavid Woodhouse int faila; 13153b381b3SDavid Woodhouse 13253b381b3SDavid Woodhouse /* second bad stripe (for raid6 use) */ 13353b381b3SDavid Woodhouse int failb; 13453b381b3SDavid Woodhouse 13553b381b3SDavid Woodhouse /* 13653b381b3SDavid Woodhouse * number of pages needed to represent the full 13753b381b3SDavid Woodhouse * stripe 13853b381b3SDavid Woodhouse */ 13953b381b3SDavid Woodhouse int nr_pages; 14053b381b3SDavid Woodhouse 14153b381b3SDavid Woodhouse /* 14253b381b3SDavid Woodhouse * size of all the bios in the bio_list. This 14353b381b3SDavid Woodhouse * helps us decide if the rbio maps to a full 14453b381b3SDavid Woodhouse * stripe or not 14553b381b3SDavid Woodhouse */ 14653b381b3SDavid Woodhouse int bio_list_bytes; 14753b381b3SDavid Woodhouse 14853b381b3SDavid Woodhouse atomic_t refs; 14953b381b3SDavid Woodhouse 15053b381b3SDavid Woodhouse /* 15153b381b3SDavid Woodhouse * these are two arrays of pointers. We allocate the 15253b381b3SDavid Woodhouse * rbio big enough to hold them both and setup their 15353b381b3SDavid Woodhouse * locations when the rbio is allocated 15453b381b3SDavid Woodhouse */ 15553b381b3SDavid Woodhouse 15653b381b3SDavid Woodhouse /* pointers to pages that we allocated for 15753b381b3SDavid Woodhouse * reading/writing stripes directly from the disk (including P/Q) 15853b381b3SDavid Woodhouse */ 15953b381b3SDavid Woodhouse struct page **stripe_pages; 16053b381b3SDavid Woodhouse 16153b381b3SDavid Woodhouse /* 16253b381b3SDavid Woodhouse * pointers to the pages in the bio_list. Stored 16353b381b3SDavid Woodhouse * here for faster lookup 16453b381b3SDavid Woodhouse */ 16553b381b3SDavid Woodhouse struct page **bio_pages; 16653b381b3SDavid Woodhouse }; 16753b381b3SDavid Woodhouse 16853b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio); 16953b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio); 17053b381b3SDavid Woodhouse static void rmw_work(struct btrfs_work *work); 17153b381b3SDavid Woodhouse static void read_rebuild_work(struct btrfs_work *work); 17253b381b3SDavid Woodhouse static void async_rmw_stripe(struct btrfs_raid_bio *rbio); 17353b381b3SDavid Woodhouse static void async_read_rebuild(struct btrfs_raid_bio *rbio); 17453b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio); 17553b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed); 17653b381b3SDavid Woodhouse static void __free_raid_bio(struct btrfs_raid_bio *rbio); 17753b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio); 17853b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio); 17953b381b3SDavid Woodhouse 18053b381b3SDavid Woodhouse /* 18153b381b3SDavid Woodhouse * the stripe hash table is used for locking, and to collect 18253b381b3SDavid Woodhouse * bios in hopes of making a full stripe 18353b381b3SDavid Woodhouse */ 18453b381b3SDavid Woodhouse int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) 18553b381b3SDavid Woodhouse { 18653b381b3SDavid Woodhouse struct btrfs_stripe_hash_table *table; 18753b381b3SDavid Woodhouse struct btrfs_stripe_hash_table *x; 18853b381b3SDavid Woodhouse struct btrfs_stripe_hash *cur; 18953b381b3SDavid Woodhouse struct btrfs_stripe_hash *h; 19053b381b3SDavid Woodhouse int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS; 19153b381b3SDavid Woodhouse int i; 19283c8266aSDavid Sterba int table_size; 19353b381b3SDavid Woodhouse 19453b381b3SDavid Woodhouse if (info->stripe_hash_table) 19553b381b3SDavid Woodhouse return 0; 19653b381b3SDavid Woodhouse 19783c8266aSDavid Sterba /* 19883c8266aSDavid Sterba * The table is large, starting with order 4 and can go as high as 19983c8266aSDavid Sterba * order 7 in case lock debugging is turned on. 20083c8266aSDavid Sterba * 20183c8266aSDavid Sterba * Try harder to allocate and fallback to vmalloc to lower the chance 20283c8266aSDavid Sterba * of a failing mount. 20383c8266aSDavid Sterba */ 20483c8266aSDavid Sterba table_size = sizeof(*table) + sizeof(*h) * num_entries; 20583c8266aSDavid Sterba table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 20683c8266aSDavid Sterba if (!table) { 20783c8266aSDavid Sterba table = vzalloc(table_size); 20853b381b3SDavid Woodhouse if (!table) 20953b381b3SDavid Woodhouse return -ENOMEM; 21083c8266aSDavid Sterba } 21153b381b3SDavid Woodhouse 2124ae10b3aSChris Mason spin_lock_init(&table->cache_lock); 2134ae10b3aSChris Mason INIT_LIST_HEAD(&table->stripe_cache); 2144ae10b3aSChris Mason 21553b381b3SDavid Woodhouse h = table->table; 21653b381b3SDavid Woodhouse 21753b381b3SDavid Woodhouse for (i = 0; i < num_entries; i++) { 21853b381b3SDavid Woodhouse cur = h + i; 21953b381b3SDavid Woodhouse INIT_LIST_HEAD(&cur->hash_list); 22053b381b3SDavid Woodhouse spin_lock_init(&cur->lock); 22153b381b3SDavid Woodhouse init_waitqueue_head(&cur->wait); 22253b381b3SDavid Woodhouse } 22353b381b3SDavid Woodhouse 22453b381b3SDavid Woodhouse x = cmpxchg(&info->stripe_hash_table, NULL, table); 22583c8266aSDavid Sterba if (x) { 22683c8266aSDavid Sterba if (is_vmalloc_addr(x)) 22783c8266aSDavid Sterba vfree(x); 22883c8266aSDavid Sterba else 22953b381b3SDavid Woodhouse kfree(x); 23083c8266aSDavid Sterba } 23153b381b3SDavid Woodhouse return 0; 23253b381b3SDavid Woodhouse } 23353b381b3SDavid Woodhouse 23453b381b3SDavid Woodhouse /* 2354ae10b3aSChris Mason * caching an rbio means to copy anything from the 2364ae10b3aSChris Mason * bio_pages array into the stripe_pages array. We 2374ae10b3aSChris Mason * use the page uptodate bit in the stripe cache array 2384ae10b3aSChris Mason * to indicate if it has valid data 2394ae10b3aSChris Mason * 2404ae10b3aSChris Mason * once the caching is done, we set the cache ready 2414ae10b3aSChris Mason * bit. 2424ae10b3aSChris Mason */ 2434ae10b3aSChris Mason static void cache_rbio_pages(struct btrfs_raid_bio *rbio) 2444ae10b3aSChris Mason { 2454ae10b3aSChris Mason int i; 2464ae10b3aSChris Mason char *s; 2474ae10b3aSChris Mason char *d; 2484ae10b3aSChris Mason int ret; 2494ae10b3aSChris Mason 2504ae10b3aSChris Mason ret = alloc_rbio_pages(rbio); 2514ae10b3aSChris Mason if (ret) 2524ae10b3aSChris Mason return; 2534ae10b3aSChris Mason 2544ae10b3aSChris Mason for (i = 0; i < rbio->nr_pages; i++) { 2554ae10b3aSChris Mason if (!rbio->bio_pages[i]) 2564ae10b3aSChris Mason continue; 2574ae10b3aSChris Mason 2584ae10b3aSChris Mason s = kmap(rbio->bio_pages[i]); 2594ae10b3aSChris Mason d = kmap(rbio->stripe_pages[i]); 2604ae10b3aSChris Mason 2614ae10b3aSChris Mason memcpy(d, s, PAGE_CACHE_SIZE); 2624ae10b3aSChris Mason 2634ae10b3aSChris Mason kunmap(rbio->bio_pages[i]); 2644ae10b3aSChris Mason kunmap(rbio->stripe_pages[i]); 2654ae10b3aSChris Mason SetPageUptodate(rbio->stripe_pages[i]); 2664ae10b3aSChris Mason } 2674ae10b3aSChris Mason set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 2684ae10b3aSChris Mason } 2694ae10b3aSChris Mason 2704ae10b3aSChris Mason /* 27153b381b3SDavid Woodhouse * we hash on the first logical address of the stripe 27253b381b3SDavid Woodhouse */ 27353b381b3SDavid Woodhouse static int rbio_bucket(struct btrfs_raid_bio *rbio) 27453b381b3SDavid Woodhouse { 27553b381b3SDavid Woodhouse u64 num = rbio->raid_map[0]; 27653b381b3SDavid Woodhouse 27753b381b3SDavid Woodhouse /* 27853b381b3SDavid Woodhouse * we shift down quite a bit. We're using byte 27953b381b3SDavid Woodhouse * addressing, and most of the lower bits are zeros. 28053b381b3SDavid Woodhouse * This tends to upset hash_64, and it consistently 28153b381b3SDavid Woodhouse * returns just one or two different values. 28253b381b3SDavid Woodhouse * 28353b381b3SDavid Woodhouse * shifting off the lower bits fixes things. 28453b381b3SDavid Woodhouse */ 28553b381b3SDavid Woodhouse return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS); 28653b381b3SDavid Woodhouse } 28753b381b3SDavid Woodhouse 28853b381b3SDavid Woodhouse /* 2894ae10b3aSChris Mason * stealing an rbio means taking all the uptodate pages from the stripe 2904ae10b3aSChris Mason * array in the source rbio and putting them into the destination rbio 2914ae10b3aSChris Mason */ 2924ae10b3aSChris Mason static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest) 2934ae10b3aSChris Mason { 2944ae10b3aSChris Mason int i; 2954ae10b3aSChris Mason struct page *s; 2964ae10b3aSChris Mason struct page *d; 2974ae10b3aSChris Mason 2984ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags)) 2994ae10b3aSChris Mason return; 3004ae10b3aSChris Mason 3014ae10b3aSChris Mason for (i = 0; i < dest->nr_pages; i++) { 3024ae10b3aSChris Mason s = src->stripe_pages[i]; 3034ae10b3aSChris Mason if (!s || !PageUptodate(s)) { 3044ae10b3aSChris Mason continue; 3054ae10b3aSChris Mason } 3064ae10b3aSChris Mason 3074ae10b3aSChris Mason d = dest->stripe_pages[i]; 3084ae10b3aSChris Mason if (d) 3094ae10b3aSChris Mason __free_page(d); 3104ae10b3aSChris Mason 3114ae10b3aSChris Mason dest->stripe_pages[i] = s; 3124ae10b3aSChris Mason src->stripe_pages[i] = NULL; 3134ae10b3aSChris Mason } 3144ae10b3aSChris Mason } 3154ae10b3aSChris Mason 3164ae10b3aSChris Mason /* 31753b381b3SDavid Woodhouse * merging means we take the bio_list from the victim and 31853b381b3SDavid Woodhouse * splice it into the destination. The victim should 31953b381b3SDavid Woodhouse * be discarded afterwards. 32053b381b3SDavid Woodhouse * 32153b381b3SDavid Woodhouse * must be called with dest->rbio_list_lock held 32253b381b3SDavid Woodhouse */ 32353b381b3SDavid Woodhouse static void merge_rbio(struct btrfs_raid_bio *dest, 32453b381b3SDavid Woodhouse struct btrfs_raid_bio *victim) 32553b381b3SDavid Woodhouse { 32653b381b3SDavid Woodhouse bio_list_merge(&dest->bio_list, &victim->bio_list); 32753b381b3SDavid Woodhouse dest->bio_list_bytes += victim->bio_list_bytes; 32853b381b3SDavid Woodhouse bio_list_init(&victim->bio_list); 32953b381b3SDavid Woodhouse } 33053b381b3SDavid Woodhouse 33153b381b3SDavid Woodhouse /* 3324ae10b3aSChris Mason * used to prune items that are in the cache. The caller 3334ae10b3aSChris Mason * must hold the hash table lock. 3344ae10b3aSChris Mason */ 3354ae10b3aSChris Mason static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 3364ae10b3aSChris Mason { 3374ae10b3aSChris Mason int bucket = rbio_bucket(rbio); 3384ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 3394ae10b3aSChris Mason struct btrfs_stripe_hash *h; 3404ae10b3aSChris Mason int freeit = 0; 3414ae10b3aSChris Mason 3424ae10b3aSChris Mason /* 3434ae10b3aSChris Mason * check the bit again under the hash table lock. 3444ae10b3aSChris Mason */ 3454ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 3464ae10b3aSChris Mason return; 3474ae10b3aSChris Mason 3484ae10b3aSChris Mason table = rbio->fs_info->stripe_hash_table; 3494ae10b3aSChris Mason h = table->table + bucket; 3504ae10b3aSChris Mason 3514ae10b3aSChris Mason /* hold the lock for the bucket because we may be 3524ae10b3aSChris Mason * removing it from the hash table 3534ae10b3aSChris Mason */ 3544ae10b3aSChris Mason spin_lock(&h->lock); 3554ae10b3aSChris Mason 3564ae10b3aSChris Mason /* 3574ae10b3aSChris Mason * hold the lock for the bio list because we need 3584ae10b3aSChris Mason * to make sure the bio list is empty 3594ae10b3aSChris Mason */ 3604ae10b3aSChris Mason spin_lock(&rbio->bio_list_lock); 3614ae10b3aSChris Mason 3624ae10b3aSChris Mason if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { 3634ae10b3aSChris Mason list_del_init(&rbio->stripe_cache); 3644ae10b3aSChris Mason table->cache_size -= 1; 3654ae10b3aSChris Mason freeit = 1; 3664ae10b3aSChris Mason 3674ae10b3aSChris Mason /* if the bio list isn't empty, this rbio is 3684ae10b3aSChris Mason * still involved in an IO. We take it out 3694ae10b3aSChris Mason * of the cache list, and drop the ref that 3704ae10b3aSChris Mason * was held for the list. 3714ae10b3aSChris Mason * 3724ae10b3aSChris Mason * If the bio_list was empty, we also remove 3734ae10b3aSChris Mason * the rbio from the hash_table, and drop 3744ae10b3aSChris Mason * the corresponding ref 3754ae10b3aSChris Mason */ 3764ae10b3aSChris Mason if (bio_list_empty(&rbio->bio_list)) { 3774ae10b3aSChris Mason if (!list_empty(&rbio->hash_list)) { 3784ae10b3aSChris Mason list_del_init(&rbio->hash_list); 3794ae10b3aSChris Mason atomic_dec(&rbio->refs); 3804ae10b3aSChris Mason BUG_ON(!list_empty(&rbio->plug_list)); 3814ae10b3aSChris Mason } 3824ae10b3aSChris Mason } 3834ae10b3aSChris Mason } 3844ae10b3aSChris Mason 3854ae10b3aSChris Mason spin_unlock(&rbio->bio_list_lock); 3864ae10b3aSChris Mason spin_unlock(&h->lock); 3874ae10b3aSChris Mason 3884ae10b3aSChris Mason if (freeit) 3894ae10b3aSChris Mason __free_raid_bio(rbio); 3904ae10b3aSChris Mason } 3914ae10b3aSChris Mason 3924ae10b3aSChris Mason /* 3934ae10b3aSChris Mason * prune a given rbio from the cache 3944ae10b3aSChris Mason */ 3954ae10b3aSChris Mason static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) 3964ae10b3aSChris Mason { 3974ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 3984ae10b3aSChris Mason unsigned long flags; 3994ae10b3aSChris Mason 4004ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) 4014ae10b3aSChris Mason return; 4024ae10b3aSChris Mason 4034ae10b3aSChris Mason table = rbio->fs_info->stripe_hash_table; 4044ae10b3aSChris Mason 4054ae10b3aSChris Mason spin_lock_irqsave(&table->cache_lock, flags); 4064ae10b3aSChris Mason __remove_rbio_from_cache(rbio); 4074ae10b3aSChris Mason spin_unlock_irqrestore(&table->cache_lock, flags); 4084ae10b3aSChris Mason } 4094ae10b3aSChris Mason 4104ae10b3aSChris Mason /* 4114ae10b3aSChris Mason * remove everything in the cache 4124ae10b3aSChris Mason */ 413*48a3b636SEric Sandeen static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info) 4144ae10b3aSChris Mason { 4154ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 4164ae10b3aSChris Mason unsigned long flags; 4174ae10b3aSChris Mason struct btrfs_raid_bio *rbio; 4184ae10b3aSChris Mason 4194ae10b3aSChris Mason table = info->stripe_hash_table; 4204ae10b3aSChris Mason 4214ae10b3aSChris Mason spin_lock_irqsave(&table->cache_lock, flags); 4224ae10b3aSChris Mason while (!list_empty(&table->stripe_cache)) { 4234ae10b3aSChris Mason rbio = list_entry(table->stripe_cache.next, 4244ae10b3aSChris Mason struct btrfs_raid_bio, 4254ae10b3aSChris Mason stripe_cache); 4264ae10b3aSChris Mason __remove_rbio_from_cache(rbio); 4274ae10b3aSChris Mason } 4284ae10b3aSChris Mason spin_unlock_irqrestore(&table->cache_lock, flags); 4294ae10b3aSChris Mason } 4304ae10b3aSChris Mason 4314ae10b3aSChris Mason /* 4324ae10b3aSChris Mason * remove all cached entries and free the hash table 4334ae10b3aSChris Mason * used by unmount 43453b381b3SDavid Woodhouse */ 43553b381b3SDavid Woodhouse void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info) 43653b381b3SDavid Woodhouse { 43753b381b3SDavid Woodhouse if (!info->stripe_hash_table) 43853b381b3SDavid Woodhouse return; 4394ae10b3aSChris Mason btrfs_clear_rbio_cache(info); 44083c8266aSDavid Sterba if (is_vmalloc_addr(info->stripe_hash_table)) 44183c8266aSDavid Sterba vfree(info->stripe_hash_table); 44283c8266aSDavid Sterba else 44353b381b3SDavid Woodhouse kfree(info->stripe_hash_table); 44453b381b3SDavid Woodhouse info->stripe_hash_table = NULL; 44553b381b3SDavid Woodhouse } 44653b381b3SDavid Woodhouse 44753b381b3SDavid Woodhouse /* 4484ae10b3aSChris Mason * insert an rbio into the stripe cache. It 4494ae10b3aSChris Mason * must have already been prepared by calling 4504ae10b3aSChris Mason * cache_rbio_pages 4514ae10b3aSChris Mason * 4524ae10b3aSChris Mason * If this rbio was already cached, it gets 4534ae10b3aSChris Mason * moved to the front of the lru. 4544ae10b3aSChris Mason * 4554ae10b3aSChris Mason * If the size of the rbio cache is too big, we 4564ae10b3aSChris Mason * prune an item. 4574ae10b3aSChris Mason */ 4584ae10b3aSChris Mason static void cache_rbio(struct btrfs_raid_bio *rbio) 4594ae10b3aSChris Mason { 4604ae10b3aSChris Mason struct btrfs_stripe_hash_table *table; 4614ae10b3aSChris Mason unsigned long flags; 4624ae10b3aSChris Mason 4634ae10b3aSChris Mason if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) 4644ae10b3aSChris Mason return; 4654ae10b3aSChris Mason 4664ae10b3aSChris Mason table = rbio->fs_info->stripe_hash_table; 4674ae10b3aSChris Mason 4684ae10b3aSChris Mason spin_lock_irqsave(&table->cache_lock, flags); 4694ae10b3aSChris Mason spin_lock(&rbio->bio_list_lock); 4704ae10b3aSChris Mason 4714ae10b3aSChris Mason /* bump our ref if we were not in the list before */ 4724ae10b3aSChris Mason if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) 4734ae10b3aSChris Mason atomic_inc(&rbio->refs); 4744ae10b3aSChris Mason 4754ae10b3aSChris Mason if (!list_empty(&rbio->stripe_cache)){ 4764ae10b3aSChris Mason list_move(&rbio->stripe_cache, &table->stripe_cache); 4774ae10b3aSChris Mason } else { 4784ae10b3aSChris Mason list_add(&rbio->stripe_cache, &table->stripe_cache); 4794ae10b3aSChris Mason table->cache_size += 1; 4804ae10b3aSChris Mason } 4814ae10b3aSChris Mason 4824ae10b3aSChris Mason spin_unlock(&rbio->bio_list_lock); 4834ae10b3aSChris Mason 4844ae10b3aSChris Mason if (table->cache_size > RBIO_CACHE_SIZE) { 4854ae10b3aSChris Mason struct btrfs_raid_bio *found; 4864ae10b3aSChris Mason 4874ae10b3aSChris Mason found = list_entry(table->stripe_cache.prev, 4884ae10b3aSChris Mason struct btrfs_raid_bio, 4894ae10b3aSChris Mason stripe_cache); 4904ae10b3aSChris Mason 4914ae10b3aSChris Mason if (found != rbio) 4924ae10b3aSChris Mason __remove_rbio_from_cache(found); 4934ae10b3aSChris Mason } 4944ae10b3aSChris Mason 4954ae10b3aSChris Mason spin_unlock_irqrestore(&table->cache_lock, flags); 4964ae10b3aSChris Mason return; 4974ae10b3aSChris Mason } 4984ae10b3aSChris Mason 4994ae10b3aSChris Mason /* 50053b381b3SDavid Woodhouse * helper function to run the xor_blocks api. It is only 50153b381b3SDavid Woodhouse * able to do MAX_XOR_BLOCKS at a time, so we need to 50253b381b3SDavid Woodhouse * loop through. 50353b381b3SDavid Woodhouse */ 50453b381b3SDavid Woodhouse static void run_xor(void **pages, int src_cnt, ssize_t len) 50553b381b3SDavid Woodhouse { 50653b381b3SDavid Woodhouse int src_off = 0; 50753b381b3SDavid Woodhouse int xor_src_cnt = 0; 50853b381b3SDavid Woodhouse void *dest = pages[src_cnt]; 50953b381b3SDavid Woodhouse 51053b381b3SDavid Woodhouse while(src_cnt > 0) { 51153b381b3SDavid Woodhouse xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS); 51253b381b3SDavid Woodhouse xor_blocks(xor_src_cnt, len, dest, pages + src_off); 51353b381b3SDavid Woodhouse 51453b381b3SDavid Woodhouse src_cnt -= xor_src_cnt; 51553b381b3SDavid Woodhouse src_off += xor_src_cnt; 51653b381b3SDavid Woodhouse } 51753b381b3SDavid Woodhouse } 51853b381b3SDavid Woodhouse 51953b381b3SDavid Woodhouse /* 52053b381b3SDavid Woodhouse * returns true if the bio list inside this rbio 52153b381b3SDavid Woodhouse * covers an entire stripe (no rmw required). 52253b381b3SDavid Woodhouse * Must be called with the bio list lock held, or 52353b381b3SDavid Woodhouse * at a time when you know it is impossible to add 52453b381b3SDavid Woodhouse * new bios into the list 52553b381b3SDavid Woodhouse */ 52653b381b3SDavid Woodhouse static int __rbio_is_full(struct btrfs_raid_bio *rbio) 52753b381b3SDavid Woodhouse { 52853b381b3SDavid Woodhouse unsigned long size = rbio->bio_list_bytes; 52953b381b3SDavid Woodhouse int ret = 1; 53053b381b3SDavid Woodhouse 53153b381b3SDavid Woodhouse if (size != rbio->nr_data * rbio->stripe_len) 53253b381b3SDavid Woodhouse ret = 0; 53353b381b3SDavid Woodhouse 53453b381b3SDavid Woodhouse BUG_ON(size > rbio->nr_data * rbio->stripe_len); 53553b381b3SDavid Woodhouse return ret; 53653b381b3SDavid Woodhouse } 53753b381b3SDavid Woodhouse 53853b381b3SDavid Woodhouse static int rbio_is_full(struct btrfs_raid_bio *rbio) 53953b381b3SDavid Woodhouse { 54053b381b3SDavid Woodhouse unsigned long flags; 54153b381b3SDavid Woodhouse int ret; 54253b381b3SDavid Woodhouse 54353b381b3SDavid Woodhouse spin_lock_irqsave(&rbio->bio_list_lock, flags); 54453b381b3SDavid Woodhouse ret = __rbio_is_full(rbio); 54553b381b3SDavid Woodhouse spin_unlock_irqrestore(&rbio->bio_list_lock, flags); 54653b381b3SDavid Woodhouse return ret; 54753b381b3SDavid Woodhouse } 54853b381b3SDavid Woodhouse 54953b381b3SDavid Woodhouse /* 55053b381b3SDavid Woodhouse * returns 1 if it is safe to merge two rbios together. 55153b381b3SDavid Woodhouse * The merging is safe if the two rbios correspond to 55253b381b3SDavid Woodhouse * the same stripe and if they are both going in the same 55353b381b3SDavid Woodhouse * direction (read vs write), and if neither one is 55453b381b3SDavid Woodhouse * locked for final IO 55553b381b3SDavid Woodhouse * 55653b381b3SDavid Woodhouse * The caller is responsible for locking such that 55753b381b3SDavid Woodhouse * rmw_locked is safe to test 55853b381b3SDavid Woodhouse */ 55953b381b3SDavid Woodhouse static int rbio_can_merge(struct btrfs_raid_bio *last, 56053b381b3SDavid Woodhouse struct btrfs_raid_bio *cur) 56153b381b3SDavid Woodhouse { 56253b381b3SDavid Woodhouse if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) || 56353b381b3SDavid Woodhouse test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) 56453b381b3SDavid Woodhouse return 0; 56553b381b3SDavid Woodhouse 5664ae10b3aSChris Mason /* 5674ae10b3aSChris Mason * we can't merge with cached rbios, since the 5684ae10b3aSChris Mason * idea is that when we merge the destination 5694ae10b3aSChris Mason * rbio is going to run our IO for us. We can 5704ae10b3aSChris Mason * steal from cached rbio's though, other functions 5714ae10b3aSChris Mason * handle that. 5724ae10b3aSChris Mason */ 5734ae10b3aSChris Mason if (test_bit(RBIO_CACHE_BIT, &last->flags) || 5744ae10b3aSChris Mason test_bit(RBIO_CACHE_BIT, &cur->flags)) 5754ae10b3aSChris Mason return 0; 5764ae10b3aSChris Mason 57753b381b3SDavid Woodhouse if (last->raid_map[0] != 57853b381b3SDavid Woodhouse cur->raid_map[0]) 57953b381b3SDavid Woodhouse return 0; 58053b381b3SDavid Woodhouse 58153b381b3SDavid Woodhouse /* reads can't merge with writes */ 58253b381b3SDavid Woodhouse if (last->read_rebuild != 58353b381b3SDavid Woodhouse cur->read_rebuild) { 58453b381b3SDavid Woodhouse return 0; 58553b381b3SDavid Woodhouse } 58653b381b3SDavid Woodhouse 58753b381b3SDavid Woodhouse return 1; 58853b381b3SDavid Woodhouse } 58953b381b3SDavid Woodhouse 59053b381b3SDavid Woodhouse /* 59153b381b3SDavid Woodhouse * helper to index into the pstripe 59253b381b3SDavid Woodhouse */ 59353b381b3SDavid Woodhouse static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) 59453b381b3SDavid Woodhouse { 59553b381b3SDavid Woodhouse index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT; 59653b381b3SDavid Woodhouse return rbio->stripe_pages[index]; 59753b381b3SDavid Woodhouse } 59853b381b3SDavid Woodhouse 59953b381b3SDavid Woodhouse /* 60053b381b3SDavid Woodhouse * helper to index into the qstripe, returns null 60153b381b3SDavid Woodhouse * if there is no qstripe 60253b381b3SDavid Woodhouse */ 60353b381b3SDavid Woodhouse static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) 60453b381b3SDavid Woodhouse { 60553b381b3SDavid Woodhouse if (rbio->nr_data + 1 == rbio->bbio->num_stripes) 60653b381b3SDavid Woodhouse return NULL; 60753b381b3SDavid Woodhouse 60853b381b3SDavid Woodhouse index += ((rbio->nr_data + 1) * rbio->stripe_len) >> 60953b381b3SDavid Woodhouse PAGE_CACHE_SHIFT; 61053b381b3SDavid Woodhouse return rbio->stripe_pages[index]; 61153b381b3SDavid Woodhouse } 61253b381b3SDavid Woodhouse 61353b381b3SDavid Woodhouse /* 61453b381b3SDavid Woodhouse * The first stripe in the table for a logical address 61553b381b3SDavid Woodhouse * has the lock. rbios are added in one of three ways: 61653b381b3SDavid Woodhouse * 61753b381b3SDavid Woodhouse * 1) Nobody has the stripe locked yet. The rbio is given 61853b381b3SDavid Woodhouse * the lock and 0 is returned. The caller must start the IO 61953b381b3SDavid Woodhouse * themselves. 62053b381b3SDavid Woodhouse * 62153b381b3SDavid Woodhouse * 2) Someone has the stripe locked, but we're able to merge 62253b381b3SDavid Woodhouse * with the lock owner. The rbio is freed and the IO will 62353b381b3SDavid Woodhouse * start automatically along with the existing rbio. 1 is returned. 62453b381b3SDavid Woodhouse * 62553b381b3SDavid Woodhouse * 3) Someone has the stripe locked, but we're not able to merge. 62653b381b3SDavid Woodhouse * The rbio is added to the lock owner's plug list, or merged into 62753b381b3SDavid Woodhouse * an rbio already on the plug list. When the lock owner unlocks, 62853b381b3SDavid Woodhouse * the next rbio on the list is run and the IO is started automatically. 62953b381b3SDavid Woodhouse * 1 is returned 63053b381b3SDavid Woodhouse * 63153b381b3SDavid Woodhouse * If we return 0, the caller still owns the rbio and must continue with 63253b381b3SDavid Woodhouse * IO submission. If we return 1, the caller must assume the rbio has 63353b381b3SDavid Woodhouse * already been freed. 63453b381b3SDavid Woodhouse */ 63553b381b3SDavid Woodhouse static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) 63653b381b3SDavid Woodhouse { 63753b381b3SDavid Woodhouse int bucket = rbio_bucket(rbio); 63853b381b3SDavid Woodhouse struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; 63953b381b3SDavid Woodhouse struct btrfs_raid_bio *cur; 64053b381b3SDavid Woodhouse struct btrfs_raid_bio *pending; 64153b381b3SDavid Woodhouse unsigned long flags; 64253b381b3SDavid Woodhouse DEFINE_WAIT(wait); 64353b381b3SDavid Woodhouse struct btrfs_raid_bio *freeit = NULL; 6444ae10b3aSChris Mason struct btrfs_raid_bio *cache_drop = NULL; 64553b381b3SDavid Woodhouse int ret = 0; 64653b381b3SDavid Woodhouse int walk = 0; 64753b381b3SDavid Woodhouse 64853b381b3SDavid Woodhouse spin_lock_irqsave(&h->lock, flags); 64953b381b3SDavid Woodhouse list_for_each_entry(cur, &h->hash_list, hash_list) { 65053b381b3SDavid Woodhouse walk++; 65153b381b3SDavid Woodhouse if (cur->raid_map[0] == rbio->raid_map[0]) { 65253b381b3SDavid Woodhouse spin_lock(&cur->bio_list_lock); 65353b381b3SDavid Woodhouse 6544ae10b3aSChris Mason /* can we steal this cached rbio's pages? */ 6554ae10b3aSChris Mason if (bio_list_empty(&cur->bio_list) && 6564ae10b3aSChris Mason list_empty(&cur->plug_list) && 6574ae10b3aSChris Mason test_bit(RBIO_CACHE_BIT, &cur->flags) && 6584ae10b3aSChris Mason !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { 6594ae10b3aSChris Mason list_del_init(&cur->hash_list); 6604ae10b3aSChris Mason atomic_dec(&cur->refs); 6614ae10b3aSChris Mason 6624ae10b3aSChris Mason steal_rbio(cur, rbio); 6634ae10b3aSChris Mason cache_drop = cur; 6644ae10b3aSChris Mason spin_unlock(&cur->bio_list_lock); 6654ae10b3aSChris Mason 6664ae10b3aSChris Mason goto lockit; 6674ae10b3aSChris Mason } 6684ae10b3aSChris Mason 66953b381b3SDavid Woodhouse /* can we merge into the lock owner? */ 67053b381b3SDavid Woodhouse if (rbio_can_merge(cur, rbio)) { 67153b381b3SDavid Woodhouse merge_rbio(cur, rbio); 67253b381b3SDavid Woodhouse spin_unlock(&cur->bio_list_lock); 67353b381b3SDavid Woodhouse freeit = rbio; 67453b381b3SDavid Woodhouse ret = 1; 67553b381b3SDavid Woodhouse goto out; 67653b381b3SDavid Woodhouse } 67753b381b3SDavid Woodhouse 6784ae10b3aSChris Mason 67953b381b3SDavid Woodhouse /* 68053b381b3SDavid Woodhouse * we couldn't merge with the running 68153b381b3SDavid Woodhouse * rbio, see if we can merge with the 68253b381b3SDavid Woodhouse * pending ones. We don't have to 68353b381b3SDavid Woodhouse * check for rmw_locked because there 68453b381b3SDavid Woodhouse * is no way they are inside finish_rmw 68553b381b3SDavid Woodhouse * right now 68653b381b3SDavid Woodhouse */ 68753b381b3SDavid Woodhouse list_for_each_entry(pending, &cur->plug_list, 68853b381b3SDavid Woodhouse plug_list) { 68953b381b3SDavid Woodhouse if (rbio_can_merge(pending, rbio)) { 69053b381b3SDavid Woodhouse merge_rbio(pending, rbio); 69153b381b3SDavid Woodhouse spin_unlock(&cur->bio_list_lock); 69253b381b3SDavid Woodhouse freeit = rbio; 69353b381b3SDavid Woodhouse ret = 1; 69453b381b3SDavid Woodhouse goto out; 69553b381b3SDavid Woodhouse } 69653b381b3SDavid Woodhouse } 69753b381b3SDavid Woodhouse 69853b381b3SDavid Woodhouse /* no merging, put us on the tail of the plug list, 69953b381b3SDavid Woodhouse * our rbio will be started with the currently 70053b381b3SDavid Woodhouse * running rbio unlocks 70153b381b3SDavid Woodhouse */ 70253b381b3SDavid Woodhouse list_add_tail(&rbio->plug_list, &cur->plug_list); 70353b381b3SDavid Woodhouse spin_unlock(&cur->bio_list_lock); 70453b381b3SDavid Woodhouse ret = 1; 70553b381b3SDavid Woodhouse goto out; 70653b381b3SDavid Woodhouse } 70753b381b3SDavid Woodhouse } 7084ae10b3aSChris Mason lockit: 70953b381b3SDavid Woodhouse atomic_inc(&rbio->refs); 71053b381b3SDavid Woodhouse list_add(&rbio->hash_list, &h->hash_list); 71153b381b3SDavid Woodhouse out: 71253b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 7134ae10b3aSChris Mason if (cache_drop) 7144ae10b3aSChris Mason remove_rbio_from_cache(cache_drop); 71553b381b3SDavid Woodhouse if (freeit) 71653b381b3SDavid Woodhouse __free_raid_bio(freeit); 71753b381b3SDavid Woodhouse return ret; 71853b381b3SDavid Woodhouse } 71953b381b3SDavid Woodhouse 72053b381b3SDavid Woodhouse /* 72153b381b3SDavid Woodhouse * called as rmw or parity rebuild is completed. If the plug list has more 72253b381b3SDavid Woodhouse * rbios waiting for this stripe, the next one on the list will be started 72353b381b3SDavid Woodhouse */ 72453b381b3SDavid Woodhouse static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) 72553b381b3SDavid Woodhouse { 72653b381b3SDavid Woodhouse int bucket; 72753b381b3SDavid Woodhouse struct btrfs_stripe_hash *h; 72853b381b3SDavid Woodhouse unsigned long flags; 7294ae10b3aSChris Mason int keep_cache = 0; 73053b381b3SDavid Woodhouse 73153b381b3SDavid Woodhouse bucket = rbio_bucket(rbio); 73253b381b3SDavid Woodhouse h = rbio->fs_info->stripe_hash_table->table + bucket; 73353b381b3SDavid Woodhouse 7344ae10b3aSChris Mason if (list_empty(&rbio->plug_list)) 7354ae10b3aSChris Mason cache_rbio(rbio); 7364ae10b3aSChris Mason 73753b381b3SDavid Woodhouse spin_lock_irqsave(&h->lock, flags); 73853b381b3SDavid Woodhouse spin_lock(&rbio->bio_list_lock); 73953b381b3SDavid Woodhouse 74053b381b3SDavid Woodhouse if (!list_empty(&rbio->hash_list)) { 7414ae10b3aSChris Mason /* 7424ae10b3aSChris Mason * if we're still cached and there is no other IO 7434ae10b3aSChris Mason * to perform, just leave this rbio here for others 7444ae10b3aSChris Mason * to steal from later 7454ae10b3aSChris Mason */ 7464ae10b3aSChris Mason if (list_empty(&rbio->plug_list) && 7474ae10b3aSChris Mason test_bit(RBIO_CACHE_BIT, &rbio->flags)) { 7484ae10b3aSChris Mason keep_cache = 1; 7494ae10b3aSChris Mason clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 7504ae10b3aSChris Mason BUG_ON(!bio_list_empty(&rbio->bio_list)); 7514ae10b3aSChris Mason goto done; 7524ae10b3aSChris Mason } 75353b381b3SDavid Woodhouse 75453b381b3SDavid Woodhouse list_del_init(&rbio->hash_list); 75553b381b3SDavid Woodhouse atomic_dec(&rbio->refs); 75653b381b3SDavid Woodhouse 75753b381b3SDavid Woodhouse /* 75853b381b3SDavid Woodhouse * we use the plug list to hold all the rbios 75953b381b3SDavid Woodhouse * waiting for the chance to lock this stripe. 76053b381b3SDavid Woodhouse * hand the lock over to one of them. 76153b381b3SDavid Woodhouse */ 76253b381b3SDavid Woodhouse if (!list_empty(&rbio->plug_list)) { 76353b381b3SDavid Woodhouse struct btrfs_raid_bio *next; 76453b381b3SDavid Woodhouse struct list_head *head = rbio->plug_list.next; 76553b381b3SDavid Woodhouse 76653b381b3SDavid Woodhouse next = list_entry(head, struct btrfs_raid_bio, 76753b381b3SDavid Woodhouse plug_list); 76853b381b3SDavid Woodhouse 76953b381b3SDavid Woodhouse list_del_init(&rbio->plug_list); 77053b381b3SDavid Woodhouse 77153b381b3SDavid Woodhouse list_add(&next->hash_list, &h->hash_list); 77253b381b3SDavid Woodhouse atomic_inc(&next->refs); 77353b381b3SDavid Woodhouse spin_unlock(&rbio->bio_list_lock); 77453b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 77553b381b3SDavid Woodhouse 77653b381b3SDavid Woodhouse if (next->read_rebuild) 77753b381b3SDavid Woodhouse async_read_rebuild(next); 7784ae10b3aSChris Mason else { 7794ae10b3aSChris Mason steal_rbio(rbio, next); 78053b381b3SDavid Woodhouse async_rmw_stripe(next); 7814ae10b3aSChris Mason } 78253b381b3SDavid Woodhouse 78353b381b3SDavid Woodhouse goto done_nolock; 78453b381b3SDavid Woodhouse } else if (waitqueue_active(&h->wait)) { 78553b381b3SDavid Woodhouse spin_unlock(&rbio->bio_list_lock); 78653b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 78753b381b3SDavid Woodhouse wake_up(&h->wait); 78853b381b3SDavid Woodhouse goto done_nolock; 78953b381b3SDavid Woodhouse } 79053b381b3SDavid Woodhouse } 7914ae10b3aSChris Mason done: 79253b381b3SDavid Woodhouse spin_unlock(&rbio->bio_list_lock); 79353b381b3SDavid Woodhouse spin_unlock_irqrestore(&h->lock, flags); 79453b381b3SDavid Woodhouse 79553b381b3SDavid Woodhouse done_nolock: 7964ae10b3aSChris Mason if (!keep_cache) 7974ae10b3aSChris Mason remove_rbio_from_cache(rbio); 79853b381b3SDavid Woodhouse } 79953b381b3SDavid Woodhouse 80053b381b3SDavid Woodhouse static void __free_raid_bio(struct btrfs_raid_bio *rbio) 80153b381b3SDavid Woodhouse { 80253b381b3SDavid Woodhouse int i; 80353b381b3SDavid Woodhouse 80453b381b3SDavid Woodhouse WARN_ON(atomic_read(&rbio->refs) < 0); 80553b381b3SDavid Woodhouse if (!atomic_dec_and_test(&rbio->refs)) 80653b381b3SDavid Woodhouse return; 80753b381b3SDavid Woodhouse 8084ae10b3aSChris Mason WARN_ON(!list_empty(&rbio->stripe_cache)); 80953b381b3SDavid Woodhouse WARN_ON(!list_empty(&rbio->hash_list)); 81053b381b3SDavid Woodhouse WARN_ON(!bio_list_empty(&rbio->bio_list)); 81153b381b3SDavid Woodhouse 81253b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_pages; i++) { 81353b381b3SDavid Woodhouse if (rbio->stripe_pages[i]) { 81453b381b3SDavid Woodhouse __free_page(rbio->stripe_pages[i]); 81553b381b3SDavid Woodhouse rbio->stripe_pages[i] = NULL; 81653b381b3SDavid Woodhouse } 81753b381b3SDavid Woodhouse } 81853b381b3SDavid Woodhouse kfree(rbio->raid_map); 81953b381b3SDavid Woodhouse kfree(rbio->bbio); 82053b381b3SDavid Woodhouse kfree(rbio); 82153b381b3SDavid Woodhouse } 82253b381b3SDavid Woodhouse 82353b381b3SDavid Woodhouse static void free_raid_bio(struct btrfs_raid_bio *rbio) 82453b381b3SDavid Woodhouse { 82553b381b3SDavid Woodhouse unlock_stripe(rbio); 82653b381b3SDavid Woodhouse __free_raid_bio(rbio); 82753b381b3SDavid Woodhouse } 82853b381b3SDavid Woodhouse 82953b381b3SDavid Woodhouse /* 83053b381b3SDavid Woodhouse * this frees the rbio and runs through all the bios in the 83153b381b3SDavid Woodhouse * bio_list and calls end_io on them 83253b381b3SDavid Woodhouse */ 83353b381b3SDavid Woodhouse static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate) 83453b381b3SDavid Woodhouse { 83553b381b3SDavid Woodhouse struct bio *cur = bio_list_get(&rbio->bio_list); 83653b381b3SDavid Woodhouse struct bio *next; 83753b381b3SDavid Woodhouse free_raid_bio(rbio); 83853b381b3SDavid Woodhouse 83953b381b3SDavid Woodhouse while (cur) { 84053b381b3SDavid Woodhouse next = cur->bi_next; 84153b381b3SDavid Woodhouse cur->bi_next = NULL; 84253b381b3SDavid Woodhouse if (uptodate) 84353b381b3SDavid Woodhouse set_bit(BIO_UPTODATE, &cur->bi_flags); 84453b381b3SDavid Woodhouse bio_endio(cur, err); 84553b381b3SDavid Woodhouse cur = next; 84653b381b3SDavid Woodhouse } 84753b381b3SDavid Woodhouse } 84853b381b3SDavid Woodhouse 84953b381b3SDavid Woodhouse /* 85053b381b3SDavid Woodhouse * end io function used by finish_rmw. When we finally 85153b381b3SDavid Woodhouse * get here, we've written a full stripe 85253b381b3SDavid Woodhouse */ 85353b381b3SDavid Woodhouse static void raid_write_end_io(struct bio *bio, int err) 85453b381b3SDavid Woodhouse { 85553b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 85653b381b3SDavid Woodhouse 85753b381b3SDavid Woodhouse if (err) 85853b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 85953b381b3SDavid Woodhouse 86053b381b3SDavid Woodhouse bio_put(bio); 86153b381b3SDavid Woodhouse 86253b381b3SDavid Woodhouse if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) 86353b381b3SDavid Woodhouse return; 86453b381b3SDavid Woodhouse 86553b381b3SDavid Woodhouse err = 0; 86653b381b3SDavid Woodhouse 86753b381b3SDavid Woodhouse /* OK, we have read all the stripes we need to. */ 86853b381b3SDavid Woodhouse if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) 86953b381b3SDavid Woodhouse err = -EIO; 87053b381b3SDavid Woodhouse 87153b381b3SDavid Woodhouse rbio_orig_end_io(rbio, err, 0); 87253b381b3SDavid Woodhouse return; 87353b381b3SDavid Woodhouse } 87453b381b3SDavid Woodhouse 87553b381b3SDavid Woodhouse /* 87653b381b3SDavid Woodhouse * the read/modify/write code wants to use the original bio for 87753b381b3SDavid Woodhouse * any pages it included, and then use the rbio for everything 87853b381b3SDavid Woodhouse * else. This function decides if a given index (stripe number) 87953b381b3SDavid Woodhouse * and page number in that stripe fall inside the original bio 88053b381b3SDavid Woodhouse * or the rbio. 88153b381b3SDavid Woodhouse * 88253b381b3SDavid Woodhouse * if you set bio_list_only, you'll get a NULL back for any ranges 88353b381b3SDavid Woodhouse * that are outside the bio_list 88453b381b3SDavid Woodhouse * 88553b381b3SDavid Woodhouse * This doesn't take any refs on anything, you get a bare page pointer 88653b381b3SDavid Woodhouse * and the caller must bump refs as required. 88753b381b3SDavid Woodhouse * 88853b381b3SDavid Woodhouse * You must call index_rbio_pages once before you can trust 88953b381b3SDavid Woodhouse * the answers from this function. 89053b381b3SDavid Woodhouse */ 89153b381b3SDavid Woodhouse static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, 89253b381b3SDavid Woodhouse int index, int pagenr, int bio_list_only) 89353b381b3SDavid Woodhouse { 89453b381b3SDavid Woodhouse int chunk_page; 89553b381b3SDavid Woodhouse struct page *p = NULL; 89653b381b3SDavid Woodhouse 89753b381b3SDavid Woodhouse chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr; 89853b381b3SDavid Woodhouse 89953b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 90053b381b3SDavid Woodhouse p = rbio->bio_pages[chunk_page]; 90153b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 90253b381b3SDavid Woodhouse 90353b381b3SDavid Woodhouse if (p || bio_list_only) 90453b381b3SDavid Woodhouse return p; 90553b381b3SDavid Woodhouse 90653b381b3SDavid Woodhouse return rbio->stripe_pages[chunk_page]; 90753b381b3SDavid Woodhouse } 90853b381b3SDavid Woodhouse 90953b381b3SDavid Woodhouse /* 91053b381b3SDavid Woodhouse * number of pages we need for the entire stripe across all the 91153b381b3SDavid Woodhouse * drives 91253b381b3SDavid Woodhouse */ 91353b381b3SDavid Woodhouse static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) 91453b381b3SDavid Woodhouse { 91553b381b3SDavid Woodhouse unsigned long nr = stripe_len * nr_stripes; 91653b381b3SDavid Woodhouse return (nr + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 91753b381b3SDavid Woodhouse } 91853b381b3SDavid Woodhouse 91953b381b3SDavid Woodhouse /* 92053b381b3SDavid Woodhouse * allocation and initial setup for the btrfs_raid_bio. Not 92153b381b3SDavid Woodhouse * this does not allocate any pages for rbio->pages. 92253b381b3SDavid Woodhouse */ 92353b381b3SDavid Woodhouse static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, 92453b381b3SDavid Woodhouse struct btrfs_bio *bbio, u64 *raid_map, 92553b381b3SDavid Woodhouse u64 stripe_len) 92653b381b3SDavid Woodhouse { 92753b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 92853b381b3SDavid Woodhouse int nr_data = 0; 92953b381b3SDavid Woodhouse int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes); 93053b381b3SDavid Woodhouse void *p; 93153b381b3SDavid Woodhouse 93253b381b3SDavid Woodhouse rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2, 93353b381b3SDavid Woodhouse GFP_NOFS); 93453b381b3SDavid Woodhouse if (!rbio) { 93553b381b3SDavid Woodhouse kfree(raid_map); 93653b381b3SDavid Woodhouse kfree(bbio); 93753b381b3SDavid Woodhouse return ERR_PTR(-ENOMEM); 93853b381b3SDavid Woodhouse } 93953b381b3SDavid Woodhouse 94053b381b3SDavid Woodhouse bio_list_init(&rbio->bio_list); 94153b381b3SDavid Woodhouse INIT_LIST_HEAD(&rbio->plug_list); 94253b381b3SDavid Woodhouse spin_lock_init(&rbio->bio_list_lock); 9434ae10b3aSChris Mason INIT_LIST_HEAD(&rbio->stripe_cache); 94453b381b3SDavid Woodhouse INIT_LIST_HEAD(&rbio->hash_list); 94553b381b3SDavid Woodhouse rbio->bbio = bbio; 94653b381b3SDavid Woodhouse rbio->raid_map = raid_map; 94753b381b3SDavid Woodhouse rbio->fs_info = root->fs_info; 94853b381b3SDavid Woodhouse rbio->stripe_len = stripe_len; 94953b381b3SDavid Woodhouse rbio->nr_pages = num_pages; 95053b381b3SDavid Woodhouse rbio->faila = -1; 95153b381b3SDavid Woodhouse rbio->failb = -1; 95253b381b3SDavid Woodhouse atomic_set(&rbio->refs, 1); 95353b381b3SDavid Woodhouse 95453b381b3SDavid Woodhouse /* 95553b381b3SDavid Woodhouse * the stripe_pages and bio_pages array point to the extra 95653b381b3SDavid Woodhouse * memory we allocated past the end of the rbio 95753b381b3SDavid Woodhouse */ 95853b381b3SDavid Woodhouse p = rbio + 1; 95953b381b3SDavid Woodhouse rbio->stripe_pages = p; 96053b381b3SDavid Woodhouse rbio->bio_pages = p + sizeof(struct page *) * num_pages; 96153b381b3SDavid Woodhouse 96253b381b3SDavid Woodhouse if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE) 96353b381b3SDavid Woodhouse nr_data = bbio->num_stripes - 2; 96453b381b3SDavid Woodhouse else 96553b381b3SDavid Woodhouse nr_data = bbio->num_stripes - 1; 96653b381b3SDavid Woodhouse 96753b381b3SDavid Woodhouse rbio->nr_data = nr_data; 96853b381b3SDavid Woodhouse return rbio; 96953b381b3SDavid Woodhouse } 97053b381b3SDavid Woodhouse 97153b381b3SDavid Woodhouse /* allocate pages for all the stripes in the bio, including parity */ 97253b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) 97353b381b3SDavid Woodhouse { 97453b381b3SDavid Woodhouse int i; 97553b381b3SDavid Woodhouse struct page *page; 97653b381b3SDavid Woodhouse 97753b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_pages; i++) { 97853b381b3SDavid Woodhouse if (rbio->stripe_pages[i]) 97953b381b3SDavid Woodhouse continue; 98053b381b3SDavid Woodhouse page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 98153b381b3SDavid Woodhouse if (!page) 98253b381b3SDavid Woodhouse return -ENOMEM; 98353b381b3SDavid Woodhouse rbio->stripe_pages[i] = page; 98453b381b3SDavid Woodhouse ClearPageUptodate(page); 98553b381b3SDavid Woodhouse } 98653b381b3SDavid Woodhouse return 0; 98753b381b3SDavid Woodhouse } 98853b381b3SDavid Woodhouse 98953b381b3SDavid Woodhouse /* allocate pages for just the p/q stripes */ 99053b381b3SDavid Woodhouse static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) 99153b381b3SDavid Woodhouse { 99253b381b3SDavid Woodhouse int i; 99353b381b3SDavid Woodhouse struct page *page; 99453b381b3SDavid Woodhouse 99553b381b3SDavid Woodhouse i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT; 99653b381b3SDavid Woodhouse 99753b381b3SDavid Woodhouse for (; i < rbio->nr_pages; i++) { 99853b381b3SDavid Woodhouse if (rbio->stripe_pages[i]) 99953b381b3SDavid Woodhouse continue; 100053b381b3SDavid Woodhouse page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 100153b381b3SDavid Woodhouse if (!page) 100253b381b3SDavid Woodhouse return -ENOMEM; 100353b381b3SDavid Woodhouse rbio->stripe_pages[i] = page; 100453b381b3SDavid Woodhouse } 100553b381b3SDavid Woodhouse return 0; 100653b381b3SDavid Woodhouse } 100753b381b3SDavid Woodhouse 100853b381b3SDavid Woodhouse /* 100953b381b3SDavid Woodhouse * add a single page from a specific stripe into our list of bios for IO 101053b381b3SDavid Woodhouse * this will try to merge into existing bios if possible, and returns 101153b381b3SDavid Woodhouse * zero if all went well. 101253b381b3SDavid Woodhouse */ 1013*48a3b636SEric Sandeen static int rbio_add_io_page(struct btrfs_raid_bio *rbio, 101453b381b3SDavid Woodhouse struct bio_list *bio_list, 101553b381b3SDavid Woodhouse struct page *page, 101653b381b3SDavid Woodhouse int stripe_nr, 101753b381b3SDavid Woodhouse unsigned long page_index, 101853b381b3SDavid Woodhouse unsigned long bio_max_len) 101953b381b3SDavid Woodhouse { 102053b381b3SDavid Woodhouse struct bio *last = bio_list->tail; 102153b381b3SDavid Woodhouse u64 last_end = 0; 102253b381b3SDavid Woodhouse int ret; 102353b381b3SDavid Woodhouse struct bio *bio; 102453b381b3SDavid Woodhouse struct btrfs_bio_stripe *stripe; 102553b381b3SDavid Woodhouse u64 disk_start; 102653b381b3SDavid Woodhouse 102753b381b3SDavid Woodhouse stripe = &rbio->bbio->stripes[stripe_nr]; 102853b381b3SDavid Woodhouse disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT); 102953b381b3SDavid Woodhouse 103053b381b3SDavid Woodhouse /* if the device is missing, just fail this stripe */ 103153b381b3SDavid Woodhouse if (!stripe->dev->bdev) 103253b381b3SDavid Woodhouse return fail_rbio_index(rbio, stripe_nr); 103353b381b3SDavid Woodhouse 103453b381b3SDavid Woodhouse /* see if we can add this page onto our existing bio */ 103553b381b3SDavid Woodhouse if (last) { 103653b381b3SDavid Woodhouse last_end = (u64)last->bi_sector << 9; 103753b381b3SDavid Woodhouse last_end += last->bi_size; 103853b381b3SDavid Woodhouse 103953b381b3SDavid Woodhouse /* 104053b381b3SDavid Woodhouse * we can't merge these if they are from different 104153b381b3SDavid Woodhouse * devices or if they are not contiguous 104253b381b3SDavid Woodhouse */ 104353b381b3SDavid Woodhouse if (last_end == disk_start && stripe->dev->bdev && 104453b381b3SDavid Woodhouse test_bit(BIO_UPTODATE, &last->bi_flags) && 104553b381b3SDavid Woodhouse last->bi_bdev == stripe->dev->bdev) { 104653b381b3SDavid Woodhouse ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); 104753b381b3SDavid Woodhouse if (ret == PAGE_CACHE_SIZE) 104853b381b3SDavid Woodhouse return 0; 104953b381b3SDavid Woodhouse } 105053b381b3SDavid Woodhouse } 105153b381b3SDavid Woodhouse 105253b381b3SDavid Woodhouse /* put a new bio on the list */ 105353b381b3SDavid Woodhouse bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); 105453b381b3SDavid Woodhouse if (!bio) 105553b381b3SDavid Woodhouse return -ENOMEM; 105653b381b3SDavid Woodhouse 105753b381b3SDavid Woodhouse bio->bi_size = 0; 105853b381b3SDavid Woodhouse bio->bi_bdev = stripe->dev->bdev; 105953b381b3SDavid Woodhouse bio->bi_sector = disk_start >> 9; 106053b381b3SDavid Woodhouse set_bit(BIO_UPTODATE, &bio->bi_flags); 106153b381b3SDavid Woodhouse 106253b381b3SDavid Woodhouse bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 106353b381b3SDavid Woodhouse bio_list_add(bio_list, bio); 106453b381b3SDavid Woodhouse return 0; 106553b381b3SDavid Woodhouse } 106653b381b3SDavid Woodhouse 106753b381b3SDavid Woodhouse /* 106853b381b3SDavid Woodhouse * while we're doing the read/modify/write cycle, we could 106953b381b3SDavid Woodhouse * have errors in reading pages off the disk. This checks 107053b381b3SDavid Woodhouse * for errors and if we're not able to read the page it'll 107153b381b3SDavid Woodhouse * trigger parity reconstruction. The rmw will be finished 107253b381b3SDavid Woodhouse * after we've reconstructed the failed stripes 107353b381b3SDavid Woodhouse */ 107453b381b3SDavid Woodhouse static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) 107553b381b3SDavid Woodhouse { 107653b381b3SDavid Woodhouse if (rbio->faila >= 0 || rbio->failb >= 0) { 107753b381b3SDavid Woodhouse BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1); 107853b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 107953b381b3SDavid Woodhouse } else { 108053b381b3SDavid Woodhouse finish_rmw(rbio); 108153b381b3SDavid Woodhouse } 108253b381b3SDavid Woodhouse } 108353b381b3SDavid Woodhouse 108453b381b3SDavid Woodhouse /* 108553b381b3SDavid Woodhouse * these are just the pages from the rbio array, not from anything 108653b381b3SDavid Woodhouse * the FS sent down to us 108753b381b3SDavid Woodhouse */ 108853b381b3SDavid Woodhouse static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page) 108953b381b3SDavid Woodhouse { 109053b381b3SDavid Woodhouse int index; 109153b381b3SDavid Woodhouse index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT); 109253b381b3SDavid Woodhouse index += page; 109353b381b3SDavid Woodhouse return rbio->stripe_pages[index]; 109453b381b3SDavid Woodhouse } 109553b381b3SDavid Woodhouse 109653b381b3SDavid Woodhouse /* 109753b381b3SDavid Woodhouse * helper function to walk our bio list and populate the bio_pages array with 109853b381b3SDavid Woodhouse * the result. This seems expensive, but it is faster than constantly 109953b381b3SDavid Woodhouse * searching through the bio list as we setup the IO in finish_rmw or stripe 110053b381b3SDavid Woodhouse * reconstruction. 110153b381b3SDavid Woodhouse * 110253b381b3SDavid Woodhouse * This must be called before you trust the answers from page_in_rbio 110353b381b3SDavid Woodhouse */ 110453b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio) 110553b381b3SDavid Woodhouse { 110653b381b3SDavid Woodhouse struct bio *bio; 110753b381b3SDavid Woodhouse u64 start; 110853b381b3SDavid Woodhouse unsigned long stripe_offset; 110953b381b3SDavid Woodhouse unsigned long page_index; 111053b381b3SDavid Woodhouse struct page *p; 111153b381b3SDavid Woodhouse int i; 111253b381b3SDavid Woodhouse 111353b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 111453b381b3SDavid Woodhouse bio_list_for_each(bio, &rbio->bio_list) { 111553b381b3SDavid Woodhouse start = (u64)bio->bi_sector << 9; 111653b381b3SDavid Woodhouse stripe_offset = start - rbio->raid_map[0]; 111753b381b3SDavid Woodhouse page_index = stripe_offset >> PAGE_CACHE_SHIFT; 111853b381b3SDavid Woodhouse 111953b381b3SDavid Woodhouse for (i = 0; i < bio->bi_vcnt; i++) { 112053b381b3SDavid Woodhouse p = bio->bi_io_vec[i].bv_page; 112153b381b3SDavid Woodhouse rbio->bio_pages[page_index + i] = p; 112253b381b3SDavid Woodhouse } 112353b381b3SDavid Woodhouse } 112453b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 112553b381b3SDavid Woodhouse } 112653b381b3SDavid Woodhouse 112753b381b3SDavid Woodhouse /* 112853b381b3SDavid Woodhouse * this is called from one of two situations. We either 112953b381b3SDavid Woodhouse * have a full stripe from the higher layers, or we've read all 113053b381b3SDavid Woodhouse * the missing bits off disk. 113153b381b3SDavid Woodhouse * 113253b381b3SDavid Woodhouse * This will calculate the parity and then send down any 113353b381b3SDavid Woodhouse * changed blocks. 113453b381b3SDavid Woodhouse */ 113553b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio) 113653b381b3SDavid Woodhouse { 113753b381b3SDavid Woodhouse struct btrfs_bio *bbio = rbio->bbio; 113853b381b3SDavid Woodhouse void *pointers[bbio->num_stripes]; 113953b381b3SDavid Woodhouse int stripe_len = rbio->stripe_len; 114053b381b3SDavid Woodhouse int nr_data = rbio->nr_data; 114153b381b3SDavid Woodhouse int stripe; 114253b381b3SDavid Woodhouse int pagenr; 114353b381b3SDavid Woodhouse int p_stripe = -1; 114453b381b3SDavid Woodhouse int q_stripe = -1; 114553b381b3SDavid Woodhouse struct bio_list bio_list; 114653b381b3SDavid Woodhouse struct bio *bio; 114753b381b3SDavid Woodhouse int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT; 114853b381b3SDavid Woodhouse int ret; 114953b381b3SDavid Woodhouse 115053b381b3SDavid Woodhouse bio_list_init(&bio_list); 115153b381b3SDavid Woodhouse 115253b381b3SDavid Woodhouse if (bbio->num_stripes - rbio->nr_data == 1) { 115353b381b3SDavid Woodhouse p_stripe = bbio->num_stripes - 1; 115453b381b3SDavid Woodhouse } else if (bbio->num_stripes - rbio->nr_data == 2) { 115553b381b3SDavid Woodhouse p_stripe = bbio->num_stripes - 2; 115653b381b3SDavid Woodhouse q_stripe = bbio->num_stripes - 1; 115753b381b3SDavid Woodhouse } else { 115853b381b3SDavid Woodhouse BUG(); 115953b381b3SDavid Woodhouse } 116053b381b3SDavid Woodhouse 116153b381b3SDavid Woodhouse /* at this point we either have a full stripe, 116253b381b3SDavid Woodhouse * or we've read the full stripe from the drive. 116353b381b3SDavid Woodhouse * recalculate the parity and write the new results. 116453b381b3SDavid Woodhouse * 116553b381b3SDavid Woodhouse * We're not allowed to add any new bios to the 116653b381b3SDavid Woodhouse * bio list here, anyone else that wants to 116753b381b3SDavid Woodhouse * change this stripe needs to do their own rmw. 116853b381b3SDavid Woodhouse */ 116953b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 117053b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 117153b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 117253b381b3SDavid Woodhouse 117353b381b3SDavid Woodhouse atomic_set(&rbio->bbio->error, 0); 117453b381b3SDavid Woodhouse 117553b381b3SDavid Woodhouse /* 117653b381b3SDavid Woodhouse * now that we've set rmw_locked, run through the 117753b381b3SDavid Woodhouse * bio list one last time and map the page pointers 11784ae10b3aSChris Mason * 11794ae10b3aSChris Mason * We don't cache full rbios because we're assuming 11804ae10b3aSChris Mason * the higher layers are unlikely to use this area of 11814ae10b3aSChris Mason * the disk again soon. If they do use it again, 11824ae10b3aSChris Mason * hopefully they will send another full bio. 118353b381b3SDavid Woodhouse */ 118453b381b3SDavid Woodhouse index_rbio_pages(rbio); 11854ae10b3aSChris Mason if (!rbio_is_full(rbio)) 11864ae10b3aSChris Mason cache_rbio_pages(rbio); 11874ae10b3aSChris Mason else 11884ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 118953b381b3SDavid Woodhouse 119053b381b3SDavid Woodhouse for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { 119153b381b3SDavid Woodhouse struct page *p; 119253b381b3SDavid Woodhouse /* first collect one page from each data stripe */ 119353b381b3SDavid Woodhouse for (stripe = 0; stripe < nr_data; stripe++) { 119453b381b3SDavid Woodhouse p = page_in_rbio(rbio, stripe, pagenr, 0); 119553b381b3SDavid Woodhouse pointers[stripe] = kmap(p); 119653b381b3SDavid Woodhouse } 119753b381b3SDavid Woodhouse 119853b381b3SDavid Woodhouse /* then add the parity stripe */ 119953b381b3SDavid Woodhouse p = rbio_pstripe_page(rbio, pagenr); 120053b381b3SDavid Woodhouse SetPageUptodate(p); 120153b381b3SDavid Woodhouse pointers[stripe++] = kmap(p); 120253b381b3SDavid Woodhouse 120353b381b3SDavid Woodhouse if (q_stripe != -1) { 120453b381b3SDavid Woodhouse 120553b381b3SDavid Woodhouse /* 120653b381b3SDavid Woodhouse * raid6, add the qstripe and call the 120753b381b3SDavid Woodhouse * library function to fill in our p/q 120853b381b3SDavid Woodhouse */ 120953b381b3SDavid Woodhouse p = rbio_qstripe_page(rbio, pagenr); 121053b381b3SDavid Woodhouse SetPageUptodate(p); 121153b381b3SDavid Woodhouse pointers[stripe++] = kmap(p); 121253b381b3SDavid Woodhouse 121353b381b3SDavid Woodhouse raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE, 121453b381b3SDavid Woodhouse pointers); 121553b381b3SDavid Woodhouse } else { 121653b381b3SDavid Woodhouse /* raid5 */ 121753b381b3SDavid Woodhouse memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); 121853b381b3SDavid Woodhouse run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); 121953b381b3SDavid Woodhouse } 122053b381b3SDavid Woodhouse 122153b381b3SDavid Woodhouse 122253b381b3SDavid Woodhouse for (stripe = 0; stripe < bbio->num_stripes; stripe++) 122353b381b3SDavid Woodhouse kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); 122453b381b3SDavid Woodhouse } 122553b381b3SDavid Woodhouse 122653b381b3SDavid Woodhouse /* 122753b381b3SDavid Woodhouse * time to start writing. Make bios for everything from the 122853b381b3SDavid Woodhouse * higher layers (the bio_list in our rbio) and our p/q. Ignore 122953b381b3SDavid Woodhouse * everything else. 123053b381b3SDavid Woodhouse */ 123153b381b3SDavid Woodhouse for (stripe = 0; stripe < bbio->num_stripes; stripe++) { 123253b381b3SDavid Woodhouse for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { 123353b381b3SDavid Woodhouse struct page *page; 123453b381b3SDavid Woodhouse if (stripe < rbio->nr_data) { 123553b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 1); 123653b381b3SDavid Woodhouse if (!page) 123753b381b3SDavid Woodhouse continue; 123853b381b3SDavid Woodhouse } else { 123953b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 124053b381b3SDavid Woodhouse } 124153b381b3SDavid Woodhouse 124253b381b3SDavid Woodhouse ret = rbio_add_io_page(rbio, &bio_list, 124353b381b3SDavid Woodhouse page, stripe, pagenr, rbio->stripe_len); 124453b381b3SDavid Woodhouse if (ret) 124553b381b3SDavid Woodhouse goto cleanup; 124653b381b3SDavid Woodhouse } 124753b381b3SDavid Woodhouse } 124853b381b3SDavid Woodhouse 124953b381b3SDavid Woodhouse atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list)); 125053b381b3SDavid Woodhouse BUG_ON(atomic_read(&bbio->stripes_pending) == 0); 125153b381b3SDavid Woodhouse 125253b381b3SDavid Woodhouse while (1) { 125353b381b3SDavid Woodhouse bio = bio_list_pop(&bio_list); 125453b381b3SDavid Woodhouse if (!bio) 125553b381b3SDavid Woodhouse break; 125653b381b3SDavid Woodhouse 125753b381b3SDavid Woodhouse bio->bi_private = rbio; 125853b381b3SDavid Woodhouse bio->bi_end_io = raid_write_end_io; 125953b381b3SDavid Woodhouse BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); 126053b381b3SDavid Woodhouse submit_bio(WRITE, bio); 126153b381b3SDavid Woodhouse } 126253b381b3SDavid Woodhouse return; 126353b381b3SDavid Woodhouse 126453b381b3SDavid Woodhouse cleanup: 126553b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 126653b381b3SDavid Woodhouse } 126753b381b3SDavid Woodhouse 126853b381b3SDavid Woodhouse /* 126953b381b3SDavid Woodhouse * helper to find the stripe number for a given bio. Used to figure out which 127053b381b3SDavid Woodhouse * stripe has failed. This expects the bio to correspond to a physical disk, 127153b381b3SDavid Woodhouse * so it looks up based on physical sector numbers. 127253b381b3SDavid Woodhouse */ 127353b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio, 127453b381b3SDavid Woodhouse struct bio *bio) 127553b381b3SDavid Woodhouse { 127653b381b3SDavid Woodhouse u64 physical = bio->bi_sector; 127753b381b3SDavid Woodhouse u64 stripe_start; 127853b381b3SDavid Woodhouse int i; 127953b381b3SDavid Woodhouse struct btrfs_bio_stripe *stripe; 128053b381b3SDavid Woodhouse 128153b381b3SDavid Woodhouse physical <<= 9; 128253b381b3SDavid Woodhouse 128353b381b3SDavid Woodhouse for (i = 0; i < rbio->bbio->num_stripes; i++) { 128453b381b3SDavid Woodhouse stripe = &rbio->bbio->stripes[i]; 128553b381b3SDavid Woodhouse stripe_start = stripe->physical; 128653b381b3SDavid Woodhouse if (physical >= stripe_start && 128753b381b3SDavid Woodhouse physical < stripe_start + rbio->stripe_len) { 128853b381b3SDavid Woodhouse return i; 128953b381b3SDavid Woodhouse } 129053b381b3SDavid Woodhouse } 129153b381b3SDavid Woodhouse return -1; 129253b381b3SDavid Woodhouse } 129353b381b3SDavid Woodhouse 129453b381b3SDavid Woodhouse /* 129553b381b3SDavid Woodhouse * helper to find the stripe number for a given 129653b381b3SDavid Woodhouse * bio (before mapping). Used to figure out which stripe has 129753b381b3SDavid Woodhouse * failed. This looks up based on logical block numbers. 129853b381b3SDavid Woodhouse */ 129953b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, 130053b381b3SDavid Woodhouse struct bio *bio) 130153b381b3SDavid Woodhouse { 130253b381b3SDavid Woodhouse u64 logical = bio->bi_sector; 130353b381b3SDavid Woodhouse u64 stripe_start; 130453b381b3SDavid Woodhouse int i; 130553b381b3SDavid Woodhouse 130653b381b3SDavid Woodhouse logical <<= 9; 130753b381b3SDavid Woodhouse 130853b381b3SDavid Woodhouse for (i = 0; i < rbio->nr_data; i++) { 130953b381b3SDavid Woodhouse stripe_start = rbio->raid_map[i]; 131053b381b3SDavid Woodhouse if (logical >= stripe_start && 131153b381b3SDavid Woodhouse logical < stripe_start + rbio->stripe_len) { 131253b381b3SDavid Woodhouse return i; 131353b381b3SDavid Woodhouse } 131453b381b3SDavid Woodhouse } 131553b381b3SDavid Woodhouse return -1; 131653b381b3SDavid Woodhouse } 131753b381b3SDavid Woodhouse 131853b381b3SDavid Woodhouse /* 131953b381b3SDavid Woodhouse * returns -EIO if we had too many failures 132053b381b3SDavid Woodhouse */ 132153b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) 132253b381b3SDavid Woodhouse { 132353b381b3SDavid Woodhouse unsigned long flags; 132453b381b3SDavid Woodhouse int ret = 0; 132553b381b3SDavid Woodhouse 132653b381b3SDavid Woodhouse spin_lock_irqsave(&rbio->bio_list_lock, flags); 132753b381b3SDavid Woodhouse 132853b381b3SDavid Woodhouse /* we already know this stripe is bad, move on */ 132953b381b3SDavid Woodhouse if (rbio->faila == failed || rbio->failb == failed) 133053b381b3SDavid Woodhouse goto out; 133153b381b3SDavid Woodhouse 133253b381b3SDavid Woodhouse if (rbio->faila == -1) { 133353b381b3SDavid Woodhouse /* first failure on this rbio */ 133453b381b3SDavid Woodhouse rbio->faila = failed; 133553b381b3SDavid Woodhouse atomic_inc(&rbio->bbio->error); 133653b381b3SDavid Woodhouse } else if (rbio->failb == -1) { 133753b381b3SDavid Woodhouse /* second failure on this rbio */ 133853b381b3SDavid Woodhouse rbio->failb = failed; 133953b381b3SDavid Woodhouse atomic_inc(&rbio->bbio->error); 134053b381b3SDavid Woodhouse } else { 134153b381b3SDavid Woodhouse ret = -EIO; 134253b381b3SDavid Woodhouse } 134353b381b3SDavid Woodhouse out: 134453b381b3SDavid Woodhouse spin_unlock_irqrestore(&rbio->bio_list_lock, flags); 134553b381b3SDavid Woodhouse 134653b381b3SDavid Woodhouse return ret; 134753b381b3SDavid Woodhouse } 134853b381b3SDavid Woodhouse 134953b381b3SDavid Woodhouse /* 135053b381b3SDavid Woodhouse * helper to fail a stripe based on a physical disk 135153b381b3SDavid Woodhouse * bio. 135253b381b3SDavid Woodhouse */ 135353b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, 135453b381b3SDavid Woodhouse struct bio *bio) 135553b381b3SDavid Woodhouse { 135653b381b3SDavid Woodhouse int failed = find_bio_stripe(rbio, bio); 135753b381b3SDavid Woodhouse 135853b381b3SDavid Woodhouse if (failed < 0) 135953b381b3SDavid Woodhouse return -EIO; 136053b381b3SDavid Woodhouse 136153b381b3SDavid Woodhouse return fail_rbio_index(rbio, failed); 136253b381b3SDavid Woodhouse } 136353b381b3SDavid Woodhouse 136453b381b3SDavid Woodhouse /* 136553b381b3SDavid Woodhouse * this sets each page in the bio uptodate. It should only be used on private 136653b381b3SDavid Woodhouse * rbio pages, nothing that comes in from the higher layers 136753b381b3SDavid Woodhouse */ 136853b381b3SDavid Woodhouse static void set_bio_pages_uptodate(struct bio *bio) 136953b381b3SDavid Woodhouse { 137053b381b3SDavid Woodhouse int i; 137153b381b3SDavid Woodhouse struct page *p; 137253b381b3SDavid Woodhouse 137353b381b3SDavid Woodhouse for (i = 0; i < bio->bi_vcnt; i++) { 137453b381b3SDavid Woodhouse p = bio->bi_io_vec[i].bv_page; 137553b381b3SDavid Woodhouse SetPageUptodate(p); 137653b381b3SDavid Woodhouse } 137753b381b3SDavid Woodhouse } 137853b381b3SDavid Woodhouse 137953b381b3SDavid Woodhouse /* 138053b381b3SDavid Woodhouse * end io for the read phase of the rmw cycle. All the bios here are physical 138153b381b3SDavid Woodhouse * stripe bios we've read from the disk so we can recalculate the parity of the 138253b381b3SDavid Woodhouse * stripe. 138353b381b3SDavid Woodhouse * 138453b381b3SDavid Woodhouse * This will usually kick off finish_rmw once all the bios are read in, but it 138553b381b3SDavid Woodhouse * may trigger parity reconstruction if we had any errors along the way 138653b381b3SDavid Woodhouse */ 138753b381b3SDavid Woodhouse static void raid_rmw_end_io(struct bio *bio, int err) 138853b381b3SDavid Woodhouse { 138953b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 139053b381b3SDavid Woodhouse 139153b381b3SDavid Woodhouse if (err) 139253b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 139353b381b3SDavid Woodhouse else 139453b381b3SDavid Woodhouse set_bio_pages_uptodate(bio); 139553b381b3SDavid Woodhouse 139653b381b3SDavid Woodhouse bio_put(bio); 139753b381b3SDavid Woodhouse 139853b381b3SDavid Woodhouse if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) 139953b381b3SDavid Woodhouse return; 140053b381b3SDavid Woodhouse 140153b381b3SDavid Woodhouse err = 0; 140253b381b3SDavid Woodhouse if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) 140353b381b3SDavid Woodhouse goto cleanup; 140453b381b3SDavid Woodhouse 140553b381b3SDavid Woodhouse /* 140653b381b3SDavid Woodhouse * this will normally call finish_rmw to start our write 140753b381b3SDavid Woodhouse * but if there are any failed stripes we'll reconstruct 140853b381b3SDavid Woodhouse * from parity first 140953b381b3SDavid Woodhouse */ 141053b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 141153b381b3SDavid Woodhouse return; 141253b381b3SDavid Woodhouse 141353b381b3SDavid Woodhouse cleanup: 141453b381b3SDavid Woodhouse 141553b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 141653b381b3SDavid Woodhouse } 141753b381b3SDavid Woodhouse 141853b381b3SDavid Woodhouse static void async_rmw_stripe(struct btrfs_raid_bio *rbio) 141953b381b3SDavid Woodhouse { 142053b381b3SDavid Woodhouse rbio->work.flags = 0; 142153b381b3SDavid Woodhouse rbio->work.func = rmw_work; 142253b381b3SDavid Woodhouse 142353b381b3SDavid Woodhouse btrfs_queue_worker(&rbio->fs_info->rmw_workers, 142453b381b3SDavid Woodhouse &rbio->work); 142553b381b3SDavid Woodhouse } 142653b381b3SDavid Woodhouse 142753b381b3SDavid Woodhouse static void async_read_rebuild(struct btrfs_raid_bio *rbio) 142853b381b3SDavid Woodhouse { 142953b381b3SDavid Woodhouse rbio->work.flags = 0; 143053b381b3SDavid Woodhouse rbio->work.func = read_rebuild_work; 143153b381b3SDavid Woodhouse 143253b381b3SDavid Woodhouse btrfs_queue_worker(&rbio->fs_info->rmw_workers, 143353b381b3SDavid Woodhouse &rbio->work); 143453b381b3SDavid Woodhouse } 143553b381b3SDavid Woodhouse 143653b381b3SDavid Woodhouse /* 143753b381b3SDavid Woodhouse * the stripe must be locked by the caller. It will 143853b381b3SDavid Woodhouse * unlock after all the writes are done 143953b381b3SDavid Woodhouse */ 144053b381b3SDavid Woodhouse static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) 144153b381b3SDavid Woodhouse { 144253b381b3SDavid Woodhouse int bios_to_read = 0; 144353b381b3SDavid Woodhouse struct btrfs_bio *bbio = rbio->bbio; 144453b381b3SDavid Woodhouse struct bio_list bio_list; 144553b381b3SDavid Woodhouse int ret; 144653b381b3SDavid Woodhouse int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 144753b381b3SDavid Woodhouse int pagenr; 144853b381b3SDavid Woodhouse int stripe; 144953b381b3SDavid Woodhouse struct bio *bio; 145053b381b3SDavid Woodhouse 145153b381b3SDavid Woodhouse bio_list_init(&bio_list); 145253b381b3SDavid Woodhouse 145353b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 145453b381b3SDavid Woodhouse if (ret) 145553b381b3SDavid Woodhouse goto cleanup; 145653b381b3SDavid Woodhouse 145753b381b3SDavid Woodhouse index_rbio_pages(rbio); 145853b381b3SDavid Woodhouse 145953b381b3SDavid Woodhouse atomic_set(&rbio->bbio->error, 0); 146053b381b3SDavid Woodhouse /* 146153b381b3SDavid Woodhouse * build a list of bios to read all the missing parts of this 146253b381b3SDavid Woodhouse * stripe 146353b381b3SDavid Woodhouse */ 146453b381b3SDavid Woodhouse for (stripe = 0; stripe < rbio->nr_data; stripe++) { 146553b381b3SDavid Woodhouse for (pagenr = 0; pagenr < nr_pages; pagenr++) { 146653b381b3SDavid Woodhouse struct page *page; 146753b381b3SDavid Woodhouse /* 146853b381b3SDavid Woodhouse * we want to find all the pages missing from 146953b381b3SDavid Woodhouse * the rbio and read them from the disk. If 147053b381b3SDavid Woodhouse * page_in_rbio finds a page in the bio list 147153b381b3SDavid Woodhouse * we don't need to read it off the stripe. 147253b381b3SDavid Woodhouse */ 147353b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 1); 147453b381b3SDavid Woodhouse if (page) 147553b381b3SDavid Woodhouse continue; 147653b381b3SDavid Woodhouse 147753b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 14784ae10b3aSChris Mason /* 14794ae10b3aSChris Mason * the bio cache may have handed us an uptodate 14804ae10b3aSChris Mason * page. If so, be happy and use it 14814ae10b3aSChris Mason */ 14824ae10b3aSChris Mason if (PageUptodate(page)) 14834ae10b3aSChris Mason continue; 14844ae10b3aSChris Mason 148553b381b3SDavid Woodhouse ret = rbio_add_io_page(rbio, &bio_list, page, 148653b381b3SDavid Woodhouse stripe, pagenr, rbio->stripe_len); 148753b381b3SDavid Woodhouse if (ret) 148853b381b3SDavid Woodhouse goto cleanup; 148953b381b3SDavid Woodhouse } 149053b381b3SDavid Woodhouse } 149153b381b3SDavid Woodhouse 149253b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 149353b381b3SDavid Woodhouse if (!bios_to_read) { 149453b381b3SDavid Woodhouse /* 149553b381b3SDavid Woodhouse * this can happen if others have merged with 149653b381b3SDavid Woodhouse * us, it means there is nothing left to read. 149753b381b3SDavid Woodhouse * But if there are missing devices it may not be 149853b381b3SDavid Woodhouse * safe to do the full stripe write yet. 149953b381b3SDavid Woodhouse */ 150053b381b3SDavid Woodhouse goto finish; 150153b381b3SDavid Woodhouse } 150253b381b3SDavid Woodhouse 150353b381b3SDavid Woodhouse /* 150453b381b3SDavid Woodhouse * the bbio may be freed once we submit the last bio. Make sure 150553b381b3SDavid Woodhouse * not to touch it after that 150653b381b3SDavid Woodhouse */ 150753b381b3SDavid Woodhouse atomic_set(&bbio->stripes_pending, bios_to_read); 150853b381b3SDavid Woodhouse while (1) { 150953b381b3SDavid Woodhouse bio = bio_list_pop(&bio_list); 151053b381b3SDavid Woodhouse if (!bio) 151153b381b3SDavid Woodhouse break; 151253b381b3SDavid Woodhouse 151353b381b3SDavid Woodhouse bio->bi_private = rbio; 151453b381b3SDavid Woodhouse bio->bi_end_io = raid_rmw_end_io; 151553b381b3SDavid Woodhouse 151653b381b3SDavid Woodhouse btrfs_bio_wq_end_io(rbio->fs_info, bio, 151753b381b3SDavid Woodhouse BTRFS_WQ_ENDIO_RAID56); 151853b381b3SDavid Woodhouse 151953b381b3SDavid Woodhouse BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); 152053b381b3SDavid Woodhouse submit_bio(READ, bio); 152153b381b3SDavid Woodhouse } 152253b381b3SDavid Woodhouse /* the actual write will happen once the reads are done */ 152353b381b3SDavid Woodhouse return 0; 152453b381b3SDavid Woodhouse 152553b381b3SDavid Woodhouse cleanup: 152653b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 152753b381b3SDavid Woodhouse return -EIO; 152853b381b3SDavid Woodhouse 152953b381b3SDavid Woodhouse finish: 153053b381b3SDavid Woodhouse validate_rbio_for_rmw(rbio); 153153b381b3SDavid Woodhouse return 0; 153253b381b3SDavid Woodhouse } 153353b381b3SDavid Woodhouse 153453b381b3SDavid Woodhouse /* 153553b381b3SDavid Woodhouse * if the upper layers pass in a full stripe, we thank them by only allocating 153653b381b3SDavid Woodhouse * enough pages to hold the parity, and sending it all down quickly. 153753b381b3SDavid Woodhouse */ 153853b381b3SDavid Woodhouse static int full_stripe_write(struct btrfs_raid_bio *rbio) 153953b381b3SDavid Woodhouse { 154053b381b3SDavid Woodhouse int ret; 154153b381b3SDavid Woodhouse 154253b381b3SDavid Woodhouse ret = alloc_rbio_parity_pages(rbio); 154353b381b3SDavid Woodhouse if (ret) 154453b381b3SDavid Woodhouse return ret; 154553b381b3SDavid Woodhouse 154653b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 154753b381b3SDavid Woodhouse if (ret == 0) 154853b381b3SDavid Woodhouse finish_rmw(rbio); 154953b381b3SDavid Woodhouse return 0; 155053b381b3SDavid Woodhouse } 155153b381b3SDavid Woodhouse 155253b381b3SDavid Woodhouse /* 155353b381b3SDavid Woodhouse * partial stripe writes get handed over to async helpers. 155453b381b3SDavid Woodhouse * We're really hoping to merge a few more writes into this 155553b381b3SDavid Woodhouse * rbio before calculating new parity 155653b381b3SDavid Woodhouse */ 155753b381b3SDavid Woodhouse static int partial_stripe_write(struct btrfs_raid_bio *rbio) 155853b381b3SDavid Woodhouse { 155953b381b3SDavid Woodhouse int ret; 156053b381b3SDavid Woodhouse 156153b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 156253b381b3SDavid Woodhouse if (ret == 0) 156353b381b3SDavid Woodhouse async_rmw_stripe(rbio); 156453b381b3SDavid Woodhouse return 0; 156553b381b3SDavid Woodhouse } 156653b381b3SDavid Woodhouse 156753b381b3SDavid Woodhouse /* 156853b381b3SDavid Woodhouse * sometimes while we were reading from the drive to 156953b381b3SDavid Woodhouse * recalculate parity, enough new bios come into create 157053b381b3SDavid Woodhouse * a full stripe. So we do a check here to see if we can 157153b381b3SDavid Woodhouse * go directly to finish_rmw 157253b381b3SDavid Woodhouse */ 157353b381b3SDavid Woodhouse static int __raid56_parity_write(struct btrfs_raid_bio *rbio) 157453b381b3SDavid Woodhouse { 157553b381b3SDavid Woodhouse /* head off into rmw land if we don't have a full stripe */ 157653b381b3SDavid Woodhouse if (!rbio_is_full(rbio)) 157753b381b3SDavid Woodhouse return partial_stripe_write(rbio); 157853b381b3SDavid Woodhouse return full_stripe_write(rbio); 157953b381b3SDavid Woodhouse } 158053b381b3SDavid Woodhouse 158153b381b3SDavid Woodhouse /* 15826ac0f488SChris Mason * We use plugging call backs to collect full stripes. 15836ac0f488SChris Mason * Any time we get a partial stripe write while plugged 15846ac0f488SChris Mason * we collect it into a list. When the unplug comes down, 15856ac0f488SChris Mason * we sort the list by logical block number and merge 15866ac0f488SChris Mason * everything we can into the same rbios 15876ac0f488SChris Mason */ 15886ac0f488SChris Mason struct btrfs_plug_cb { 15896ac0f488SChris Mason struct blk_plug_cb cb; 15906ac0f488SChris Mason struct btrfs_fs_info *info; 15916ac0f488SChris Mason struct list_head rbio_list; 15926ac0f488SChris Mason struct btrfs_work work; 15936ac0f488SChris Mason }; 15946ac0f488SChris Mason 15956ac0f488SChris Mason /* 15966ac0f488SChris Mason * rbios on the plug list are sorted for easier merging. 15976ac0f488SChris Mason */ 15986ac0f488SChris Mason static int plug_cmp(void *priv, struct list_head *a, struct list_head *b) 15996ac0f488SChris Mason { 16006ac0f488SChris Mason struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio, 16016ac0f488SChris Mason plug_list); 16026ac0f488SChris Mason struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, 16036ac0f488SChris Mason plug_list); 16046ac0f488SChris Mason u64 a_sector = ra->bio_list.head->bi_sector; 16056ac0f488SChris Mason u64 b_sector = rb->bio_list.head->bi_sector; 16066ac0f488SChris Mason 16076ac0f488SChris Mason if (a_sector < b_sector) 16086ac0f488SChris Mason return -1; 16096ac0f488SChris Mason if (a_sector > b_sector) 16106ac0f488SChris Mason return 1; 16116ac0f488SChris Mason return 0; 16126ac0f488SChris Mason } 16136ac0f488SChris Mason 16146ac0f488SChris Mason static void run_plug(struct btrfs_plug_cb *plug) 16156ac0f488SChris Mason { 16166ac0f488SChris Mason struct btrfs_raid_bio *cur; 16176ac0f488SChris Mason struct btrfs_raid_bio *last = NULL; 16186ac0f488SChris Mason 16196ac0f488SChris Mason /* 16206ac0f488SChris Mason * sort our plug list then try to merge 16216ac0f488SChris Mason * everything we can in hopes of creating full 16226ac0f488SChris Mason * stripes. 16236ac0f488SChris Mason */ 16246ac0f488SChris Mason list_sort(NULL, &plug->rbio_list, plug_cmp); 16256ac0f488SChris Mason while (!list_empty(&plug->rbio_list)) { 16266ac0f488SChris Mason cur = list_entry(plug->rbio_list.next, 16276ac0f488SChris Mason struct btrfs_raid_bio, plug_list); 16286ac0f488SChris Mason list_del_init(&cur->plug_list); 16296ac0f488SChris Mason 16306ac0f488SChris Mason if (rbio_is_full(cur)) { 16316ac0f488SChris Mason /* we have a full stripe, send it down */ 16326ac0f488SChris Mason full_stripe_write(cur); 16336ac0f488SChris Mason continue; 16346ac0f488SChris Mason } 16356ac0f488SChris Mason if (last) { 16366ac0f488SChris Mason if (rbio_can_merge(last, cur)) { 16376ac0f488SChris Mason merge_rbio(last, cur); 16386ac0f488SChris Mason __free_raid_bio(cur); 16396ac0f488SChris Mason continue; 16406ac0f488SChris Mason 16416ac0f488SChris Mason } 16426ac0f488SChris Mason __raid56_parity_write(last); 16436ac0f488SChris Mason } 16446ac0f488SChris Mason last = cur; 16456ac0f488SChris Mason } 16466ac0f488SChris Mason if (last) { 16476ac0f488SChris Mason __raid56_parity_write(last); 16486ac0f488SChris Mason } 16496ac0f488SChris Mason kfree(plug); 16506ac0f488SChris Mason } 16516ac0f488SChris Mason 16526ac0f488SChris Mason /* 16536ac0f488SChris Mason * if the unplug comes from schedule, we have to push the 16546ac0f488SChris Mason * work off to a helper thread 16556ac0f488SChris Mason */ 16566ac0f488SChris Mason static void unplug_work(struct btrfs_work *work) 16576ac0f488SChris Mason { 16586ac0f488SChris Mason struct btrfs_plug_cb *plug; 16596ac0f488SChris Mason plug = container_of(work, struct btrfs_plug_cb, work); 16606ac0f488SChris Mason run_plug(plug); 16616ac0f488SChris Mason } 16626ac0f488SChris Mason 16636ac0f488SChris Mason static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) 16646ac0f488SChris Mason { 16656ac0f488SChris Mason struct btrfs_plug_cb *plug; 16666ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 16676ac0f488SChris Mason 16686ac0f488SChris Mason if (from_schedule) { 16696ac0f488SChris Mason plug->work.flags = 0; 16706ac0f488SChris Mason plug->work.func = unplug_work; 16716ac0f488SChris Mason btrfs_queue_worker(&plug->info->rmw_workers, 16726ac0f488SChris Mason &plug->work); 16736ac0f488SChris Mason return; 16746ac0f488SChris Mason } 16756ac0f488SChris Mason run_plug(plug); 16766ac0f488SChris Mason } 16776ac0f488SChris Mason 16786ac0f488SChris Mason /* 167953b381b3SDavid Woodhouse * our main entry point for writes from the rest of the FS. 168053b381b3SDavid Woodhouse */ 168153b381b3SDavid Woodhouse int raid56_parity_write(struct btrfs_root *root, struct bio *bio, 168253b381b3SDavid Woodhouse struct btrfs_bio *bbio, u64 *raid_map, 168353b381b3SDavid Woodhouse u64 stripe_len) 168453b381b3SDavid Woodhouse { 168553b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 16866ac0f488SChris Mason struct btrfs_plug_cb *plug = NULL; 16876ac0f488SChris Mason struct blk_plug_cb *cb; 168853b381b3SDavid Woodhouse 168953b381b3SDavid Woodhouse rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 169053b381b3SDavid Woodhouse if (IS_ERR(rbio)) { 169153b381b3SDavid Woodhouse kfree(raid_map); 169253b381b3SDavid Woodhouse kfree(bbio); 169353b381b3SDavid Woodhouse return PTR_ERR(rbio); 169453b381b3SDavid Woodhouse } 169553b381b3SDavid Woodhouse bio_list_add(&rbio->bio_list, bio); 169653b381b3SDavid Woodhouse rbio->bio_list_bytes = bio->bi_size; 16976ac0f488SChris Mason 16986ac0f488SChris Mason /* 16996ac0f488SChris Mason * don't plug on full rbios, just get them out the door 17006ac0f488SChris Mason * as quickly as we can 17016ac0f488SChris Mason */ 17026ac0f488SChris Mason if (rbio_is_full(rbio)) 17036ac0f488SChris Mason return full_stripe_write(rbio); 17046ac0f488SChris Mason 17056ac0f488SChris Mason cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info, 17066ac0f488SChris Mason sizeof(*plug)); 17076ac0f488SChris Mason if (cb) { 17086ac0f488SChris Mason plug = container_of(cb, struct btrfs_plug_cb, cb); 17096ac0f488SChris Mason if (!plug->info) { 17106ac0f488SChris Mason plug->info = root->fs_info; 17116ac0f488SChris Mason INIT_LIST_HEAD(&plug->rbio_list); 17126ac0f488SChris Mason } 17136ac0f488SChris Mason list_add_tail(&rbio->plug_list, &plug->rbio_list); 17146ac0f488SChris Mason } else { 171553b381b3SDavid Woodhouse return __raid56_parity_write(rbio); 171653b381b3SDavid Woodhouse } 17176ac0f488SChris Mason return 0; 17186ac0f488SChris Mason } 171953b381b3SDavid Woodhouse 172053b381b3SDavid Woodhouse /* 172153b381b3SDavid Woodhouse * all parity reconstruction happens here. We've read in everything 172253b381b3SDavid Woodhouse * we can find from the drives and this does the heavy lifting of 172353b381b3SDavid Woodhouse * sorting the good from the bad. 172453b381b3SDavid Woodhouse */ 172553b381b3SDavid Woodhouse static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) 172653b381b3SDavid Woodhouse { 172753b381b3SDavid Woodhouse int pagenr, stripe; 172853b381b3SDavid Woodhouse void **pointers; 172953b381b3SDavid Woodhouse int faila = -1, failb = -1; 173053b381b3SDavid Woodhouse int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 173153b381b3SDavid Woodhouse struct page *page; 173253b381b3SDavid Woodhouse int err; 173353b381b3SDavid Woodhouse int i; 173453b381b3SDavid Woodhouse 173553b381b3SDavid Woodhouse pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *), 173653b381b3SDavid Woodhouse GFP_NOFS); 173753b381b3SDavid Woodhouse if (!pointers) { 173853b381b3SDavid Woodhouse err = -ENOMEM; 173953b381b3SDavid Woodhouse goto cleanup_io; 174053b381b3SDavid Woodhouse } 174153b381b3SDavid Woodhouse 174253b381b3SDavid Woodhouse faila = rbio->faila; 174353b381b3SDavid Woodhouse failb = rbio->failb; 174453b381b3SDavid Woodhouse 174553b381b3SDavid Woodhouse if (rbio->read_rebuild) { 174653b381b3SDavid Woodhouse spin_lock_irq(&rbio->bio_list_lock); 174753b381b3SDavid Woodhouse set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 174853b381b3SDavid Woodhouse spin_unlock_irq(&rbio->bio_list_lock); 174953b381b3SDavid Woodhouse } 175053b381b3SDavid Woodhouse 175153b381b3SDavid Woodhouse index_rbio_pages(rbio); 175253b381b3SDavid Woodhouse 175353b381b3SDavid Woodhouse for (pagenr = 0; pagenr < nr_pages; pagenr++) { 175453b381b3SDavid Woodhouse /* setup our array of pointers with pages 175553b381b3SDavid Woodhouse * from each stripe 175653b381b3SDavid Woodhouse */ 175753b381b3SDavid Woodhouse for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) { 175853b381b3SDavid Woodhouse /* 175953b381b3SDavid Woodhouse * if we're rebuilding a read, we have to use 176053b381b3SDavid Woodhouse * pages from the bio list 176153b381b3SDavid Woodhouse */ 176253b381b3SDavid Woodhouse if (rbio->read_rebuild && 176353b381b3SDavid Woodhouse (stripe == faila || stripe == failb)) { 176453b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 0); 176553b381b3SDavid Woodhouse } else { 176653b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 176753b381b3SDavid Woodhouse } 176853b381b3SDavid Woodhouse pointers[stripe] = kmap(page); 176953b381b3SDavid Woodhouse } 177053b381b3SDavid Woodhouse 177153b381b3SDavid Woodhouse /* all raid6 handling here */ 177253b381b3SDavid Woodhouse if (rbio->raid_map[rbio->bbio->num_stripes - 1] == 177353b381b3SDavid Woodhouse RAID6_Q_STRIPE) { 177453b381b3SDavid Woodhouse 177553b381b3SDavid Woodhouse /* 177653b381b3SDavid Woodhouse * single failure, rebuild from parity raid5 177753b381b3SDavid Woodhouse * style 177853b381b3SDavid Woodhouse */ 177953b381b3SDavid Woodhouse if (failb < 0) { 178053b381b3SDavid Woodhouse if (faila == rbio->nr_data) { 178153b381b3SDavid Woodhouse /* 178253b381b3SDavid Woodhouse * Just the P stripe has failed, without 178353b381b3SDavid Woodhouse * a bad data or Q stripe. 178453b381b3SDavid Woodhouse * TODO, we should redo the xor here. 178553b381b3SDavid Woodhouse */ 178653b381b3SDavid Woodhouse err = -EIO; 178753b381b3SDavid Woodhouse goto cleanup; 178853b381b3SDavid Woodhouse } 178953b381b3SDavid Woodhouse /* 179053b381b3SDavid Woodhouse * a single failure in raid6 is rebuilt 179153b381b3SDavid Woodhouse * in the pstripe code below 179253b381b3SDavid Woodhouse */ 179353b381b3SDavid Woodhouse goto pstripe; 179453b381b3SDavid Woodhouse } 179553b381b3SDavid Woodhouse 179653b381b3SDavid Woodhouse /* make sure our ps and qs are in order */ 179753b381b3SDavid Woodhouse if (faila > failb) { 179853b381b3SDavid Woodhouse int tmp = failb; 179953b381b3SDavid Woodhouse failb = faila; 180053b381b3SDavid Woodhouse faila = tmp; 180153b381b3SDavid Woodhouse } 180253b381b3SDavid Woodhouse 180353b381b3SDavid Woodhouse /* if the q stripe is failed, do a pstripe reconstruction 180453b381b3SDavid Woodhouse * from the xors. 180553b381b3SDavid Woodhouse * If both the q stripe and the P stripe are failed, we're 180653b381b3SDavid Woodhouse * here due to a crc mismatch and we can't give them the 180753b381b3SDavid Woodhouse * data they want 180853b381b3SDavid Woodhouse */ 180953b381b3SDavid Woodhouse if (rbio->raid_map[failb] == RAID6_Q_STRIPE) { 181053b381b3SDavid Woodhouse if (rbio->raid_map[faila] == RAID5_P_STRIPE) { 181153b381b3SDavid Woodhouse err = -EIO; 181253b381b3SDavid Woodhouse goto cleanup; 181353b381b3SDavid Woodhouse } 181453b381b3SDavid Woodhouse /* 181553b381b3SDavid Woodhouse * otherwise we have one bad data stripe and 181653b381b3SDavid Woodhouse * a good P stripe. raid5! 181753b381b3SDavid Woodhouse */ 181853b381b3SDavid Woodhouse goto pstripe; 181953b381b3SDavid Woodhouse } 182053b381b3SDavid Woodhouse 182153b381b3SDavid Woodhouse if (rbio->raid_map[failb] == RAID5_P_STRIPE) { 182253b381b3SDavid Woodhouse raid6_datap_recov(rbio->bbio->num_stripes, 182353b381b3SDavid Woodhouse PAGE_SIZE, faila, pointers); 182453b381b3SDavid Woodhouse } else { 182553b381b3SDavid Woodhouse raid6_2data_recov(rbio->bbio->num_stripes, 182653b381b3SDavid Woodhouse PAGE_SIZE, faila, failb, 182753b381b3SDavid Woodhouse pointers); 182853b381b3SDavid Woodhouse } 182953b381b3SDavid Woodhouse } else { 183053b381b3SDavid Woodhouse void *p; 183153b381b3SDavid Woodhouse 183253b381b3SDavid Woodhouse /* rebuild from P stripe here (raid5 or raid6) */ 183353b381b3SDavid Woodhouse BUG_ON(failb != -1); 183453b381b3SDavid Woodhouse pstripe: 183553b381b3SDavid Woodhouse /* Copy parity block into failed block to start with */ 183653b381b3SDavid Woodhouse memcpy(pointers[faila], 183753b381b3SDavid Woodhouse pointers[rbio->nr_data], 183853b381b3SDavid Woodhouse PAGE_CACHE_SIZE); 183953b381b3SDavid Woodhouse 184053b381b3SDavid Woodhouse /* rearrange the pointer array */ 184153b381b3SDavid Woodhouse p = pointers[faila]; 184253b381b3SDavid Woodhouse for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) 184353b381b3SDavid Woodhouse pointers[stripe] = pointers[stripe + 1]; 184453b381b3SDavid Woodhouse pointers[rbio->nr_data - 1] = p; 184553b381b3SDavid Woodhouse 184653b381b3SDavid Woodhouse /* xor in the rest */ 184753b381b3SDavid Woodhouse run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); 184853b381b3SDavid Woodhouse } 184953b381b3SDavid Woodhouse /* if we're doing this rebuild as part of an rmw, go through 185053b381b3SDavid Woodhouse * and set all of our private rbio pages in the 185153b381b3SDavid Woodhouse * failed stripes as uptodate. This way finish_rmw will 185253b381b3SDavid Woodhouse * know they can be trusted. If this was a read reconstruction, 185353b381b3SDavid Woodhouse * other endio functions will fiddle the uptodate bits 185453b381b3SDavid Woodhouse */ 185553b381b3SDavid Woodhouse if (!rbio->read_rebuild) { 185653b381b3SDavid Woodhouse for (i = 0; i < nr_pages; i++) { 185753b381b3SDavid Woodhouse if (faila != -1) { 185853b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, faila, i); 185953b381b3SDavid Woodhouse SetPageUptodate(page); 186053b381b3SDavid Woodhouse } 186153b381b3SDavid Woodhouse if (failb != -1) { 186253b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, failb, i); 186353b381b3SDavid Woodhouse SetPageUptodate(page); 186453b381b3SDavid Woodhouse } 186553b381b3SDavid Woodhouse } 186653b381b3SDavid Woodhouse } 186753b381b3SDavid Woodhouse for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) { 186853b381b3SDavid Woodhouse /* 186953b381b3SDavid Woodhouse * if we're rebuilding a read, we have to use 187053b381b3SDavid Woodhouse * pages from the bio list 187153b381b3SDavid Woodhouse */ 187253b381b3SDavid Woodhouse if (rbio->read_rebuild && 187353b381b3SDavid Woodhouse (stripe == faila || stripe == failb)) { 187453b381b3SDavid Woodhouse page = page_in_rbio(rbio, stripe, pagenr, 0); 187553b381b3SDavid Woodhouse } else { 187653b381b3SDavid Woodhouse page = rbio_stripe_page(rbio, stripe, pagenr); 187753b381b3SDavid Woodhouse } 187853b381b3SDavid Woodhouse kunmap(page); 187953b381b3SDavid Woodhouse } 188053b381b3SDavid Woodhouse } 188153b381b3SDavid Woodhouse 188253b381b3SDavid Woodhouse err = 0; 188353b381b3SDavid Woodhouse cleanup: 188453b381b3SDavid Woodhouse kfree(pointers); 188553b381b3SDavid Woodhouse 188653b381b3SDavid Woodhouse cleanup_io: 188753b381b3SDavid Woodhouse 188853b381b3SDavid Woodhouse if (rbio->read_rebuild) { 18894ae10b3aSChris Mason if (err == 0) 18904ae10b3aSChris Mason cache_rbio_pages(rbio); 18914ae10b3aSChris Mason else 18924ae10b3aSChris Mason clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 18934ae10b3aSChris Mason 189453b381b3SDavid Woodhouse rbio_orig_end_io(rbio, err, err == 0); 189553b381b3SDavid Woodhouse } else if (err == 0) { 189653b381b3SDavid Woodhouse rbio->faila = -1; 189753b381b3SDavid Woodhouse rbio->failb = -1; 189853b381b3SDavid Woodhouse finish_rmw(rbio); 189953b381b3SDavid Woodhouse } else { 190053b381b3SDavid Woodhouse rbio_orig_end_io(rbio, err, 0); 190153b381b3SDavid Woodhouse } 190253b381b3SDavid Woodhouse } 190353b381b3SDavid Woodhouse 190453b381b3SDavid Woodhouse /* 190553b381b3SDavid Woodhouse * This is called only for stripes we've read from disk to 190653b381b3SDavid Woodhouse * reconstruct the parity. 190753b381b3SDavid Woodhouse */ 190853b381b3SDavid Woodhouse static void raid_recover_end_io(struct bio *bio, int err) 190953b381b3SDavid Woodhouse { 191053b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio = bio->bi_private; 191153b381b3SDavid Woodhouse 191253b381b3SDavid Woodhouse /* 191353b381b3SDavid Woodhouse * we only read stripe pages off the disk, set them 191453b381b3SDavid Woodhouse * up to date if there were no errors 191553b381b3SDavid Woodhouse */ 191653b381b3SDavid Woodhouse if (err) 191753b381b3SDavid Woodhouse fail_bio_stripe(rbio, bio); 191853b381b3SDavid Woodhouse else 191953b381b3SDavid Woodhouse set_bio_pages_uptodate(bio); 192053b381b3SDavid Woodhouse bio_put(bio); 192153b381b3SDavid Woodhouse 192253b381b3SDavid Woodhouse if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) 192353b381b3SDavid Woodhouse return; 192453b381b3SDavid Woodhouse 192553b381b3SDavid Woodhouse if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) 192653b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 192753b381b3SDavid Woodhouse else 192853b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 192953b381b3SDavid Woodhouse } 193053b381b3SDavid Woodhouse 193153b381b3SDavid Woodhouse /* 193253b381b3SDavid Woodhouse * reads everything we need off the disk to reconstruct 193353b381b3SDavid Woodhouse * the parity. endio handlers trigger final reconstruction 193453b381b3SDavid Woodhouse * when the IO is done. 193553b381b3SDavid Woodhouse * 193653b381b3SDavid Woodhouse * This is used both for reads from the higher layers and for 193753b381b3SDavid Woodhouse * parity construction required to finish a rmw cycle. 193853b381b3SDavid Woodhouse */ 193953b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) 194053b381b3SDavid Woodhouse { 194153b381b3SDavid Woodhouse int bios_to_read = 0; 194253b381b3SDavid Woodhouse struct btrfs_bio *bbio = rbio->bbio; 194353b381b3SDavid Woodhouse struct bio_list bio_list; 194453b381b3SDavid Woodhouse int ret; 194553b381b3SDavid Woodhouse int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 194653b381b3SDavid Woodhouse int pagenr; 194753b381b3SDavid Woodhouse int stripe; 194853b381b3SDavid Woodhouse struct bio *bio; 194953b381b3SDavid Woodhouse 195053b381b3SDavid Woodhouse bio_list_init(&bio_list); 195153b381b3SDavid Woodhouse 195253b381b3SDavid Woodhouse ret = alloc_rbio_pages(rbio); 195353b381b3SDavid Woodhouse if (ret) 195453b381b3SDavid Woodhouse goto cleanup; 195553b381b3SDavid Woodhouse 195653b381b3SDavid Woodhouse atomic_set(&rbio->bbio->error, 0); 195753b381b3SDavid Woodhouse 195853b381b3SDavid Woodhouse /* 19594ae10b3aSChris Mason * read everything that hasn't failed. Thanks to the 19604ae10b3aSChris Mason * stripe cache, it is possible that some or all of these 19614ae10b3aSChris Mason * pages are going to be uptodate. 196253b381b3SDavid Woodhouse */ 196353b381b3SDavid Woodhouse for (stripe = 0; stripe < bbio->num_stripes; stripe++) { 196453b381b3SDavid Woodhouse if (rbio->faila == stripe || 196553b381b3SDavid Woodhouse rbio->failb == stripe) 196653b381b3SDavid Woodhouse continue; 196753b381b3SDavid Woodhouse 196853b381b3SDavid Woodhouse for (pagenr = 0; pagenr < nr_pages; pagenr++) { 196953b381b3SDavid Woodhouse struct page *p; 197053b381b3SDavid Woodhouse 197153b381b3SDavid Woodhouse /* 197253b381b3SDavid Woodhouse * the rmw code may have already read this 197353b381b3SDavid Woodhouse * page in 197453b381b3SDavid Woodhouse */ 197553b381b3SDavid Woodhouse p = rbio_stripe_page(rbio, stripe, pagenr); 197653b381b3SDavid Woodhouse if (PageUptodate(p)) 197753b381b3SDavid Woodhouse continue; 197853b381b3SDavid Woodhouse 197953b381b3SDavid Woodhouse ret = rbio_add_io_page(rbio, &bio_list, 198053b381b3SDavid Woodhouse rbio_stripe_page(rbio, stripe, pagenr), 198153b381b3SDavid Woodhouse stripe, pagenr, rbio->stripe_len); 198253b381b3SDavid Woodhouse if (ret < 0) 198353b381b3SDavid Woodhouse goto cleanup; 198453b381b3SDavid Woodhouse } 198553b381b3SDavid Woodhouse } 198653b381b3SDavid Woodhouse 198753b381b3SDavid Woodhouse bios_to_read = bio_list_size(&bio_list); 198853b381b3SDavid Woodhouse if (!bios_to_read) { 198953b381b3SDavid Woodhouse /* 199053b381b3SDavid Woodhouse * we might have no bios to read just because the pages 199153b381b3SDavid Woodhouse * were up to date, or we might have no bios to read because 199253b381b3SDavid Woodhouse * the devices were gone. 199353b381b3SDavid Woodhouse */ 199453b381b3SDavid Woodhouse if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) { 199553b381b3SDavid Woodhouse __raid_recover_end_io(rbio); 199653b381b3SDavid Woodhouse goto out; 199753b381b3SDavid Woodhouse } else { 199853b381b3SDavid Woodhouse goto cleanup; 199953b381b3SDavid Woodhouse } 200053b381b3SDavid Woodhouse } 200153b381b3SDavid Woodhouse 200253b381b3SDavid Woodhouse /* 200353b381b3SDavid Woodhouse * the bbio may be freed once we submit the last bio. Make sure 200453b381b3SDavid Woodhouse * not to touch it after that 200553b381b3SDavid Woodhouse */ 200653b381b3SDavid Woodhouse atomic_set(&bbio->stripes_pending, bios_to_read); 200753b381b3SDavid Woodhouse while (1) { 200853b381b3SDavid Woodhouse bio = bio_list_pop(&bio_list); 200953b381b3SDavid Woodhouse if (!bio) 201053b381b3SDavid Woodhouse break; 201153b381b3SDavid Woodhouse 201253b381b3SDavid Woodhouse bio->bi_private = rbio; 201353b381b3SDavid Woodhouse bio->bi_end_io = raid_recover_end_io; 201453b381b3SDavid Woodhouse 201553b381b3SDavid Woodhouse btrfs_bio_wq_end_io(rbio->fs_info, bio, 201653b381b3SDavid Woodhouse BTRFS_WQ_ENDIO_RAID56); 201753b381b3SDavid Woodhouse 201853b381b3SDavid Woodhouse BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); 201953b381b3SDavid Woodhouse submit_bio(READ, bio); 202053b381b3SDavid Woodhouse } 202153b381b3SDavid Woodhouse out: 202253b381b3SDavid Woodhouse return 0; 202353b381b3SDavid Woodhouse 202453b381b3SDavid Woodhouse cleanup: 202553b381b3SDavid Woodhouse if (rbio->read_rebuild) 202653b381b3SDavid Woodhouse rbio_orig_end_io(rbio, -EIO, 0); 202753b381b3SDavid Woodhouse return -EIO; 202853b381b3SDavid Woodhouse } 202953b381b3SDavid Woodhouse 203053b381b3SDavid Woodhouse /* 203153b381b3SDavid Woodhouse * the main entry point for reads from the higher layers. This 203253b381b3SDavid Woodhouse * is really only called when the normal read path had a failure, 203353b381b3SDavid Woodhouse * so we assume the bio they send down corresponds to a failed part 203453b381b3SDavid Woodhouse * of the drive. 203553b381b3SDavid Woodhouse */ 203653b381b3SDavid Woodhouse int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, 203753b381b3SDavid Woodhouse struct btrfs_bio *bbio, u64 *raid_map, 203853b381b3SDavid Woodhouse u64 stripe_len, int mirror_num) 203953b381b3SDavid Woodhouse { 204053b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 204153b381b3SDavid Woodhouse int ret; 204253b381b3SDavid Woodhouse 204353b381b3SDavid Woodhouse rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 204453b381b3SDavid Woodhouse if (IS_ERR(rbio)) { 204553b381b3SDavid Woodhouse return PTR_ERR(rbio); 204653b381b3SDavid Woodhouse } 204753b381b3SDavid Woodhouse 204853b381b3SDavid Woodhouse rbio->read_rebuild = 1; 204953b381b3SDavid Woodhouse bio_list_add(&rbio->bio_list, bio); 205053b381b3SDavid Woodhouse rbio->bio_list_bytes = bio->bi_size; 205153b381b3SDavid Woodhouse 205253b381b3SDavid Woodhouse rbio->faila = find_logical_bio_stripe(rbio, bio); 205353b381b3SDavid Woodhouse if (rbio->faila == -1) { 205453b381b3SDavid Woodhouse BUG(); 205553b381b3SDavid Woodhouse kfree(rbio); 205653b381b3SDavid Woodhouse return -EIO; 205753b381b3SDavid Woodhouse } 205853b381b3SDavid Woodhouse 205953b381b3SDavid Woodhouse /* 206053b381b3SDavid Woodhouse * reconstruct from the q stripe if they are 206153b381b3SDavid Woodhouse * asking for mirror 3 206253b381b3SDavid Woodhouse */ 206353b381b3SDavid Woodhouse if (mirror_num == 3) 206453b381b3SDavid Woodhouse rbio->failb = bbio->num_stripes - 2; 206553b381b3SDavid Woodhouse 206653b381b3SDavid Woodhouse ret = lock_stripe_add(rbio); 206753b381b3SDavid Woodhouse 206853b381b3SDavid Woodhouse /* 206953b381b3SDavid Woodhouse * __raid56_parity_recover will end the bio with 207053b381b3SDavid Woodhouse * any errors it hits. We don't want to return 207153b381b3SDavid Woodhouse * its error value up the stack because our caller 207253b381b3SDavid Woodhouse * will end up calling bio_endio with any nonzero 207353b381b3SDavid Woodhouse * return 207453b381b3SDavid Woodhouse */ 207553b381b3SDavid Woodhouse if (ret == 0) 207653b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 207753b381b3SDavid Woodhouse /* 207853b381b3SDavid Woodhouse * our rbio has been added to the list of 207953b381b3SDavid Woodhouse * rbios that will be handled after the 208053b381b3SDavid Woodhouse * currently lock owner is done 208153b381b3SDavid Woodhouse */ 208253b381b3SDavid Woodhouse return 0; 208353b381b3SDavid Woodhouse 208453b381b3SDavid Woodhouse } 208553b381b3SDavid Woodhouse 208653b381b3SDavid Woodhouse static void rmw_work(struct btrfs_work *work) 208753b381b3SDavid Woodhouse { 208853b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 208953b381b3SDavid Woodhouse 209053b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 209153b381b3SDavid Woodhouse raid56_rmw_stripe(rbio); 209253b381b3SDavid Woodhouse } 209353b381b3SDavid Woodhouse 209453b381b3SDavid Woodhouse static void read_rebuild_work(struct btrfs_work *work) 209553b381b3SDavid Woodhouse { 209653b381b3SDavid Woodhouse struct btrfs_raid_bio *rbio; 209753b381b3SDavid Woodhouse 209853b381b3SDavid Woodhouse rbio = container_of(work, struct btrfs_raid_bio, work); 209953b381b3SDavid Woodhouse __raid56_parity_recover(rbio); 210053b381b3SDavid Woodhouse } 2101