/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

/*
 * bbio and raid_map are managed by the caller, so we shouldn't free
 * them here.  Beyond that, rbios with this flag must not be cached,
 * because we need raid_map to check whether two rbios map to the
 * same stripe, and the caller has very likely already freed the
 * raid_map by then.
 */
#define RBIO_HOLD_BBIO_MAP_BIT	4

#define RBIO_CACHE_SIZE 1024

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE	= 0,
	BTRFS_RBIO_READ_REBUILD	= 1,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/*
	 * logical block numbers for the start of each stripe.
	 * The last one or two are p/q.  These are sorted,
	 * so raid_map[0] is the start of our full stripe
	 */
	u64 *raid_map;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	atomic_t refs;

	atomic_t stripes_pending;

	atomic_t error;
	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and set up their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fall back to vmalloc to lower the
	 * chance of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!table) {
		table = vzalloc(table_size);
		if (!table)
			return -ENOMEM;
	}

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x) {
		if (is_vmalloc_addr(x))
			vfree(x);
		else
			kfree(x);
	}
	return 0;
}
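
/*
 * The kzalloc-first, vzalloc-fallback dance above is a generic kernel
 * pattern for large tables, and the free side must match whichever
 * allocator won.  A minimal sketch of the same idea as a standalone
 * pair of helpers (the names are illustrative, not part of btrfs):
 */
#if 0	/* illustrative sketch, not built */
static void *big_table_alloc(size_t size)
{
	/* try the physically contiguous allocator first, quietly */
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);	/* fall back to vmalloc space */
	return p;
}

static void big_table_free(void *p)
{
	/* is_vmalloc_addr() tells us which allocator succeeded */
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
#endif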

/*
 * caching an rbio means copying everything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_CACHE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
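
/*
 * A quick illustration of why the shift matters (hypothetical numbers,
 * not btrfs code): full stripes start on large, aligned byte
 * boundaries, so the low 16 bits of raid_map[0] are almost always
 * zero.  Feeding such values straight to hash_64() clusters the
 * results into a couple of buckets; shifting first spreads them out.
 */
#if 0	/* illustrative sketch, not built */
static void show_bucket_spread(void)
{
	u64 addr;

	/* eight consecutive 64KiB-aligned full stripe starts */
	for (addr = SZ_1G; addr < SZ_1G + 8 * SZ_64K; addr += SZ_64K)
		pr_info("stripe %llu -> bucket %d\n",
			(unsigned long long)addr,
			(int)hash_64(addr >> 16,
				     BTRFS_STRIPE_HASH_TABLE_BITS));
}
#endif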

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				atomic_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	if (is_vmalloc_addr(info->stripe_hash_table))
		vfree(info->stripe_hash_table);
	else
		kfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		atomic_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				  struct btrfs_raid_bio,
				  stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
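
/*
 * The pointer convention is worth spelling out: pages[0..src_cnt-1]
 * are the sources and pages[src_cnt] is the destination, and
 * xor_blocks() xors into the destination, so the parity page must be
 * seeded before the loop runs.  A sketch of computing RAID5 parity
 * from an array of kmapped stripe pages this way (the helper name is
 * illustrative):
 */
#if 0	/* illustrative sketch, not built */
static void parity_from_pointers(void **pointers, int nr_data)
{
	/*
	 * seed the parity page with the first data page, then xor the
	 * remaining nr_data - 1 data pages into it.  pointers[] holds
	 * the kmapped data pages followed by the parity page, so
	 * (pointers + 1)[nr_data - 1] is still the parity page.
	 */
	memcpy(pointers[nr_data], pointers[0], PAGE_CACHE_SIZE);
	run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
}
#endif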

/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->raid_map[0] != cur->raid_map[0])
		return 0;

	/* reads can't merge with writes */
	if (last->operation != cur->operation)
		return 0;

	return 1;
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
	return rbio->stripe_pages[index];
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->bbio->num_stripes)
		return NULL;

	index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
		PAGE_CACHE_SHIFT;
	return rbio->stripe_pages[index];
}
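
/*
 * A worked example of the index math above (illustrative numbers):
 * with stripe_len = 64KiB and 4KiB pages, each stripe owns 16 slots
 * of stripe_pages[].  For a 3-device RAID5 rbio (nr_data = 2,
 * num_stripes = 3), page 0 of the two data stripes lives at slots 0
 * and 16, rbio_pstripe_page(rbio, 0) returns slot 2 * 16 = 32, and
 * rbio_qstripe_page() returns NULL because nr_data + 1 == num_stripes.
 */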

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	DEFINE_WAIT(wait);
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;
	int walk = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		walk++;
		if (cur->raid_map[0] == rbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				atomic_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}

			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	atomic_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
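
/*
 * The 0/1 contract above shapes every caller: on 0 the caller owns
 * the stripe lock and must kick off the IO itself; on 1 the rbio now
 * belongs to someone else's bio_list or plug list and must not be
 * touched again.  A minimal sketch of the calling pattern (this
 * helper is illustrative, not a real btrfs entry point):
 */
#if 0	/* illustrative sketch, not built */
static void submit_rbio(struct btrfs_raid_bio *rbio)
{
	if (lock_stripe_add(rbio) == 0) {
		/* we hold the stripe lock: schedule the rmw ourselves */
		async_rmw_stripe(rbio);
	}
	/* on 1, the rbio was merged or queued and may already be freed */
}
#endif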

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		atomic_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			atomic_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			}

			goto done_nolock;
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static inline void
__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need)
{
	if (need) {
		kfree(raid_map);
		kfree(bbio);
	}
}

static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio)
{
	__free_bbio_and_raid_map(rbio->bbio, rbio->raid_map,
			!test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	WARN_ON(atomic_read(&rbio->refs) < 0);
	if (!atomic_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	free_bbio_and_raid_map(rbio);

	kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	unlock_stripe(rbio);
	__free_raid_bio(rbio);
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *next;
	free_raid_bio(rbio);

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		if (uptodate)
			set_bit(BIO_UPTODATE, &cur->bi_flags);
		bio_endio(cur, err);
		cur = next;
	}
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio, int err)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	/* OK, we have written all the stripes we need to. */
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		err = -EIO;

	rbio_orig_end_io(rbio, err, 0);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}
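
/*
 * Concretely (illustrative numbers): with stripe_len = 64KiB and 4KiB
 * pages, page 3 of stripe 1 maps to chunk_page = 1 * 16 + 3 = 19.  If
 * the original bio covered that page, bio_pages[19] is non-NULL and is
 * returned; otherwise the fallback is the rbio's own stripe_pages[19].
 */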

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	unsigned long nr = stripe_len * nr_stripes;
	return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
}

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
			  struct btrfs_bio *bbio, u64 *raid_map,
			  u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2,
			GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->raid_map = raid_map;
	rbio->fs_info = root->fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->faila = -1;
	rbio->failb = -1;
	atomic_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages arrays point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;

	if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
		nr_data = bbio->num_stripes - 2;
	else
		nr_data = bbio->num_stripes - 1;

	rbio->nr_data = nr_data;
	return rbio;
}
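
/*
 * The single-allocation trick above (one kzalloc sized for the struct
 * plus both pointer arrays, with the arrays carved out of the trailing
 * memory) is a common kernel pattern: one allocation, one kfree, no
 * partial-failure cleanup.  A stripped-down sketch of the same layout
 * (names are illustrative):
 */
#if 0	/* illustrative sketch, not built */
struct two_arrays {
	int nr;
	struct page **a;	/* points just past the struct */
	struct page **b;	/* points nr pointers after that */
};

static struct two_arrays *two_arrays_alloc(int nr)
{
	struct two_arrays *t;

	t = kzalloc(sizeof(*t) + nr * sizeof(struct page *) * 2, GFP_NOFS);
	if (!t)
		return NULL;
	t->nr = nr;
	t->a = (struct page **)(t + 1);
	t->b = t->a + nr;	/* one kfree(t) releases everything */
	return t;
}
#endif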

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
		ClearPageUptodate(page);
	}
	return 0;
}

/* allocate pages for just the p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    test_bit(BIO_UPTODATE, &last->bi_flags) &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
			if (ret == PAGE_CACHE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_size = 0;
	bio->bi_bdev = stripe->dev->bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;
	set_bit(BIO_UPTODATE, &bio->bi_flags);

	bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
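
/*
 * The merge test above works in bytes (illustrative numbers): a bio
 * whose bi_sector is 2048 starts at byte 2048 << 9 = 1048576, and with
 * bi_size = 8192 it ends at byte 1056768.  A page whose disk_start is
 * exactly 1056768 on the same bdev extends that bio; anything else
 * gets a fresh bio.
 */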
109553b381b3SDavid Woodhouse 
109653b381b3SDavid Woodhouse /*
109753b381b3SDavid Woodhouse  * while we're doing the read/modify/write cycle, we could
109853b381b3SDavid Woodhouse  * have errors in reading pages off the disk.  This checks
109953b381b3SDavid Woodhouse  * for errors, and if we weren't able to read a page it
110053b381b3SDavid Woodhouse  * triggers parity reconstruction.  The rmw will be finished
110153b381b3SDavid Woodhouse  * after we've reconstructed the failed stripes.
110253b381b3SDavid Woodhouse  */
110353b381b3SDavid Woodhouse static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
110453b381b3SDavid Woodhouse {
110553b381b3SDavid Woodhouse 	if (rbio->faila >= 0 || rbio->failb >= 0) {
110653b381b3SDavid Woodhouse 		BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1);
110753b381b3SDavid Woodhouse 		__raid56_parity_recover(rbio);
110853b381b3SDavid Woodhouse 	} else {
110953b381b3SDavid Woodhouse 		finish_rmw(rbio);
111053b381b3SDavid Woodhouse 	}
111153b381b3SDavid Woodhouse }
111253b381b3SDavid Woodhouse 
111353b381b3SDavid Woodhouse /*
111453b381b3SDavid Woodhouse  * these are just the pages from the rbio array, not from anything
111553b381b3SDavid Woodhouse  * the FS sent down to us
111653b381b3SDavid Woodhouse  */
111753b381b3SDavid Woodhouse static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
111853b381b3SDavid Woodhouse {
111953b381b3SDavid Woodhouse 	int index;
112053b381b3SDavid Woodhouse 	index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
112153b381b3SDavid Woodhouse 	index += page;
112253b381b3SDavid Woodhouse 	return rbio->stripe_pages[index];
112353b381b3SDavid Woodhouse }
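
/*
 * A worked example of the index math above, assuming 4K pages and a
 * 64K stripe_len: each stripe owns 64K >> PAGE_CACHE_SHIFT = 16 slots,
 * so stripe 2, page 3 resolves to stripe_pages[2 * 16 + 3], i.e.
 * stripe_pages[35].
 */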
112453b381b3SDavid Woodhouse 
112553b381b3SDavid Woodhouse /*
112653b381b3SDavid Woodhouse  * helper function to walk our bio list and populate the bio_pages array with
112753b381b3SDavid Woodhouse  * the result.  This seems expensive, but it is faster than constantly
112853b381b3SDavid Woodhouse  * searching through the bio list as we set up the IO in finish_rmw or stripe
112953b381b3SDavid Woodhouse  * reconstruction.
113053b381b3SDavid Woodhouse  *
113153b381b3SDavid Woodhouse  * This must be called before you trust the answers from page_in_rbio
113253b381b3SDavid Woodhouse  */
113353b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio)
113453b381b3SDavid Woodhouse {
113553b381b3SDavid Woodhouse 	struct bio *bio;
113653b381b3SDavid Woodhouse 	u64 start;
113753b381b3SDavid Woodhouse 	unsigned long stripe_offset;
113853b381b3SDavid Woodhouse 	unsigned long page_index;
113953b381b3SDavid Woodhouse 	struct page *p;
114053b381b3SDavid Woodhouse 	int i;
114153b381b3SDavid Woodhouse 
114253b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
114353b381b3SDavid Woodhouse 	bio_list_for_each(bio, &rbio->bio_list) {
11444f024f37SKent Overstreet 		start = (u64)bio->bi_iter.bi_sector << 9;
114553b381b3SDavid Woodhouse 		stripe_offset = start - rbio->raid_map[0];
114653b381b3SDavid Woodhouse 		page_index = stripe_offset >> PAGE_CACHE_SHIFT;
114753b381b3SDavid Woodhouse 
114853b381b3SDavid Woodhouse 		for (i = 0; i < bio->bi_vcnt; i++) {
114953b381b3SDavid Woodhouse 			p = bio->bi_io_vec[i].bv_page;
115053b381b3SDavid Woodhouse 			rbio->bio_pages[page_index + i] = p;
115153b381b3SDavid Woodhouse 		}
115253b381b3SDavid Woodhouse 	}
115353b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
115453b381b3SDavid Woodhouse }
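
/*
 * Example of the mapping above with 4K pages: if the full stripe
 * starts at raid_map[0] == 1M and a bio on the list starts at logical
 * byte 1M + 8K, then stripe_offset is 8K and page_index is 2, so a
 * three-page bio fills bio_pages[2], bio_pages[3] and bio_pages[4].
 */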
115553b381b3SDavid Woodhouse 
115653b381b3SDavid Woodhouse /*
115753b381b3SDavid Woodhouse  * this is called in one of two situations.  We either
115853b381b3SDavid Woodhouse  * have a full stripe from the higher layers, or we've read all
115953b381b3SDavid Woodhouse  * the missing bits off disk.
116053b381b3SDavid Woodhouse  *
116153b381b3SDavid Woodhouse  * This will calculate the parity and then send down any
116253b381b3SDavid Woodhouse  * changed blocks.
116353b381b3SDavid Woodhouse  */
116453b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
116553b381b3SDavid Woodhouse {
116653b381b3SDavid Woodhouse 	struct btrfs_bio *bbio = rbio->bbio;
116753b381b3SDavid Woodhouse 	void *pointers[bbio->num_stripes];
116853b381b3SDavid Woodhouse 	int stripe_len = rbio->stripe_len;
116953b381b3SDavid Woodhouse 	int nr_data = rbio->nr_data;
117053b381b3SDavid Woodhouse 	int stripe;
117153b381b3SDavid Woodhouse 	int pagenr;
117253b381b3SDavid Woodhouse 	int p_stripe = -1;
117353b381b3SDavid Woodhouse 	int q_stripe = -1;
117453b381b3SDavid Woodhouse 	struct bio_list bio_list;
117553b381b3SDavid Woodhouse 	struct bio *bio;
117653b381b3SDavid Woodhouse 	int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
117753b381b3SDavid Woodhouse 	int ret;
117853b381b3SDavid Woodhouse 
117953b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
118053b381b3SDavid Woodhouse 
118153b381b3SDavid Woodhouse 	if (bbio->num_stripes - rbio->nr_data == 1) {
118253b381b3SDavid Woodhouse 		p_stripe = bbio->num_stripes - 1;
118353b381b3SDavid Woodhouse 	} else if (bbio->num_stripes - rbio->nr_data == 2) {
118453b381b3SDavid Woodhouse 		p_stripe = bbio->num_stripes - 2;
118553b381b3SDavid Woodhouse 		q_stripe = bbio->num_stripes - 1;
118653b381b3SDavid Woodhouse 	} else {
118753b381b3SDavid Woodhouse 		BUG();
118853b381b3SDavid Woodhouse 	}
118953b381b3SDavid Woodhouse 
119053b381b3SDavid Woodhouse 	/* at this point we either have a full stripe,
119153b381b3SDavid Woodhouse 	 * or we've read the full stripe from the drive.
119253b381b3SDavid Woodhouse 	 * recalculate the parity and write the new results.
119353b381b3SDavid Woodhouse 	 *
119453b381b3SDavid Woodhouse 	 * We're not allowed to add any new bios to the
119553b381b3SDavid Woodhouse 	 * bio list here, anyone else that wants to
119653b381b3SDavid Woodhouse 	 * change this stripe needs to do their own rmw.
119753b381b3SDavid Woodhouse 	 */
119853b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
119953b381b3SDavid Woodhouse 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
120053b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
120153b381b3SDavid Woodhouse 
1202b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
120353b381b3SDavid Woodhouse 
120453b381b3SDavid Woodhouse 	/*
120553b381b3SDavid Woodhouse 	 * now that we've set rmw_locked, run through the
120653b381b3SDavid Woodhouse 	 * bio list one last time and map the page pointers
12074ae10b3aSChris Mason 	 *
12084ae10b3aSChris Mason 	 * We don't cache full rbios because we're assuming
12094ae10b3aSChris Mason 	 * the higher layers are unlikely to use this area of
12104ae10b3aSChris Mason 	 * the disk again soon.  If they do use it again,
12114ae10b3aSChris Mason 	 * hopefully they will send another full bio.
121253b381b3SDavid Woodhouse 	 */
121353b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
12144ae10b3aSChris Mason 	if (!rbio_is_full(rbio))
12154ae10b3aSChris Mason 		cache_rbio_pages(rbio);
12164ae10b3aSChris Mason 	else
12174ae10b3aSChris Mason 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
121853b381b3SDavid Woodhouse 
121953b381b3SDavid Woodhouse 	for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
122053b381b3SDavid Woodhouse 		struct page *p;
122153b381b3SDavid Woodhouse 		/* first collect one page from each data stripe */
122253b381b3SDavid Woodhouse 		for (stripe = 0; stripe < nr_data; stripe++) {
122353b381b3SDavid Woodhouse 			p = page_in_rbio(rbio, stripe, pagenr, 0);
122453b381b3SDavid Woodhouse 			pointers[stripe] = kmap(p);
122553b381b3SDavid Woodhouse 		}
122653b381b3SDavid Woodhouse 
122753b381b3SDavid Woodhouse 		/* then add the parity stripe */
122853b381b3SDavid Woodhouse 		p = rbio_pstripe_page(rbio, pagenr);
122953b381b3SDavid Woodhouse 		SetPageUptodate(p);
123053b381b3SDavid Woodhouse 		pointers[stripe++] = kmap(p);
123153b381b3SDavid Woodhouse 
123253b381b3SDavid Woodhouse 		if (q_stripe != -1) {
123353b381b3SDavid Woodhouse 
123453b381b3SDavid Woodhouse 			/*
123553b381b3SDavid Woodhouse 			 * raid6, add the qstripe and call the
123653b381b3SDavid Woodhouse 			 * library function to fill in our p/q
123753b381b3SDavid Woodhouse 			 */
123853b381b3SDavid Woodhouse 			p = rbio_qstripe_page(rbio, pagenr);
123953b381b3SDavid Woodhouse 			SetPageUptodate(p);
124053b381b3SDavid Woodhouse 			pointers[stripe++] = kmap(p);
124153b381b3SDavid Woodhouse 
124253b381b3SDavid Woodhouse 			raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
124353b381b3SDavid Woodhouse 						pointers);
124453b381b3SDavid Woodhouse 		} else {
124553b381b3SDavid Woodhouse 			/* raid5 */
124653b381b3SDavid Woodhouse 			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
124753b381b3SDavid Woodhouse 			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
124853b381b3SDavid Woodhouse 		}
124953b381b3SDavid Woodhouse 
125153b381b3SDavid Woodhouse 		for (stripe = 0; stripe < bbio->num_stripes; stripe++)
125253b381b3SDavid Woodhouse 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
125353b381b3SDavid Woodhouse 	}
125453b381b3SDavid Woodhouse 
125553b381b3SDavid Woodhouse 	/*
125653b381b3SDavid Woodhouse 	 * time to start writing.  Make bios for everything from the
125753b381b3SDavid Woodhouse 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
125853b381b3SDavid Woodhouse 	 * everything else.
125953b381b3SDavid Woodhouse 	 */
126053b381b3SDavid Woodhouse 	for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
126153b381b3SDavid Woodhouse 		for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
126253b381b3SDavid Woodhouse 			struct page *page;
126353b381b3SDavid Woodhouse 			if (stripe < rbio->nr_data) {
126453b381b3SDavid Woodhouse 				page = page_in_rbio(rbio, stripe, pagenr, 1);
126553b381b3SDavid Woodhouse 				if (!page)
126653b381b3SDavid Woodhouse 					continue;
126753b381b3SDavid Woodhouse 			} else {
126853b381b3SDavid Woodhouse 				page = rbio_stripe_page(rbio, stripe, pagenr);
126953b381b3SDavid Woodhouse 			}
127053b381b3SDavid Woodhouse 
127153b381b3SDavid Woodhouse 			ret = rbio_add_io_page(rbio, &bio_list,
127253b381b3SDavid Woodhouse 				       page, stripe, pagenr, rbio->stripe_len);
127353b381b3SDavid Woodhouse 			if (ret)
127453b381b3SDavid Woodhouse 				goto cleanup;
127553b381b3SDavid Woodhouse 		}
127653b381b3SDavid Woodhouse 	}
127753b381b3SDavid Woodhouse 
1278b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1279b89e1b01SMiao Xie 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
128053b381b3SDavid Woodhouse 
128153b381b3SDavid Woodhouse 	while (1) {
128253b381b3SDavid Woodhouse 		bio = bio_list_pop(&bio_list);
128353b381b3SDavid Woodhouse 		if (!bio)
128453b381b3SDavid Woodhouse 			break;
128553b381b3SDavid Woodhouse 
128653b381b3SDavid Woodhouse 		bio->bi_private = rbio;
128753b381b3SDavid Woodhouse 		bio->bi_end_io = raid_write_end_io;
128853b381b3SDavid Woodhouse 		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
128953b381b3SDavid Woodhouse 		submit_bio(WRITE, bio);
129053b381b3SDavid Woodhouse 	}
129153b381b3SDavid Woodhouse 	return;
129253b381b3SDavid Woodhouse 
129353b381b3SDavid Woodhouse cleanup:
129453b381b3SDavid Woodhouse 	rbio_orig_end_io(rbio, -EIO, 0);
129553b381b3SDavid Woodhouse }
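
/*
 * A minimal sketch (not built) of the pointer layout finish_rmw feeds
 * to the raid6 library for one page: slots 0..nr_data-1 hold kmapped
 * data pages, followed by P and then Q, and gen_syndrome() fills the
 * last two slots from the data in front of them.  The 4-disk geometry
 * here (2 data + P + Q) is an assumption for the example.
 */
#if 0
static void example_gen_pq(void *d0, void *d1, void *p, void *q)
{
	void *ptrs[4] = { d0, d1, p, q };

	raid6_call.gen_syndrome(4, PAGE_SIZE, ptrs);
}
#endif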
129653b381b3SDavid Woodhouse 
129753b381b3SDavid Woodhouse /*
129853b381b3SDavid Woodhouse  * helper to find the stripe number for a given bio.  Used to figure out which
129953b381b3SDavid Woodhouse  * stripe has failed.  This expects the bio to correspond to a physical disk,
130053b381b3SDavid Woodhouse  * so it looks up based on physical sector numbers.
130153b381b3SDavid Woodhouse  */
130253b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio,
130353b381b3SDavid Woodhouse 			   struct bio *bio)
130453b381b3SDavid Woodhouse {
13054f024f37SKent Overstreet 	u64 physical = bio->bi_iter.bi_sector;
130653b381b3SDavid Woodhouse 	u64 stripe_start;
130753b381b3SDavid Woodhouse 	int i;
130853b381b3SDavid Woodhouse 	struct btrfs_bio_stripe *stripe;
130953b381b3SDavid Woodhouse 
131053b381b3SDavid Woodhouse 	physical <<= 9;
131153b381b3SDavid Woodhouse 
131253b381b3SDavid Woodhouse 	for (i = 0; i < rbio->bbio->num_stripes; i++) {
131353b381b3SDavid Woodhouse 		stripe = &rbio->bbio->stripes[i];
131453b381b3SDavid Woodhouse 		stripe_start = stripe->physical;
131553b381b3SDavid Woodhouse 		if (physical >= stripe_start &&
131653b381b3SDavid Woodhouse 		    physical < stripe_start + rbio->stripe_len) {
131753b381b3SDavid Woodhouse 			return i;
131853b381b3SDavid Woodhouse 		}
131953b381b3SDavid Woodhouse 	}
132053b381b3SDavid Woodhouse 	return -1;
132153b381b3SDavid Woodhouse }
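
/*
 * Worked example of the lookup above with a 64K stripe_len: a failed
 * bio at sector 2064 sits at physical byte 2064 << 9 = 1056768, and a
 * stripe with stripes[i].physical == 1048576 covers bytes
 * [1048576, 1114112), so stripe i is reported as the failed one.
 */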
132253b381b3SDavid Woodhouse 
132353b381b3SDavid Woodhouse /*
132453b381b3SDavid Woodhouse  * helper to find the stripe number for a given
132553b381b3SDavid Woodhouse  * bio (before mapping).  Used to figure out which stripe has
132653b381b3SDavid Woodhouse  * failed.  This looks up based on logical block numbers.
132753b381b3SDavid Woodhouse  */
132853b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
132953b381b3SDavid Woodhouse 				   struct bio *bio)
133053b381b3SDavid Woodhouse {
13314f024f37SKent Overstreet 	u64 logical = bio->bi_iter.bi_sector;
133253b381b3SDavid Woodhouse 	u64 stripe_start;
133353b381b3SDavid Woodhouse 	int i;
133453b381b3SDavid Woodhouse 
133553b381b3SDavid Woodhouse 	logical <<= 9;
133653b381b3SDavid Woodhouse 
133753b381b3SDavid Woodhouse 	for (i = 0; i < rbio->nr_data; i++) {
133853b381b3SDavid Woodhouse 		stripe_start = rbio->raid_map[i];
133953b381b3SDavid Woodhouse 		if (logical >= stripe_start &&
134053b381b3SDavid Woodhouse 		    logical < stripe_start + rbio->stripe_len) {
134153b381b3SDavid Woodhouse 			return i;
134253b381b3SDavid Woodhouse 		}
134353b381b3SDavid Woodhouse 	}
134453b381b3SDavid Woodhouse 	return -1;
134553b381b3SDavid Woodhouse }
134653b381b3SDavid Woodhouse 
134753b381b3SDavid Woodhouse /*
134853b381b3SDavid Woodhouse  * returns -EIO if we had too many failures
134953b381b3SDavid Woodhouse  */
135053b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
135153b381b3SDavid Woodhouse {
135253b381b3SDavid Woodhouse 	unsigned long flags;
135353b381b3SDavid Woodhouse 	int ret = 0;
135453b381b3SDavid Woodhouse 
135553b381b3SDavid Woodhouse 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
135653b381b3SDavid Woodhouse 
135753b381b3SDavid Woodhouse 	/* we already know this stripe is bad, move on */
135853b381b3SDavid Woodhouse 	if (rbio->faila == failed || rbio->failb == failed)
135953b381b3SDavid Woodhouse 		goto out;
136053b381b3SDavid Woodhouse 
136153b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
136253b381b3SDavid Woodhouse 		/* first failure on this rbio */
136353b381b3SDavid Woodhouse 		rbio->faila = failed;
1364b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
136553b381b3SDavid Woodhouse 	} else if (rbio->failb == -1) {
136653b381b3SDavid Woodhouse 		/* second failure on this rbio */
136753b381b3SDavid Woodhouse 		rbio->failb = failed;
1368b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
136953b381b3SDavid Woodhouse 	} else {
137053b381b3SDavid Woodhouse 		ret = -EIO;
137153b381b3SDavid Woodhouse 	}
137253b381b3SDavid Woodhouse out:
137353b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
137453b381b3SDavid Woodhouse 
137553b381b3SDavid Woodhouse 	return ret;
137653b381b3SDavid Woodhouse }
137753b381b3SDavid Woodhouse 
137853b381b3SDavid Woodhouse /*
137953b381b3SDavid Woodhouse  * helper to fail a stripe based on a physical disk
138053b381b3SDavid Woodhouse  * bio.
138153b381b3SDavid Woodhouse  */
138253b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
138353b381b3SDavid Woodhouse 			   struct bio *bio)
138453b381b3SDavid Woodhouse {
138553b381b3SDavid Woodhouse 	int failed = find_bio_stripe(rbio, bio);
138653b381b3SDavid Woodhouse 
138753b381b3SDavid Woodhouse 	if (failed < 0)
138853b381b3SDavid Woodhouse 		return -EIO;
138953b381b3SDavid Woodhouse 
139053b381b3SDavid Woodhouse 	return fail_rbio_index(rbio, failed);
139153b381b3SDavid Woodhouse }
139253b381b3SDavid Woodhouse 
139353b381b3SDavid Woodhouse /*
139453b381b3SDavid Woodhouse  * this sets each page in the bio uptodate.  It should only be used on private
139553b381b3SDavid Woodhouse  * rbio pages, nothing that comes in from the higher layers
139653b381b3SDavid Woodhouse  */
139753b381b3SDavid Woodhouse static void set_bio_pages_uptodate(struct bio *bio)
139853b381b3SDavid Woodhouse {
139953b381b3SDavid Woodhouse 	int i;
140053b381b3SDavid Woodhouse 	struct page *p;
140153b381b3SDavid Woodhouse 
140253b381b3SDavid Woodhouse 	for (i = 0; i < bio->bi_vcnt; i++) {
140353b381b3SDavid Woodhouse 		p = bio->bi_io_vec[i].bv_page;
140453b381b3SDavid Woodhouse 		SetPageUptodate(p);
140553b381b3SDavid Woodhouse 	}
140653b381b3SDavid Woodhouse }
140753b381b3SDavid Woodhouse 
140853b381b3SDavid Woodhouse /*
140953b381b3SDavid Woodhouse  * end io for the read phase of the rmw cycle.  All the bios here are physical
141053b381b3SDavid Woodhouse  * stripe bios we've read from the disk so we can recalculate the parity of the
141153b381b3SDavid Woodhouse  * stripe.
141253b381b3SDavid Woodhouse  *
141353b381b3SDavid Woodhouse  * This will usually kick off finish_rmw once all the bios are read in, but it
141453b381b3SDavid Woodhouse  * may trigger parity reconstruction if we had any errors along the way
141553b381b3SDavid Woodhouse  */
141653b381b3SDavid Woodhouse static void raid_rmw_end_io(struct bio *bio, int err)
141753b381b3SDavid Woodhouse {
141853b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio = bio->bi_private;
141953b381b3SDavid Woodhouse 
142053b381b3SDavid Woodhouse 	if (err)
142153b381b3SDavid Woodhouse 		fail_bio_stripe(rbio, bio);
142253b381b3SDavid Woodhouse 	else
142353b381b3SDavid Woodhouse 		set_bio_pages_uptodate(bio);
142453b381b3SDavid Woodhouse 
142553b381b3SDavid Woodhouse 	bio_put(bio);
142653b381b3SDavid Woodhouse 
1427b89e1b01SMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
142853b381b3SDavid Woodhouse 		return;
142953b381b3SDavid Woodhouse 
143053b381b3SDavid Woodhouse 	err = 0;
1431b89e1b01SMiao Xie 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
143253b381b3SDavid Woodhouse 		goto cleanup;
143353b381b3SDavid Woodhouse 
143453b381b3SDavid Woodhouse 	/*
143553b381b3SDavid Woodhouse 	 * this will normally call finish_rmw to start our write,
143653b381b3SDavid Woodhouse 	 * but if there are any failed stripes we'll reconstruct
143753b381b3SDavid Woodhouse 	 * from parity first.
143853b381b3SDavid Woodhouse 	 */
143953b381b3SDavid Woodhouse 	validate_rbio_for_rmw(rbio);
144053b381b3SDavid Woodhouse 	return;
144153b381b3SDavid Woodhouse 
144253b381b3SDavid Woodhouse cleanup:
144353b381b3SDavid Woodhouse 
144453b381b3SDavid Woodhouse 	rbio_orig_end_io(rbio, -EIO, 0);
144553b381b3SDavid Woodhouse }
144653b381b3SDavid Woodhouse 
144753b381b3SDavid Woodhouse static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
144853b381b3SDavid Woodhouse {
14499e0af237SLiu Bo 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
14509e0af237SLiu Bo 			rmw_work, NULL, NULL);
145153b381b3SDavid Woodhouse 
1452d05a33acSQu Wenruo 	btrfs_queue_work(rbio->fs_info->rmw_workers,
145353b381b3SDavid Woodhouse 			 &rbio->work);
145453b381b3SDavid Woodhouse }
145553b381b3SDavid Woodhouse 
145653b381b3SDavid Woodhouse static void async_read_rebuild(struct btrfs_raid_bio *rbio)
145753b381b3SDavid Woodhouse {
14589e0af237SLiu Bo 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
14599e0af237SLiu Bo 			read_rebuild_work, NULL, NULL);
146053b381b3SDavid Woodhouse 
1461d05a33acSQu Wenruo 	btrfs_queue_work(rbio->fs_info->rmw_workers,
146253b381b3SDavid Woodhouse 			 &rbio->work);
146353b381b3SDavid Woodhouse }
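
/*
 * Both async helpers above only defer work onto the rmw_workers
 * workqueue; rmw_work() and read_rebuild_work() at the bottom of this
 * file run in the worker thread and simply call back into
 * raid56_rmw_stripe() and __raid56_parity_recover() respectively.
 */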
146453b381b3SDavid Woodhouse 
146553b381b3SDavid Woodhouse /*
146653b381b3SDavid Woodhouse  * the stripe must be locked by the caller.  It will
146753b381b3SDavid Woodhouse  * unlock after all the writes are done
146853b381b3SDavid Woodhouse  */
146953b381b3SDavid Woodhouse static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
147053b381b3SDavid Woodhouse {
147153b381b3SDavid Woodhouse 	int bios_to_read = 0;
147253b381b3SDavid Woodhouse 	struct bio_list bio_list;
147353b381b3SDavid Woodhouse 	int ret;
1474ed6078f7SDavid Sterba 	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
147553b381b3SDavid Woodhouse 	int pagenr;
147653b381b3SDavid Woodhouse 	int stripe;
147753b381b3SDavid Woodhouse 	struct bio *bio;
147853b381b3SDavid Woodhouse 
147953b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
148053b381b3SDavid Woodhouse 
148153b381b3SDavid Woodhouse 	ret = alloc_rbio_pages(rbio);
148253b381b3SDavid Woodhouse 	if (ret)
148353b381b3SDavid Woodhouse 		goto cleanup;
148453b381b3SDavid Woodhouse 
148553b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
148653b381b3SDavid Woodhouse 
1487b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
148853b381b3SDavid Woodhouse 	/*
148953b381b3SDavid Woodhouse 	 * build a list of bios to read all the missing parts of this
149053b381b3SDavid Woodhouse 	 * stripe
149153b381b3SDavid Woodhouse 	 */
149253b381b3SDavid Woodhouse 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
149353b381b3SDavid Woodhouse 		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
149453b381b3SDavid Woodhouse 			struct page *page;
149553b381b3SDavid Woodhouse 			/*
149653b381b3SDavid Woodhouse 			 * we want to find all the pages missing from
149753b381b3SDavid Woodhouse 			 * the rbio and read them from the disk.  If
149853b381b3SDavid Woodhouse 			 * page_in_rbio finds a page in the bio list
149953b381b3SDavid Woodhouse 			 * we don't need to read it off the stripe.
150053b381b3SDavid Woodhouse 			 */
150153b381b3SDavid Woodhouse 			page = page_in_rbio(rbio, stripe, pagenr, 1);
150253b381b3SDavid Woodhouse 			if (page)
150353b381b3SDavid Woodhouse 				continue;
150453b381b3SDavid Woodhouse 
150553b381b3SDavid Woodhouse 			page = rbio_stripe_page(rbio, stripe, pagenr);
15064ae10b3aSChris Mason 			/*
15074ae10b3aSChris Mason 			 * the bio cache may have handed us an uptodate
15084ae10b3aSChris Mason 			 * page.  If so, be happy and use it
15094ae10b3aSChris Mason 			 */
15104ae10b3aSChris Mason 			if (PageUptodate(page))
15114ae10b3aSChris Mason 				continue;
15124ae10b3aSChris Mason 
151353b381b3SDavid Woodhouse 			ret = rbio_add_io_page(rbio, &bio_list, page,
151453b381b3SDavid Woodhouse 				       stripe, pagenr, rbio->stripe_len);
151553b381b3SDavid Woodhouse 			if (ret)
151653b381b3SDavid Woodhouse 				goto cleanup;
151753b381b3SDavid Woodhouse 		}
151853b381b3SDavid Woodhouse 	}
151953b381b3SDavid Woodhouse 
152053b381b3SDavid Woodhouse 	bios_to_read = bio_list_size(&bio_list);
152153b381b3SDavid Woodhouse 	if (!bios_to_read) {
152253b381b3SDavid Woodhouse 		/*
152353b381b3SDavid Woodhouse 		 * this can happen if others have merged with
152453b381b3SDavid Woodhouse 		 * us; it means there is nothing left to read.
152553b381b3SDavid Woodhouse 		 * But if there are missing devices it may not be
152653b381b3SDavid Woodhouse 		 * safe to do the full stripe write yet.
152753b381b3SDavid Woodhouse 		 */
152853b381b3SDavid Woodhouse 		goto finish;
152953b381b3SDavid Woodhouse 	}
153053b381b3SDavid Woodhouse 
153153b381b3SDavid Woodhouse 	/*
153253b381b3SDavid Woodhouse 	 * the bbio may be freed once we submit the last bio.  Make sure
153353b381b3SDavid Woodhouse 	 * not to touch it after that
153453b381b3SDavid Woodhouse 	 */
1535b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
153653b381b3SDavid Woodhouse 	while (1) {
153753b381b3SDavid Woodhouse 		bio = bio_list_pop(&bio_list);
153853b381b3SDavid Woodhouse 		if (!bio)
153953b381b3SDavid Woodhouse 			break;
154053b381b3SDavid Woodhouse 
154153b381b3SDavid Woodhouse 		bio->bi_private = rbio;
154253b381b3SDavid Woodhouse 		bio->bi_end_io = raid_rmw_end_io;
154353b381b3SDavid Woodhouse 
154453b381b3SDavid Woodhouse 		btrfs_bio_wq_end_io(rbio->fs_info, bio,
154553b381b3SDavid Woodhouse 				    BTRFS_WQ_ENDIO_RAID56);
154653b381b3SDavid Woodhouse 
154753b381b3SDavid Woodhouse 		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
154853b381b3SDavid Woodhouse 		submit_bio(READ, bio);
154953b381b3SDavid Woodhouse 	}
155053b381b3SDavid Woodhouse 	/* the actual write will happen once the reads are done */
155153b381b3SDavid Woodhouse 	return 0;
155253b381b3SDavid Woodhouse 
155353b381b3SDavid Woodhouse cleanup:
155453b381b3SDavid Woodhouse 	rbio_orig_end_io(rbio, -EIO, 0);
155553b381b3SDavid Woodhouse 	return -EIO;
155653b381b3SDavid Woodhouse 
155753b381b3SDavid Woodhouse finish:
155853b381b3SDavid Woodhouse 	validate_rbio_for_rmw(rbio);
155953b381b3SDavid Woodhouse 	return 0;
156053b381b3SDavid Woodhouse }
156153b381b3SDavid Woodhouse 
156253b381b3SDavid Woodhouse /*
156353b381b3SDavid Woodhouse  * if the upper layers pass in a full stripe, we thank them by only allocating
156453b381b3SDavid Woodhouse  * enough pages to hold the parity, and sending it all down quickly.
156553b381b3SDavid Woodhouse  */
156653b381b3SDavid Woodhouse static int full_stripe_write(struct btrfs_raid_bio *rbio)
156753b381b3SDavid Woodhouse {
156853b381b3SDavid Woodhouse 	int ret;
156953b381b3SDavid Woodhouse 
157053b381b3SDavid Woodhouse 	ret = alloc_rbio_parity_pages(rbio);
15713cd846d1SMiao Xie 	if (ret) {
15723cd846d1SMiao Xie 		__free_raid_bio(rbio);
157353b381b3SDavid Woodhouse 		return ret;
15743cd846d1SMiao Xie 	}
157553b381b3SDavid Woodhouse 
157653b381b3SDavid Woodhouse 	ret = lock_stripe_add(rbio);
157753b381b3SDavid Woodhouse 	if (ret == 0)
157853b381b3SDavid Woodhouse 		finish_rmw(rbio);
157953b381b3SDavid Woodhouse 	return 0;
158053b381b3SDavid Woodhouse }
158153b381b3SDavid Woodhouse 
158253b381b3SDavid Woodhouse /*
158353b381b3SDavid Woodhouse  * partial stripe writes get handed over to async helpers.
158453b381b3SDavid Woodhouse  * We're really hoping to merge a few more writes into this
158553b381b3SDavid Woodhouse  * rbio before calculating new parity
158653b381b3SDavid Woodhouse  */
158753b381b3SDavid Woodhouse static int partial_stripe_write(struct btrfs_raid_bio *rbio)
158853b381b3SDavid Woodhouse {
158953b381b3SDavid Woodhouse 	int ret;
159053b381b3SDavid Woodhouse 
159153b381b3SDavid Woodhouse 	ret = lock_stripe_add(rbio);
159253b381b3SDavid Woodhouse 	if (ret == 0)
159353b381b3SDavid Woodhouse 		async_rmw_stripe(rbio);
159453b381b3SDavid Woodhouse 	return 0;
159553b381b3SDavid Woodhouse }
159653b381b3SDavid Woodhouse 
159753b381b3SDavid Woodhouse /*
159853b381b3SDavid Woodhouse  * sometimes while we're reading from the drive to
159953b381b3SDavid Woodhouse  * recalculate parity, enough new bios come in to create
160053b381b3SDavid Woodhouse  * a full stripe.  So we do a check here to see if we can
160153b381b3SDavid Woodhouse  * go directly to finish_rmw.
160253b381b3SDavid Woodhouse  */
160353b381b3SDavid Woodhouse static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
160453b381b3SDavid Woodhouse {
160553b381b3SDavid Woodhouse 	/* head off into rmw land if we don't have a full stripe */
160653b381b3SDavid Woodhouse 	if (!rbio_is_full(rbio))
160753b381b3SDavid Woodhouse 		return partial_stripe_write(rbio);
160853b381b3SDavid Woodhouse 	return full_stripe_write(rbio);
160953b381b3SDavid Woodhouse }
161053b381b3SDavid Woodhouse 
161153b381b3SDavid Woodhouse /*
16126ac0f488SChris Mason  * We use plugging callbacks to collect full stripes.
16136ac0f488SChris Mason  * Any time we get a partial stripe write while plugged
16146ac0f488SChris Mason  * we collect it into a list.  When the unplug comes down,
16156ac0f488SChris Mason  * we sort the list by logical block number and merge
16166ac0f488SChris Mason  * everything we can into the same rbios
16176ac0f488SChris Mason  */
16186ac0f488SChris Mason struct btrfs_plug_cb {
16196ac0f488SChris Mason 	struct blk_plug_cb cb;
16206ac0f488SChris Mason 	struct btrfs_fs_info *info;
16216ac0f488SChris Mason 	struct list_head rbio_list;
16226ac0f488SChris Mason 	struct btrfs_work work;
16236ac0f488SChris Mason };
16246ac0f488SChris Mason 
16256ac0f488SChris Mason /*
16266ac0f488SChris Mason  * rbios on the plug list are sorted for easier merging.
16276ac0f488SChris Mason  */
16286ac0f488SChris Mason static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
16296ac0f488SChris Mason {
16306ac0f488SChris Mason 	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
16316ac0f488SChris Mason 						 plug_list);
16326ac0f488SChris Mason 	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
16336ac0f488SChris Mason 						 plug_list);
16344f024f37SKent Overstreet 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
16354f024f37SKent Overstreet 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
16366ac0f488SChris Mason 
16376ac0f488SChris Mason 	if (a_sector < b_sector)
16386ac0f488SChris Mason 		return -1;
16396ac0f488SChris Mason 	if (a_sector > b_sector)
16406ac0f488SChris Mason 		return 1;
16416ac0f488SChris Mason 	return 0;
16426ac0f488SChris Mason }
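
/*
 * For example, rbios whose first bios start at sectors 4096, 1024 and
 * 2048 come out of list_sort() ordered 1024, 2048, 4096, so merge
 * candidates end up adjacent when run_plug() walks the list.
 */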
16436ac0f488SChris Mason 
16446ac0f488SChris Mason static void run_plug(struct btrfs_plug_cb *plug)
16456ac0f488SChris Mason {
16466ac0f488SChris Mason 	struct btrfs_raid_bio *cur;
16476ac0f488SChris Mason 	struct btrfs_raid_bio *last = NULL;
16486ac0f488SChris Mason 
16496ac0f488SChris Mason 	/*
16506ac0f488SChris Mason 	 * sort our plug list then try to merge
16516ac0f488SChris Mason 	 * everything we can in hopes of creating full
16526ac0f488SChris Mason 	 * stripes.
16536ac0f488SChris Mason 	 */
16546ac0f488SChris Mason 	list_sort(NULL, &plug->rbio_list, plug_cmp);
16556ac0f488SChris Mason 	while (!list_empty(&plug->rbio_list)) {
16566ac0f488SChris Mason 		cur = list_entry(plug->rbio_list.next,
16576ac0f488SChris Mason 				 struct btrfs_raid_bio, plug_list);
16586ac0f488SChris Mason 		list_del_init(&cur->plug_list);
16596ac0f488SChris Mason 
16606ac0f488SChris Mason 		if (rbio_is_full(cur)) {
16616ac0f488SChris Mason 			/* we have a full stripe, send it down */
16626ac0f488SChris Mason 			full_stripe_write(cur);
16636ac0f488SChris Mason 			continue;
16646ac0f488SChris Mason 		}
16656ac0f488SChris Mason 		if (last) {
16666ac0f488SChris Mason 			if (rbio_can_merge(last, cur)) {
16676ac0f488SChris Mason 				merge_rbio(last, cur);
16686ac0f488SChris Mason 				__free_raid_bio(cur);
16696ac0f488SChris Mason 				continue;
16716ac0f488SChris Mason 			}
16726ac0f488SChris Mason 			__raid56_parity_write(last);
16736ac0f488SChris Mason 		}
16746ac0f488SChris Mason 		last = cur;
16756ac0f488SChris Mason 	}
16766ac0f488SChris Mason 	if (last) {
16776ac0f488SChris Mason 		__raid56_parity_write(last);
16786ac0f488SChris Mason 	}
16796ac0f488SChris Mason 	kfree(plug);
16806ac0f488SChris Mason }
16816ac0f488SChris Mason 
16826ac0f488SChris Mason /*
16836ac0f488SChris Mason  * if the unplug comes from schedule, we have to push the
16846ac0f488SChris Mason  * work off to a helper thread
16856ac0f488SChris Mason  */
16866ac0f488SChris Mason static void unplug_work(struct btrfs_work *work)
16876ac0f488SChris Mason {
16886ac0f488SChris Mason 	struct btrfs_plug_cb *plug;
16896ac0f488SChris Mason 	plug = container_of(work, struct btrfs_plug_cb, work);
16906ac0f488SChris Mason 	run_plug(plug);
16916ac0f488SChris Mason }
16926ac0f488SChris Mason 
16936ac0f488SChris Mason static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
16946ac0f488SChris Mason {
16956ac0f488SChris Mason 	struct btrfs_plug_cb *plug;
16966ac0f488SChris Mason 	plug = container_of(cb, struct btrfs_plug_cb, cb);
16976ac0f488SChris Mason 
16986ac0f488SChris Mason 	if (from_schedule) {
16999e0af237SLiu Bo 		btrfs_init_work(&plug->work, btrfs_rmw_helper,
17009e0af237SLiu Bo 				unplug_work, NULL, NULL);
1701d05a33acSQu Wenruo 		btrfs_queue_work(plug->info->rmw_workers,
17026ac0f488SChris Mason 				 &plug->work);
17036ac0f488SChris Mason 		return;
17046ac0f488SChris Mason 	}
17056ac0f488SChris Mason 	run_plug(plug);
17066ac0f488SChris Mason }
17076ac0f488SChris Mason 
17086ac0f488SChris Mason /*
170953b381b3SDavid Woodhouse  * our main entry point for writes from the rest of the FS.
171053b381b3SDavid Woodhouse  */
171153b381b3SDavid Woodhouse int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
171253b381b3SDavid Woodhouse 			struct btrfs_bio *bbio, u64 *raid_map,
171353b381b3SDavid Woodhouse 			u64 stripe_len)
171453b381b3SDavid Woodhouse {
171553b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
17166ac0f488SChris Mason 	struct btrfs_plug_cb *plug = NULL;
17176ac0f488SChris Mason 	struct blk_plug_cb *cb;
171853b381b3SDavid Woodhouse 
171953b381b3SDavid Woodhouse 	rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
1720af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
1721af8e2d1dSMiao Xie 		__free_bbio_and_raid_map(bbio, raid_map, 1);
172253b381b3SDavid Woodhouse 		return PTR_ERR(rbio);
1723af8e2d1dSMiao Xie 	}
172453b381b3SDavid Woodhouse 	bio_list_add(&rbio->bio_list, bio);
17254f024f37SKent Overstreet 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
1726*1b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_WRITE;
17276ac0f488SChris Mason 
17286ac0f488SChris Mason 	/*
17296ac0f488SChris Mason 	 * don't plug on full rbios, just get them out the door
17306ac0f488SChris Mason 	 * as quickly as we can
17316ac0f488SChris Mason 	 */
17326ac0f488SChris Mason 	if (rbio_is_full(rbio))
17336ac0f488SChris Mason 		return full_stripe_write(rbio);
17346ac0f488SChris Mason 
17356ac0f488SChris Mason 	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
17366ac0f488SChris Mason 			       sizeof(*plug));
17376ac0f488SChris Mason 	if (cb) {
17386ac0f488SChris Mason 		plug = container_of(cb, struct btrfs_plug_cb, cb);
17396ac0f488SChris Mason 		if (!plug->info) {
17406ac0f488SChris Mason 			plug->info = root->fs_info;
17416ac0f488SChris Mason 			INIT_LIST_HEAD(&plug->rbio_list);
17426ac0f488SChris Mason 		}
17436ac0f488SChris Mason 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
17446ac0f488SChris Mason 	} else {
174553b381b3SDavid Woodhouse 		return __raid56_parity_write(rbio);
174653b381b3SDavid Woodhouse 	}
17476ac0f488SChris Mason 	return 0;
17486ac0f488SChris Mason }
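
/*
 * A minimal sketch of a hypothetical caller (not part of this file)
 * showing how the plugging above batches partial writes: bios submitted
 * between blk_start_plug() and blk_finish_plug() are queued on the
 * plug's callback list and handed to btrfs_raid_unplug() in one go,
 * where run_plug() sorts and merges them.
 */
#if 0
static void example_plugged_writes(struct btrfs_root *root,
				   struct bio **bios, int nr,
				   struct btrfs_bio **bbios,
				   u64 **raid_maps, u64 stripe_len)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		raid56_parity_write(root, bios[i], bbios[i],
				    raid_maps[i], stripe_len);
	blk_finish_plug(&plug);	/* fires btrfs_raid_unplug() */
}
#endif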
174953b381b3SDavid Woodhouse 
175053b381b3SDavid Woodhouse /*
175153b381b3SDavid Woodhouse  * all parity reconstruction happens here.  We've read in everything
175253b381b3SDavid Woodhouse  * we can find from the drives and this does the heavy lifting of
175353b381b3SDavid Woodhouse  * sorting the good from the bad.
175453b381b3SDavid Woodhouse  */
175553b381b3SDavid Woodhouse static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
175653b381b3SDavid Woodhouse {
175753b381b3SDavid Woodhouse 	int pagenr, stripe;
175853b381b3SDavid Woodhouse 	void **pointers;
175953b381b3SDavid Woodhouse 	int faila = -1, failb = -1;
1760ed6078f7SDavid Sterba 	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
176153b381b3SDavid Woodhouse 	struct page *page;
176253b381b3SDavid Woodhouse 	int err;
176353b381b3SDavid Woodhouse 	int i;
176453b381b3SDavid Woodhouse 
176553b381b3SDavid Woodhouse 	pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *),
176653b381b3SDavid Woodhouse 			   GFP_NOFS);
176753b381b3SDavid Woodhouse 	if (!pointers) {
176853b381b3SDavid Woodhouse 		err = -ENOMEM;
176953b381b3SDavid Woodhouse 		goto cleanup_io;
177053b381b3SDavid Woodhouse 	}
177153b381b3SDavid Woodhouse 
177253b381b3SDavid Woodhouse 	faila = rbio->faila;
177353b381b3SDavid Woodhouse 	failb = rbio->failb;
177453b381b3SDavid Woodhouse 
1775*1b94b556SMiao Xie 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
177653b381b3SDavid Woodhouse 		spin_lock_irq(&rbio->bio_list_lock);
177753b381b3SDavid Woodhouse 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
177853b381b3SDavid Woodhouse 		spin_unlock_irq(&rbio->bio_list_lock);
177953b381b3SDavid Woodhouse 	}
178053b381b3SDavid Woodhouse 
178153b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
178253b381b3SDavid Woodhouse 
178353b381b3SDavid Woodhouse 	for (pagenr = 0; pagenr < nr_pages; pagenr++) {
178453b381b3SDavid Woodhouse 		/* setup our array of pointers with pages
178553b381b3SDavid Woodhouse 		 * from each stripe
178653b381b3SDavid Woodhouse 		 */
178753b381b3SDavid Woodhouse 		for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
178853b381b3SDavid Woodhouse 			/*
178953b381b3SDavid Woodhouse 			 * if we're rebuilding a read, we have to use
179053b381b3SDavid Woodhouse 			 * pages from the bio list
179153b381b3SDavid Woodhouse 			 */
1792*1b94b556SMiao Xie 			if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
179353b381b3SDavid Woodhouse 			    (stripe == faila || stripe == failb)) {
179453b381b3SDavid Woodhouse 				page = page_in_rbio(rbio, stripe, pagenr, 0);
179553b381b3SDavid Woodhouse 			} else {
179653b381b3SDavid Woodhouse 				page = rbio_stripe_page(rbio, stripe, pagenr);
179753b381b3SDavid Woodhouse 			}
179853b381b3SDavid Woodhouse 			pointers[stripe] = kmap(page);
179953b381b3SDavid Woodhouse 		}
180053b381b3SDavid Woodhouse 
180153b381b3SDavid Woodhouse 		/* all raid6 handling here */
180253b381b3SDavid Woodhouse 		if (rbio->raid_map[rbio->bbio->num_stripes - 1] ==
180353b381b3SDavid Woodhouse 		    RAID6_Q_STRIPE) {
180453b381b3SDavid Woodhouse 
180553b381b3SDavid Woodhouse 			/*
180653b381b3SDavid Woodhouse 			 * single failure, rebuild from parity raid5
180753b381b3SDavid Woodhouse 			 * style
180853b381b3SDavid Woodhouse 			 */
180953b381b3SDavid Woodhouse 			if (failb < 0) {
181053b381b3SDavid Woodhouse 				if (faila == rbio->nr_data) {
181153b381b3SDavid Woodhouse 					/*
181253b381b3SDavid Woodhouse 					 * Just the P stripe has failed, without
181353b381b3SDavid Woodhouse 					 * a bad data or Q stripe.
181453b381b3SDavid Woodhouse 					 * TODO, we should redo the xor here.
181553b381b3SDavid Woodhouse 					 */
181653b381b3SDavid Woodhouse 					err = -EIO;
181753b381b3SDavid Woodhouse 					goto cleanup;
181853b381b3SDavid Woodhouse 				}
181953b381b3SDavid Woodhouse 				/*
182053b381b3SDavid Woodhouse 				 * a single failure in raid6 is rebuilt
182153b381b3SDavid Woodhouse 				 * in the pstripe code below
182253b381b3SDavid Woodhouse 				 */
182353b381b3SDavid Woodhouse 				goto pstripe;
182453b381b3SDavid Woodhouse 			}
182553b381b3SDavid Woodhouse 
182653b381b3SDavid Woodhouse 			/* make sure our ps and qs are in order */
182753b381b3SDavid Woodhouse 			if (faila > failb) {
182853b381b3SDavid Woodhouse 				int tmp = failb;
182953b381b3SDavid Woodhouse 				failb = faila;
183053b381b3SDavid Woodhouse 				faila = tmp;
183153b381b3SDavid Woodhouse 			}
183253b381b3SDavid Woodhouse 
183353b381b3SDavid Woodhouse 			/* if the Q stripe failed, do a pstripe reconstruction
183453b381b3SDavid Woodhouse 			 * from the xors.
183553b381b3SDavid Woodhouse 			 * If both the Q stripe and the P stripe failed, we're
183653b381b3SDavid Woodhouse 			 * here due to a crc mismatch and we can't give them the
183753b381b3SDavid Woodhouse 			 * data they want.
183853b381b3SDavid Woodhouse 			 */
183953b381b3SDavid Woodhouse 			if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
184053b381b3SDavid Woodhouse 				if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
184153b381b3SDavid Woodhouse 					err = -EIO;
184253b381b3SDavid Woodhouse 					goto cleanup;
184353b381b3SDavid Woodhouse 				}
184453b381b3SDavid Woodhouse 				/*
184553b381b3SDavid Woodhouse 				 * otherwise we have one bad data stripe and
184653b381b3SDavid Woodhouse 				 * a good P stripe.  raid5!
184753b381b3SDavid Woodhouse 				 */
184853b381b3SDavid Woodhouse 				goto pstripe;
184953b381b3SDavid Woodhouse 			}
185053b381b3SDavid Woodhouse 
185153b381b3SDavid Woodhouse 			if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
185253b381b3SDavid Woodhouse 				raid6_datap_recov(rbio->bbio->num_stripes,
185353b381b3SDavid Woodhouse 						  PAGE_SIZE, faila, pointers);
185453b381b3SDavid Woodhouse 			} else {
185553b381b3SDavid Woodhouse 				raid6_2data_recov(rbio->bbio->num_stripes,
185653b381b3SDavid Woodhouse 						  PAGE_SIZE, faila, failb,
185753b381b3SDavid Woodhouse 						  pointers);
185853b381b3SDavid Woodhouse 			}
185953b381b3SDavid Woodhouse 		} else {
186053b381b3SDavid Woodhouse 			void *p;
186153b381b3SDavid Woodhouse 
186253b381b3SDavid Woodhouse 			/* rebuild from P stripe here (raid5 or raid6) */
186353b381b3SDavid Woodhouse 			BUG_ON(failb != -1);
186453b381b3SDavid Woodhouse pstripe:
186553b381b3SDavid Woodhouse 			/* Copy parity block into failed block to start with */
186653b381b3SDavid Woodhouse 			memcpy(pointers[faila],
186753b381b3SDavid Woodhouse 			       pointers[rbio->nr_data],
186853b381b3SDavid Woodhouse 			       PAGE_CACHE_SIZE);
186953b381b3SDavid Woodhouse 
187053b381b3SDavid Woodhouse 			/* rearrange the pointer array */
187153b381b3SDavid Woodhouse 			p = pointers[faila];
187253b381b3SDavid Woodhouse 			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
187353b381b3SDavid Woodhouse 				pointers[stripe] = pointers[stripe + 1];
187453b381b3SDavid Woodhouse 			pointers[rbio->nr_data - 1] = p;
187553b381b3SDavid Woodhouse 
187653b381b3SDavid Woodhouse 			/* xor in the rest */
187753b381b3SDavid Woodhouse 			run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
187853b381b3SDavid Woodhouse 		}
187953b381b3SDavid Woodhouse 		/* if we're doing this rebuild as part of an rmw, go through
188053b381b3SDavid Woodhouse 		 * and set all of our private rbio pages in the
188153b381b3SDavid Woodhouse 		 * failed stripes as uptodate.  This way finish_rmw will
188253b381b3SDavid Woodhouse 		 * know they can be trusted.  If this was a read reconstruction,
188353b381b3SDavid Woodhouse 		 * other endio functions will fiddle the uptodate bits
188453b381b3SDavid Woodhouse 		 */
1885*1b94b556SMiao Xie 		if (rbio->operation == BTRFS_RBIO_WRITE) {
188653b381b3SDavid Woodhouse 			for (i = 0; i < nr_pages; i++) {
188753b381b3SDavid Woodhouse 				if (faila != -1) {
188853b381b3SDavid Woodhouse 					page = rbio_stripe_page(rbio, faila, i);
188953b381b3SDavid Woodhouse 					SetPageUptodate(page);
189053b381b3SDavid Woodhouse 				}
189153b381b3SDavid Woodhouse 				if (failb != -1) {
189253b381b3SDavid Woodhouse 					page = rbio_stripe_page(rbio, failb, i);
189353b381b3SDavid Woodhouse 					SetPageUptodate(page);
189453b381b3SDavid Woodhouse 				}
189553b381b3SDavid Woodhouse 			}
189653b381b3SDavid Woodhouse 		}
189753b381b3SDavid Woodhouse 		for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
189853b381b3SDavid Woodhouse 			/*
189953b381b3SDavid Woodhouse 			 * if we're rebuilding a read, we have to use
190053b381b3SDavid Woodhouse 			 * pages from the bio list
190153b381b3SDavid Woodhouse 			 */
1902*1b94b556SMiao Xie 			if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
190353b381b3SDavid Woodhouse 			    (stripe == faila || stripe == failb)) {
190453b381b3SDavid Woodhouse 				page = page_in_rbio(rbio, stripe, pagenr, 0);
190553b381b3SDavid Woodhouse 			} else {
190653b381b3SDavid Woodhouse 				page = rbio_stripe_page(rbio, stripe, pagenr);
190753b381b3SDavid Woodhouse 			}
190853b381b3SDavid Woodhouse 			kunmap(page);
190953b381b3SDavid Woodhouse 		}
191053b381b3SDavid Woodhouse 	}
191153b381b3SDavid Woodhouse 
191253b381b3SDavid Woodhouse 	err = 0;
191353b381b3SDavid Woodhouse cleanup:
191453b381b3SDavid Woodhouse 	kfree(pointers);
191553b381b3SDavid Woodhouse 
191653b381b3SDavid Woodhouse cleanup_io:
1917*1b94b556SMiao Xie 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1918af8e2d1dSMiao Xie 		if (err == 0 &&
1919af8e2d1dSMiao Xie 		    !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags))
19204ae10b3aSChris Mason 			cache_rbio_pages(rbio);
19214ae10b3aSChris Mason 		else
19224ae10b3aSChris Mason 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
19234ae10b3aSChris Mason 
192453b381b3SDavid Woodhouse 		rbio_orig_end_io(rbio, err, err == 0);
192553b381b3SDavid Woodhouse 	} else if (err == 0) {
192653b381b3SDavid Woodhouse 		rbio->faila = -1;
192753b381b3SDavid Woodhouse 		rbio->failb = -1;
192853b381b3SDavid Woodhouse 		finish_rmw(rbio);
192953b381b3SDavid Woodhouse 	} else {
193053b381b3SDavid Woodhouse 		rbio_orig_end_io(rbio, err, 0);
193153b381b3SDavid Woodhouse 	}
193253b381b3SDavid Woodhouse }
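
/*
 * Worked example of the pstripe path above for three data stripes with
 * faila == 0: the failed buffer is first seeded with a copy of P, the
 * pointer array is rotated to {D1, D2, copy-of-P}, and run_xor() folds
 * the first two buffers into the last, leaving D0 = P ^ D1 ^ D2, the
 * inverse of how finish_rmw() generated the parity in the first place.
 */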
193353b381b3SDavid Woodhouse 
193453b381b3SDavid Woodhouse /*
193553b381b3SDavid Woodhouse  * This is called only for stripes we've read from disk to
193653b381b3SDavid Woodhouse  * reconstruct the parity.
193753b381b3SDavid Woodhouse  */
193853b381b3SDavid Woodhouse static void raid_recover_end_io(struct bio *bio, int err)
193953b381b3SDavid Woodhouse {
194053b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio = bio->bi_private;
194153b381b3SDavid Woodhouse 
194253b381b3SDavid Woodhouse 	/*
194353b381b3SDavid Woodhouse 	 * we only read stripe pages off the disk; set them
194453b381b3SDavid Woodhouse 	 * up to date if there were no errors.
194553b381b3SDavid Woodhouse 	 */
194653b381b3SDavid Woodhouse 	if (err)
194753b381b3SDavid Woodhouse 		fail_bio_stripe(rbio, bio);
194853b381b3SDavid Woodhouse 	else
194953b381b3SDavid Woodhouse 		set_bio_pages_uptodate(bio);
195053b381b3SDavid Woodhouse 	bio_put(bio);
195153b381b3SDavid Woodhouse 
1952b89e1b01SMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
195353b381b3SDavid Woodhouse 		return;
195453b381b3SDavid Woodhouse 
1955b89e1b01SMiao Xie 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
195653b381b3SDavid Woodhouse 		rbio_orig_end_io(rbio, -EIO, 0);
195753b381b3SDavid Woodhouse 	else
195853b381b3SDavid Woodhouse 		__raid_recover_end_io(rbio);
195953b381b3SDavid Woodhouse }
196053b381b3SDavid Woodhouse 
196153b381b3SDavid Woodhouse /*
196253b381b3SDavid Woodhouse  * reads everything we need off the disk to reconstruct
196353b381b3SDavid Woodhouse  * the parity. endio handlers trigger final reconstruction
196453b381b3SDavid Woodhouse  * when the IO is done.
196553b381b3SDavid Woodhouse  *
196653b381b3SDavid Woodhouse  * This is used both for reads from the higher layers and for
196753b381b3SDavid Woodhouse  * parity construction required to finish a rmw cycle.
196853b381b3SDavid Woodhouse  */
196953b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
197053b381b3SDavid Woodhouse {
197153b381b3SDavid Woodhouse 	int bios_to_read = 0;
197253b381b3SDavid Woodhouse 	struct btrfs_bio *bbio = rbio->bbio;
197353b381b3SDavid Woodhouse 	struct bio_list bio_list;
197453b381b3SDavid Woodhouse 	int ret;
1975ed6078f7SDavid Sterba 	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
197653b381b3SDavid Woodhouse 	int pagenr;
197753b381b3SDavid Woodhouse 	int stripe;
197853b381b3SDavid Woodhouse 	struct bio *bio;
197953b381b3SDavid Woodhouse 
198053b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
198153b381b3SDavid Woodhouse 
198253b381b3SDavid Woodhouse 	ret = alloc_rbio_pages(rbio);
198353b381b3SDavid Woodhouse 	if (ret)
198453b381b3SDavid Woodhouse 		goto cleanup;
198553b381b3SDavid Woodhouse 
1986b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
198753b381b3SDavid Woodhouse 
198853b381b3SDavid Woodhouse 	/*
19894ae10b3aSChris Mason 	 * read everything that hasn't failed.  Thanks to the
19904ae10b3aSChris Mason 	 * stripe cache, it is possible that some or all of these
19914ae10b3aSChris Mason 	 * pages are going to be uptodate.
199253b381b3SDavid Woodhouse 	 */
199353b381b3SDavid Woodhouse 	for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
19945588383eSLiu Bo 		if (rbio->faila == stripe || rbio->failb == stripe) {
1995b89e1b01SMiao Xie 			atomic_inc(&rbio->error);
199653b381b3SDavid Woodhouse 			continue;
19975588383eSLiu Bo 		}
199853b381b3SDavid Woodhouse 
199953b381b3SDavid Woodhouse 		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
200053b381b3SDavid Woodhouse 			struct page *p;
200153b381b3SDavid Woodhouse 
200253b381b3SDavid Woodhouse 			/*
200353b381b3SDavid Woodhouse 			 * the rmw code may have already read this
200453b381b3SDavid Woodhouse 			 * page in
200553b381b3SDavid Woodhouse 			 */
200653b381b3SDavid Woodhouse 			p = rbio_stripe_page(rbio, stripe, pagenr);
200753b381b3SDavid Woodhouse 			if (PageUptodate(p))
200853b381b3SDavid Woodhouse 				continue;
200953b381b3SDavid Woodhouse 
201053b381b3SDavid Woodhouse 			ret = rbio_add_io_page(rbio, &bio_list,
201153b381b3SDavid Woodhouse 				       rbio_stripe_page(rbio, stripe, pagenr),
201253b381b3SDavid Woodhouse 				       stripe, pagenr, rbio->stripe_len);
201353b381b3SDavid Woodhouse 			if (ret < 0)
201453b381b3SDavid Woodhouse 				goto cleanup;
201553b381b3SDavid Woodhouse 		}
201653b381b3SDavid Woodhouse 	}
201753b381b3SDavid Woodhouse 
201853b381b3SDavid Woodhouse 	bios_to_read = bio_list_size(&bio_list);
201953b381b3SDavid Woodhouse 	if (!bios_to_read) {
202053b381b3SDavid Woodhouse 		/*
202153b381b3SDavid Woodhouse 		 * we might have no bios to read either because the pages
202253b381b3SDavid Woodhouse 		 * were already up to date or because the devices were
202353b381b3SDavid Woodhouse 		 * gone.
202453b381b3SDavid Woodhouse 		 */
2025b89e1b01SMiao Xie 		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
202653b381b3SDavid Woodhouse 			__raid_recover_end_io(rbio);
202753b381b3SDavid Woodhouse 			goto out;
202853b381b3SDavid Woodhouse 		} else {
202953b381b3SDavid Woodhouse 			goto cleanup;
203053b381b3SDavid Woodhouse 		}
203153b381b3SDavid Woodhouse 	}
203253b381b3SDavid Woodhouse 
203353b381b3SDavid Woodhouse 	/*
203453b381b3SDavid Woodhouse 	 * the bbio may be freed once we submit the last bio.  Make sure
203553b381b3SDavid Woodhouse 	 * not to touch it after that
203653b381b3SDavid Woodhouse 	 */
2037b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
203853b381b3SDavid Woodhouse 	while (1) {
203953b381b3SDavid Woodhouse 		bio = bio_list_pop(&bio_list);
204053b381b3SDavid Woodhouse 		if (!bio)
204153b381b3SDavid Woodhouse 			break;
204253b381b3SDavid Woodhouse 
204353b381b3SDavid Woodhouse 		bio->bi_private = rbio;
204453b381b3SDavid Woodhouse 		bio->bi_end_io = raid_recover_end_io;
204553b381b3SDavid Woodhouse 
204653b381b3SDavid Woodhouse 		btrfs_bio_wq_end_io(rbio->fs_info, bio,
204753b381b3SDavid Woodhouse 				    BTRFS_WQ_ENDIO_RAID56);
204853b381b3SDavid Woodhouse 
204953b381b3SDavid Woodhouse 		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
205053b381b3SDavid Woodhouse 		submit_bio(READ, bio);
205153b381b3SDavid Woodhouse 	}
205253b381b3SDavid Woodhouse out:
205353b381b3SDavid Woodhouse 	return 0;
205453b381b3SDavid Woodhouse 
205553b381b3SDavid Woodhouse cleanup:
2056*1b94b556SMiao Xie 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
205753b381b3SDavid Woodhouse 		rbio_orig_end_io(rbio, -EIO, 0);
205853b381b3SDavid Woodhouse 	return -EIO;
205953b381b3SDavid Woodhouse }
206053b381b3SDavid Woodhouse 
206153b381b3SDavid Woodhouse /*
206253b381b3SDavid Woodhouse  * the main entry point for reads from the higher layers.  This
206353b381b3SDavid Woodhouse  * is really only called when the normal read path had a failure,
206453b381b3SDavid Woodhouse  * so we assume the bio they send down corresponds to a failed part
206553b381b3SDavid Woodhouse  * of the drive.
206653b381b3SDavid Woodhouse  */
206753b381b3SDavid Woodhouse int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
206853b381b3SDavid Woodhouse 			  struct btrfs_bio *bbio, u64 *raid_map,
2069af8e2d1dSMiao Xie 			  u64 stripe_len, int mirror_num, int hold_bbio)
207053b381b3SDavid Woodhouse {
207153b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
207253b381b3SDavid Woodhouse 	int ret;
207353b381b3SDavid Woodhouse 
207453b381b3SDavid Woodhouse 	rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
2075af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
2076af8e2d1dSMiao Xie 		__free_bbio_and_raid_map(bbio, raid_map, !hold_bbio);
207753b381b3SDavid Woodhouse 		return PTR_ERR(rbio);
2078af8e2d1dSMiao Xie 	}
207953b381b3SDavid Woodhouse 
2080af8e2d1dSMiao Xie 	if (hold_bbio)
2081af8e2d1dSMiao Xie 		set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags);
2082*1b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
208353b381b3SDavid Woodhouse 	bio_list_add(&rbio->bio_list, bio);
20844f024f37SKent Overstreet 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
208553b381b3SDavid Woodhouse 
208653b381b3SDavid Woodhouse 	rbio->faila = find_logical_bio_stripe(rbio, bio);
208753b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
208853b381b3SDavid Woodhouse 		BUG();
2089af8e2d1dSMiao Xie 		__free_bbio_and_raid_map(bbio, raid_map, !hold_bbio);
209053b381b3SDavid Woodhouse 		kfree(rbio);
209153b381b3SDavid Woodhouse 		return -EIO;
209253b381b3SDavid Woodhouse 	}
209353b381b3SDavid Woodhouse 
209453b381b3SDavid Woodhouse 	/*
209553b381b3SDavid Woodhouse 	 * reconstruct from the q stripe if they are
209653b381b3SDavid Woodhouse 	 * asking for mirror 3
209753b381b3SDavid Woodhouse 	 */
209853b381b3SDavid Woodhouse 	if (mirror_num == 3)
209953b381b3SDavid Woodhouse 		rbio->failb = bbio->num_stripes - 2;
210053b381b3SDavid Woodhouse 
210153b381b3SDavid Woodhouse 	ret = lock_stripe_add(rbio);
210253b381b3SDavid Woodhouse 
210353b381b3SDavid Woodhouse 	/*
210453b381b3SDavid Woodhouse 	 * __raid56_parity_recover will end the bio with
210553b381b3SDavid Woodhouse 	 * any errors it hits.  We don't want to return
210653b381b3SDavid Woodhouse 	 * its error value up the stack because our caller
210753b381b3SDavid Woodhouse 	 * will end up calling bio_endio with any nonzero
210853b381b3SDavid Woodhouse 	 * return
210953b381b3SDavid Woodhouse 	 */
211053b381b3SDavid Woodhouse 	if (ret == 0)
211153b381b3SDavid Woodhouse 		__raid56_parity_recover(rbio);
211253b381b3SDavid Woodhouse 	/*
211353b381b3SDavid Woodhouse 	 * our rbio has been added to the list of
211453b381b3SDavid Woodhouse 	 * rbios that will be handled after the
211553b381b3SDavid Woodhouse 	 * current lock owner is done.
211653b381b3SDavid Woodhouse 	 */
211753b381b3SDavid Woodhouse 	return 0;
211953b381b3SDavid Woodhouse }
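
/*
 * Example of the mirror_num == 3 case above for a 4-stripe RAID6 map
 * (D0, D1, P, Q): faila marks the data stripe the bio mapped to, and
 * failb = num_stripes - 2 marks P as failed as well, which forces
 * __raid_recover_end_io() down the raid6_datap_recov() path so the
 * data is rebuilt from Q rather than from P.
 */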
212053b381b3SDavid Woodhouse 
212153b381b3SDavid Woodhouse static void rmw_work(struct btrfs_work *work)
212253b381b3SDavid Woodhouse {
212353b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
212453b381b3SDavid Woodhouse 
212553b381b3SDavid Woodhouse 	rbio = container_of(work, struct btrfs_raid_bio, work);
212653b381b3SDavid Woodhouse 	raid56_rmw_stripe(rbio);
212753b381b3SDavid Woodhouse }
212853b381b3SDavid Woodhouse 
212953b381b3SDavid Woodhouse static void read_rebuild_work(struct btrfs_work *work)
213053b381b3SDavid Woodhouse {
213153b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
213253b381b3SDavid Woodhouse 
213353b381b3SDavid Woodhouse 	rbio = container_of(work, struct btrfs_raid_bio, work);
213453b381b3SDavid Woodhouse 	__raid56_parity_recover(rbio);
213553b381b3SDavid Woodhouse }
2136