xref: /linux/fs/btrfs/raid56.c (revision c67c68eb57f1343dd7e315156ff0334ab72158fd)
1c1d7c514SDavid Sterba // SPDX-License-Identifier: GPL-2.0
253b381b3SDavid Woodhouse /*
353b381b3SDavid Woodhouse  * Copyright (C) 2012 Fusion-io  All rights reserved.
453b381b3SDavid Woodhouse  * Copyright (C) 2012 Intel Corp. All rights reserved.
553b381b3SDavid Woodhouse  */
6c1d7c514SDavid Sterba 
753b381b3SDavid Woodhouse #include <linux/sched.h>
853b381b3SDavid Woodhouse #include <linux/bio.h>
953b381b3SDavid Woodhouse #include <linux/slab.h>
1053b381b3SDavid Woodhouse #include <linux/blkdev.h>
1153b381b3SDavid Woodhouse #include <linux/raid/pq.h>
1253b381b3SDavid Woodhouse #include <linux/hash.h>
1353b381b3SDavid Woodhouse #include <linux/list_sort.h>
1453b381b3SDavid Woodhouse #include <linux/raid/xor.h>
15818e010bSDavid Sterba #include <linux/mm.h>
16cea62800SJohannes Thumshirn #include "misc.h"
1753b381b3SDavid Woodhouse #include "ctree.h"
1853b381b3SDavid Woodhouse #include "disk-io.h"
1953b381b3SDavid Woodhouse #include "volumes.h"
2053b381b3SDavid Woodhouse #include "raid56.h"
2153b381b3SDavid Woodhouse #include "async-thread.h"
2253b381b3SDavid Woodhouse 
2353b381b3SDavid Woodhouse /* set when additional merges to this rbio are not allowed */
2453b381b3SDavid Woodhouse #define RBIO_RMW_LOCKED_BIT	1
2553b381b3SDavid Woodhouse 
264ae10b3aSChris Mason /*
274ae10b3aSChris Mason  * set when this rbio is sitting in the hash, but it is just a cache
284ae10b3aSChris Mason  * of past RMW
294ae10b3aSChris Mason  */
304ae10b3aSChris Mason #define RBIO_CACHE_BIT		2
314ae10b3aSChris Mason 
324ae10b3aSChris Mason /*
334ae10b3aSChris Mason  * set when it is safe to trust the stripe_pages for caching
344ae10b3aSChris Mason  */
354ae10b3aSChris Mason #define RBIO_CACHE_READY_BIT	3
364ae10b3aSChris Mason 
374ae10b3aSChris Mason #define RBIO_CACHE_SIZE 1024
384ae10b3aSChris Mason 
398a953348SDavid Sterba #define BTRFS_STRIPE_HASH_TABLE_BITS				11
408a953348SDavid Sterba 
418a953348SDavid Sterba /* Used by the raid56 code to lock stripes for read/modify/write */
428a953348SDavid Sterba struct btrfs_stripe_hash {
438a953348SDavid Sterba 	struct list_head hash_list;
448a953348SDavid Sterba 	spinlock_t lock;
458a953348SDavid Sterba };
468a953348SDavid Sterba 
478a953348SDavid Sterba /* Used by the raid56 code to lock stripes for read/modify/write */
488a953348SDavid Sterba struct btrfs_stripe_hash_table {
498a953348SDavid Sterba 	struct list_head stripe_cache;
508a953348SDavid Sterba 	spinlock_t cache_lock;
518a953348SDavid Sterba 	int cache_size;
528a953348SDavid Sterba 	struct btrfs_stripe_hash table[];
538a953348SDavid Sterba };
548a953348SDavid Sterba 
55eb357060SQu Wenruo /*
56eb357060SQu Wenruo  * A bvec-like structure to represent a sector inside a page.
57eb357060SQu Wenruo  *
58eb357060SQu Wenruo  * Unlike bvec we don't need bv_len, as it's fixed to sectorsize.
59eb357060SQu Wenruo  */
60eb357060SQu Wenruo struct sector_ptr {
61eb357060SQu Wenruo 	struct page *page;
6200425dd9SQu Wenruo 	unsigned int pgoff:24;
6300425dd9SQu Wenruo 	unsigned int uptodate:8;
64eb357060SQu Wenruo };
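
/*
 * Illustrative example (not part of the original source): how the
 * bitfields above pack in practice.  With 4K pages and a 4K sectorsize,
 * pgoff is always 0 and one page backs exactly one sector.  On a 64K-page
 * system with a 4K sectorsize, one page backs 16 sectors, so pgoff takes
 * the values 0x0000, 0x1000, ... 0xf000, all well within the 24-bit field.
 */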
65eb357060SQu Wenruo 
661b94b556SMiao Xie enum btrfs_rbio_ops {
67b4ee1782SOmar Sandoval 	BTRFS_RBIO_WRITE,
68b4ee1782SOmar Sandoval 	BTRFS_RBIO_READ_REBUILD,
69b4ee1782SOmar Sandoval 	BTRFS_RBIO_PARITY_SCRUB,
70b4ee1782SOmar Sandoval 	BTRFS_RBIO_REBUILD_MISSING,
711b94b556SMiao Xie };
721b94b556SMiao Xie 
7353b381b3SDavid Woodhouse struct btrfs_raid_bio {
744c664611SQu Wenruo 	struct btrfs_io_context *bioc;
7553b381b3SDavid Woodhouse 
7653b381b3SDavid Woodhouse 	/* while we're doing rmw on a stripe
7753b381b3SDavid Woodhouse 	 * we put it into a hash table so we can
7853b381b3SDavid Woodhouse 	 * lock the stripe and merge more rbios
7953b381b3SDavid Woodhouse 	 * into it.
8053b381b3SDavid Woodhouse 	 */
8153b381b3SDavid Woodhouse 	struct list_head hash_list;
8253b381b3SDavid Woodhouse 
8353b381b3SDavid Woodhouse 	/*
844ae10b3aSChris Mason 	 * LRU list for the stripe cache
854ae10b3aSChris Mason 	 */
864ae10b3aSChris Mason 	struct list_head stripe_cache;
874ae10b3aSChris Mason 
884ae10b3aSChris Mason 	/*
8953b381b3SDavid Woodhouse 	 * for scheduling work in the helper threads
9053b381b3SDavid Woodhouse 	 */
91385de0efSChristoph Hellwig 	struct work_struct work;
9253b381b3SDavid Woodhouse 
9353b381b3SDavid Woodhouse 	/*
9453b381b3SDavid Woodhouse 	 * bio list and bio_list_lock are used
9553b381b3SDavid Woodhouse 	 * to add more bios into the stripe
9653b381b3SDavid Woodhouse 	 * in hopes of avoiding the full rmw
9753b381b3SDavid Woodhouse 	 */
9853b381b3SDavid Woodhouse 	struct bio_list bio_list;
9953b381b3SDavid Woodhouse 	spinlock_t bio_list_lock;
10053b381b3SDavid Woodhouse 
1016ac0f488SChris Mason 	/* also protected by the bio_list_lock, the
1026ac0f488SChris Mason 	 * plug list is used by the plugging code
1036ac0f488SChris Mason 	 * to collect partial bios while plugged.  The
1046ac0f488SChris Mason 	 * stripe locking code also uses it to hand off
10553b381b3SDavid Woodhouse 	 * the stripe lock to the next pending IO
10653b381b3SDavid Woodhouse 	 */
10753b381b3SDavid Woodhouse 	struct list_head plug_list;
10853b381b3SDavid Woodhouse 
10953b381b3SDavid Woodhouse 	/*
11053b381b3SDavid Woodhouse 	 * flags that tell us if it is safe to
11153b381b3SDavid Woodhouse 	 * merge with this bio
11253b381b3SDavid Woodhouse 	 */
11353b381b3SDavid Woodhouse 	unsigned long flags;
11453b381b3SDavid Woodhouse 
11553b381b3SDavid Woodhouse 	/*
11653b381b3SDavid Woodhouse 	 * set if we're doing a parity rebuild
11753b381b3SDavid Woodhouse 	 * for a read from higher up, which is handled
11853b381b3SDavid Woodhouse 	 * differently from a parity rebuild as part of
11953b381b3SDavid Woodhouse 	 * rmw
12053b381b3SDavid Woodhouse 	 */
1211b94b556SMiao Xie 	enum btrfs_rbio_ops operation;
12253b381b3SDavid Woodhouse 
12329b06838SQu Wenruo 	/* Size of each individual stripe on disk */
12429b06838SQu Wenruo 	u32 stripe_len;
12553b381b3SDavid Woodhouse 
12629b06838SQu Wenruo 	/* How many pages there are for the full stripe including P/Q */
12729b06838SQu Wenruo 	u16 nr_pages;
12853b381b3SDavid Woodhouse 
12994efbe19SQu Wenruo 	/* How many sectors there are for the full stripe including P/Q */
13094efbe19SQu Wenruo 	u16 nr_sectors;
13194efbe19SQu Wenruo 
13229b06838SQu Wenruo 	/* Number of data stripes (no p/q) */
13329b06838SQu Wenruo 	u8 nr_data;
13429b06838SQu Wenruo 
135143823cfSDavid Sterba 	/* Number of all stripes (including P/Q) */
13629b06838SQu Wenruo 	u8 real_stripes;
13729b06838SQu Wenruo 
13829b06838SQu Wenruo 	/* How many pages there are for each stripe */
13929b06838SQu Wenruo 	u8 stripe_npages;
14029b06838SQu Wenruo 
14194efbe19SQu Wenruo 	/* How many sectors there are for each stripe */
14294efbe19SQu Wenruo 	u8 stripe_nsectors;
14394efbe19SQu Wenruo 
14429b06838SQu Wenruo 	/* First bad stripe, -1 means no corruption */
14529b06838SQu Wenruo 	s8 faila;
14629b06838SQu Wenruo 
14729b06838SQu Wenruo 	/* Second bad stripe (for RAID6 use) */
14829b06838SQu Wenruo 	s8 failb;
14929b06838SQu Wenruo 
15029b06838SQu Wenruo 	/* Stripe number that we're scrubbing  */
15129b06838SQu Wenruo 	u8 scrubp;
15253b381b3SDavid Woodhouse 
15353b381b3SDavid Woodhouse 	/*
15453b381b3SDavid Woodhouse 	 * size of all the bios in the bio_list.  This
15553b381b3SDavid Woodhouse 	 * helps us decide if the rbio maps to a full
15653b381b3SDavid Woodhouse 	 * stripe or not
15753b381b3SDavid Woodhouse 	 */
15853b381b3SDavid Woodhouse 	int bio_list_bytes;
15953b381b3SDavid Woodhouse 
1604245215dSMiao Xie 	int generic_bio_cnt;
1614245215dSMiao Xie 
162dec95574SElena Reshetova 	refcount_t refs;
16353b381b3SDavid Woodhouse 
164b89e1b01SMiao Xie 	atomic_t stripes_pending;
165b89e1b01SMiao Xie 
166b89e1b01SMiao Xie 	atomic_t error;
167*c67c68ebSQu Wenruo 
168*c67c68ebSQu Wenruo 	/* Bitmap to record which horizontal stripe has data */
169*c67c68ebSQu Wenruo 	unsigned long dbitmap;
170*c67c68ebSQu Wenruo 
171*c67c68ebSQu Wenruo 	/* Allocated with stripe_nsectors-many bits for finish_*() calls */
172*c67c68ebSQu Wenruo 	unsigned long finish_pbitmap;
173*c67c68ebSQu Wenruo 
17453b381b3SDavid Woodhouse 	/*
17553b381b3SDavid Woodhouse 	 * These are arrays of pointers.  We allocate the rbio
17653b381b3SDavid Woodhouse 	 * big enough to hold them all and set up their
17753b381b3SDavid Woodhouse 	 * locations when the rbio is allocated
17853b381b3SDavid Woodhouse 	 */
17953b381b3SDavid Woodhouse 
18053b381b3SDavid Woodhouse 	/* pointers to pages that we allocated for
18153b381b3SDavid Woodhouse 	 * reading/writing stripes directly from the disk (including P/Q)
18253b381b3SDavid Woodhouse 	 */
18353b381b3SDavid Woodhouse 	struct page **stripe_pages;
18453b381b3SDavid Woodhouse 
18500425dd9SQu Wenruo 	/* Pointers to the sectors in the bio_list, for faster lookup */
18600425dd9SQu Wenruo 	struct sector_ptr *bio_sectors;
18700425dd9SQu Wenruo 
18853b381b3SDavid Woodhouse 	/*
189eb357060SQu Wenruo 	 * For subpage support, we need to map each sector to the above
190eb357060SQu Wenruo 	 * stripe_pages.
1915a6ac9eaSMiao Xie 	 */
192eb357060SQu Wenruo 	struct sector_ptr *stripe_sectors;
193eb357060SQu Wenruo 
1941389053eSKees Cook 	/* allocated with real_stripes-many pointers for finish_*() calls */
1951389053eSKees Cook 	void **finish_pointers;
19653b381b3SDavid Woodhouse };
19753b381b3SDavid Woodhouse 
19853b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
19953b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
200385de0efSChristoph Hellwig static void rmw_work(struct work_struct *work);
201385de0efSChristoph Hellwig static void read_rebuild_work(struct work_struct *work);
20253b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
20353b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
20453b381b3SDavid Woodhouse static void __free_raid_bio(struct btrfs_raid_bio *rbio);
20553b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio);
20653b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
20753b381b3SDavid Woodhouse 
2085a6ac9eaSMiao Xie static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2095a6ac9eaSMiao Xie 					 int need_check);
210385de0efSChristoph Hellwig static void scrub_parity_work(struct work_struct *work);
2115a6ac9eaSMiao Xie 
212385de0efSChristoph Hellwig static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
213ac638859SDavid Sterba {
214385de0efSChristoph Hellwig 	INIT_WORK(&rbio->work, work_func);
215385de0efSChristoph Hellwig 	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
216ac638859SDavid Sterba }
217ac638859SDavid Sterba 
21853b381b3SDavid Woodhouse /*
21953b381b3SDavid Woodhouse  * the stripe hash table is used for locking, and to collect
22053b381b3SDavid Woodhouse  * bios in hopes of making a full stripe
22153b381b3SDavid Woodhouse  */
22253b381b3SDavid Woodhouse int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
22353b381b3SDavid Woodhouse {
22453b381b3SDavid Woodhouse 	struct btrfs_stripe_hash_table *table;
22553b381b3SDavid Woodhouse 	struct btrfs_stripe_hash_table *x;
22653b381b3SDavid Woodhouse 	struct btrfs_stripe_hash *cur;
22753b381b3SDavid Woodhouse 	struct btrfs_stripe_hash *h;
22853b381b3SDavid Woodhouse 	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
22953b381b3SDavid Woodhouse 	int i;
23053b381b3SDavid Woodhouse 
23153b381b3SDavid Woodhouse 	if (info->stripe_hash_table)
23253b381b3SDavid Woodhouse 		return 0;
23353b381b3SDavid Woodhouse 
23483c8266aSDavid Sterba 	/*
23583c8266aSDavid Sterba 	 * The table is large, starting with order 4 and can go as high as
23683c8266aSDavid Sterba 	 * order 7 in case lock debugging is turned on.
23783c8266aSDavid Sterba 	 *
23883c8266aSDavid Sterba 	 * Try harder to allocate and fallback to vmalloc to lower the chance
23983c8266aSDavid Sterba 	 * of a failing mount.
24083c8266aSDavid Sterba 	 */
241ee787f95SDavid Sterba 	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
24253b381b3SDavid Woodhouse 	if (!table)
24353b381b3SDavid Woodhouse 		return -ENOMEM;
24453b381b3SDavid Woodhouse 
2454ae10b3aSChris Mason 	spin_lock_init(&table->cache_lock);
2464ae10b3aSChris Mason 	INIT_LIST_HEAD(&table->stripe_cache);
2474ae10b3aSChris Mason 
24853b381b3SDavid Woodhouse 	h = table->table;
24953b381b3SDavid Woodhouse 
25053b381b3SDavid Woodhouse 	for (i = 0; i < num_entries; i++) {
25153b381b3SDavid Woodhouse 		cur = h + i;
25253b381b3SDavid Woodhouse 		INIT_LIST_HEAD(&cur->hash_list);
25353b381b3SDavid Woodhouse 		spin_lock_init(&cur->lock);
25453b381b3SDavid Woodhouse 	}
25553b381b3SDavid Woodhouse 
25653b381b3SDavid Woodhouse 	x = cmpxchg(&info->stripe_hash_table, NULL, table);
257f749303bSWang Shilong 	kvfree(x);
25853b381b3SDavid Woodhouse 	return 0;
25953b381b3SDavid Woodhouse }
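
/*
 * Rough sizing sketch for the allocation above (illustrative, assuming
 * 4K pages and no lock debugging): 1 << BTRFS_STRIPE_HASH_TABLE_BITS =
 * 2048 buckets at a few tens of bytes each puts the table around 64K,
 * i.e. the order-4 allocation mentioned in the comment.  Lock debugging
 * fattens each spinlock, which is how the table can reach order 7.
 */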
26053b381b3SDavid Woodhouse 
26153b381b3SDavid Woodhouse /*
2624ae10b3aSChris Mason  * caching an rbio means copying everything from the
263ac26df8bSQu Wenruo  * bio_sectors array into the stripe_pages array.  We
2644ae10b3aSChris Mason  * use the uptodate bit in the stripe_sectors array
2654ae10b3aSChris Mason  * to indicate which sectors have valid data
2664ae10b3aSChris Mason  *
2674ae10b3aSChris Mason  * once the caching is done, we set the cache ready
2684ae10b3aSChris Mason  * bit.
2694ae10b3aSChris Mason  */
2704ae10b3aSChris Mason static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
2714ae10b3aSChris Mason {
2724ae10b3aSChris Mason 	int i;
2734ae10b3aSChris Mason 	int ret;
2744ae10b3aSChris Mason 
2754ae10b3aSChris Mason 	ret = alloc_rbio_pages(rbio);
2764ae10b3aSChris Mason 	if (ret)
2774ae10b3aSChris Mason 		return;
2784ae10b3aSChris Mason 
27900425dd9SQu Wenruo 	for (i = 0; i < rbio->nr_sectors; i++) {
28000425dd9SQu Wenruo 		/* Some range not covered by bio (partial write), skip it */
28100425dd9SQu Wenruo 		if (!rbio->bio_sectors[i].page)
28200425dd9SQu Wenruo 			continue;
28300425dd9SQu Wenruo 
28400425dd9SQu Wenruo 		ASSERT(rbio->stripe_sectors[i].page);
28500425dd9SQu Wenruo 		memcpy_page(rbio->stripe_sectors[i].page,
28600425dd9SQu Wenruo 			    rbio->stripe_sectors[i].pgoff,
28700425dd9SQu Wenruo 			    rbio->bio_sectors[i].page,
28800425dd9SQu Wenruo 			    rbio->bio_sectors[i].pgoff,
28900425dd9SQu Wenruo 			    rbio->bioc->fs_info->sectorsize);
29000425dd9SQu Wenruo 		rbio->stripe_sectors[i].uptodate = 1;
29100425dd9SQu Wenruo 	}
2924ae10b3aSChris Mason 	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2934ae10b3aSChris Mason }
2944ae10b3aSChris Mason 
2954ae10b3aSChris Mason /*
29653b381b3SDavid Woodhouse  * we hash on the first logical address of the stripe
29753b381b3SDavid Woodhouse  */
29853b381b3SDavid Woodhouse static int rbio_bucket(struct btrfs_raid_bio *rbio)
29953b381b3SDavid Woodhouse {
3004c664611SQu Wenruo 	u64 num = rbio->bioc->raid_map[0];
30153b381b3SDavid Woodhouse 
30253b381b3SDavid Woodhouse 	/*
30353b381b3SDavid Woodhouse 	 * we shift down quite a bit.  We're using byte
30453b381b3SDavid Woodhouse 	 * addressing, and most of the lower bits are zeros.
30553b381b3SDavid Woodhouse 	 * This tends to upset hash_64, and it consistently
30653b381b3SDavid Woodhouse 	 * returns just one or two different values.
30753b381b3SDavid Woodhouse 	 *
30853b381b3SDavid Woodhouse 	 * shifting off the lower bits fixes things.
30953b381b3SDavid Woodhouse 	 */
31053b381b3SDavid Woodhouse 	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
31153b381b3SDavid Woodhouse }
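
/*
 * Worked example (illustrative numbers): two full stripes at logical
 * addresses 0x10000000 and 0x10010000 differ only in low bits.  After
 * "num >> 16" they become 0x1000 and 0x1001, which hash_64() spreads
 * across the 1 << BTRFS_STRIPE_HASH_TABLE_BITS = 2048 buckets instead
 * of collapsing into the handful of values described above.
 */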
31253b381b3SDavid Woodhouse 
313d4e28d9bSQu Wenruo static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
314d4e28d9bSQu Wenruo 				       unsigned int page_nr)
315d4e28d9bSQu Wenruo {
316d4e28d9bSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
317d4e28d9bSQu Wenruo 	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
318d4e28d9bSQu Wenruo 	int i;
319d4e28d9bSQu Wenruo 
320d4e28d9bSQu Wenruo 	ASSERT(page_nr < rbio->nr_pages);
321d4e28d9bSQu Wenruo 
322d4e28d9bSQu Wenruo 	for (i = sectors_per_page * page_nr;
323d4e28d9bSQu Wenruo 	     i < sectors_per_page * page_nr + sectors_per_page;
324d4e28d9bSQu Wenruo 	     i++) {
325d4e28d9bSQu Wenruo 		if (!rbio->stripe_sectors[i].uptodate)
326d4e28d9bSQu Wenruo 			return false;
327d4e28d9bSQu Wenruo 	}
328d4e28d9bSQu Wenruo 	return true;
329d4e28d9bSQu Wenruo }
330d4e28d9bSQu Wenruo 
33153b381b3SDavid Woodhouse /*
332eb357060SQu Wenruo  * Update the stripe_sectors[] array to use the correct page and pgoff
333eb357060SQu Wenruo  *
334eb357060SQu Wenruo  * Should be called every time any page pointer in stripe_pages[] is modified.
335eb357060SQu Wenruo  */
336eb357060SQu Wenruo static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
337eb357060SQu Wenruo {
338eb357060SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
339eb357060SQu Wenruo 	u32 offset;
340eb357060SQu Wenruo 	int i;
341eb357060SQu Wenruo 
342eb357060SQu Wenruo 	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
343eb357060SQu Wenruo 		int page_index = offset >> PAGE_SHIFT;
344eb357060SQu Wenruo 
345eb357060SQu Wenruo 		ASSERT(page_index < rbio->nr_pages);
346eb357060SQu Wenruo 		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
347eb357060SQu Wenruo 		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
348eb357060SQu Wenruo 	}
349eb357060SQu Wenruo }
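
/*
 * Example mapping (illustrative, assuming 64K pages and a 4K sectorsize):
 * sector 0 maps to page 0 / pgoff 0x0000, sector 1 to page 0 / pgoff
 * 0x1000, and sector 16 is the first to land in page 1.  With 4K pages
 * every sector gets its own page at pgoff 0.
 */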
350eb357060SQu Wenruo 
351eb357060SQu Wenruo /*
352d4e28d9bSQu Wenruo  * Stealing an rbio means taking all the uptodate pages from the stripe array
353d4e28d9bSQu Wenruo  * in the source rbio and putting them into the destination rbio.
354d4e28d9bSQu Wenruo  *
355d4e28d9bSQu Wenruo  * This will also update the involved stripe_sectors[] which are referring to
356d4e28d9bSQu Wenruo  * the old pages.
3574ae10b3aSChris Mason  */
3584ae10b3aSChris Mason static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
3594ae10b3aSChris Mason {
3604ae10b3aSChris Mason 	int i;
3614ae10b3aSChris Mason 	struct page *s;
3624ae10b3aSChris Mason 	struct page *d;
3634ae10b3aSChris Mason 
3644ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
3654ae10b3aSChris Mason 		return;
3664ae10b3aSChris Mason 
3674ae10b3aSChris Mason 	for (i = 0; i < dest->nr_pages; i++) {
3684ae10b3aSChris Mason 		s = src->stripe_pages[i];
369d4e28d9bSQu Wenruo 		if (!s || !full_page_sectors_uptodate(src, i))
3704ae10b3aSChris Mason 			continue;
3714ae10b3aSChris Mason 
3724ae10b3aSChris Mason 		d = dest->stripe_pages[i];
3734ae10b3aSChris Mason 		if (d)
3744ae10b3aSChris Mason 			__free_page(d);
3754ae10b3aSChris Mason 
3764ae10b3aSChris Mason 		dest->stripe_pages[i] = s;
3774ae10b3aSChris Mason 		src->stripe_pages[i] = NULL;
3784ae10b3aSChris Mason 	}
379eb357060SQu Wenruo 	index_stripe_sectors(dest);
380eb357060SQu Wenruo 	index_stripe_sectors(src);
3814ae10b3aSChris Mason }
3824ae10b3aSChris Mason 
3834ae10b3aSChris Mason /*
38453b381b3SDavid Woodhouse  * merging means we take the bio_list from the victim and
38553b381b3SDavid Woodhouse  * splice it into the destination.  The victim should
38653b381b3SDavid Woodhouse  * be discarded afterwards.
38753b381b3SDavid Woodhouse  *
38853b381b3SDavid Woodhouse  * must be called with dest->rbio_list_lock held
38953b381b3SDavid Woodhouse  */
39053b381b3SDavid Woodhouse static void merge_rbio(struct btrfs_raid_bio *dest,
39153b381b3SDavid Woodhouse 		       struct btrfs_raid_bio *victim)
39253b381b3SDavid Woodhouse {
39353b381b3SDavid Woodhouse 	bio_list_merge(&dest->bio_list, &victim->bio_list);
39453b381b3SDavid Woodhouse 	dest->bio_list_bytes += victim->bio_list_bytes;
3954245215dSMiao Xie 	dest->generic_bio_cnt += victim->generic_bio_cnt;
39653b381b3SDavid Woodhouse 	bio_list_init(&victim->bio_list);
39753b381b3SDavid Woodhouse }
39853b381b3SDavid Woodhouse 
39953b381b3SDavid Woodhouse /*
4004ae10b3aSChris Mason  * used to prune items that are in the cache.  The caller
4014ae10b3aSChris Mason  * must hold the table's cache_lock.
4024ae10b3aSChris Mason  */
4034ae10b3aSChris Mason static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
4044ae10b3aSChris Mason {
4054ae10b3aSChris Mason 	int bucket = rbio_bucket(rbio);
4064ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
4074ae10b3aSChris Mason 	struct btrfs_stripe_hash *h;
4084ae10b3aSChris Mason 	int freeit = 0;
4094ae10b3aSChris Mason 
4104ae10b3aSChris Mason 	/*
4114ae10b3aSChris Mason 	 * check the bit again under the hash table lock.
4124ae10b3aSChris Mason 	 */
4134ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
4144ae10b3aSChris Mason 		return;
4154ae10b3aSChris Mason 
4166a258d72SQu Wenruo 	table = rbio->bioc->fs_info->stripe_hash_table;
4174ae10b3aSChris Mason 	h = table->table + bucket;
4184ae10b3aSChris Mason 
4194ae10b3aSChris Mason 	/* hold the lock for the bucket because we may be
4204ae10b3aSChris Mason 	 * removing it from the hash table
4214ae10b3aSChris Mason 	 */
4224ae10b3aSChris Mason 	spin_lock(&h->lock);
4234ae10b3aSChris Mason 
4244ae10b3aSChris Mason 	/*
4254ae10b3aSChris Mason 	 * hold the lock for the bio list because we need
4264ae10b3aSChris Mason 	 * to make sure the bio list is empty
4274ae10b3aSChris Mason 	 */
4284ae10b3aSChris Mason 	spin_lock(&rbio->bio_list_lock);
4294ae10b3aSChris Mason 
4304ae10b3aSChris Mason 	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
4314ae10b3aSChris Mason 		list_del_init(&rbio->stripe_cache);
4324ae10b3aSChris Mason 		table->cache_size -= 1;
4334ae10b3aSChris Mason 		freeit = 1;
4344ae10b3aSChris Mason 
4354ae10b3aSChris Mason 		/* if the bio list isn't empty, this rbio is
4364ae10b3aSChris Mason 		 * still involved in an IO.  We take it out
4374ae10b3aSChris Mason 		 * of the cache list, and drop the ref that
4384ae10b3aSChris Mason 		 * was held for the list.
4394ae10b3aSChris Mason 		 *
4404ae10b3aSChris Mason 		 * If the bio_list was empty, we also remove
4414ae10b3aSChris Mason 		 * the rbio from the hash_table, and drop
4424ae10b3aSChris Mason 		 * the corresponding ref
4434ae10b3aSChris Mason 		 */
4444ae10b3aSChris Mason 		if (bio_list_empty(&rbio->bio_list)) {
4454ae10b3aSChris Mason 			if (!list_empty(&rbio->hash_list)) {
4464ae10b3aSChris Mason 				list_del_init(&rbio->hash_list);
447dec95574SElena Reshetova 				refcount_dec(&rbio->refs);
4484ae10b3aSChris Mason 				BUG_ON(!list_empty(&rbio->plug_list));
4494ae10b3aSChris Mason 			}
4504ae10b3aSChris Mason 		}
4514ae10b3aSChris Mason 	}
4524ae10b3aSChris Mason 
4534ae10b3aSChris Mason 	spin_unlock(&rbio->bio_list_lock);
4544ae10b3aSChris Mason 	spin_unlock(&h->lock);
4554ae10b3aSChris Mason 
4564ae10b3aSChris Mason 	if (freeit)
4574ae10b3aSChris Mason 		__free_raid_bio(rbio);
4584ae10b3aSChris Mason }
4594ae10b3aSChris Mason 
4604ae10b3aSChris Mason /*
4614ae10b3aSChris Mason  * prune a given rbio from the cache
4624ae10b3aSChris Mason  */
4634ae10b3aSChris Mason static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
4644ae10b3aSChris Mason {
4654ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
4664ae10b3aSChris Mason 	unsigned long flags;
4674ae10b3aSChris Mason 
4684ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
4694ae10b3aSChris Mason 		return;
4704ae10b3aSChris Mason 
4716a258d72SQu Wenruo 	table = rbio->bioc->fs_info->stripe_hash_table;
4724ae10b3aSChris Mason 
4734ae10b3aSChris Mason 	spin_lock_irqsave(&table->cache_lock, flags);
4744ae10b3aSChris Mason 	__remove_rbio_from_cache(rbio);
4754ae10b3aSChris Mason 	spin_unlock_irqrestore(&table->cache_lock, flags);
4764ae10b3aSChris Mason }
4774ae10b3aSChris Mason 
4784ae10b3aSChris Mason /*
4794ae10b3aSChris Mason  * remove everything in the cache
4804ae10b3aSChris Mason  */
48148a3b636SEric Sandeen static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
4824ae10b3aSChris Mason {
4834ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
4844ae10b3aSChris Mason 	unsigned long flags;
4854ae10b3aSChris Mason 	struct btrfs_raid_bio *rbio;
4864ae10b3aSChris Mason 
4874ae10b3aSChris Mason 	table = info->stripe_hash_table;
4884ae10b3aSChris Mason 
4894ae10b3aSChris Mason 	spin_lock_irqsave(&table->cache_lock, flags);
4904ae10b3aSChris Mason 	while (!list_empty(&table->stripe_cache)) {
4914ae10b3aSChris Mason 		rbio = list_entry(table->stripe_cache.next,
4924ae10b3aSChris Mason 				  struct btrfs_raid_bio,
4934ae10b3aSChris Mason 				  stripe_cache);
4944ae10b3aSChris Mason 		__remove_rbio_from_cache(rbio);
4954ae10b3aSChris Mason 	}
4964ae10b3aSChris Mason 	spin_unlock_irqrestore(&table->cache_lock, flags);
4974ae10b3aSChris Mason }
4984ae10b3aSChris Mason 
4994ae10b3aSChris Mason /*
5004ae10b3aSChris Mason  * remove all cached entries and free the hash table
5014ae10b3aSChris Mason  * used by unmount
50253b381b3SDavid Woodhouse  */
50353b381b3SDavid Woodhouse void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
50453b381b3SDavid Woodhouse {
50553b381b3SDavid Woodhouse 	if (!info->stripe_hash_table)
50653b381b3SDavid Woodhouse 		return;
5074ae10b3aSChris Mason 	btrfs_clear_rbio_cache(info);
508f749303bSWang Shilong 	kvfree(info->stripe_hash_table);
50953b381b3SDavid Woodhouse 	info->stripe_hash_table = NULL;
51053b381b3SDavid Woodhouse }
51153b381b3SDavid Woodhouse 
51253b381b3SDavid Woodhouse /*
5134ae10b3aSChris Mason  * insert an rbio into the stripe cache.  It
5144ae10b3aSChris Mason  * must have already been prepared by calling
5154ae10b3aSChris Mason  * cache_rbio_pages
5164ae10b3aSChris Mason  *
5174ae10b3aSChris Mason  * If this rbio was already cached, it gets
5184ae10b3aSChris Mason  * moved to the front of the lru.
5194ae10b3aSChris Mason  *
5204ae10b3aSChris Mason  * If the size of the rbio cache is too big, we
5214ae10b3aSChris Mason  * prune an item.
5224ae10b3aSChris Mason  */
5234ae10b3aSChris Mason static void cache_rbio(struct btrfs_raid_bio *rbio)
5244ae10b3aSChris Mason {
5254ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
5264ae10b3aSChris Mason 	unsigned long flags;
5274ae10b3aSChris Mason 
5284ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
5294ae10b3aSChris Mason 		return;
5304ae10b3aSChris Mason 
5316a258d72SQu Wenruo 	table = rbio->bioc->fs_info->stripe_hash_table;
5324ae10b3aSChris Mason 
5334ae10b3aSChris Mason 	spin_lock_irqsave(&table->cache_lock, flags);
5344ae10b3aSChris Mason 	spin_lock(&rbio->bio_list_lock);
5354ae10b3aSChris Mason 
5364ae10b3aSChris Mason 	/* bump our ref if we were not in the list before */
5374ae10b3aSChris Mason 	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
538dec95574SElena Reshetova 		refcount_inc(&rbio->refs);
5394ae10b3aSChris Mason 
5404ae10b3aSChris Mason 	if (!list_empty(&rbio->stripe_cache)) {
5414ae10b3aSChris Mason 		list_move(&rbio->stripe_cache, &table->stripe_cache);
5424ae10b3aSChris Mason 	} else {
5434ae10b3aSChris Mason 		list_add(&rbio->stripe_cache, &table->stripe_cache);
5444ae10b3aSChris Mason 		table->cache_size += 1;
5454ae10b3aSChris Mason 	}
5464ae10b3aSChris Mason 
5474ae10b3aSChris Mason 	spin_unlock(&rbio->bio_list_lock);
5484ae10b3aSChris Mason 
5494ae10b3aSChris Mason 	if (table->cache_size > RBIO_CACHE_SIZE) {
5504ae10b3aSChris Mason 		struct btrfs_raid_bio *found;
5514ae10b3aSChris Mason 
5524ae10b3aSChris Mason 		found = list_entry(table->stripe_cache.prev,
5534ae10b3aSChris Mason 				  struct btrfs_raid_bio,
5544ae10b3aSChris Mason 				  stripe_cache);
5554ae10b3aSChris Mason 
5564ae10b3aSChris Mason 		if (found != rbio)
5574ae10b3aSChris Mason 			__remove_rbio_from_cache(found);
5584ae10b3aSChris Mason 	}
5594ae10b3aSChris Mason 
5604ae10b3aSChris Mason 	spin_unlock_irqrestore(&table->cache_lock, flags);
5614ae10b3aSChris Mason }
5624ae10b3aSChris Mason 
5634ae10b3aSChris Mason /*
56453b381b3SDavid Woodhouse  * Helper function to run the xor_blocks API.  It is only
56553b381b3SDavid Woodhouse  * able to do MAX_XOR_BLOCKS sources at a time, so we need to
56653b381b3SDavid Woodhouse  * loop through.
56753b381b3SDavid Woodhouse  */
56853b381b3SDavid Woodhouse static void run_xor(void **pages, int src_cnt, ssize_t len)
56953b381b3SDavid Woodhouse {
57053b381b3SDavid Woodhouse 	int src_off = 0;
57153b381b3SDavid Woodhouse 	int xor_src_cnt = 0;
57253b381b3SDavid Woodhouse 	void *dest = pages[src_cnt];
57353b381b3SDavid Woodhouse 
57453b381b3SDavid Woodhouse 	while (src_cnt > 0) {
57553b381b3SDavid Woodhouse 		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
57653b381b3SDavid Woodhouse 		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
57753b381b3SDavid Woodhouse 
57853b381b3SDavid Woodhouse 		src_cnt -= xor_src_cnt;
57953b381b3SDavid Woodhouse 		src_off += xor_src_cnt;
58053b381b3SDavid Woodhouse 	}
58153b381b3SDavid Woodhouse }
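
/*
 * Illustrative walk-through: called with src_cnt = 8, dest is pages[8]
 * and, with MAX_XOR_BLOCKS at 4 (its value in <linux/raid/xor.h> here),
 * the loop issues two xor_blocks() calls of 4 sources each, both
 * accumulating into the same dest; only the source window pages + src_off
 * advances.
 */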
58253b381b3SDavid Woodhouse 
58353b381b3SDavid Woodhouse /*
584176571a1SDavid Sterba  * Returns true if the bio list inside this rbio covers an entire stripe (no
585176571a1SDavid Sterba  * rmw required).
58653b381b3SDavid Woodhouse  */
58753b381b3SDavid Woodhouse static int rbio_is_full(struct btrfs_raid_bio *rbio)
58853b381b3SDavid Woodhouse {
58953b381b3SDavid Woodhouse 	unsigned long flags;
590176571a1SDavid Sterba 	unsigned long size = rbio->bio_list_bytes;
591176571a1SDavid Sterba 	int ret = 1;
59253b381b3SDavid Woodhouse 
59353b381b3SDavid Woodhouse 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
594176571a1SDavid Sterba 	if (size != rbio->nr_data * rbio->stripe_len)
595176571a1SDavid Sterba 		ret = 0;
596176571a1SDavid Sterba 	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
59753b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
598176571a1SDavid Sterba 
59953b381b3SDavid Woodhouse 	return ret;
60053b381b3SDavid Woodhouse }
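
/*
 * Concrete numbers (illustrative): a 3-device RAID5 stripe has nr_data =
 * 2, so with a 64K stripe_len the rbio is full only when the queued bios
 * cover exactly 2 * 64K = 128K, letting the rmw read phase be skipped.
 */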
60153b381b3SDavid Woodhouse 
60253b381b3SDavid Woodhouse /*
60353b381b3SDavid Woodhouse  * returns 1 if it is safe to merge two rbios together.
60453b381b3SDavid Woodhouse  * The merging is safe if the two rbios correspond to
60553b381b3SDavid Woodhouse  * the same stripe and if they are both going in the same
60653b381b3SDavid Woodhouse  * direction (read vs write), and if neither one is
60753b381b3SDavid Woodhouse  * locked for final IO
60853b381b3SDavid Woodhouse  *
60953b381b3SDavid Woodhouse  * The caller is responsible for locking such that
61053b381b3SDavid Woodhouse  * rmw_locked is safe to test
61153b381b3SDavid Woodhouse  */
61253b381b3SDavid Woodhouse static int rbio_can_merge(struct btrfs_raid_bio *last,
61353b381b3SDavid Woodhouse 			  struct btrfs_raid_bio *cur)
61453b381b3SDavid Woodhouse {
61553b381b3SDavid Woodhouse 	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
61653b381b3SDavid Woodhouse 	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
61753b381b3SDavid Woodhouse 		return 0;
61853b381b3SDavid Woodhouse 
6194ae10b3aSChris Mason 	/*
6204ae10b3aSChris Mason 	 * we can't merge with cached rbios, since the
6214ae10b3aSChris Mason 	 * idea is that when we merge the destination
6224ae10b3aSChris Mason 	 * rbio is going to run our IO for us.  We can
62301327610SNicholas D Steeves 	 * steal from cached rbios though, other functions
6244ae10b3aSChris Mason 	 * handle that.
6254ae10b3aSChris Mason 	 */
6264ae10b3aSChris Mason 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
6274ae10b3aSChris Mason 	    test_bit(RBIO_CACHE_BIT, &cur->flags))
6284ae10b3aSChris Mason 		return 0;
6294ae10b3aSChris Mason 
6304c664611SQu Wenruo 	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
63153b381b3SDavid Woodhouse 		return 0;
63253b381b3SDavid Woodhouse 
6335a6ac9eaSMiao Xie 	/* we can't merge with different operations */
6345a6ac9eaSMiao Xie 	if (last->operation != cur->operation)
63553b381b3SDavid Woodhouse 		return 0;
6365a6ac9eaSMiao Xie 	/*
6375a6ac9eaSMiao Xie 	 * We need to read the full stripe from the drive,
6385a6ac9eaSMiao Xie 	 * check and repair the parity, and write the new results.
6395a6ac9eaSMiao Xie 	 *
6405a6ac9eaSMiao Xie 	 * We're not allowed to add any new bios to the
6415a6ac9eaSMiao Xie 	 * bio list here, anyone else that wants to
6425a6ac9eaSMiao Xie 	 * change this stripe needs to do their own rmw.
6435a6ac9eaSMiao Xie 	 */
644db34be19SLiu Bo 	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
6455a6ac9eaSMiao Xie 		return 0;
64653b381b3SDavid Woodhouse 
647db34be19SLiu Bo 	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
648b4ee1782SOmar Sandoval 		return 0;
649b4ee1782SOmar Sandoval 
650cc54ff62SLiu Bo 	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
651cc54ff62SLiu Bo 		int fa = last->faila;
652cc54ff62SLiu Bo 		int fb = last->failb;
653cc54ff62SLiu Bo 		int cur_fa = cur->faila;
654cc54ff62SLiu Bo 		int cur_fb = cur->failb;
655cc54ff62SLiu Bo 
656cc54ff62SLiu Bo 		if (last->faila >= last->failb) {
657cc54ff62SLiu Bo 			fa = last->failb;
658cc54ff62SLiu Bo 			fb = last->faila;
659cc54ff62SLiu Bo 		}
660cc54ff62SLiu Bo 
661cc54ff62SLiu Bo 		if (cur->faila >= cur->failb) {
662cc54ff62SLiu Bo 			cur_fa = cur->failb;
663cc54ff62SLiu Bo 			cur_fb = cur->faila;
664cc54ff62SLiu Bo 		}
665cc54ff62SLiu Bo 
666cc54ff62SLiu Bo 		if (fa != cur_fa || fb != cur_fb)
667cc54ff62SLiu Bo 			return 0;
668cc54ff62SLiu Bo 	}
66953b381b3SDavid Woodhouse 	return 1;
67053b381b3SDavid Woodhouse }
67153b381b3SDavid Woodhouse 
6723e77605dSQu Wenruo static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
6733e77605dSQu Wenruo 					     unsigned int stripe_nr,
6743e77605dSQu Wenruo 					     unsigned int sector_nr)
6753e77605dSQu Wenruo {
6763e77605dSQu Wenruo 	ASSERT(stripe_nr < rbio->real_stripes);
6773e77605dSQu Wenruo 	ASSERT(sector_nr < rbio->stripe_nsectors);
6783e77605dSQu Wenruo 
6793e77605dSQu Wenruo 	return stripe_nr * rbio->stripe_nsectors + sector_nr;
6803e77605dSQu Wenruo }
6813e77605dSQu Wenruo 
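
/*
 * Illustrative layout: stripe_sectors[] is one flat array, stripe after
 * stripe.  For RAID6 with nr_data = 4 and stripe_nsectors = 16, data
 * stripe 2 / sector 5 sits at index 2 * 16 + 5 = 37, the P stripe begins
 * at 4 * 16 = 64 and the Q stripe at 5 * 16 = 80 (see the helpers below).
 */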
6823e77605dSQu Wenruo /* Return a sector from rbio->stripe_sectors, not from the bio list */
6833e77605dSQu Wenruo static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
6843e77605dSQu Wenruo 					     unsigned int stripe_nr,
6853e77605dSQu Wenruo 					     unsigned int sector_nr)
6863e77605dSQu Wenruo {
6873e77605dSQu Wenruo 	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
6883e77605dSQu Wenruo 							      sector_nr)];
6893e77605dSQu Wenruo }
6903e77605dSQu Wenruo 
6911145059aSQu Wenruo /* Grab a sector inside P stripe */
6921145059aSQu Wenruo static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
6931145059aSQu Wenruo 					      unsigned int sector_nr)
694b7178a5fSZhao Lei {
6951145059aSQu Wenruo 	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
696b7178a5fSZhao Lei }
697b7178a5fSZhao Lei 
6981145059aSQu Wenruo /* Grab a sector inside Q stripe, return NULL if not RAID6 */
6991145059aSQu Wenruo static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
7001145059aSQu Wenruo 					      unsigned int sector_nr)
70153b381b3SDavid Woodhouse {
7022c8cdd6eSMiao Xie 	if (rbio->nr_data + 1 == rbio->real_stripes)
70353b381b3SDavid Woodhouse 		return NULL;
7041145059aSQu Wenruo 	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
7051145059aSQu Wenruo }
7061145059aSQu Wenruo 
70753b381b3SDavid Woodhouse /*
70853b381b3SDavid Woodhouse  * The first stripe in the table for a logical address
70953b381b3SDavid Woodhouse  * has the lock.  rbios are added in one of three ways:
71053b381b3SDavid Woodhouse  *
71153b381b3SDavid Woodhouse  * 1) Nobody has the stripe locked yet.  The rbio is given
71253b381b3SDavid Woodhouse  * the lock and 0 is returned.  The caller must start the IO
71353b381b3SDavid Woodhouse  * themselves.
71453b381b3SDavid Woodhouse  *
71553b381b3SDavid Woodhouse  * 2) Someone has the stripe locked, but we're able to merge
71653b381b3SDavid Woodhouse  * with the lock owner.  The rbio is freed and the IO will
71753b381b3SDavid Woodhouse  * start automatically along with the existing rbio.  1 is returned.
71853b381b3SDavid Woodhouse  *
71953b381b3SDavid Woodhouse  * 3) Someone has the stripe locked, but we're not able to merge.
72053b381b3SDavid Woodhouse  * The rbio is added to the lock owner's plug list, or merged into
72153b381b3SDavid Woodhouse  * an rbio already on the plug list.  When the lock owner unlocks,
72253b381b3SDavid Woodhouse  * the next rbio on the list is run and the IO is started automatically.
72353b381b3SDavid Woodhouse  * 1 is returned
72453b381b3SDavid Woodhouse  *
72553b381b3SDavid Woodhouse  * If we return 0, the caller still owns the rbio and must continue with
72653b381b3SDavid Woodhouse  * IO submission.  If we return 1, the caller must assume the rbio has
72753b381b3SDavid Woodhouse  * already been freed.
72853b381b3SDavid Woodhouse  */
72953b381b3SDavid Woodhouse static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
73053b381b3SDavid Woodhouse {
731721860d5SJohannes Thumshirn 	struct btrfs_stripe_hash *h;
73253b381b3SDavid Woodhouse 	struct btrfs_raid_bio *cur;
73353b381b3SDavid Woodhouse 	struct btrfs_raid_bio *pending;
73453b381b3SDavid Woodhouse 	unsigned long flags;
73553b381b3SDavid Woodhouse 	struct btrfs_raid_bio *freeit = NULL;
7364ae10b3aSChris Mason 	struct btrfs_raid_bio *cache_drop = NULL;
73753b381b3SDavid Woodhouse 	int ret = 0;
73853b381b3SDavid Woodhouse 
7396a258d72SQu Wenruo 	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
740721860d5SJohannes Thumshirn 
74153b381b3SDavid Woodhouse 	spin_lock_irqsave(&h->lock, flags);
74253b381b3SDavid Woodhouse 	list_for_each_entry(cur, &h->hash_list, hash_list) {
7434c664611SQu Wenruo 		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
7449d6cb1b0SJohannes Thumshirn 			continue;
7459d6cb1b0SJohannes Thumshirn 
74653b381b3SDavid Woodhouse 		spin_lock(&cur->bio_list_lock);
74753b381b3SDavid Woodhouse 
7489d6cb1b0SJohannes Thumshirn 		/* Can we steal this cached rbio's pages? */
7494ae10b3aSChris Mason 		if (bio_list_empty(&cur->bio_list) &&
7504ae10b3aSChris Mason 		    list_empty(&cur->plug_list) &&
7514ae10b3aSChris Mason 		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
7524ae10b3aSChris Mason 		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
7534ae10b3aSChris Mason 			list_del_init(&cur->hash_list);
754dec95574SElena Reshetova 			refcount_dec(&cur->refs);
7554ae10b3aSChris Mason 
7564ae10b3aSChris Mason 			steal_rbio(cur, rbio);
7574ae10b3aSChris Mason 			cache_drop = cur;
7584ae10b3aSChris Mason 			spin_unlock(&cur->bio_list_lock);
7594ae10b3aSChris Mason 
7604ae10b3aSChris Mason 			goto lockit;
7614ae10b3aSChris Mason 		}
7624ae10b3aSChris Mason 
7639d6cb1b0SJohannes Thumshirn 		/* Can we merge into the lock owner? */
76453b381b3SDavid Woodhouse 		if (rbio_can_merge(cur, rbio)) {
76553b381b3SDavid Woodhouse 			merge_rbio(cur, rbio);
76653b381b3SDavid Woodhouse 			spin_unlock(&cur->bio_list_lock);
76753b381b3SDavid Woodhouse 			freeit = rbio;
76853b381b3SDavid Woodhouse 			ret = 1;
76953b381b3SDavid Woodhouse 			goto out;
77053b381b3SDavid Woodhouse 		}
77153b381b3SDavid Woodhouse 
7724ae10b3aSChris Mason 
77353b381b3SDavid Woodhouse 		/*
7749d6cb1b0SJohannes Thumshirn 		 * We couldn't merge with the running rbio, see if we can merge
7759d6cb1b0SJohannes Thumshirn 		 * with the pending ones.  We don't have to check for rmw_locked
7769d6cb1b0SJohannes Thumshirn 		 * because there is no way they are inside finish_rmw right now
77753b381b3SDavid Woodhouse 		 */
7789d6cb1b0SJohannes Thumshirn 		list_for_each_entry(pending, &cur->plug_list, plug_list) {
77953b381b3SDavid Woodhouse 			if (rbio_can_merge(pending, rbio)) {
78053b381b3SDavid Woodhouse 				merge_rbio(pending, rbio);
78153b381b3SDavid Woodhouse 				spin_unlock(&cur->bio_list_lock);
78253b381b3SDavid Woodhouse 				freeit = rbio;
78353b381b3SDavid Woodhouse 				ret = 1;
78453b381b3SDavid Woodhouse 				goto out;
78553b381b3SDavid Woodhouse 			}
78653b381b3SDavid Woodhouse 		}
78753b381b3SDavid Woodhouse 
7889d6cb1b0SJohannes Thumshirn 		/*
7899d6cb1b0SJohannes Thumshirn 		 * No merging, put us on the tail of the plug list, our rbio
7909d6cb1b0SJohannes Thumshirn 		 * will be started when the currently running rbio unlocks
79153b381b3SDavid Woodhouse 		 */
79253b381b3SDavid Woodhouse 		list_add_tail(&rbio->plug_list, &cur->plug_list);
79353b381b3SDavid Woodhouse 		spin_unlock(&cur->bio_list_lock);
79453b381b3SDavid Woodhouse 		ret = 1;
79553b381b3SDavid Woodhouse 		goto out;
79653b381b3SDavid Woodhouse 	}
7974ae10b3aSChris Mason lockit:
798dec95574SElena Reshetova 	refcount_inc(&rbio->refs);
79953b381b3SDavid Woodhouse 	list_add(&rbio->hash_list, &h->hash_list);
80053b381b3SDavid Woodhouse out:
80153b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&h->lock, flags);
8024ae10b3aSChris Mason 	if (cache_drop)
8034ae10b3aSChris Mason 		remove_rbio_from_cache(cache_drop);
80453b381b3SDavid Woodhouse 	if (freeit)
80553b381b3SDavid Woodhouse 		__free_raid_bio(freeit);
80653b381b3SDavid Woodhouse 	return ret;
80753b381b3SDavid Woodhouse }
80853b381b3SDavid Woodhouse 
80953b381b3SDavid Woodhouse /*
81053b381b3SDavid Woodhouse  * called as rmw or parity rebuild is completed.  If the plug list has more
81153b381b3SDavid Woodhouse  * rbios waiting for this stripe, the next one on the list will be started
81253b381b3SDavid Woodhouse  */
81353b381b3SDavid Woodhouse static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
81453b381b3SDavid Woodhouse {
81553b381b3SDavid Woodhouse 	int bucket;
81653b381b3SDavid Woodhouse 	struct btrfs_stripe_hash *h;
81753b381b3SDavid Woodhouse 	unsigned long flags;
8184ae10b3aSChris Mason 	int keep_cache = 0;
81953b381b3SDavid Woodhouse 
82053b381b3SDavid Woodhouse 	bucket = rbio_bucket(rbio);
8216a258d72SQu Wenruo 	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;
82253b381b3SDavid Woodhouse 
8234ae10b3aSChris Mason 	if (list_empty(&rbio->plug_list))
8244ae10b3aSChris Mason 		cache_rbio(rbio);
8254ae10b3aSChris Mason 
82653b381b3SDavid Woodhouse 	spin_lock_irqsave(&h->lock, flags);
82753b381b3SDavid Woodhouse 	spin_lock(&rbio->bio_list_lock);
82853b381b3SDavid Woodhouse 
82953b381b3SDavid Woodhouse 	if (!list_empty(&rbio->hash_list)) {
8304ae10b3aSChris Mason 		/*
8314ae10b3aSChris Mason 		 * if we're still cached and there is no other IO
8324ae10b3aSChris Mason 		 * to perform, just leave this rbio here for others
8334ae10b3aSChris Mason 		 * to steal from later
8344ae10b3aSChris Mason 		 */
8354ae10b3aSChris Mason 		if (list_empty(&rbio->plug_list) &&
8364ae10b3aSChris Mason 		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
8374ae10b3aSChris Mason 			keep_cache = 1;
8384ae10b3aSChris Mason 			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
8394ae10b3aSChris Mason 			BUG_ON(!bio_list_empty(&rbio->bio_list));
8404ae10b3aSChris Mason 			goto done;
8414ae10b3aSChris Mason 		}
84253b381b3SDavid Woodhouse 
84353b381b3SDavid Woodhouse 		list_del_init(&rbio->hash_list);
844dec95574SElena Reshetova 		refcount_dec(&rbio->refs);
84553b381b3SDavid Woodhouse 
84653b381b3SDavid Woodhouse 		/*
84753b381b3SDavid Woodhouse 		 * we use the plug list to hold all the rbios
84853b381b3SDavid Woodhouse 		 * waiting for the chance to lock this stripe.
84953b381b3SDavid Woodhouse 		 * hand the lock over to one of them.
85053b381b3SDavid Woodhouse 		 */
85153b381b3SDavid Woodhouse 		if (!list_empty(&rbio->plug_list)) {
85253b381b3SDavid Woodhouse 			struct btrfs_raid_bio *next;
85353b381b3SDavid Woodhouse 			struct list_head *head = rbio->plug_list.next;
85453b381b3SDavid Woodhouse 
85553b381b3SDavid Woodhouse 			next = list_entry(head, struct btrfs_raid_bio,
85653b381b3SDavid Woodhouse 					  plug_list);
85753b381b3SDavid Woodhouse 
85853b381b3SDavid Woodhouse 			list_del_init(&rbio->plug_list);
85953b381b3SDavid Woodhouse 
86053b381b3SDavid Woodhouse 			list_add(&next->hash_list, &h->hash_list);
861dec95574SElena Reshetova 			refcount_inc(&next->refs);
86253b381b3SDavid Woodhouse 			spin_unlock(&rbio->bio_list_lock);
86353b381b3SDavid Woodhouse 			spin_unlock_irqrestore(&h->lock, flags);
86453b381b3SDavid Woodhouse 
8651b94b556SMiao Xie 			if (next->operation == BTRFS_RBIO_READ_REBUILD)
866e66d8d5aSDavid Sterba 				start_async_work(next, read_rebuild_work);
867b4ee1782SOmar Sandoval 			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
868b4ee1782SOmar Sandoval 				steal_rbio(rbio, next);
869e66d8d5aSDavid Sterba 				start_async_work(next, read_rebuild_work);
870b4ee1782SOmar Sandoval 			} else if (next->operation == BTRFS_RBIO_WRITE) {
8714ae10b3aSChris Mason 				steal_rbio(rbio, next);
872cf6a4a75SDavid Sterba 				start_async_work(next, rmw_work);
8735a6ac9eaSMiao Xie 			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
8745a6ac9eaSMiao Xie 				steal_rbio(rbio, next);
875a81b747dSDavid Sterba 				start_async_work(next, scrub_parity_work);
8764ae10b3aSChris Mason 			}
87753b381b3SDavid Woodhouse 
87853b381b3SDavid Woodhouse 			goto done_nolock;
87953b381b3SDavid Woodhouse 		}
88053b381b3SDavid Woodhouse 	}
8814ae10b3aSChris Mason done:
88253b381b3SDavid Woodhouse 	spin_unlock(&rbio->bio_list_lock);
88353b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&h->lock, flags);
88453b381b3SDavid Woodhouse 
88553b381b3SDavid Woodhouse done_nolock:
8864ae10b3aSChris Mason 	if (!keep_cache)
8874ae10b3aSChris Mason 		remove_rbio_from_cache(rbio);
88853b381b3SDavid Woodhouse }
88953b381b3SDavid Woodhouse 
89053b381b3SDavid Woodhouse static void __free_raid_bio(struct btrfs_raid_bio *rbio)
89153b381b3SDavid Woodhouse {
89253b381b3SDavid Woodhouse 	int i;
89353b381b3SDavid Woodhouse 
894dec95574SElena Reshetova 	if (!refcount_dec_and_test(&rbio->refs))
89553b381b3SDavid Woodhouse 		return;
89653b381b3SDavid Woodhouse 
8974ae10b3aSChris Mason 	WARN_ON(!list_empty(&rbio->stripe_cache));
89853b381b3SDavid Woodhouse 	WARN_ON(!list_empty(&rbio->hash_list));
89953b381b3SDavid Woodhouse 	WARN_ON(!bio_list_empty(&rbio->bio_list));
90053b381b3SDavid Woodhouse 
90153b381b3SDavid Woodhouse 	for (i = 0; i < rbio->nr_pages; i++) {
90253b381b3SDavid Woodhouse 		if (rbio->stripe_pages[i]) {
90353b381b3SDavid Woodhouse 			__free_page(rbio->stripe_pages[i]);
90453b381b3SDavid Woodhouse 			rbio->stripe_pages[i] = NULL;
90553b381b3SDavid Woodhouse 		}
90653b381b3SDavid Woodhouse 	}
907af8e2d1dSMiao Xie 
9084c664611SQu Wenruo 	btrfs_put_bioc(rbio->bioc);
90953b381b3SDavid Woodhouse 	kfree(rbio);
91053b381b3SDavid Woodhouse }
91153b381b3SDavid Woodhouse 
9127583d8d0SLiu Bo static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
91353b381b3SDavid Woodhouse {
9147583d8d0SLiu Bo 	struct bio *next;
9157583d8d0SLiu Bo 
9167583d8d0SLiu Bo 	while (cur) {
9177583d8d0SLiu Bo 		next = cur->bi_next;
9187583d8d0SLiu Bo 		cur->bi_next = NULL;
9197583d8d0SLiu Bo 		cur->bi_status = err;
9207583d8d0SLiu Bo 		bio_endio(cur);
9217583d8d0SLiu Bo 		cur = next;
9227583d8d0SLiu Bo 	}
92353b381b3SDavid Woodhouse }
92453b381b3SDavid Woodhouse 
92553b381b3SDavid Woodhouse /*
92653b381b3SDavid Woodhouse  * this frees the rbio and runs through all the bios in the
92753b381b3SDavid Woodhouse  * bio_list and calls end_io on them
92853b381b3SDavid Woodhouse  */
9294e4cbee9SChristoph Hellwig static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
93053b381b3SDavid Woodhouse {
93153b381b3SDavid Woodhouse 	struct bio *cur = bio_list_get(&rbio->bio_list);
9327583d8d0SLiu Bo 	struct bio *extra;
9334245215dSMiao Xie 
9344245215dSMiao Xie 	if (rbio->generic_bio_cnt)
9356a258d72SQu Wenruo 		btrfs_bio_counter_sub(rbio->bioc->fs_info, rbio->generic_bio_cnt);
9364245215dSMiao Xie 
9377583d8d0SLiu Bo 	/*
9387583d8d0SLiu Bo 	 * At this moment, rbio->bio_list is empty, however since rbio does not
9397583d8d0SLiu Bo 	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
9407583d8d0SLiu Bo 	 * hash list, rbio may be merged with others so that rbio->bio_list
9417583d8d0SLiu Bo 	 * becomes non-empty.
9427583d8d0SLiu Bo 	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
9437583d8d0SLiu Bo 	 * more and we can call bio_endio() on all queued bios.
9447583d8d0SLiu Bo 	 */
9457583d8d0SLiu Bo 	unlock_stripe(rbio);
9467583d8d0SLiu Bo 	extra = bio_list_get(&rbio->bio_list);
9477583d8d0SLiu Bo 	__free_raid_bio(rbio);
94853b381b3SDavid Woodhouse 
9497583d8d0SLiu Bo 	rbio_endio_bio_list(cur, err);
9507583d8d0SLiu Bo 	if (extra)
9517583d8d0SLiu Bo 		rbio_endio_bio_list(extra, err);
95253b381b3SDavid Woodhouse }
95353b381b3SDavid Woodhouse 
95453b381b3SDavid Woodhouse /*
95553b381b3SDavid Woodhouse  * end io function used by finish_rmw.  When we finally
95653b381b3SDavid Woodhouse  * get here, we've written a full stripe
95753b381b3SDavid Woodhouse  */
9584246a0b6SChristoph Hellwig static void raid_write_end_io(struct bio *bio)
95953b381b3SDavid Woodhouse {
96053b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio = bio->bi_private;
9614e4cbee9SChristoph Hellwig 	blk_status_t err = bio->bi_status;
962a6111d11SZhao Lei 	int max_errors;
96353b381b3SDavid Woodhouse 
96453b381b3SDavid Woodhouse 	if (err)
96553b381b3SDavid Woodhouse 		fail_bio_stripe(rbio, bio);
96653b381b3SDavid Woodhouse 
96753b381b3SDavid Woodhouse 	bio_put(bio);
96853b381b3SDavid Woodhouse 
969b89e1b01SMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
97053b381b3SDavid Woodhouse 		return;
97153b381b3SDavid Woodhouse 
97258efbc9fSOmar Sandoval 	err = BLK_STS_OK;
97353b381b3SDavid Woodhouse 
97453b381b3SDavid Woodhouse 	/* OK, we have read all the stripes we need to. */
975a6111d11SZhao Lei 	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
9764c664611SQu Wenruo 		     0 : rbio->bioc->max_errors;
977a6111d11SZhao Lei 	if (atomic_read(&rbio->error) > max_errors)
9784e4cbee9SChristoph Hellwig 		err = BLK_STS_IOERR;
97953b381b3SDavid Woodhouse 
9804246a0b6SChristoph Hellwig 	rbio_orig_end_io(rbio, err);
98153b381b3SDavid Woodhouse }
98253b381b3SDavid Woodhouse 
9833e77605dSQu Wenruo /**
9843e77605dSQu Wenruo  * sector_in_rbio - Get a sector pointer specified by its @stripe_nr and @sector_nr
9853e77605dSQu Wenruo  *
9863e77605dSQu Wenruo  * @rbio:               The raid bio
9873e77605dSQu Wenruo  * @stripe_nr:          Stripe number, valid range [0, real_stripe)
9883e77605dSQu Wenruo  * @sector_nr:		Sector number inside the stripe,
9893e77605dSQu Wenruo  *			valid range [0, stripe_nsectors)
9903e77605dSQu Wenruo  * @bio_list_only:      Whether to use sectors inside the bio list only.
9913e77605dSQu Wenruo  *
9923e77605dSQu Wenruo  * The read/modify/write code wants to reuse the original bio page as much
9933e77605dSQu Wenruo  * as possible, and only use stripe_sectors as fallback.
9943e77605dSQu Wenruo  */
9953e77605dSQu Wenruo static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
9963e77605dSQu Wenruo 					 int stripe_nr, int sector_nr,
9973e77605dSQu Wenruo 					 bool bio_list_only)
9983e77605dSQu Wenruo {
9993e77605dSQu Wenruo 	struct sector_ptr *sector;
10003e77605dSQu Wenruo 	int index;
10013e77605dSQu Wenruo 
10023e77605dSQu Wenruo 	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
10033e77605dSQu Wenruo 	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
10043e77605dSQu Wenruo 
10053e77605dSQu Wenruo 	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
10063e77605dSQu Wenruo 	ASSERT(index >= 0 && index < rbio->nr_sectors);
10073e77605dSQu Wenruo 
10083e77605dSQu Wenruo 	spin_lock_irq(&rbio->bio_list_lock);
10093e77605dSQu Wenruo 	sector = &rbio->bio_sectors[index];
10103e77605dSQu Wenruo 	if (sector->page || bio_list_only) {
10113e77605dSQu Wenruo 		/* Don't return sector without a valid page pointer */
10123e77605dSQu Wenruo 		if (!sector->page)
10133e77605dSQu Wenruo 			sector = NULL;
10143e77605dSQu Wenruo 		spin_unlock_irq(&rbio->bio_list_lock);
10153e77605dSQu Wenruo 		return sector;
10163e77605dSQu Wenruo 	}
10173e77605dSQu Wenruo 	spin_unlock_irq(&rbio->bio_list_lock);
10183e77605dSQu Wenruo 
10193e77605dSQu Wenruo 	return &rbio->stripe_sectors[index];
10203e77605dSQu Wenruo }
10213e77605dSQu Wenruo 
102253b381b3SDavid Woodhouse /*
102353b381b3SDavid Woodhouse  * allocation and initial setup for the btrfs_raid_bio.  Note that
102453b381b3SDavid Woodhouse  * this does not allocate any pages for rbio->stripe_pages.
102553b381b3SDavid Woodhouse  */
10262ff7e61eSJeff Mahoney static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
10274c664611SQu Wenruo 					 struct btrfs_io_context *bioc,
1028cc353a8bSQu Wenruo 					 u32 stripe_len)
102953b381b3SDavid Woodhouse {
1030843de58bSQu Wenruo 	const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
1031843de58bSQu Wenruo 	const unsigned int stripe_npages = stripe_len >> PAGE_SHIFT;
1032843de58bSQu Wenruo 	const unsigned int num_pages = stripe_npages * real_stripes;
103394efbe19SQu Wenruo 	const unsigned int stripe_nsectors = stripe_len >> fs_info->sectorsize_bits;
103494efbe19SQu Wenruo 	const unsigned int num_sectors = stripe_nsectors * real_stripes;
103553b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
103653b381b3SDavid Woodhouse 	int nr_data = 0;
103753b381b3SDavid Woodhouse 	void *p;
103853b381b3SDavid Woodhouse 
1039843de58bSQu Wenruo 	ASSERT(IS_ALIGNED(stripe_len, PAGE_SIZE));
104094efbe19SQu Wenruo 	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
104194efbe19SQu Wenruo 	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
1042*c67c68ebSQu Wenruo 	/*
1043*c67c68ebSQu Wenruo 	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
1044*c67c68ebSQu Wenruo 	 * (at most 16) should be no larger than BITS_PER_LONG.
1045*c67c68ebSQu Wenruo 	 */
1046*c67c68ebSQu Wenruo 	ASSERT(stripe_nsectors <= BITS_PER_LONG);
1047843de58bSQu Wenruo 
10481389053eSKees Cook 	rbio = kzalloc(sizeof(*rbio) +
10491389053eSKees Cook 		       sizeof(*rbio->stripe_pages) * num_pages +
105000425dd9SQu Wenruo 		       sizeof(*rbio->bio_sectors) * num_sectors +
1051eb357060SQu Wenruo 		       sizeof(*rbio->stripe_sectors) * num_sectors +
1052*c67c68ebSQu Wenruo 		       sizeof(*rbio->finish_pointers) * real_stripes,
10531389053eSKees Cook 		       GFP_NOFS);
1054af8e2d1dSMiao Xie 	if (!rbio)
105553b381b3SDavid Woodhouse 		return ERR_PTR(-ENOMEM);
105653b381b3SDavid Woodhouse 
105753b381b3SDavid Woodhouse 	bio_list_init(&rbio->bio_list);
105853b381b3SDavid Woodhouse 	INIT_LIST_HEAD(&rbio->plug_list);
105953b381b3SDavid Woodhouse 	spin_lock_init(&rbio->bio_list_lock);
10604ae10b3aSChris Mason 	INIT_LIST_HEAD(&rbio->stripe_cache);
106153b381b3SDavid Woodhouse 	INIT_LIST_HEAD(&rbio->hash_list);
10624c664611SQu Wenruo 	rbio->bioc = bioc;
106353b381b3SDavid Woodhouse 	rbio->stripe_len = stripe_len;
106453b381b3SDavid Woodhouse 	rbio->nr_pages = num_pages;
106594efbe19SQu Wenruo 	rbio->nr_sectors = num_sectors;
10662c8cdd6eSMiao Xie 	rbio->real_stripes = real_stripes;
10675a6ac9eaSMiao Xie 	rbio->stripe_npages = stripe_npages;
106894efbe19SQu Wenruo 	rbio->stripe_nsectors = stripe_nsectors;
106953b381b3SDavid Woodhouse 	rbio->faila = -1;
107053b381b3SDavid Woodhouse 	rbio->failb = -1;
1071dec95574SElena Reshetova 	refcount_set(&rbio->refs, 1);
1072b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
1073b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, 0);
107453b381b3SDavid Woodhouse 
107553b381b3SDavid Woodhouse 	/*
1076ac26df8bSQu Wenruo 	 * The stripe_pages, bio_sectors, etc arrays point to the extra memory
1077ac26df8bSQu Wenruo 	 * we allocated past the end of the rbio.
107853b381b3SDavid Woodhouse 	 */
107953b381b3SDavid Woodhouse 	p = rbio + 1;
10801389053eSKees Cook #define CONSUME_ALLOC(ptr, count)	do {				\
10811389053eSKees Cook 		ptr = p;						\
10821389053eSKees Cook 		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
10831389053eSKees Cook 	} while (0)
10841389053eSKees Cook 	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
108500425dd9SQu Wenruo 	CONSUME_ALLOC(rbio->bio_sectors, num_sectors);
1086eb357060SQu Wenruo 	CONSUME_ALLOC(rbio->stripe_sectors, num_sectors);
10871389053eSKees Cook 	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
10881389053eSKees Cook #undef  CONSUME_ALLOC
108953b381b3SDavid Woodhouse 
10904c664611SQu Wenruo 	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
109110f11900SZhao Lei 		nr_data = real_stripes - 1;
10924c664611SQu Wenruo 	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
10932c8cdd6eSMiao Xie 		nr_data = real_stripes - 2;
109453b381b3SDavid Woodhouse 	else
109510f11900SZhao Lei 		BUG();
109653b381b3SDavid Woodhouse 
109753b381b3SDavid Woodhouse 	rbio->nr_data = nr_data;
109853b381b3SDavid Woodhouse 	return rbio;
109953b381b3SDavid Woodhouse }
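
/*
 * Illustrative allocation layout (RAID6, 4 real stripes, 64K stripe_len,
 * 4K pages and sectors): stripe_npages = 16, num_pages = num_sectors = 64.
 * The single kzalloc() above then carries, in order: the rbio itself, 64
 * stripe_pages pointers, 64 bio_sectors entries, 64 stripe_sectors entries
 * and 4 finish_pointers, carved out by the CONSUME_ALLOC() block.
 */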
110053b381b3SDavid Woodhouse 
110153b381b3SDavid Woodhouse /* allocate pages for all the stripes in the bio, including parity */
110253b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
110353b381b3SDavid Woodhouse {
1104eb357060SQu Wenruo 	int ret;
1105eb357060SQu Wenruo 
1106eb357060SQu Wenruo 	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
1107eb357060SQu Wenruo 	if (ret < 0)
1108eb357060SQu Wenruo 		return ret;
1109eb357060SQu Wenruo 	/* Mapping all sectors */
1110eb357060SQu Wenruo 	index_stripe_sectors(rbio);
1111eb357060SQu Wenruo 	return 0;
111253b381b3SDavid Woodhouse }
111353b381b3SDavid Woodhouse 
1114b7178a5fSZhao Lei /* only allocate pages for p/q stripes */
111553b381b3SDavid Woodhouse static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
111653b381b3SDavid Woodhouse {
1117f77183dcSQu Wenruo 	const int data_pages = rbio->nr_data * rbio->stripe_npages;
1118eb357060SQu Wenruo 	int ret;
111953b381b3SDavid Woodhouse 
1120eb357060SQu Wenruo 	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
1121dd137dd1SSweet Tea Dorminy 				     rbio->stripe_pages + data_pages);
1122eb357060SQu Wenruo 	if (ret < 0)
1123eb357060SQu Wenruo 		return ret;
1124eb357060SQu Wenruo 
1125eb357060SQu Wenruo 	index_stripe_sectors(rbio);
1126eb357060SQu Wenruo 	return 0;
112753b381b3SDavid Woodhouse }
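
/*
 * Example (illustrative): for a 6-device RAID6 rbio, nr_data is 4, so the
 * helper above skips stripe_pages[0 .. 4 * stripe_npages - 1] (the data
 * stripes are served straight from the bio list) and only allocates the
 * pages backing the P and Q stripes.
 */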
112853b381b3SDavid Woodhouse 
112953b381b3SDavid Woodhouse /*
11303e77605dSQu Wenruo  * Add a single sector @sector into our list of bios for IO.
11313e77605dSQu Wenruo  *
11323e77605dSQu Wenruo  * Return 0 if everything went well.
11333e77605dSQu Wenruo  * Return <0 for error.
113453b381b3SDavid Woodhouse  */
11353e77605dSQu Wenruo static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
113653b381b3SDavid Woodhouse 			      struct bio_list *bio_list,
11373e77605dSQu Wenruo 			      struct sector_ptr *sector,
11383e77605dSQu Wenruo 			      unsigned int stripe_nr,
11393e77605dSQu Wenruo 			      unsigned int sector_nr,
1140e01bf588SChristoph Hellwig 			      unsigned long bio_max_len,
1141e01bf588SChristoph Hellwig 			      unsigned int opf)
114253b381b3SDavid Woodhouse {
11433e77605dSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
114453b381b3SDavid Woodhouse 	struct bio *last = bio_list->tail;
114553b381b3SDavid Woodhouse 	int ret;
114653b381b3SDavid Woodhouse 	struct bio *bio;
11474c664611SQu Wenruo 	struct btrfs_io_stripe *stripe;
114853b381b3SDavid Woodhouse 	u64 disk_start;
114953b381b3SDavid Woodhouse 
11503e77605dSQu Wenruo 	/*
11513e77605dSQu Wenruo 	 * Note: here stripe_nr has taken device replace into consideration,
11523e77605dSQu Wenruo 	 * thus it can be larger than rbio->real_stripes.
11533e77605dSQu Wenruo 	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
11543e77605dSQu Wenruo 	 */
11553e77605dSQu Wenruo 	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
11563e77605dSQu Wenruo 	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
11573e77605dSQu Wenruo 	ASSERT(sector->page);
11583e77605dSQu Wenruo 
11594c664611SQu Wenruo 	stripe = &rbio->bioc->stripes[stripe_nr];
11603e77605dSQu Wenruo 	disk_start = stripe->physical + sector_nr * sectorsize;
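	/*
	 * e.g. (illustrative, 4K sectorsize): sector_nr 3 of a stripe whose
	 * btrfs_io_stripe starts at physical byte 0x100000 maps to
	 * disk_start 0x100000 + 3 * 4K = 0x103000.
	 */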
116153b381b3SDavid Woodhouse 
116253b381b3SDavid Woodhouse 	/* if the device is missing, just fail this stripe */
116353b381b3SDavid Woodhouse 	if (!stripe->dev->bdev)
116453b381b3SDavid Woodhouse 		return fail_rbio_index(rbio, stripe_nr);
116553b381b3SDavid Woodhouse 
116653b381b3SDavid Woodhouse 	/* see if we can add this page onto our existing bio */
116753b381b3SDavid Woodhouse 	if (last) {
11681201b58bSDavid Sterba 		u64 last_end = last->bi_iter.bi_sector << 9;
11694f024f37SKent Overstreet 		last_end += last->bi_iter.bi_size;
117053b381b3SDavid Woodhouse 
117153b381b3SDavid Woodhouse 		/*
117253b381b3SDavid Woodhouse 		 * we can't merge these if they are from different
117353b381b3SDavid Woodhouse 		 * devices or if they are not contiguous
117453b381b3SDavid Woodhouse 		 */
1175f90ae76aSNikolay Borisov 		if (last_end == disk_start && !last->bi_status &&
1176309dca30SChristoph Hellwig 		    last->bi_bdev == stripe->dev->bdev) {
11773e77605dSQu Wenruo 			ret = bio_add_page(last, sector->page, sectorsize,
11783e77605dSQu Wenruo 					   sector->pgoff);
11793e77605dSQu Wenruo 			if (ret == sectorsize)
118053b381b3SDavid Woodhouse 				return 0;
118153b381b3SDavid Woodhouse 		}
118253b381b3SDavid Woodhouse 	}
118353b381b3SDavid Woodhouse 
118453b381b3SDavid Woodhouse 	/* put a new bio on the list */
1185e1b4b44eSChristoph Hellwig 	bio = bio_alloc(stripe->dev->bdev, max(bio_max_len >> PAGE_SHIFT, 1UL),
1186e1b4b44eSChristoph Hellwig 			opf, GFP_NOFS);
11874f024f37SKent Overstreet 	bio->bi_iter.bi_sector = disk_start >> 9;
1188e01bf588SChristoph Hellwig 	bio->bi_private = rbio;
118953b381b3SDavid Woodhouse 
11903e77605dSQu Wenruo 	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
119153b381b3SDavid Woodhouse 	bio_list_add(bio_list, bio);
119253b381b3SDavid Woodhouse 	return 0;
119353b381b3SDavid Woodhouse }
119453b381b3SDavid Woodhouse 
119553b381b3SDavid Woodhouse /*
119653b381b3SDavid Woodhouse  * while we're doing the read/modify/write cycle, we could
119753b381b3SDavid Woodhouse  * have errors in reading pages off the disk.  This checks
119853b381b3SDavid Woodhouse  * for errors, and if we weren't able to read a page it'll
119953b381b3SDavid Woodhouse  * trigger parity reconstruction.  The rmw will be finished
120053b381b3SDavid Woodhouse  * after we've reconstructed the failed stripes.
120153b381b3SDavid Woodhouse  */
120253b381b3SDavid Woodhouse static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
120353b381b3SDavid Woodhouse {
120453b381b3SDavid Woodhouse 	if (rbio->faila >= 0 || rbio->failb >= 0) {
12052c8cdd6eSMiao Xie 		BUG_ON(rbio->faila == rbio->real_stripes - 1);
120653b381b3SDavid Woodhouse 		__raid56_parity_recover(rbio);
120753b381b3SDavid Woodhouse 	} else {
120853b381b3SDavid Woodhouse 		finish_rmw(rbio);
120953b381b3SDavid Woodhouse 	}
121053b381b3SDavid Woodhouse }
121153b381b3SDavid Woodhouse 
121200425dd9SQu Wenruo static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
121300425dd9SQu Wenruo {
121400425dd9SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
121500425dd9SQu Wenruo 	struct bio_vec bvec;
121600425dd9SQu Wenruo 	struct bvec_iter iter;
121700425dd9SQu Wenruo 	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
121800425dd9SQu Wenruo 		     rbio->bioc->raid_map[0];
121900425dd9SQu Wenruo 
122000425dd9SQu Wenruo 	if (bio_flagged(bio, BIO_CLONED))
122100425dd9SQu Wenruo 		bio->bi_iter = btrfs_bio(bio)->iter;
122200425dd9SQu Wenruo 
122300425dd9SQu Wenruo 	bio_for_each_segment(bvec, bio, iter) {
122400425dd9SQu Wenruo 		u32 bvec_offset;
122500425dd9SQu Wenruo 
122600425dd9SQu Wenruo 		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
122700425dd9SQu Wenruo 		     bvec_offset += sectorsize, offset += sectorsize) {
122800425dd9SQu Wenruo 			int index = offset / sectorsize;
122900425dd9SQu Wenruo 			struct sector_ptr *sector = &rbio->bio_sectors[index];
123000425dd9SQu Wenruo 
123100425dd9SQu Wenruo 			sector->page = bvec.bv_page;
123200425dd9SQu Wenruo 			sector->pgoff = bvec.bv_offset + bvec_offset;
123300425dd9SQu Wenruo 			ASSERT(sector->pgoff < PAGE_SIZE);
123400425dd9SQu Wenruo 		}
123500425dd9SQu Wenruo 	}
123600425dd9SQu Wenruo }
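
/*
 * Worked example (illustrative, 4K sectorsize): a bio starting 8K past
 * raid_map[0] begins at offset 8192, so its first 4K block lands in
 * bio_sectors[2], the next in bio_sectors[3], and so on across every
 * bvec of the bio.
 */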
123700425dd9SQu Wenruo 
123853b381b3SDavid Woodhouse /*
123953b381b3SDavid Woodhouse  * helper function to walk our bio list and populate the bio_sectors array with
124053b381b3SDavid Woodhouse  * the result.  This seems expensive, but it is faster than constantly
124153b381b3SDavid Woodhouse  * searching through the bio list as we set up the IO in finish_rmw or stripe
124253b381b3SDavid Woodhouse  * reconstruction.
124353b381b3SDavid Woodhouse  *
124453b381b3SDavid Woodhouse  * This must be called before you trust the answers from sector_in_rbio()
124553b381b3SDavid Woodhouse  */
124653b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio)
124753b381b3SDavid Woodhouse {
124853b381b3SDavid Woodhouse 	struct bio *bio;
124953b381b3SDavid Woodhouse 
125053b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
125100425dd9SQu Wenruo 	bio_list_for_each(bio, &rbio->bio_list)
125200425dd9SQu Wenruo 		index_one_bio(rbio, bio);
125300425dd9SQu Wenruo 
125453b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
125553b381b3SDavid Woodhouse }
125653b381b3SDavid Woodhouse 
125753b381b3SDavid Woodhouse /*
125853b381b3SDavid Woodhouse  * this is called from one of two situations.  We either
125953b381b3SDavid Woodhouse  * have a full stripe from the higher layers, or we've read all
126053b381b3SDavid Woodhouse  * the missing bits off disk.
126153b381b3SDavid Woodhouse  *
126253b381b3SDavid Woodhouse  * This will calculate the parity and then send down any
126353b381b3SDavid Woodhouse  * changed blocks.
126453b381b3SDavid Woodhouse  */
126553b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
126653b381b3SDavid Woodhouse {
12674c664611SQu Wenruo 	struct btrfs_io_context *bioc = rbio->bioc;
12681145059aSQu Wenruo 	const u32 sectorsize = bioc->fs_info->sectorsize;
12691389053eSKees Cook 	void **pointers = rbio->finish_pointers;
127053b381b3SDavid Woodhouse 	int nr_data = rbio->nr_data;
127153b381b3SDavid Woodhouse 	int stripe;
12723e77605dSQu Wenruo 	int sectornr;
1273c17af965SDavid Sterba 	bool has_qstripe;
127453b381b3SDavid Woodhouse 	struct bio_list bio_list;
127553b381b3SDavid Woodhouse 	struct bio *bio;
127653b381b3SDavid Woodhouse 	int ret;
127753b381b3SDavid Woodhouse 
127853b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
127953b381b3SDavid Woodhouse 
1280c17af965SDavid Sterba 	if (rbio->real_stripes - rbio->nr_data == 1)
1281c17af965SDavid Sterba 		has_qstripe = false;
1282c17af965SDavid Sterba 	else if (rbio->real_stripes - rbio->nr_data == 2)
1283c17af965SDavid Sterba 		has_qstripe = true;
1284c17af965SDavid Sterba 	else
128553b381b3SDavid Woodhouse 		BUG();
128653b381b3SDavid Woodhouse 
128753b381b3SDavid Woodhouse 	/* at this point we either have a full stripe,
128853b381b3SDavid Woodhouse 	 * or we've read the full stripe from the drive.
128953b381b3SDavid Woodhouse 	 * recalculate the parity and write the new results.
129053b381b3SDavid Woodhouse 	 *
129153b381b3SDavid Woodhouse 	 * We're not allowed to add any new bios to the
129253b381b3SDavid Woodhouse 	 * bio list here, anyone else that wants to
129353b381b3SDavid Woodhouse 	 * change this stripe needs to do their own rmw.
129453b381b3SDavid Woodhouse 	 */
129553b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
129653b381b3SDavid Woodhouse 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
129753b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
129853b381b3SDavid Woodhouse 
1299b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
130053b381b3SDavid Woodhouse 
130153b381b3SDavid Woodhouse 	/*
130253b381b3SDavid Woodhouse 	 * now that we've set rmw_locked, run through the
130353b381b3SDavid Woodhouse 	 * bio list one last time and map the page pointers
13044ae10b3aSChris Mason 	 *
13054ae10b3aSChris Mason 	 * We don't cache full rbios because we're assuming
13064ae10b3aSChris Mason 	 * the higher layers are unlikely to use this area of
13074ae10b3aSChris Mason 	 * the disk again soon.  If they do use it again,
13084ae10b3aSChris Mason 	 * hopefully they will send another full bio.
130953b381b3SDavid Woodhouse 	 */
131053b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
13114ae10b3aSChris Mason 	if (!rbio_is_full(rbio))
13124ae10b3aSChris Mason 		cache_rbio_pages(rbio);
13134ae10b3aSChris Mason 	else
13144ae10b3aSChris Mason 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
131553b381b3SDavid Woodhouse 
13163e77605dSQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
13171145059aSQu Wenruo 		struct sector_ptr *sector;
13181145059aSQu Wenruo 
13191145059aSQu Wenruo 		/* First collect one sector from each data stripe */
132053b381b3SDavid Woodhouse 		for (stripe = 0; stripe < nr_data; stripe++) {
13211145059aSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
13221145059aSQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
13231145059aSQu Wenruo 					   sector->pgoff;
132453b381b3SDavid Woodhouse 		}
132553b381b3SDavid Woodhouse 
13261145059aSQu Wenruo 		/* Then add the parity stripe */
13271145059aSQu Wenruo 		sector = rbio_pstripe_sector(rbio, sectornr);
13281145059aSQu Wenruo 		sector->uptodate = 1;
13291145059aSQu Wenruo 		pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;
133053b381b3SDavid Woodhouse 
1331c17af965SDavid Sterba 		if (has_qstripe) {
133253b381b3SDavid Woodhouse 			/*
13331145059aSQu Wenruo 			 * RAID6, add the qstripe and call the library function
13341145059aSQu Wenruo 			 * to fill in our p/q
133553b381b3SDavid Woodhouse 			 */
13361145059aSQu Wenruo 			sector = rbio_qstripe_sector(rbio, sectornr);
13371145059aSQu Wenruo 			sector->uptodate = 1;
13381145059aSQu Wenruo 			pointers[stripe++] = kmap_local_page(sector->page) +
13391145059aSQu Wenruo 					     sector->pgoff;
134053b381b3SDavid Woodhouse 
13411145059aSQu Wenruo 			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
134253b381b3SDavid Woodhouse 						pointers);
134353b381b3SDavid Woodhouse 		} else {
134453b381b3SDavid Woodhouse 			/* raid5 */
13451145059aSQu Wenruo 			memcpy(pointers[nr_data], pointers[0], sectorsize);
13461145059aSQu Wenruo 			run_xor(pointers + 1, nr_data - 1, sectorsize);
134753b381b3SDavid Woodhouse 		}
134894a0b58dSIra Weiny 		for (stripe = stripe - 1; stripe >= 0; stripe--)
134994a0b58dSIra Weiny 			kunmap_local(pointers[stripe]);
135053b381b3SDavid Woodhouse 	}
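
	/*
	 * At this point every parity sector is current.  For RAID5 the loop
	 * above computed P = D0 ^ D1 ^ ... ^ Dn-1 (memcpy of D0 followed by
	 * run_xor of the rest); for RAID6, raid6_call.gen_syndrome() filled
	 * both P and the Reed-Solomon Q syndrome in one pass.
	 */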
135153b381b3SDavid Woodhouse 
135253b381b3SDavid Woodhouse 	/*
135353b381b3SDavid Woodhouse 	 * time to start writing.  Make bios for everything from the
135453b381b3SDavid Woodhouse 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
135553b381b3SDavid Woodhouse 	 * everything else.
135653b381b3SDavid Woodhouse 	 */
13572c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
13583e77605dSQu Wenruo 		for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
13593e77605dSQu Wenruo 			struct sector_ptr *sector;
13603e77605dSQu Wenruo 
136153b381b3SDavid Woodhouse 			if (stripe < rbio->nr_data) {
13623e77605dSQu Wenruo 				sector = sector_in_rbio(rbio, stripe, sectornr, 1);
13633e77605dSQu Wenruo 				if (!sector)
136453b381b3SDavid Woodhouse 					continue;
136553b381b3SDavid Woodhouse 			} else {
13663e77605dSQu Wenruo 				sector = rbio_stripe_sector(rbio, stripe, sectornr);
136753b381b3SDavid Woodhouse 			}
136853b381b3SDavid Woodhouse 
13693e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
13703e77605dSQu Wenruo 						 sectornr, rbio->stripe_len,
1371e01bf588SChristoph Hellwig 						 REQ_OP_WRITE);
137253b381b3SDavid Woodhouse 			if (ret)
137353b381b3SDavid Woodhouse 				goto cleanup;
137453b381b3SDavid Woodhouse 		}
137553b381b3SDavid Woodhouse 	}
137653b381b3SDavid Woodhouse 
13774c664611SQu Wenruo 	if (likely(!bioc->num_tgtdevs))
13782c8cdd6eSMiao Xie 		goto write_data;
13792c8cdd6eSMiao Xie 
13802c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
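	/*
	 * Device replace in flight: every stripe that has a tgtdev_map entry
	 * gets its writes duplicated to the replacement device below, using
	 * the translated stripe index instead of the original one.
	 */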
13814c664611SQu Wenruo 		if (!bioc->tgtdev_map[stripe])
13822c8cdd6eSMiao Xie 			continue;
13832c8cdd6eSMiao Xie 
13843e77605dSQu Wenruo 		for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
13853e77605dSQu Wenruo 			struct sector_ptr *sector;
13863e77605dSQu Wenruo 
13872c8cdd6eSMiao Xie 			if (stripe < rbio->nr_data) {
13883e77605dSQu Wenruo 				sector = sector_in_rbio(rbio, stripe, sectornr, 1);
13893e77605dSQu Wenruo 				if (!sector)
13902c8cdd6eSMiao Xie 					continue;
13912c8cdd6eSMiao Xie 			} else {
13923e77605dSQu Wenruo 				sector = rbio_stripe_sector(rbio, stripe, sectornr);
13932c8cdd6eSMiao Xie 			}
13942c8cdd6eSMiao Xie 
13953e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector,
13964c664611SQu Wenruo 					       rbio->bioc->tgtdev_map[stripe],
13973e77605dSQu Wenruo 					       sectornr, rbio->stripe_len,
1398e01bf588SChristoph Hellwig 					       REQ_OP_WRITE);
13992c8cdd6eSMiao Xie 			if (ret)
14002c8cdd6eSMiao Xie 				goto cleanup;
14012c8cdd6eSMiao Xie 		}
14022c8cdd6eSMiao Xie 	}
14032c8cdd6eSMiao Xie 
14042c8cdd6eSMiao Xie write_data:
1405b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1406b89e1b01SMiao Xie 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
140753b381b3SDavid Woodhouse 
1408bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
140953b381b3SDavid Woodhouse 		bio->bi_end_io = raid_write_end_io;
14104e49ea4aSMike Christie 
14114e49ea4aSMike Christie 		submit_bio(bio);
141253b381b3SDavid Woodhouse 	}
141353b381b3SDavid Woodhouse 	return;
141453b381b3SDavid Woodhouse 
141553b381b3SDavid Woodhouse cleanup:
141658efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1417785884fcSLiu Bo 
1418785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
1419785884fcSLiu Bo 		bio_put(bio);
142053b381b3SDavid Woodhouse }
142153b381b3SDavid Woodhouse 
142253b381b3SDavid Woodhouse /*
142353b381b3SDavid Woodhouse  * helper to find the stripe number for a given bio.  Used to figure out which
142453b381b3SDavid Woodhouse  * stripe has failed.  This expects the bio to correspond to a physical disk,
142553b381b3SDavid Woodhouse  * so it looks up based on physical sector numbers.
142653b381b3SDavid Woodhouse  */
142753b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio,
142853b381b3SDavid Woodhouse 			   struct bio *bio)
142953b381b3SDavid Woodhouse {
14304f024f37SKent Overstreet 	u64 physical = bio->bi_iter.bi_sector;
143153b381b3SDavid Woodhouse 	int i;
14324c664611SQu Wenruo 	struct btrfs_io_stripe *stripe;
143353b381b3SDavid Woodhouse 
143453b381b3SDavid Woodhouse 	physical <<= 9;
143553b381b3SDavid Woodhouse 
14364c664611SQu Wenruo 	for (i = 0; i < rbio->bioc->num_stripes; i++) {
14374c664611SQu Wenruo 		stripe = &rbio->bioc->stripes[i];
143883025863SNikolay Borisov 		if (in_range(physical, stripe->physical, rbio->stripe_len) &&
1439309dca30SChristoph Hellwig 		    stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
144053b381b3SDavid Woodhouse 			return i;
144153b381b3SDavid Woodhouse 		}
144253b381b3SDavid Woodhouse 	}
144353b381b3SDavid Woodhouse 	return -1;
144453b381b3SDavid Woodhouse }
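
/*
 * Example (illustrative): with 64K stripes, a failed bio whose start sector
 * converts to physical byte 0x108000 matches the stripe whose
 * stripe->physical is 0x100000, because in_range() checks a half-open
 * [physical, physical + stripe_len) window on the same bdev.
 */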
144553b381b3SDavid Woodhouse 
144653b381b3SDavid Woodhouse /*
144753b381b3SDavid Woodhouse  * helper to find the stripe number for a given
144853b381b3SDavid Woodhouse  * bio (before mapping).  Used to figure out which stripe has
144953b381b3SDavid Woodhouse  * failed.  This looks up based on logical block numbers.
145053b381b3SDavid Woodhouse  */
145153b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
145253b381b3SDavid Woodhouse 				   struct bio *bio)
145353b381b3SDavid Woodhouse {
14541201b58bSDavid Sterba 	u64 logical = bio->bi_iter.bi_sector << 9;
145553b381b3SDavid Woodhouse 	int i;
145653b381b3SDavid Woodhouse 
145753b381b3SDavid Woodhouse 	for (i = 0; i < rbio->nr_data; i++) {
14584c664611SQu Wenruo 		u64 stripe_start = rbio->bioc->raid_map[i];
145983025863SNikolay Borisov 
146083025863SNikolay Borisov 		if (in_range(logical, stripe_start, rbio->stripe_len))
146153b381b3SDavid Woodhouse 			return i;
146253b381b3SDavid Woodhouse 	}
146353b381b3SDavid Woodhouse 	return -1;
146453b381b3SDavid Woodhouse }
146553b381b3SDavid Woodhouse 
146653b381b3SDavid Woodhouse /*
146753b381b3SDavid Woodhouse  * returns -EIO if we had too many failures
146853b381b3SDavid Woodhouse  */
146953b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
147053b381b3SDavid Woodhouse {
147153b381b3SDavid Woodhouse 	unsigned long flags;
147253b381b3SDavid Woodhouse 	int ret = 0;
147353b381b3SDavid Woodhouse 
147453b381b3SDavid Woodhouse 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
147553b381b3SDavid Woodhouse 
147653b381b3SDavid Woodhouse 	/* we already know this stripe is bad, move on */
147753b381b3SDavid Woodhouse 	if (rbio->faila == failed || rbio->failb == failed)
147853b381b3SDavid Woodhouse 		goto out;
147953b381b3SDavid Woodhouse 
148053b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
148153b381b3SDavid Woodhouse 		/* first failure on this rbio */
148253b381b3SDavid Woodhouse 		rbio->faila = failed;
1483b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
148453b381b3SDavid Woodhouse 	} else if (rbio->failb == -1) {
148553b381b3SDavid Woodhouse 		/* second failure on this rbio */
148653b381b3SDavid Woodhouse 		rbio->failb = failed;
1487b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
148853b381b3SDavid Woodhouse 	} else {
148953b381b3SDavid Woodhouse 		ret = -EIO;
149053b381b3SDavid Woodhouse 	}
149153b381b3SDavid Woodhouse out:
149253b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
149353b381b3SDavid Woodhouse 
149453b381b3SDavid Woodhouse 	return ret;
149553b381b3SDavid Woodhouse }
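
/*
 * Failure accounting sketch: faila/failb give at most two tracked bad
 * stripes, which matches what RAID6 can reconstruct; the first two calls
 * record the stripe and bump rbio->error, and a third distinct failure
 * returns -EIO.  Whether the recorded failures exceed what the profile
 * can tolerate is decided later against bioc->max_errors.
 */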
149653b381b3SDavid Woodhouse 
149753b381b3SDavid Woodhouse /*
149853b381b3SDavid Woodhouse  * helper to fail a stripe based on a physical disk
149953b381b3SDavid Woodhouse  * bio.
150053b381b3SDavid Woodhouse  */
150153b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
150253b381b3SDavid Woodhouse 			   struct bio *bio)
150353b381b3SDavid Woodhouse {
150453b381b3SDavid Woodhouse 	int failed = find_bio_stripe(rbio, bio);
150553b381b3SDavid Woodhouse 
150653b381b3SDavid Woodhouse 	if (failed < 0)
150753b381b3SDavid Woodhouse 		return -EIO;
150853b381b3SDavid Woodhouse 
150953b381b3SDavid Woodhouse 	return fail_rbio_index(rbio, failed);
151053b381b3SDavid Woodhouse }
151153b381b3SDavid Woodhouse 
151253b381b3SDavid Woodhouse /*
15135fdb7afcSQu Wenruo  * For the subpage case, we can no longer set page Uptodate directly for
15145fdb7afcSQu Wenruo  * stripe_pages[], thus we need to locate the sector.
15155fdb7afcSQu Wenruo  */
15165fdb7afcSQu Wenruo static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
15175fdb7afcSQu Wenruo 					     struct page *page,
15185fdb7afcSQu Wenruo 					     unsigned int pgoff)
15195fdb7afcSQu Wenruo {
15205fdb7afcSQu Wenruo 	int i;
15215fdb7afcSQu Wenruo 
15225fdb7afcSQu Wenruo 	for (i = 0; i < rbio->nr_sectors; i++) {
15235fdb7afcSQu Wenruo 		struct sector_ptr *sector = &rbio->stripe_sectors[i];
15245fdb7afcSQu Wenruo 
15255fdb7afcSQu Wenruo 		if (sector->page == page && sector->pgoff == pgoff)
15265fdb7afcSQu Wenruo 			return sector;
15275fdb7afcSQu Wenruo 	}
15285fdb7afcSQu Wenruo 	return NULL;
15295fdb7afcSQu Wenruo }
15305fdb7afcSQu Wenruo 
15315fdb7afcSQu Wenruo /*
153253b381b3SDavid Woodhouse  * this marks each sector in the bio as uptodate.  It should only be used on
153353b381b3SDavid Woodhouse  * private rbio pages, nothing that comes in from the higher layers
153453b381b3SDavid Woodhouse  */
15355fdb7afcSQu Wenruo static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
153653b381b3SDavid Woodhouse {
15375fdb7afcSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
15380198e5b7SLiu Bo 	struct bio_vec *bvec;
15396dc4f100SMing Lei 	struct bvec_iter_all iter_all;
154053b381b3SDavid Woodhouse 
15410198e5b7SLiu Bo 	ASSERT(!bio_flagged(bio, BIO_CLONED));
15426592e58cSFilipe Manana 
15435fdb7afcSQu Wenruo 	bio_for_each_segment_all(bvec, bio, iter_all) {
15445fdb7afcSQu Wenruo 		struct sector_ptr *sector;
15455fdb7afcSQu Wenruo 		int pgoff;
15465fdb7afcSQu Wenruo 
15475fdb7afcSQu Wenruo 		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
15485fdb7afcSQu Wenruo 		     pgoff += sectorsize) {
15495fdb7afcSQu Wenruo 			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
15505fdb7afcSQu Wenruo 			ASSERT(sector);
15515fdb7afcSQu Wenruo 			if (sector)
15525fdb7afcSQu Wenruo 				sector->uptodate = 1;
15535fdb7afcSQu Wenruo 		}
15545fdb7afcSQu Wenruo 	}
155553b381b3SDavid Woodhouse }
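
/*
 * Subpage example (illustrative, 64K pages with a 4K sectorsize): one bvec
 * covering a full page advances pgoff 16 times above, flipping uptodate on
 * each of the 16 sector_ptrs that alias that page instead of a single
 * page-level flag.
 */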
155653b381b3SDavid Woodhouse 
155753b381b3SDavid Woodhouse /*
155853b381b3SDavid Woodhouse  * end io for the read phase of the rmw cycle.  All the bios here are physical
155953b381b3SDavid Woodhouse  * stripe bios we've read from the disk so we can recalculate the parity of the
156053b381b3SDavid Woodhouse  * stripe.
156153b381b3SDavid Woodhouse  *
156253b381b3SDavid Woodhouse  * This will usually kick off finish_rmw once all the bios are read in, but it
156353b381b3SDavid Woodhouse  * may trigger parity reconstruction if we had any errors along the way
156453b381b3SDavid Woodhouse  */
15654246a0b6SChristoph Hellwig static void raid_rmw_end_io(struct bio *bio)
156653b381b3SDavid Woodhouse {
156753b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio = bio->bi_private;
156853b381b3SDavid Woodhouse 
15694e4cbee9SChristoph Hellwig 	if (bio->bi_status)
157053b381b3SDavid Woodhouse 		fail_bio_stripe(rbio, bio);
157153b381b3SDavid Woodhouse 	else
15725fdb7afcSQu Wenruo 		set_bio_pages_uptodate(rbio, bio);
157353b381b3SDavid Woodhouse 
157453b381b3SDavid Woodhouse 	bio_put(bio);
157553b381b3SDavid Woodhouse 
1576b89e1b01SMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
157753b381b3SDavid Woodhouse 		return;
157853b381b3SDavid Woodhouse 
15794c664611SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
158053b381b3SDavid Woodhouse 		goto cleanup;
158153b381b3SDavid Woodhouse 
158253b381b3SDavid Woodhouse 	/*
158353b381b3SDavid Woodhouse 	 * this will normally call finish_rmw to start our write
158453b381b3SDavid Woodhouse 	 * but if there are any failed stripes we'll reconstruct
158553b381b3SDavid Woodhouse 	 * from parity first
158653b381b3SDavid Woodhouse 	 */
158753b381b3SDavid Woodhouse 	validate_rbio_for_rmw(rbio);
158853b381b3SDavid Woodhouse 	return;
158953b381b3SDavid Woodhouse 
159053b381b3SDavid Woodhouse cleanup:
159153b381b3SDavid Woodhouse 
159258efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
159353b381b3SDavid Woodhouse }
159453b381b3SDavid Woodhouse 
159553b381b3SDavid Woodhouse /*
159653b381b3SDavid Woodhouse  * the stripe must be locked by the caller.  It will
159753b381b3SDavid Woodhouse  * unlock after all the writes are done
159853b381b3SDavid Woodhouse  */
159953b381b3SDavid Woodhouse static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
160053b381b3SDavid Woodhouse {
160153b381b3SDavid Woodhouse 	int bios_to_read = 0;
160253b381b3SDavid Woodhouse 	struct bio_list bio_list;
160353b381b3SDavid Woodhouse 	int ret;
16043e77605dSQu Wenruo 	int sectornr;
160553b381b3SDavid Woodhouse 	int stripe;
160653b381b3SDavid Woodhouse 	struct bio *bio;
160753b381b3SDavid Woodhouse 
160853b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
160953b381b3SDavid Woodhouse 
161053b381b3SDavid Woodhouse 	ret = alloc_rbio_pages(rbio);
161153b381b3SDavid Woodhouse 	if (ret)
161253b381b3SDavid Woodhouse 		goto cleanup;
161353b381b3SDavid Woodhouse 
161453b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
161553b381b3SDavid Woodhouse 
1616b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
161753b381b3SDavid Woodhouse 	/*
161853b381b3SDavid Woodhouse 	 * build a list of bios to read all the missing parts of this
161953b381b3SDavid Woodhouse 	 * stripe
162053b381b3SDavid Woodhouse 	 */
162153b381b3SDavid Woodhouse 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
16223e77605dSQu Wenruo 		for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
16233e77605dSQu Wenruo 			struct sector_ptr *sector;
16243e77605dSQu Wenruo 
162553b381b3SDavid Woodhouse 			/*
16263e77605dSQu Wenruo 			 * We want to find all the sectors missing from the
16273e77605dSQu Wenruo 			 * rbio and read them from the disk.  If sector_in_rbio()
16283e77605dSQu Wenruo 			 * finds a page in the bio list we don't need to read
16293e77605dSQu Wenruo 			 * it off the stripe.
163053b381b3SDavid Woodhouse 			 */
16313e77605dSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
16323e77605dSQu Wenruo 			if (sector)
163353b381b3SDavid Woodhouse 				continue;
163453b381b3SDavid Woodhouse 
16353e77605dSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
16364ae10b3aSChris Mason 			/*
16373e77605dSQu Wenruo 			 * The bio cache may have handed us an uptodate sector.
16383e77605dSQu Wenruo 			 * If so, be happy and use it.
16394ae10b3aSChris Mason 			 */
16403e77605dSQu Wenruo 			if (sector->uptodate)
16414ae10b3aSChris Mason 				continue;
16424ae10b3aSChris Mason 
16433e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector,
16443e77605dSQu Wenruo 				       stripe, sectornr, rbio->stripe_len,
1645e01bf588SChristoph Hellwig 				       REQ_OP_READ);
164653b381b3SDavid Woodhouse 			if (ret)
164753b381b3SDavid Woodhouse 				goto cleanup;
164853b381b3SDavid Woodhouse 		}
164953b381b3SDavid Woodhouse 	}
165053b381b3SDavid Woodhouse 
165153b381b3SDavid Woodhouse 	bios_to_read = bio_list_size(&bio_list);
165253b381b3SDavid Woodhouse 	if (!bios_to_read) {
165353b381b3SDavid Woodhouse 		/*
165453b381b3SDavid Woodhouse 		 * this can happen if others have merged with
165553b381b3SDavid Woodhouse 		 * us; it means there is nothing left to read.
165653b381b3SDavid Woodhouse 		 * But if there are missing devices it may not be
165753b381b3SDavid Woodhouse 		 * safe to do the full stripe write yet.
165853b381b3SDavid Woodhouse 		 */
165953b381b3SDavid Woodhouse 		goto finish;
166053b381b3SDavid Woodhouse 	}
166153b381b3SDavid Woodhouse 
166253b381b3SDavid Woodhouse 	/*
16634c664611SQu Wenruo 	 * The bioc may be freed once we submit the last bio. Make sure not to
16644c664611SQu Wenruo 	 * touch it after that.
166553b381b3SDavid Woodhouse 	 */
1666b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
1667bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
166853b381b3SDavid Woodhouse 		bio->bi_end_io = raid_rmw_end_io;
166953b381b3SDavid Woodhouse 
16706a258d72SQu Wenruo 		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
167153b381b3SDavid Woodhouse 
16724e49ea4aSMike Christie 		submit_bio(bio);
167353b381b3SDavid Woodhouse 	}
167453b381b3SDavid Woodhouse 	/* the actual write will happen once the reads are done */
167553b381b3SDavid Woodhouse 	return 0;
167653b381b3SDavid Woodhouse 
167753b381b3SDavid Woodhouse cleanup:
167858efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1679785884fcSLiu Bo 
1680785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
1681785884fcSLiu Bo 		bio_put(bio);
1682785884fcSLiu Bo 
168353b381b3SDavid Woodhouse 	return -EIO;
168453b381b3SDavid Woodhouse 
168553b381b3SDavid Woodhouse finish:
168653b381b3SDavid Woodhouse 	validate_rbio_for_rmw(rbio);
168753b381b3SDavid Woodhouse 	return 0;
168853b381b3SDavid Woodhouse }
168953b381b3SDavid Woodhouse 
169053b381b3SDavid Woodhouse /*
169153b381b3SDavid Woodhouse  * if the upper layers pass in a full stripe, we thank them by only allocating
169253b381b3SDavid Woodhouse  * enough pages to hold the parity, and sending it all down quickly.
169353b381b3SDavid Woodhouse  */
169453b381b3SDavid Woodhouse static int full_stripe_write(struct btrfs_raid_bio *rbio)
169553b381b3SDavid Woodhouse {
169653b381b3SDavid Woodhouse 	int ret;
169753b381b3SDavid Woodhouse 
169853b381b3SDavid Woodhouse 	ret = alloc_rbio_parity_pages(rbio);
16993cd846d1SMiao Xie 	if (ret) {
17003cd846d1SMiao Xie 		__free_raid_bio(rbio);
170153b381b3SDavid Woodhouse 		return ret;
17023cd846d1SMiao Xie 	}
170353b381b3SDavid Woodhouse 
170453b381b3SDavid Woodhouse 	ret = lock_stripe_add(rbio);
170553b381b3SDavid Woodhouse 	if (ret == 0)
170653b381b3SDavid Woodhouse 		finish_rmw(rbio);
170753b381b3SDavid Woodhouse 	return 0;
170853b381b3SDavid Woodhouse }
170953b381b3SDavid Woodhouse 
171053b381b3SDavid Woodhouse /*
171153b381b3SDavid Woodhouse  * partial stripe writes get handed over to async helpers.
171253b381b3SDavid Woodhouse  * We're really hoping to merge a few more writes into this
171353b381b3SDavid Woodhouse  * rbio before calculating new parity
171453b381b3SDavid Woodhouse  */
171553b381b3SDavid Woodhouse static int partial_stripe_write(struct btrfs_raid_bio *rbio)
171653b381b3SDavid Woodhouse {
171753b381b3SDavid Woodhouse 	int ret;
171853b381b3SDavid Woodhouse 
171953b381b3SDavid Woodhouse 	ret = lock_stripe_add(rbio);
172053b381b3SDavid Woodhouse 	if (ret == 0)
1721cf6a4a75SDavid Sterba 		start_async_work(rbio, rmw_work);
172253b381b3SDavid Woodhouse 	return 0;
172353b381b3SDavid Woodhouse }
172453b381b3SDavid Woodhouse 
172553b381b3SDavid Woodhouse /*
172653b381b3SDavid Woodhouse  * sometimes while we're reading from the drive to
172753b381b3SDavid Woodhouse  * recalculate parity, enough new bios come in to create
172853b381b3SDavid Woodhouse  * a full stripe.  So we do a check here to see if we can
172953b381b3SDavid Woodhouse  * go directly to finish_rmw
173053b381b3SDavid Woodhouse  */
173153b381b3SDavid Woodhouse static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
173253b381b3SDavid Woodhouse {
173353b381b3SDavid Woodhouse 	/* head off into rmw land if we don't have a full stripe */
173453b381b3SDavid Woodhouse 	if (!rbio_is_full(rbio))
173553b381b3SDavid Woodhouse 		return partial_stripe_write(rbio);
173653b381b3SDavid Woodhouse 	return full_stripe_write(rbio);
173753b381b3SDavid Woodhouse }
173853b381b3SDavid Woodhouse 
173953b381b3SDavid Woodhouse /*
17406ac0f488SChris Mason  * We use plugging callbacks to collect full stripes.
17416ac0f488SChris Mason  * Any time we get a partial stripe write while plugged
17426ac0f488SChris Mason  * we collect it into a list.  When the unplug comes down,
17436ac0f488SChris Mason  * we sort the list by logical block number and merge
17446ac0f488SChris Mason  * everything we can into the same rbios.
17456ac0f488SChris Mason  */
17466ac0f488SChris Mason struct btrfs_plug_cb {
17476ac0f488SChris Mason 	struct blk_plug_cb cb;
17486ac0f488SChris Mason 	struct btrfs_fs_info *info;
17496ac0f488SChris Mason 	struct list_head rbio_list;
1750385de0efSChristoph Hellwig 	struct work_struct work;
17516ac0f488SChris Mason };
17526ac0f488SChris Mason 
17536ac0f488SChris Mason /*
17546ac0f488SChris Mason  * rbios on the plug list are sorted for easier merging.
17556ac0f488SChris Mason  */
17564f0f586bSSami Tolvanen static int plug_cmp(void *priv, const struct list_head *a,
17574f0f586bSSami Tolvanen 		    const struct list_head *b)
17586ac0f488SChris Mason {
1759214cc184SDavid Sterba 	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
17606ac0f488SChris Mason 						       plug_list);
1761214cc184SDavid Sterba 	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
17626ac0f488SChris Mason 						       plug_list);
17634f024f37SKent Overstreet 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
17644f024f37SKent Overstreet 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
17656ac0f488SChris Mason 
17666ac0f488SChris Mason 	if (a_sector < b_sector)
17676ac0f488SChris Mason 		return -1;
17686ac0f488SChris Mason 	if (a_sector > b_sector)
17696ac0f488SChris Mason 		return 1;
17706ac0f488SChris Mason 	return 0;
17716ac0f488SChris Mason }
17726ac0f488SChris Mason 
17736ac0f488SChris Mason static void run_plug(struct btrfs_plug_cb *plug)
17746ac0f488SChris Mason {
17756ac0f488SChris Mason 	struct btrfs_raid_bio *cur;
17766ac0f488SChris Mason 	struct btrfs_raid_bio *last = NULL;
17776ac0f488SChris Mason 
17786ac0f488SChris Mason 	/*
17796ac0f488SChris Mason 	 * sort our plug list then try to merge
17806ac0f488SChris Mason 	 * everything we can in hopes of creating full
17816ac0f488SChris Mason 	 * stripes.
17826ac0f488SChris Mason 	 */
17836ac0f488SChris Mason 	list_sort(NULL, &plug->rbio_list, plug_cmp);
17846ac0f488SChris Mason 	while (!list_empty(&plug->rbio_list)) {
17856ac0f488SChris Mason 		cur = list_entry(plug->rbio_list.next,
17866ac0f488SChris Mason 				 struct btrfs_raid_bio, plug_list);
17876ac0f488SChris Mason 		list_del_init(&cur->plug_list);
17886ac0f488SChris Mason 
17896ac0f488SChris Mason 		if (rbio_is_full(cur)) {
1790c7b562c5SDavid Sterba 			int ret;
1791c7b562c5SDavid Sterba 
17926ac0f488SChris Mason 			/* we have a full stripe, send it down */
1793c7b562c5SDavid Sterba 			ret = full_stripe_write(cur);
1794c7b562c5SDavid Sterba 			BUG_ON(ret);
17956ac0f488SChris Mason 			continue;
17966ac0f488SChris Mason 		}
17976ac0f488SChris Mason 		if (last) {
17986ac0f488SChris Mason 			if (rbio_can_merge(last, cur)) {
17996ac0f488SChris Mason 				merge_rbio(last, cur);
18006ac0f488SChris Mason 				__free_raid_bio(cur);
18016ac0f488SChris Mason 				continue;
18026ac0f488SChris Mason 
18036ac0f488SChris Mason 			}
18046ac0f488SChris Mason 			__raid56_parity_write(last);
18056ac0f488SChris Mason 		}
18066ac0f488SChris Mason 		last = cur;
18076ac0f488SChris Mason 	}
18086ac0f488SChris Mason 	if (last) {
18096ac0f488SChris Mason 		__raid56_parity_write(last);
18106ac0f488SChris Mason 	}
18116ac0f488SChris Mason 	kfree(plug);
18126ac0f488SChris Mason }
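
/*
 * Plug pipeline sketch: after the sort, rbios touching the same full stripe
 * sit next to each other, so e.g. two partial writes to stripe A and one to
 * stripe B become one merged rbio for A (sent down __raid56_parity_write)
 * followed by one for B, instead of three separate RMW cycles.
 */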
18136ac0f488SChris Mason 
18146ac0f488SChris Mason /*
18156ac0f488SChris Mason  * if the unplug comes from schedule, we have to push the
18166ac0f488SChris Mason  * work off to a helper thread
18176ac0f488SChris Mason  */
1818385de0efSChristoph Hellwig static void unplug_work(struct work_struct *work)
18196ac0f488SChris Mason {
18206ac0f488SChris Mason 	struct btrfs_plug_cb *plug;
18216ac0f488SChris Mason 	plug = container_of(work, struct btrfs_plug_cb, work);
18226ac0f488SChris Mason 	run_plug(plug);
18236ac0f488SChris Mason }
18246ac0f488SChris Mason 
18256ac0f488SChris Mason static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
18266ac0f488SChris Mason {
18276ac0f488SChris Mason 	struct btrfs_plug_cb *plug;
18286ac0f488SChris Mason 	plug = container_of(cb, struct btrfs_plug_cb, cb);
18296ac0f488SChris Mason 
18306ac0f488SChris Mason 	if (from_schedule) {
1831385de0efSChristoph Hellwig 		INIT_WORK(&plug->work, unplug_work);
1832385de0efSChristoph Hellwig 		queue_work(plug->info->rmw_workers, &plug->work);
18336ac0f488SChris Mason 		return;
18346ac0f488SChris Mason 	}
18356ac0f488SChris Mason 	run_plug(plug);
18366ac0f488SChris Mason }
18376ac0f488SChris Mason 
18386ac0f488SChris Mason /*
183953b381b3SDavid Woodhouse  * our main entry point for writes from the rest of the FS.
184053b381b3SDavid Woodhouse  */
1841cc353a8bSQu Wenruo int raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc, u32 stripe_len)
184253b381b3SDavid Woodhouse {
18436a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
184453b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
18456ac0f488SChris Mason 	struct btrfs_plug_cb *plug = NULL;
18466ac0f488SChris Mason 	struct blk_plug_cb *cb;
18474245215dSMiao Xie 	int ret;
184853b381b3SDavid Woodhouse 
18494c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
1850af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
18514c664611SQu Wenruo 		btrfs_put_bioc(bioc);
185253b381b3SDavid Woodhouse 		return PTR_ERR(rbio);
1853af8e2d1dSMiao Xie 	}
185453b381b3SDavid Woodhouse 	bio_list_add(&rbio->bio_list, bio);
18554f024f37SKent Overstreet 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
18561b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_WRITE;
18576ac0f488SChris Mason 
18580b246afaSJeff Mahoney 	btrfs_bio_counter_inc_noblocked(fs_info);
18594245215dSMiao Xie 	rbio->generic_bio_cnt = 1;
18604245215dSMiao Xie 
18616ac0f488SChris Mason 	/*
18626ac0f488SChris Mason 	 * don't plug on full rbios, just get them out the door
18636ac0f488SChris Mason 	 * as quickly as we can
18646ac0f488SChris Mason 	 */
18654245215dSMiao Xie 	if (rbio_is_full(rbio)) {
18664245215dSMiao Xie 		ret = full_stripe_write(rbio);
18674245215dSMiao Xie 		if (ret)
18680b246afaSJeff Mahoney 			btrfs_bio_counter_dec(fs_info);
18694245215dSMiao Xie 		return ret;
18704245215dSMiao Xie 	}
18716ac0f488SChris Mason 
18720b246afaSJeff Mahoney 	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
18736ac0f488SChris Mason 	if (cb) {
18746ac0f488SChris Mason 		plug = container_of(cb, struct btrfs_plug_cb, cb);
18756ac0f488SChris Mason 		if (!plug->info) {
18760b246afaSJeff Mahoney 			plug->info = fs_info;
18776ac0f488SChris Mason 			INIT_LIST_HEAD(&plug->rbio_list);
18786ac0f488SChris Mason 		}
18796ac0f488SChris Mason 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
18804245215dSMiao Xie 		ret = 0;
18816ac0f488SChris Mason 	} else {
18824245215dSMiao Xie 		ret = __raid56_parity_write(rbio);
18834245215dSMiao Xie 		if (ret)
18840b246afaSJeff Mahoney 			btrfs_bio_counter_dec(fs_info);
188553b381b3SDavid Woodhouse 	}
18864245215dSMiao Xie 	return ret;
18876ac0f488SChris Mason }
188853b381b3SDavid Woodhouse 
188953b381b3SDavid Woodhouse /*
189053b381b3SDavid Woodhouse  * all parity reconstruction happens here.  We've read in everything
189153b381b3SDavid Woodhouse  * we can find from the drives and this does the heavy lifting of
189253b381b3SDavid Woodhouse  * sorting the good from the bad.
189353b381b3SDavid Woodhouse  */
189453b381b3SDavid Woodhouse static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
189553b381b3SDavid Woodhouse {
189607e4d380SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
189707e4d380SQu Wenruo 	int sectornr, stripe;
189853b381b3SDavid Woodhouse 	void **pointers;
189994a0b58dSIra Weiny 	void **unmap_array;
190053b381b3SDavid Woodhouse 	int faila = -1, failb = -1;
190158efbc9fSOmar Sandoval 	blk_status_t err;
190253b381b3SDavid Woodhouse 	int i;
190353b381b3SDavid Woodhouse 
190407e4d380SQu Wenruo 	/*
190507e4d380SQu Wenruo 	 * This array stores the pointer for each sector; each pointer already
190607e4d380SQu Wenruo 	 * has the sector's pgoff added to the mapped page address
190707e4d380SQu Wenruo 	 */
190831e818feSDavid Sterba 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
190953b381b3SDavid Woodhouse 	if (!pointers) {
191058efbc9fSOmar Sandoval 		err = BLK_STS_RESOURCE;
191153b381b3SDavid Woodhouse 		goto cleanup_io;
191253b381b3SDavid Woodhouse 	}
191353b381b3SDavid Woodhouse 
191494a0b58dSIra Weiny 	/*
191594a0b58dSIra Weiny 	 * Store copy of pointers that does not get reordered during
191694a0b58dSIra Weiny 	 * reconstruction so that kunmap_local works.
191794a0b58dSIra Weiny 	 */
191894a0b58dSIra Weiny 	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
191994a0b58dSIra Weiny 	if (!unmap_array) {
192094a0b58dSIra Weiny 		err = BLK_STS_RESOURCE;
192194a0b58dSIra Weiny 		goto cleanup_pointers;
192294a0b58dSIra Weiny 	}
192394a0b58dSIra Weiny 
192453b381b3SDavid Woodhouse 	faila = rbio->faila;
192553b381b3SDavid Woodhouse 	failb = rbio->failb;
192653b381b3SDavid Woodhouse 
1927b4ee1782SOmar Sandoval 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1928b4ee1782SOmar Sandoval 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
192953b381b3SDavid Woodhouse 		spin_lock_irq(&rbio->bio_list_lock);
193053b381b3SDavid Woodhouse 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
193153b381b3SDavid Woodhouse 		spin_unlock_irq(&rbio->bio_list_lock);
193253b381b3SDavid Woodhouse 	}
193353b381b3SDavid Woodhouse 
193453b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
193553b381b3SDavid Woodhouse 
193607e4d380SQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
193707e4d380SQu Wenruo 		struct sector_ptr *sector;
193807e4d380SQu Wenruo 
19395a6ac9eaSMiao Xie 		/*
19405a6ac9eaSMiao Xie 		 * Now we just use the bitmap to mark the horizontal stripes in
19415a6ac9eaSMiao Xie 		 * which we have data when doing parity scrub.
19425a6ac9eaSMiao Xie 		 */
19435a6ac9eaSMiao Xie 		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1944*c67c68ebSQu Wenruo 		    !test_bit(sectornr, &rbio->dbitmap))
19455a6ac9eaSMiao Xie 			continue;
19465a6ac9eaSMiao Xie 
194794a0b58dSIra Weiny 		/*
194807e4d380SQu Wenruo 		 * Setup our array of pointers with sectors from each stripe
194994a0b58dSIra Weiny 		 *
195094a0b58dSIra Weiny 		 * NOTE: store a duplicate array of pointers to preserve the
195194a0b58dSIra Weiny 		 * pointer order
195253b381b3SDavid Woodhouse 		 */
19532c8cdd6eSMiao Xie 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
195453b381b3SDavid Woodhouse 			/*
195507e4d380SQu Wenruo 			 * If we're rebuilding a read, we have to use
195653b381b3SDavid Woodhouse 			 * pages from the bio list
195753b381b3SDavid Woodhouse 			 */
1958b4ee1782SOmar Sandoval 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1959b4ee1782SOmar Sandoval 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
196053b381b3SDavid Woodhouse 			    (stripe == faila || stripe == failb)) {
196107e4d380SQu Wenruo 				sector = sector_in_rbio(rbio, stripe, sectornr, 0);
196253b381b3SDavid Woodhouse 			} else {
196307e4d380SQu Wenruo 				sector = rbio_stripe_sector(rbio, stripe, sectornr);
196453b381b3SDavid Woodhouse 			}
196507e4d380SQu Wenruo 			ASSERT(sector->page);
196607e4d380SQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
196707e4d380SQu Wenruo 					   sector->pgoff;
196894a0b58dSIra Weiny 			unmap_array[stripe] = pointers[stripe];
196953b381b3SDavid Woodhouse 		}
197053b381b3SDavid Woodhouse 
197107e4d380SQu Wenruo 		/* All raid6 handling here */
19724c664611SQu Wenruo 		if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
197307e4d380SQu Wenruo 			/* Single failure, rebuild from parity raid5 style */
197453b381b3SDavid Woodhouse 			if (failb < 0) {
197553b381b3SDavid Woodhouse 				if (faila == rbio->nr_data) {
197653b381b3SDavid Woodhouse 					/*
197753b381b3SDavid Woodhouse 					 * Just the P stripe has failed, without
197853b381b3SDavid Woodhouse 					 * a bad data or Q stripe.
197953b381b3SDavid Woodhouse 					 * TODO, we should redo the xor here.
198053b381b3SDavid Woodhouse 					 */
198158efbc9fSOmar Sandoval 					err = BLK_STS_IOERR;
198253b381b3SDavid Woodhouse 					goto cleanup;
198353b381b3SDavid Woodhouse 				}
198453b381b3SDavid Woodhouse 				/*
198553b381b3SDavid Woodhouse 				 * a single failure in raid6 is rebuilt
198653b381b3SDavid Woodhouse 				 * in the pstripe code below
198753b381b3SDavid Woodhouse 				 */
198853b381b3SDavid Woodhouse 				goto pstripe;
198953b381b3SDavid Woodhouse 			}
199053b381b3SDavid Woodhouse 
199153b381b3SDavid Woodhouse 			/* make sure our ps and qs are in order */
1992b7d2083aSNikolay Borisov 			if (faila > failb)
1993b7d2083aSNikolay Borisov 				swap(faila, failb);
199453b381b3SDavid Woodhouse 
199553b381b3SDavid Woodhouse 			/* if the q stripe failed, do a pstripe reconstruction
199653b381b3SDavid Woodhouse 			 * from the xors.
199753b381b3SDavid Woodhouse 			 * If both the q stripe and the P stripe failed, we're
199853b381b3SDavid Woodhouse 			 * here due to a crc mismatch and we can't give them the
199953b381b3SDavid Woodhouse 			 * data they want
200053b381b3SDavid Woodhouse 			 */
20014c664611SQu Wenruo 			if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
20024c664611SQu Wenruo 				if (rbio->bioc->raid_map[faila] ==
20038e5cfb55SZhao Lei 				    RAID5_P_STRIPE) {
200458efbc9fSOmar Sandoval 					err = BLK_STS_IOERR;
200553b381b3SDavid Woodhouse 					goto cleanup;
200653b381b3SDavid Woodhouse 				}
200753b381b3SDavid Woodhouse 				/*
200853b381b3SDavid Woodhouse 				 * otherwise we have one bad data stripe and
200953b381b3SDavid Woodhouse 				 * a good P stripe.  raid5!
201053b381b3SDavid Woodhouse 				 */
201153b381b3SDavid Woodhouse 				goto pstripe;
201253b381b3SDavid Woodhouse 			}
201353b381b3SDavid Woodhouse 
20144c664611SQu Wenruo 			if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
20152c8cdd6eSMiao Xie 				raid6_datap_recov(rbio->real_stripes,
201607e4d380SQu Wenruo 						  sectorsize, faila, pointers);
201753b381b3SDavid Woodhouse 			} else {
20182c8cdd6eSMiao Xie 				raid6_2data_recov(rbio->real_stripes,
201907e4d380SQu Wenruo 						  sectorsize, faila, failb,
202053b381b3SDavid Woodhouse 						  pointers);
202153b381b3SDavid Woodhouse 			}
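			/*
			 * raid6_datap_recov() rebuilds one data stripe plus P
			 * from Q, while raid6_2data_recov() rebuilds two data
			 * stripes from P and Q; both recover in place through
			 * the pointers[] array set up above.
			 */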
202253b381b3SDavid Woodhouse 		} else {
202353b381b3SDavid Woodhouse 			void *p;
202453b381b3SDavid Woodhouse 
202553b381b3SDavid Woodhouse 			/* rebuild from P stripe here (raid5 or raid6) */
202653b381b3SDavid Woodhouse 			BUG_ON(failb != -1);
202753b381b3SDavid Woodhouse pstripe:
202853b381b3SDavid Woodhouse 			/* Copy parity block into failed block to start with */
202907e4d380SQu Wenruo 			memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
203053b381b3SDavid Woodhouse 
203153b381b3SDavid Woodhouse 			/* rearrange the pointer array */
203253b381b3SDavid Woodhouse 			p = pointers[faila];
203353b381b3SDavid Woodhouse 			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
203453b381b3SDavid Woodhouse 				pointers[stripe] = pointers[stripe + 1];
203553b381b3SDavid Woodhouse 			pointers[rbio->nr_data - 1] = p;
203653b381b3SDavid Woodhouse 
203753b381b3SDavid Woodhouse 			/* xor in the rest */
203807e4d380SQu Wenruo 			run_xor(pointers, rbio->nr_data - 1, sectorsize);
203953b381b3SDavid Woodhouse 		}
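		/*
		 * Worked pstripe example (illustrative, nr_data = 3, faila = 1):
		 * P is copied over the dead D1 slot, the rotation makes the
		 * array [D0, D2, P-copy], and run_xor() folds D0 ^ D2 into the
		 * copy, leaving D1 = D0 ^ D2 ^ P in the failed stripe's own
		 * pages.
		 */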
204053b381b3SDavid Woodhouse 		/* if we're doing this rebuild as part of an rmw, go through
204153b381b3SDavid Woodhouse 		 * and set all of our private rbio sectors in the
204253b381b3SDavid Woodhouse 		 * failed stripes as uptodate.  This way finish_rmw will
204353b381b3SDavid Woodhouse 		 * know they can be trusted.  If this was a read reconstruction,
204453b381b3SDavid Woodhouse 		 * other endio functions will fiddle the uptodate bits
204553b381b3SDavid Woodhouse 		 */
20461b94b556SMiao Xie 		if (rbio->operation == BTRFS_RBIO_WRITE) {
204707e4d380SQu Wenruo 			for (i = 0;  i < rbio->stripe_nsectors; i++) {
204853b381b3SDavid Woodhouse 				if (faila != -1) {
204907e4d380SQu Wenruo 					sector = rbio_stripe_sector(rbio, faila, i);
205007e4d380SQu Wenruo 					sector->uptodate = 1;
205153b381b3SDavid Woodhouse 				}
205253b381b3SDavid Woodhouse 				if (failb != -1) {
205307e4d380SQu Wenruo 					sector = rbio_stripe_sector(rbio, failb, i);
205407e4d380SQu Wenruo 					sector->uptodate = 1;
205553b381b3SDavid Woodhouse 				}
205653b381b3SDavid Woodhouse 			}
205753b381b3SDavid Woodhouse 		}
205894a0b58dSIra Weiny 		for (stripe = rbio->real_stripes - 1; stripe >= 0; stripe--)
205994a0b58dSIra Weiny 			kunmap_local(unmap_array[stripe]);
206053b381b3SDavid Woodhouse 	}
206153b381b3SDavid Woodhouse 
206258efbc9fSOmar Sandoval 	err = BLK_STS_OK;
206353b381b3SDavid Woodhouse cleanup:
206494a0b58dSIra Weiny 	kfree(unmap_array);
206594a0b58dSIra Weiny cleanup_pointers:
206653b381b3SDavid Woodhouse 	kfree(pointers);
206753b381b3SDavid Woodhouse 
206853b381b3SDavid Woodhouse cleanup_io:
2069580c6efaSLiu Bo 	/*
2070580c6efaSLiu Bo 	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
2071580c6efaSLiu Bo 	 * valid rbio which is consistent with ondisk content, thus such a
2072580c6efaSLiu Bo 	 * valid rbio can be cached to avoid further disk reads.
2073580c6efaSLiu Bo 	 */
2074580c6efaSLiu Bo 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2075580c6efaSLiu Bo 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
207644ac474dSLiu Bo 		/*
207744ac474dSLiu Bo 		 * - In case of two failures, where rbio->failb != -1:
207844ac474dSLiu Bo 		 *
207944ac474dSLiu Bo 		 *   Do not cache this rbio since the above read reconstruction
208044ac474dSLiu Bo 		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
208144ac474dSLiu Bo 		 *   changed some content of stripes which are not identical to
208244ac474dSLiu Bo 		 *   on-disk content any more, otherwise, a later write/recover
208344ac474dSLiu Bo 		 *   may steal stripe_pages from this rbio and end up with
208444ac474dSLiu Bo 		 *   corruptions or rebuild failures.
208544ac474dSLiu Bo 		 *
208644ac474dSLiu Bo 		 * - In case of single failure, where rbio->failb == -1:
208744ac474dSLiu Bo 		 *
208844ac474dSLiu Bo 		 *   Cache this rbio iff the above read reconstruction is
208952042d8eSAndrea Gelmini 		 *   executed without problems.
209044ac474dSLiu Bo 		 */
209144ac474dSLiu Bo 		if (err == BLK_STS_OK && rbio->failb < 0)
20924ae10b3aSChris Mason 			cache_rbio_pages(rbio);
20934ae10b3aSChris Mason 		else
20944ae10b3aSChris Mason 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
20954ae10b3aSChris Mason 
20964246a0b6SChristoph Hellwig 		rbio_orig_end_io(rbio, err);
209758efbc9fSOmar Sandoval 	} else if (err == BLK_STS_OK) {
209853b381b3SDavid Woodhouse 		rbio->faila = -1;
209953b381b3SDavid Woodhouse 		rbio->failb = -1;
21005a6ac9eaSMiao Xie 
21015a6ac9eaSMiao Xie 		if (rbio->operation == BTRFS_RBIO_WRITE)
210253b381b3SDavid Woodhouse 			finish_rmw(rbio);
21035a6ac9eaSMiao Xie 		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
21045a6ac9eaSMiao Xie 			finish_parity_scrub(rbio, 0);
21055a6ac9eaSMiao Xie 		else
21065a6ac9eaSMiao Xie 			BUG();
210753b381b3SDavid Woodhouse 	} else {
21084246a0b6SChristoph Hellwig 		rbio_orig_end_io(rbio, err);
210953b381b3SDavid Woodhouse 	}
211053b381b3SDavid Woodhouse }
211153b381b3SDavid Woodhouse 
211253b381b3SDavid Woodhouse /*
211353b381b3SDavid Woodhouse  * This is called only for stripes we've read from disk to
211453b381b3SDavid Woodhouse  * reconstruct the parity.
211553b381b3SDavid Woodhouse  */
21164246a0b6SChristoph Hellwig static void raid_recover_end_io(struct bio *bio)
211753b381b3SDavid Woodhouse {
211853b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio = bio->bi_private;
211953b381b3SDavid Woodhouse 
212053b381b3SDavid Woodhouse 	/*
212153b381b3SDavid Woodhouse 	 * we only read stripe pages off the disk, set them
212253b381b3SDavid Woodhouse 	 * up to date if there were no errors
212353b381b3SDavid Woodhouse 	 */
21244e4cbee9SChristoph Hellwig 	if (bio->bi_status)
212553b381b3SDavid Woodhouse 		fail_bio_stripe(rbio, bio);
212653b381b3SDavid Woodhouse 	else
21275fdb7afcSQu Wenruo 		set_bio_pages_uptodate(rbio, bio);
212853b381b3SDavid Woodhouse 	bio_put(bio);
212953b381b3SDavid Woodhouse 
2130b89e1b01SMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
213153b381b3SDavid Woodhouse 		return;
213253b381b3SDavid Woodhouse 
21334c664611SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
213458efbc9fSOmar Sandoval 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
213553b381b3SDavid Woodhouse 	else
213653b381b3SDavid Woodhouse 		__raid_recover_end_io(rbio);
213753b381b3SDavid Woodhouse }
213853b381b3SDavid Woodhouse 
213953b381b3SDavid Woodhouse /*
214053b381b3SDavid Woodhouse  * reads everything we need off the disk to reconstruct
214153b381b3SDavid Woodhouse  * the parity. endio handlers trigger final reconstruction
214253b381b3SDavid Woodhouse  * when the IO is done.
214353b381b3SDavid Woodhouse  *
214453b381b3SDavid Woodhouse  * This is used both for reads from the higher layers and for
214553b381b3SDavid Woodhouse  * parity construction required to finish a rmw cycle.
214653b381b3SDavid Woodhouse  */
214753b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
214853b381b3SDavid Woodhouse {
214953b381b3SDavid Woodhouse 	int bios_to_read = 0;
215053b381b3SDavid Woodhouse 	struct bio_list bio_list;
215153b381b3SDavid Woodhouse 	int ret;
21523e77605dSQu Wenruo 	int sectornr;
215353b381b3SDavid Woodhouse 	int stripe;
215453b381b3SDavid Woodhouse 	struct bio *bio;
215553b381b3SDavid Woodhouse 
215653b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
215753b381b3SDavid Woodhouse 
215853b381b3SDavid Woodhouse 	ret = alloc_rbio_pages(rbio);
215953b381b3SDavid Woodhouse 	if (ret)
216053b381b3SDavid Woodhouse 		goto cleanup;
216153b381b3SDavid Woodhouse 
2162b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
216353b381b3SDavid Woodhouse 
216453b381b3SDavid Woodhouse 	/*
21654ae10b3aSChris Mason 	 * read everything that hasn't failed.  Thanks to the
21664ae10b3aSChris Mason 	 * stripe cache, it is possible that some or all of these
21674ae10b3aSChris Mason 	 * pages are going to be uptodate.
216853b381b3SDavid Woodhouse 	 */
21692c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
21705588383eSLiu Bo 		if (rbio->faila == stripe || rbio->failb == stripe) {
2171b89e1b01SMiao Xie 			atomic_inc(&rbio->error);
217253b381b3SDavid Woodhouse 			continue;
21735588383eSLiu Bo 		}
217453b381b3SDavid Woodhouse 
21753e77605dSQu Wenruo 		for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
21763e77605dSQu Wenruo 			struct sector_ptr *sector;
217753b381b3SDavid Woodhouse 
217853b381b3SDavid Woodhouse 			/*
217953b381b3SDavid Woodhouse 			 * the rmw code may have already read this
218053b381b3SDavid Woodhouse 			 * page in
218153b381b3SDavid Woodhouse 			 */
21823e77605dSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
21833e77605dSQu Wenruo 			if (sector->uptodate)
218453b381b3SDavid Woodhouse 				continue;
218553b381b3SDavid Woodhouse 
21863e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector,
21873e77605dSQu Wenruo 						 stripe, sectornr, rbio->stripe_len,
2188e01bf588SChristoph Hellwig 						 REQ_OP_READ);
218953b381b3SDavid Woodhouse 			if (ret < 0)
219053b381b3SDavid Woodhouse 				goto cleanup;
219153b381b3SDavid Woodhouse 		}
219253b381b3SDavid Woodhouse 	}
219353b381b3SDavid Woodhouse 
219453b381b3SDavid Woodhouse 	bios_to_read = bio_list_size(&bio_list);
219553b381b3SDavid Woodhouse 	if (!bios_to_read) {
219653b381b3SDavid Woodhouse 		/*
219753b381b3SDavid Woodhouse 		 * we might have no bios to read just because the sectors
219853b381b3SDavid Woodhouse 		 * were already uptodate, or because the devices were
219953b381b3SDavid Woodhouse 		 * gone.
220053b381b3SDavid Woodhouse 		 */
22014c664611SQu Wenruo 		if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) {
220253b381b3SDavid Woodhouse 			__raid_recover_end_io(rbio);
2203813f8a0eSNikolay Borisov 			return 0;
220453b381b3SDavid Woodhouse 		} else {
220553b381b3SDavid Woodhouse 			goto cleanup;
220653b381b3SDavid Woodhouse 		}
220753b381b3SDavid Woodhouse 	}
220853b381b3SDavid Woodhouse 
220953b381b3SDavid Woodhouse 	/*
22104c664611SQu Wenruo 	 * The bioc may be freed once we submit the last bio. Make sure not to
22114c664611SQu Wenruo 	 * touch it after that.
221253b381b3SDavid Woodhouse 	 */
2213b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
2214bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
221553b381b3SDavid Woodhouse 		bio->bi_end_io = raid_recover_end_io;
221653b381b3SDavid Woodhouse 
22176a258d72SQu Wenruo 		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
221853b381b3SDavid Woodhouse 
22194e49ea4aSMike Christie 		submit_bio(bio);
222053b381b3SDavid Woodhouse 	}
2221813f8a0eSNikolay Borisov 
222253b381b3SDavid Woodhouse 	return 0;
222353b381b3SDavid Woodhouse 
222453b381b3SDavid Woodhouse cleanup:
2225b4ee1782SOmar Sandoval 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2226b4ee1782SOmar Sandoval 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
222758efbc9fSOmar Sandoval 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2228785884fcSLiu Bo 
2229785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2230785884fcSLiu Bo 		bio_put(bio);
2231785884fcSLiu Bo 
223253b381b3SDavid Woodhouse 	return -EIO;
223353b381b3SDavid Woodhouse }
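
/*
 * A rough sketch of the completion flow above: stripes_pending is set to
 * the number of read bios before any of them are submitted, each
 * raid_recover_end_io() call drops it by one, and the endio that brings
 * it to zero either rebuilds the stripe via __raid_recover_end_io() or,
 * if too many reads failed, ends the rbio with BLK_STS_IOERR.
 */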
223453b381b3SDavid Woodhouse 
223553b381b3SDavid Woodhouse /*
223653b381b3SDavid Woodhouse  * the main entry point for reads from the higher layers.  This
223753b381b3SDavid Woodhouse  * is really only called when the normal read path had a failure,
223853b381b3SDavid Woodhouse  * so we assume the bio they send down corresponds to a failed part
223953b381b3SDavid Woodhouse  * of the drive.
224053b381b3SDavid Woodhouse  */
22416a258d72SQu Wenruo int raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
2242cc353a8bSQu Wenruo 			  u32 stripe_len, int mirror_num, int generic_io)
224353b381b3SDavid Woodhouse {
22446a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
224553b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
224653b381b3SDavid Woodhouse 	int ret;
224753b381b3SDavid Woodhouse 
2248abad60c6SLiu Bo 	if (generic_io) {
22494c664611SQu Wenruo 		ASSERT(bioc->mirror_num == mirror_num);
2250c3a3b19bSQu Wenruo 		btrfs_bio(bio)->mirror_num = mirror_num;
2251abad60c6SLiu Bo 	}
2252abad60c6SLiu Bo 
22534c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
2254af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
22556e9606d2SZhao Lei 		if (generic_io)
22564c664611SQu Wenruo 			btrfs_put_bioc(bioc);
225753b381b3SDavid Woodhouse 		return PTR_ERR(rbio);
2258af8e2d1dSMiao Xie 	}
225953b381b3SDavid Woodhouse 
22601b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
226153b381b3SDavid Woodhouse 	bio_list_add(&rbio->bio_list, bio);
22624f024f37SKent Overstreet 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
226353b381b3SDavid Woodhouse 
226453b381b3SDavid Woodhouse 	rbio->faila = find_logical_bio_stripe(rbio, bio);
226553b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
22660b246afaSJeff Mahoney 		btrfs_warn(fs_info,
22674c664611SQu Wenruo "%s could not find the bad stripe in raid56, so we cannot recover it (bio has logical %llu len %llu, bioc has map_type %llu)",
22681201b58bSDavid Sterba 			   __func__, bio->bi_iter.bi_sector << 9,
22694c664611SQu Wenruo 			   (u64)bio->bi_iter.bi_size, bioc->map_type);
22706e9606d2SZhao Lei 		if (generic_io)
22714c664611SQu Wenruo 			btrfs_put_bioc(bioc);
227253b381b3SDavid Woodhouse 		kfree(rbio);
227353b381b3SDavid Woodhouse 		return -EIO;
227453b381b3SDavid Woodhouse 	}
227553b381b3SDavid Woodhouse 
22764245215dSMiao Xie 	if (generic_io) {
22770b246afaSJeff Mahoney 		btrfs_bio_counter_inc_noblocked(fs_info);
22784245215dSMiao Xie 		rbio->generic_bio_cnt = 1;
22794245215dSMiao Xie 	} else {
22804c664611SQu Wenruo 		btrfs_get_bioc(bioc);
22814245215dSMiao Xie 	}
22824245215dSMiao Xie 
228353b381b3SDavid Woodhouse 	/*
22848810f751SLiu Bo 	 * Loop retry:
22858810f751SLiu Bo 	 * for 'mirror_num == 2', reconstruct from all other stripes.
22868810f751SLiu Bo 	 * for 'mirror_num > 2', select a stripe to fail on every retry.
228753b381b3SDavid Woodhouse 	 */
22888810f751SLiu Bo 	if (mirror_num > 2) {
22898810f751SLiu Bo 		/*
22908810f751SLiu Bo 		 * 'mirror_num == 3' means to fail the P stripe and
22918810f751SLiu Bo 		 * reconstruct from the Q stripe.  'mirror_num > 3' means to
22928810f751SLiu Bo 		 * fail a data stripe and reconstruct from the P+Q stripes.
22938810f751SLiu Bo 		 */
22948810f751SLiu Bo 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
22958810f751SLiu Bo 		ASSERT(rbio->failb > 0);
22968810f751SLiu Bo 		if (rbio->failb <= rbio->faila)
22978810f751SLiu Bo 			rbio->failb--;
22988810f751SLiu Bo 	}
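
	/*
	 * A worked example of the mapping above, assuming a RAID6 layout
	 * with real_stripes == 6 (4 data + P + Q, so P is stripe 4 and Q
	 * is stripe 5):
	 *   mirror_num == 3: failb = 6 - 2 = 4, fail the P stripe;
	 *   mirror_num == 4: failb = 6 - 3 = 3, fail a data stripe.
	 * If failb lands on or before faila it is shifted down by one so
	 * the two failure slots stay distinct.
	 */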
229953b381b3SDavid Woodhouse 
230053b381b3SDavid Woodhouse 	ret = lock_stripe_add(rbio);
230153b381b3SDavid Woodhouse 
230253b381b3SDavid Woodhouse 	/*
230353b381b3SDavid Woodhouse 	 * __raid56_parity_recover will end the bio with
230453b381b3SDavid Woodhouse 	 * any errors it hits.  We don't want to return
230553b381b3SDavid Woodhouse 	 * its error value up the stack because our caller
230653b381b3SDavid Woodhouse 	 * will end up calling bio_endio with any nonzero
230753b381b3SDavid Woodhouse 	 * return value.
230853b381b3SDavid Woodhouse 	 */
230953b381b3SDavid Woodhouse 	if (ret == 0)
231053b381b3SDavid Woodhouse 		__raid56_parity_recover(rbio);
231153b381b3SDavid Woodhouse 	/*
231253b381b3SDavid Woodhouse 	 * our rbio has been added to the list of
231353b381b3SDavid Woodhouse 	 * rbios that will be handled after the
231453b381b3SDavid Woodhouse 	 * current lock owner is done
231553b381b3SDavid Woodhouse 	 */
231653b381b3SDavid Woodhouse 	return 0;
231753b381b3SDavid Woodhouse 
231853b381b3SDavid Woodhouse }
231953b381b3SDavid Woodhouse 
2320385de0efSChristoph Hellwig static void rmw_work(struct work_struct *work)
232153b381b3SDavid Woodhouse {
232253b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
232353b381b3SDavid Woodhouse 
232453b381b3SDavid Woodhouse 	rbio = container_of(work, struct btrfs_raid_bio, work);
232553b381b3SDavid Woodhouse 	raid56_rmw_stripe(rbio);
232653b381b3SDavid Woodhouse }
232753b381b3SDavid Woodhouse 
2328385de0efSChristoph Hellwig static void read_rebuild_work(struct work_struct *work)
232953b381b3SDavid Woodhouse {
233053b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
233153b381b3SDavid Woodhouse 
233253b381b3SDavid Woodhouse 	rbio = container_of(work, struct btrfs_raid_bio, work);
233353b381b3SDavid Woodhouse 	__raid56_parity_recover(rbio);
233453b381b3SDavid Woodhouse }
23355a6ac9eaSMiao Xie 
23365a6ac9eaSMiao Xie /*
23375a6ac9eaSMiao Xie  * The following code is used to scrub/replace the parity stripe
23385a6ac9eaSMiao Xie  *
23394c664611SQu Wenruo  * Caller must have already increased bio_counter for getting @bioc.
2340ae6529c3SQu Wenruo  *
23415a6ac9eaSMiao Xie  * Note: We must make sure all the pages that are added to the scrub/replace
23425a6ac9eaSMiao Xie  * raid bio are correct and will not be changed during the scrub/replace; that
23435a6ac9eaSMiao Xie  * is, those pages hold only metadata or file data protected by checksums.
23445a6ac9eaSMiao Xie  */
23455a6ac9eaSMiao Xie 
23466a258d72SQu Wenruo struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
23476a258d72SQu Wenruo 				struct btrfs_io_context *bioc,
2348cc353a8bSQu Wenruo 				u32 stripe_len, struct btrfs_device *scrub_dev,
23495a6ac9eaSMiao Xie 				unsigned long *dbitmap, int stripe_nsectors)
23505a6ac9eaSMiao Xie {
23516a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
23525a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
23535a6ac9eaSMiao Xie 	int i;
23545a6ac9eaSMiao Xie 
23554c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
23565a6ac9eaSMiao Xie 	if (IS_ERR(rbio))
23575a6ac9eaSMiao Xie 		return NULL;
23585a6ac9eaSMiao Xie 	bio_list_add(&rbio->bio_list, bio);
23595a6ac9eaSMiao Xie 	/*
23605a6ac9eaSMiao Xie 	 * This is a special bio which is used to hold the completion handler
23615a6ac9eaSMiao Xie 	 * and keep the scrub rbio similar to the other types
23625a6ac9eaSMiao Xie 	 */
23635a6ac9eaSMiao Xie 	ASSERT(!bio->bi_iter.bi_size);
23645a6ac9eaSMiao Xie 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
23655a6ac9eaSMiao Xie 
23669cd3a7ebSLiu Bo 	/*
23674c664611SQu Wenruo 	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
23689cd3a7ebSLiu Bo 	 * to the end position, so this search can start from the first parity
23699cd3a7ebSLiu Bo 	 * stripe.
23709cd3a7ebSLiu Bo 	 */
23719cd3a7ebSLiu Bo 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
23724c664611SQu Wenruo 		if (bioc->stripes[i].dev == scrub_dev) {
23735a6ac9eaSMiao Xie 			rbio->scrubp = i;
23745a6ac9eaSMiao Xie 			break;
23755a6ac9eaSMiao Xie 		}
23765a6ac9eaSMiao Xie 	}
23779cd3a7ebSLiu Bo 	ASSERT(i < rbio->real_stripes);
23785a6ac9eaSMiao Xie 
2379*c67c68ebSQu Wenruo 	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
23805a6ac9eaSMiao Xie 
2381ae6529c3SQu Wenruo 	/*
23824c664611SQu Wenruo 	 * We have already increased the bio_counter when getting the bioc;
2383ae6529c3SQu Wenruo 	 * record that here so we can release it at rbio_orig_end_io().
2384ae6529c3SQu Wenruo 	 */
2385ae6529c3SQu Wenruo 	rbio->generic_bio_cnt = 1;
2386ae6529c3SQu Wenruo 
23875a6ac9eaSMiao Xie 	return rbio;
23885a6ac9eaSMiao Xie }
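
/*
 * A note on the bitmap handed in above: each set bit in @dbitmap selects
 * one sector-sized horizontal row of the stripe whose parity needs
 * scrubbing; the later read, allocation and writeback loops all iterate
 * with for_each_set_bit() over this same bitmap.
 */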
23895a6ac9eaSMiao Xie 
2390b4ee1782SOmar Sandoval /* Used for both parity scrub and missing. */
2391b4ee1782SOmar Sandoval void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
23926346f6bfSQu Wenruo 			    unsigned int pgoff, u64 logical)
23935a6ac9eaSMiao Xie {
23946346f6bfSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
23955a6ac9eaSMiao Xie 	int stripe_offset;
23965a6ac9eaSMiao Xie 	int index;
23975a6ac9eaSMiao Xie 
23984c664611SQu Wenruo 	ASSERT(logical >= rbio->bioc->raid_map[0]);
23996346f6bfSQu Wenruo 	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
24005a6ac9eaSMiao Xie 				rbio->stripe_len * rbio->nr_data);
24014c664611SQu Wenruo 	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
24026346f6bfSQu Wenruo 	index = stripe_offset / sectorsize;
24036346f6bfSQu Wenruo 	rbio->bio_sectors[index].page = page;
24046346f6bfSQu Wenruo 	rbio->bio_sectors[index].pgoff = pgoff;
24055a6ac9eaSMiao Xie }
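
/*
 * A worked example of the index math above, assuming the full stripe
 * starts at logical address X (raid_map[0] == X) and sectorsize == 4K:
 * a sector at logical X + 64K gets stripe_offset = 64K and lands in
 * bio_sectors[16].  Data sectors are indexed linearly, in logical
 * address order, across the data stripes.
 */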
24065a6ac9eaSMiao Xie 
24075a6ac9eaSMiao Xie /*
24085a6ac9eaSMiao Xie  * We only scrub the parity for horizontal rows where we have correct data,
24095a6ac9eaSMiao Xie  * so we don't need to allocate pages for all the stripes.
24105a6ac9eaSMiao Xie  */
24115a6ac9eaSMiao Xie static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
24125a6ac9eaSMiao Xie {
24133907ce29SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
24143907ce29SQu Wenruo 	int stripe;
24153907ce29SQu Wenruo 	int sectornr;
24165a6ac9eaSMiao Xie 
2417*c67c68ebSQu Wenruo 	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
24183907ce29SQu Wenruo 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
24193907ce29SQu Wenruo 			struct page *page;
24203907ce29SQu Wenruo 			int index = (stripe * rbio->stripe_nsectors + sectornr) *
24213907ce29SQu Wenruo 				    sectorsize >> PAGE_SHIFT;
24223907ce29SQu Wenruo 
24235a6ac9eaSMiao Xie 			if (rbio->stripe_pages[index])
24245a6ac9eaSMiao Xie 				continue;
24255a6ac9eaSMiao Xie 
2426b0ee5e1eSDavid Sterba 			page = alloc_page(GFP_NOFS);
24275a6ac9eaSMiao Xie 			if (!page)
24285a6ac9eaSMiao Xie 				return -ENOMEM;
24295a6ac9eaSMiao Xie 			rbio->stripe_pages[index] = page;
24305a6ac9eaSMiao Xie 		}
24315a6ac9eaSMiao Xie 	}
2432eb357060SQu Wenruo 	index_stripe_sectors(rbio);
24335a6ac9eaSMiao Xie 	return 0;
24345a6ac9eaSMiao Xie }
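
/*
 * A worked example of the page index computed above, assuming a 4K
 * sectorsize: with 4K pages every sector maps to its own stripe page,
 * while with 64K pages (the subpage case) sixteen consecutive sectors
 * share one stripe page, so the index only advances every 16 sectors.
 */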
24355a6ac9eaSMiao Xie 
24365a6ac9eaSMiao Xie static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
24375a6ac9eaSMiao Xie 					 int need_check)
24385a6ac9eaSMiao Xie {
24394c664611SQu Wenruo 	struct btrfs_io_context *bioc = rbio->bioc;
244046900662SQu Wenruo 	const u32 sectorsize = bioc->fs_info->sectorsize;
24411389053eSKees Cook 	void **pointers = rbio->finish_pointers;
2442*c67c68ebSQu Wenruo 	unsigned long *pbitmap = &rbio->finish_pbitmap;
24435a6ac9eaSMiao Xie 	int nr_data = rbio->nr_data;
24445a6ac9eaSMiao Xie 	int stripe;
24453e77605dSQu Wenruo 	int sectornr;
2446c17af965SDavid Sterba 	bool has_qstripe;
244746900662SQu Wenruo 	struct sector_ptr p_sector = { 0 };
244846900662SQu Wenruo 	struct sector_ptr q_sector = { 0 };
24495a6ac9eaSMiao Xie 	struct bio_list bio_list;
24505a6ac9eaSMiao Xie 	struct bio *bio;
245176035976SMiao Xie 	int is_replace = 0;
24525a6ac9eaSMiao Xie 	int ret;
24535a6ac9eaSMiao Xie 
24545a6ac9eaSMiao Xie 	bio_list_init(&bio_list);
24555a6ac9eaSMiao Xie 
2456c17af965SDavid Sterba 	if (rbio->real_stripes - rbio->nr_data == 1)
2457c17af965SDavid Sterba 		has_qstripe = false;
2458c17af965SDavid Sterba 	else if (rbio->real_stripes - rbio->nr_data == 2)
2459c17af965SDavid Sterba 		has_qstripe = true;
2460c17af965SDavid Sterba 	else
24615a6ac9eaSMiao Xie 		BUG();
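
	/*
	 * The stripe math above: real_stripes - nr_data is the number of
	 * parity stripes, so 1 means RAID5 (P only) and 2 means RAID6
	 * (P + Q); anything else is a corrupted rbio and we BUG out.
	 */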
24625a6ac9eaSMiao Xie 
24634c664611SQu Wenruo 	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
246476035976SMiao Xie 		is_replace = 1;
2465*c67c68ebSQu Wenruo 		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
246676035976SMiao Xie 	}
246776035976SMiao Xie 
24685a6ac9eaSMiao Xie 	/*
24695a6ac9eaSMiao Xie 	 * The higher layers (the scrubber) are unlikely to use
24705a6ac9eaSMiao Xie 	 * this area of the disk again soon, so don't cache it.
24725a6ac9eaSMiao Xie 	 */
24735a6ac9eaSMiao Xie 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
24745a6ac9eaSMiao Xie 
24755a6ac9eaSMiao Xie 	if (!need_check)
24765a6ac9eaSMiao Xie 		goto writeback;
24775a6ac9eaSMiao Xie 
247846900662SQu Wenruo 	p_sector.page = alloc_page(GFP_NOFS);
247946900662SQu Wenruo 	if (!p_sector.page)
24805a6ac9eaSMiao Xie 		goto cleanup;
248146900662SQu Wenruo 	p_sector.pgoff = 0;
248246900662SQu Wenruo 	p_sector.uptodate = 1;
24835a6ac9eaSMiao Xie 
2484c17af965SDavid Sterba 	if (has_qstripe) {
2485d70cef0dSIra Weiny 		/* RAID6, allocate and map temp space for the Q stripe */
248646900662SQu Wenruo 		q_sector.page = alloc_page(GFP_NOFS);
248746900662SQu Wenruo 		if (!q_sector.page) {
248846900662SQu Wenruo 			__free_page(p_sector.page);
248946900662SQu Wenruo 			p_sector.page = NULL;
24905a6ac9eaSMiao Xie 			goto cleanup;
24915a6ac9eaSMiao Xie 		}
249246900662SQu Wenruo 		q_sector.pgoff = 0;
249346900662SQu Wenruo 		q_sector.uptodate = 1;
249446900662SQu Wenruo 		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
24955a6ac9eaSMiao Xie 	}
24965a6ac9eaSMiao Xie 
24975a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
24985a6ac9eaSMiao Xie 
2499d70cef0dSIra Weiny 	/* Map the parity stripe just once */
250046900662SQu Wenruo 	pointers[nr_data] = kmap_local_page(p_sector.page);
2501d70cef0dSIra Weiny 
2502*c67c68ebSQu Wenruo 	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
250346900662SQu Wenruo 		struct sector_ptr *sector;
25045a6ac9eaSMiao Xie 		void *parity;
250546900662SQu Wenruo 
25065a6ac9eaSMiao Xie 		/* first collect one page from each data stripe */
25075a6ac9eaSMiao Xie 		for (stripe = 0; stripe < nr_data; stripe++) {
250846900662SQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
250946900662SQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
251046900662SQu Wenruo 					   sector->pgoff;
25115a6ac9eaSMiao Xie 		}
25125a6ac9eaSMiao Xie 
2513c17af965SDavid Sterba 		if (has_qstripe) {
2514d70cef0dSIra Weiny 			/* RAID6, call the library function to fill in our P/Q */
251546900662SQu Wenruo 			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
25165a6ac9eaSMiao Xie 						pointers);
25175a6ac9eaSMiao Xie 		} else {
25185a6ac9eaSMiao Xie 			/* raid5 */
251946900662SQu Wenruo 			memcpy(pointers[nr_data], pointers[0], sectorsize);
252046900662SQu Wenruo 			run_xor(pointers + 1, nr_data - 1, sectorsize);
25215a6ac9eaSMiao Xie 		}
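
		/*
		 * In the raid5 branch above, parity is the plain XOR of
		 * the data sectors, i.e. P = D0 ^ D1 ^ ... ^ Dn-1: the
		 * memcpy seeds the parity buffer with D0 and run_xor()
		 * folds in the remaining nr_data - 1 data pointers.
		 */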
25225a6ac9eaSMiao Xie 
252301327610SNicholas D Steeves 		/* Check scrubbing parity and repair it */
252446900662SQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
252546900662SQu Wenruo 		parity = kmap_local_page(sector->page) + sector->pgoff;
252646900662SQu Wenruo 		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
252746900662SQu Wenruo 			memcpy(parity, pointers[rbio->scrubp], sectorsize);
25285a6ac9eaSMiao Xie 		else
25295a6ac9eaSMiao Xie 			/* Parity is right, no need to write it back */
2530*c67c68ebSQu Wenruo 			bitmap_clear(&rbio->dbitmap, sectornr, 1);
253158c1a35cSIra Weiny 		kunmap_local(parity);
25325a6ac9eaSMiao Xie 
253394a0b58dSIra Weiny 		for (stripe = nr_data - 1; stripe >= 0; stripe--)
253494a0b58dSIra Weiny 			kunmap_local(pointers[stripe]);
25355a6ac9eaSMiao Xie 	}
25365a6ac9eaSMiao Xie 
253794a0b58dSIra Weiny 	kunmap_local(pointers[nr_data]);
253846900662SQu Wenruo 	__free_page(p_sector.page);
253946900662SQu Wenruo 	p_sector.page = NULL;
254046900662SQu Wenruo 	if (q_sector.page) {
254194a0b58dSIra Weiny 		kunmap_local(pointers[rbio->real_stripes - 1]);
254246900662SQu Wenruo 		__free_page(q_sector.page);
254346900662SQu Wenruo 		q_sector.page = NULL;
2544d70cef0dSIra Weiny 	}
25455a6ac9eaSMiao Xie 
25465a6ac9eaSMiao Xie writeback:
25475a6ac9eaSMiao Xie 	/*
25485a6ac9eaSMiao Xie 	 * time to start writing.  Make bios for everything from the
25495a6ac9eaSMiao Xie 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
25505a6ac9eaSMiao Xie 	 * everything else.
25515a6ac9eaSMiao Xie 	 */
2552*c67c68ebSQu Wenruo 	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
25533e77605dSQu Wenruo 		struct sector_ptr *sector;
25545a6ac9eaSMiao Xie 
25553e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25563e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
25573e77605dSQu Wenruo 					 sectornr, rbio->stripe_len, REQ_OP_WRITE);
25585a6ac9eaSMiao Xie 		if (ret)
25595a6ac9eaSMiao Xie 			goto cleanup;
25605a6ac9eaSMiao Xie 	}
25615a6ac9eaSMiao Xie 
256276035976SMiao Xie 	if (!is_replace)
256376035976SMiao Xie 		goto submit_write;
256476035976SMiao Xie 
25653e77605dSQu Wenruo 	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
25663e77605dSQu Wenruo 		struct sector_ptr *sector;
256776035976SMiao Xie 
25683e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25693e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector,
25704c664611SQu Wenruo 				       bioc->tgtdev_map[rbio->scrubp],
25713e77605dSQu Wenruo 				       sectornr, rbio->stripe_len, REQ_OP_WRITE);
257276035976SMiao Xie 		if (ret)
257376035976SMiao Xie 			goto cleanup;
257476035976SMiao Xie 	}
257576035976SMiao Xie 
257676035976SMiao Xie submit_write:
25775a6ac9eaSMiao Xie 	nr_data = bio_list_size(&bio_list);
25785a6ac9eaSMiao Xie 	if (!nr_data) {
25795a6ac9eaSMiao Xie 		/* Every parity is right */
258058efbc9fSOmar Sandoval 		rbio_orig_end_io(rbio, BLK_STS_OK);
25815a6ac9eaSMiao Xie 		return;
25825a6ac9eaSMiao Xie 	}
25835a6ac9eaSMiao Xie 
25845a6ac9eaSMiao Xie 	atomic_set(&rbio->stripes_pending, nr_data);
25855a6ac9eaSMiao Xie 
2586bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
2587a6111d11SZhao Lei 		bio->bi_end_io = raid_write_end_io;
25884e49ea4aSMike Christie 
25894e49ea4aSMike Christie 		submit_bio(bio);
25905a6ac9eaSMiao Xie 	}
25915a6ac9eaSMiao Xie 	return;
25925a6ac9eaSMiao Xie 
25935a6ac9eaSMiao Xie cleanup:
259458efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2595785884fcSLiu Bo 
2596785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2597785884fcSLiu Bo 		bio_put(bio);
25985a6ac9eaSMiao Xie }
25995a6ac9eaSMiao Xie 
26005a6ac9eaSMiao Xie static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
26015a6ac9eaSMiao Xie {
26025a6ac9eaSMiao Xie 	if (stripe >= 0 && stripe < rbio->nr_data)
26035a6ac9eaSMiao Xie 		return 1;
26045a6ac9eaSMiao Xie 	return 0;
26055a6ac9eaSMiao Xie }
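
/*
 * The stripe index layout assumed by the checks above: indices
 * 0 .. nr_data - 1 are data stripes, nr_data is P and, for RAID6,
 * real_stripes - 1 is Q.
 */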
26065a6ac9eaSMiao Xie 
26075a6ac9eaSMiao Xie /*
26085a6ac9eaSMiao Xie  * While we're doing the parity check and repair, we could have errors
26095a6ac9eaSMiao Xie  * in reading pages off the disk.  This checks for errors and if we're
26105a6ac9eaSMiao Xie  * not able to read the page it'll trigger parity reconstruction.  The
26115a6ac9eaSMiao Xie  * parity scrub will be finished after we've reconstructed the failed
26125a6ac9eaSMiao Xie  * stripes
26135a6ac9eaSMiao Xie  */
26145a6ac9eaSMiao Xie static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
26155a6ac9eaSMiao Xie {
26164c664611SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
26175a6ac9eaSMiao Xie 		goto cleanup;
26185a6ac9eaSMiao Xie 
26195a6ac9eaSMiao Xie 	if (rbio->faila >= 0 || rbio->failb >= 0) {
26205a6ac9eaSMiao Xie 		int dfail = 0, failp = -1;
26215a6ac9eaSMiao Xie 
26225a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->faila))
26235a6ac9eaSMiao Xie 			dfail++;
26245a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->faila))
26255a6ac9eaSMiao Xie 			failp = rbio->faila;
26265a6ac9eaSMiao Xie 
26275a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->failb))
26285a6ac9eaSMiao Xie 			dfail++;
26295a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->failb))
26305a6ac9eaSMiao Xie 			failp = rbio->failb;
26315a6ac9eaSMiao Xie 
26325a6ac9eaSMiao Xie 		/*
26335a6ac9eaSMiao Xie 		 * Because we cannot use the parity that is being scrubbed
26345a6ac9eaSMiao Xie 		 * to repair data, our repair capability is reduced by one.
26355a6ac9eaSMiao Xie 		 * (In the case of RAID5, we cannot repair anything.)
26365a6ac9eaSMiao Xie 		 */
26374c664611SQu Wenruo 		if (dfail > rbio->bioc->max_errors - 1)
26385a6ac9eaSMiao Xie 			goto cleanup;
26395a6ac9eaSMiao Xie 
26405a6ac9eaSMiao Xie 		/*
26415a6ac9eaSMiao Xie 		 * If all the data is good, then only the parity is bad;
26425a6ac9eaSMiao Xie 		 * just repair the parity.
26435a6ac9eaSMiao Xie 		 */
26445a6ac9eaSMiao Xie 		if (dfail == 0) {
26455a6ac9eaSMiao Xie 			finish_parity_scrub(rbio, 0);
26465a6ac9eaSMiao Xie 			return;
26475a6ac9eaSMiao Xie 		}
26485a6ac9eaSMiao Xie 
26495a6ac9eaSMiao Xie 		/*
26505a6ac9eaSMiao Xie 		 * Here we have one corrupted data stripe and one corrupted
26515a6ac9eaSMiao Xie 		 * parity on RAID6.  If the corrupted parity is the one being
265201327610SNicholas D Steeves 		 * scrubbed, we can luckily use the other parity to repair the
26535a6ac9eaSMiao Xie 		 * data; otherwise we cannot repair the data stripe.
26545a6ac9eaSMiao Xie 		 */
26555a6ac9eaSMiao Xie 		if (failp != rbio->scrubp)
26565a6ac9eaSMiao Xie 			goto cleanup;
26575a6ac9eaSMiao Xie 
26585a6ac9eaSMiao Xie 		__raid_recover_end_io(rbio);
26595a6ac9eaSMiao Xie 	} else {
26605a6ac9eaSMiao Xie 		finish_parity_scrub(rbio, 1);
26615a6ac9eaSMiao Xie 	}
26625a6ac9eaSMiao Xie 	return;
26635a6ac9eaSMiao Xie 
26645a6ac9eaSMiao Xie cleanup:
266558efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
26665a6ac9eaSMiao Xie }
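
/*
 * A compact restatement of the branches above (dfail counts failed data
 * stripes, failp is a failed parity stripe, scrubp the parity being
 * scrubbed):
 *   dfail > max_errors - 1      -> give up, the scrubbed parity cannot
 *                                  count towards the repair;
 *   dfail == 0                  -> only parity failed, just rewrite it;
 *   otherwise                   -> recovery is attempted only when the
 *                                  failed parity is the scrubbed one
 *                                  (failp == scrubp).
 */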
26675a6ac9eaSMiao Xie 
26685a6ac9eaSMiao Xie /*
26695a6ac9eaSMiao Xie  * end io for the read phase of the scrub cycle.  All the bios here are
26705a6ac9eaSMiao Xie  * physical stripe bios we've read from the disk so we can recalculate the
26715a6ac9eaSMiao Xie  * parity of the stripe.
26725a6ac9eaSMiao Xie  *
26735a6ac9eaSMiao Xie  * This will usually kick off finish_parity_scrub once all the bios are read
26745a6ac9eaSMiao Xie  * in, but it may trigger parity reconstruction if we had any errors along the way.
26755a6ac9eaSMiao Xie  */
26764246a0b6SChristoph Hellwig static void raid56_parity_scrub_end_io(struct bio *bio)
26775a6ac9eaSMiao Xie {
26785a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio = bio->bi_private;
26795a6ac9eaSMiao Xie 
26804e4cbee9SChristoph Hellwig 	if (bio->bi_status)
26815a6ac9eaSMiao Xie 		fail_bio_stripe(rbio, bio);
26825a6ac9eaSMiao Xie 	else
26835fdb7afcSQu Wenruo 		set_bio_pages_uptodate(rbio, bio);
26845a6ac9eaSMiao Xie 
26855a6ac9eaSMiao Xie 	bio_put(bio);
26865a6ac9eaSMiao Xie 
26875a6ac9eaSMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
26885a6ac9eaSMiao Xie 		return;
26895a6ac9eaSMiao Xie 
26905a6ac9eaSMiao Xie 	/*
26915a6ac9eaSMiao Xie 	 * this will normally call finish_parity_scrub to start our
26925a6ac9eaSMiao Xie 	 * write, but if there are any failed stripes we'll reconstruct
26935a6ac9eaSMiao Xie 	 * from parity first
26945a6ac9eaSMiao Xie 	 */
26955a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
26965a6ac9eaSMiao Xie }
26975a6ac9eaSMiao Xie 
26985a6ac9eaSMiao Xie static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
26995a6ac9eaSMiao Xie {
27005a6ac9eaSMiao Xie 	int bios_to_read = 0;
27015a6ac9eaSMiao Xie 	struct bio_list bio_list;
27025a6ac9eaSMiao Xie 	int ret;
27033e77605dSQu Wenruo 	int sectornr;
27045a6ac9eaSMiao Xie 	int stripe;
27055a6ac9eaSMiao Xie 	struct bio *bio;
27065a6ac9eaSMiao Xie 
2707785884fcSLiu Bo 	bio_list_init(&bio_list);
2708785884fcSLiu Bo 
27095a6ac9eaSMiao Xie 	ret = alloc_rbio_essential_pages(rbio);
27105a6ac9eaSMiao Xie 	if (ret)
27115a6ac9eaSMiao Xie 		goto cleanup;
27125a6ac9eaSMiao Xie 
27135a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
27145a6ac9eaSMiao Xie 	/*
27155a6ac9eaSMiao Xie 	 * build a list of bios to read all the missing parts of this
27165a6ac9eaSMiao Xie 	 * stripe
27175a6ac9eaSMiao Xie 	 */
27182c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2719*c67c68ebSQu Wenruo 		for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
27203e77605dSQu Wenruo 			struct sector_ptr *sector;
27215a6ac9eaSMiao Xie 			/*
27223e77605dSQu Wenruo 			 * We want to find all the sectors missing from the
27233e77605dSQu Wenruo 			 * rbio and read them from the disk.  If sector_in_rbio()
27243e77605dSQu Wenruo 			 * finds a sector in the bio list we don't need to read
27253e77605dSQu Wenruo 			 * it off the stripe.
27265a6ac9eaSMiao Xie 			 */
27273e77605dSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
27283e77605dSQu Wenruo 			if (sector)
27295a6ac9eaSMiao Xie 				continue;
27305a6ac9eaSMiao Xie 
27313e77605dSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
27325a6ac9eaSMiao Xie 			/*
27333e77605dSQu Wenruo 			 * The bio cache may have handed us an uptodate sector.
27343e77605dSQu Wenruo 			 * If so, be happy and use it.
27355a6ac9eaSMiao Xie 			 */
27363e77605dSQu Wenruo 			if (sector->uptodate)
27375a6ac9eaSMiao Xie 				continue;
27385a6ac9eaSMiao Xie 
27393e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector,
27403e77605dSQu Wenruo 						 stripe, sectornr, rbio->stripe_len,
27413e77605dSQu Wenruo 						 REQ_OP_READ);
27425a6ac9eaSMiao Xie 			if (ret)
27435a6ac9eaSMiao Xie 				goto cleanup;
27445a6ac9eaSMiao Xie 		}
27455a6ac9eaSMiao Xie 	}
27465a6ac9eaSMiao Xie 
27475a6ac9eaSMiao Xie 	bios_to_read = bio_list_size(&bio_list);
27485a6ac9eaSMiao Xie 	if (!bios_to_read) {
27495a6ac9eaSMiao Xie 		/*
27505a6ac9eaSMiao Xie 		 * this can happen if others have merged with
27515a6ac9eaSMiao Xie 		 * us; it means there is nothing left to read.
27525a6ac9eaSMiao Xie 		 * But if there are missing devices it may not be
27535a6ac9eaSMiao Xie 		 * safe to do the full stripe write yet.
27545a6ac9eaSMiao Xie 		 */
27555a6ac9eaSMiao Xie 		goto finish;
27565a6ac9eaSMiao Xie 	}
27575a6ac9eaSMiao Xie 
27585a6ac9eaSMiao Xie 	/*
27594c664611SQu Wenruo 	 * The bioc may be freed once we submit the last bio. Make sure not to
27604c664611SQu Wenruo 	 * touch it after that.
27615a6ac9eaSMiao Xie 	 */
27625a6ac9eaSMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
2763bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
27645a6ac9eaSMiao Xie 		bio->bi_end_io = raid56_parity_scrub_end_io;
27655a6ac9eaSMiao Xie 
27666a258d72SQu Wenruo 		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
27675a6ac9eaSMiao Xie 
27684e49ea4aSMike Christie 		submit_bio(bio);
27695a6ac9eaSMiao Xie 	}
27705a6ac9eaSMiao Xie 	/* the actual write will happen once the reads are done */
27715a6ac9eaSMiao Xie 	return;
27725a6ac9eaSMiao Xie 
27735a6ac9eaSMiao Xie cleanup:
277458efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2775785884fcSLiu Bo 
2776785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2777785884fcSLiu Bo 		bio_put(bio);
2778785884fcSLiu Bo 
27795a6ac9eaSMiao Xie 	return;
27805a6ac9eaSMiao Xie 
27815a6ac9eaSMiao Xie finish:
27825a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
27835a6ac9eaSMiao Xie }
27845a6ac9eaSMiao Xie 
2785385de0efSChristoph Hellwig static void scrub_parity_work(struct work_struct *work)
27865a6ac9eaSMiao Xie {
27875a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
27885a6ac9eaSMiao Xie 
27895a6ac9eaSMiao Xie 	rbio = container_of(work, struct btrfs_raid_bio, work);
27905a6ac9eaSMiao Xie 	raid56_parity_scrub_stripe(rbio);
27915a6ac9eaSMiao Xie }
27925a6ac9eaSMiao Xie 
27935a6ac9eaSMiao Xie void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
27945a6ac9eaSMiao Xie {
27955a6ac9eaSMiao Xie 	if (!lock_stripe_add(rbio))
2796a81b747dSDavid Sterba 		start_async_work(rbio, scrub_parity_work);
27975a6ac9eaSMiao Xie }
2798b4ee1782SOmar Sandoval 
2799b4ee1782SOmar Sandoval /* The following code is used for dev replace of a missing RAID 5/6 device. */
2800b4ee1782SOmar Sandoval 
2801b4ee1782SOmar Sandoval struct btrfs_raid_bio *
28026a258d72SQu Wenruo raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc,
28036a258d72SQu Wenruo 			  u64 length)
2804b4ee1782SOmar Sandoval {
28056a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
2806b4ee1782SOmar Sandoval 	struct btrfs_raid_bio *rbio;
2807b4ee1782SOmar Sandoval 
28084c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, length);
2809b4ee1782SOmar Sandoval 	if (IS_ERR(rbio))
2810b4ee1782SOmar Sandoval 		return NULL;
2811b4ee1782SOmar Sandoval 
2812b4ee1782SOmar Sandoval 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2813b4ee1782SOmar Sandoval 	bio_list_add(&rbio->bio_list, bio);
2814b4ee1782SOmar Sandoval 	/*
2815b4ee1782SOmar Sandoval 	 * This is a special bio which is used to hold the completion handler
2816b4ee1782SOmar Sandoval 	 * and keep the rbio similar to the other types
2817b4ee1782SOmar Sandoval 	 */
2818b4ee1782SOmar Sandoval 	ASSERT(!bio->bi_iter.bi_size);
2819b4ee1782SOmar Sandoval 
2820b4ee1782SOmar Sandoval 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2821b4ee1782SOmar Sandoval 	if (rbio->faila == -1) {
2822b4ee1782SOmar Sandoval 		BUG();
2823b4ee1782SOmar Sandoval 		kfree(rbio);
2824b4ee1782SOmar Sandoval 		return NULL;
2825b4ee1782SOmar Sandoval 	}
2826b4ee1782SOmar Sandoval 
2827ae6529c3SQu Wenruo 	/*
28284c664611SQu Wenruo 	 * When we get the bioc, we have already increased the bio_counter;
2829ae6529c3SQu Wenruo 	 * record that here so we can release it at rbio_orig_end_io().
2830ae6529c3SQu Wenruo 	 */
2831ae6529c3SQu Wenruo 	rbio->generic_bio_cnt = 1;
2832ae6529c3SQu Wenruo 
2833b4ee1782SOmar Sandoval 	return rbio;
2834b4ee1782SOmar Sandoval }
2835b4ee1782SOmar Sandoval 
2836b4ee1782SOmar Sandoval void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2837b4ee1782SOmar Sandoval {
2838b4ee1782SOmar Sandoval 	if (!lock_stripe_add(rbio))
2839e66d8d5aSDavid Sterba 		start_async_work(rbio, read_rebuild_work);
2840b4ee1782SOmar Sandoval }
2841