xref: /linux/fs/btrfs/raid56.c (revision 75b47033296595efb208cc563cbb8cf4fb7c3ebc)
1c1d7c514SDavid Sterba // SPDX-License-Identifier: GPL-2.0
253b381b3SDavid Woodhouse /*
353b381b3SDavid Woodhouse  * Copyright (C) 2012 Fusion-io  All rights reserved.
453b381b3SDavid Woodhouse  * Copyright (C) 2012 Intel Corp. All rights reserved.
553b381b3SDavid Woodhouse  */
6c1d7c514SDavid Sterba 
753b381b3SDavid Woodhouse #include <linux/sched.h>
853b381b3SDavid Woodhouse #include <linux/bio.h>
953b381b3SDavid Woodhouse #include <linux/slab.h>
1053b381b3SDavid Woodhouse #include <linux/blkdev.h>
1153b381b3SDavid Woodhouse #include <linux/raid/pq.h>
1253b381b3SDavid Woodhouse #include <linux/hash.h>
1353b381b3SDavid Woodhouse #include <linux/list_sort.h>
1453b381b3SDavid Woodhouse #include <linux/raid/xor.h>
15818e010bSDavid Sterba #include <linux/mm.h>
169b569ea0SJosef Bacik #include "messages.h"
17cea62800SJohannes Thumshirn #include "misc.h"
1853b381b3SDavid Woodhouse #include "ctree.h"
1953b381b3SDavid Woodhouse #include "disk-io.h"
2053b381b3SDavid Woodhouse #include "volumes.h"
2153b381b3SDavid Woodhouse #include "raid56.h"
2253b381b3SDavid Woodhouse #include "async-thread.h"
2353b381b3SDavid Woodhouse 
2453b381b3SDavid Woodhouse /* set when additional merges to this rbio are not allowed */
2553b381b3SDavid Woodhouse #define RBIO_RMW_LOCKED_BIT	1
2653b381b3SDavid Woodhouse 
274ae10b3aSChris Mason /*
284ae10b3aSChris Mason  * set when this rbio is sitting in the hash, but it is just a cache
294ae10b3aSChris Mason  * of past RMW
304ae10b3aSChris Mason  */
314ae10b3aSChris Mason #define RBIO_CACHE_BIT		2
324ae10b3aSChris Mason 
334ae10b3aSChris Mason /*
344ae10b3aSChris Mason  * set when it is safe to trust the stripe_pages for caching
354ae10b3aSChris Mason  */
364ae10b3aSChris Mason #define RBIO_CACHE_READY_BIT	3
374ae10b3aSChris Mason 
384ae10b3aSChris Mason #define RBIO_CACHE_SIZE 1024
394ae10b3aSChris Mason 
408a953348SDavid Sterba #define BTRFS_STRIPE_HASH_TABLE_BITS				11
418a953348SDavid Sterba 
428a953348SDavid Sterba /* Used by the raid56 code to lock stripes for read/modify/write */
438a953348SDavid Sterba struct btrfs_stripe_hash {
448a953348SDavid Sterba 	struct list_head hash_list;
458a953348SDavid Sterba 	spinlock_t lock;
468a953348SDavid Sterba };
478a953348SDavid Sterba 
488a953348SDavid Sterba /* Used by the raid56 code to lock stripes for read/modify/write */
498a953348SDavid Sterba struct btrfs_stripe_hash_table {
508a953348SDavid Sterba 	struct list_head stripe_cache;
518a953348SDavid Sterba 	spinlock_t cache_lock;
528a953348SDavid Sterba 	int cache_size;
538a953348SDavid Sterba 	struct btrfs_stripe_hash table[];
548a953348SDavid Sterba };
558a953348SDavid Sterba 
56eb357060SQu Wenruo /*
57eb357060SQu Wenruo  * A bvec like structure to present a sector inside a page.
58eb357060SQu Wenruo  *
59eb357060SQu Wenruo  * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
60eb357060SQu Wenruo  */
61eb357060SQu Wenruo struct sector_ptr {
62eb357060SQu Wenruo 	struct page *page;
6300425dd9SQu Wenruo 	unsigned int pgoff:24;
6400425dd9SQu Wenruo 	unsigned int uptodate:8;
65eb357060SQu Wenruo };
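
/*
 * Example of the packing above, assuming 64K pages and a 4K sectorsize:
 * the last sector of a page sits at pgoff = 15 * 4096 = 61440, well inside
 * the 24-bit field (which can express offsets up to 16M - 1), while
 * uptodate only needs a flag.  Both bitfields share a single 32-bit word
 * next to the page pointer.
 */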
66eb357060SQu Wenruo 
6793723095SQu Wenruo static void rmw_rbio_work(struct work_struct *work);
6893723095SQu Wenruo static void rmw_rbio_work_locked(struct work_struct *work);
6953b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
7053b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
7153b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio);
7253b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
7353b381b3SDavid Woodhouse 
746bfd0133SQu Wenruo static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check);
756bfd0133SQu Wenruo static void scrub_rbio_work_locked(struct work_struct *work);
765a6ac9eaSMiao Xie 
77797d74b7SQu Wenruo static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
78797d74b7SQu Wenruo {
792942a50dSQu Wenruo 	bitmap_free(rbio->error_bitmap);
80797d74b7SQu Wenruo 	kfree(rbio->stripe_pages);
81797d74b7SQu Wenruo 	kfree(rbio->bio_sectors);
82797d74b7SQu Wenruo 	kfree(rbio->stripe_sectors);
83797d74b7SQu Wenruo 	kfree(rbio->finish_pointers);
84797d74b7SQu Wenruo }
85797d74b7SQu Wenruo 
86ff2b64a2SQu Wenruo static void free_raid_bio(struct btrfs_raid_bio *rbio)
87ff2b64a2SQu Wenruo {
88ff2b64a2SQu Wenruo 	int i;
89ff2b64a2SQu Wenruo 
90ff2b64a2SQu Wenruo 	if (!refcount_dec_and_test(&rbio->refs))
91ff2b64a2SQu Wenruo 		return;
92ff2b64a2SQu Wenruo 
93ff2b64a2SQu Wenruo 	WARN_ON(!list_empty(&rbio->stripe_cache));
94ff2b64a2SQu Wenruo 	WARN_ON(!list_empty(&rbio->hash_list));
95ff2b64a2SQu Wenruo 	WARN_ON(!bio_list_empty(&rbio->bio_list));
96ff2b64a2SQu Wenruo 
97ff2b64a2SQu Wenruo 	for (i = 0; i < rbio->nr_pages; i++) {
98ff2b64a2SQu Wenruo 		if (rbio->stripe_pages[i]) {
99ff2b64a2SQu Wenruo 			__free_page(rbio->stripe_pages[i]);
100ff2b64a2SQu Wenruo 			rbio->stripe_pages[i] = NULL;
101ff2b64a2SQu Wenruo 		}
102ff2b64a2SQu Wenruo 	}
103ff2b64a2SQu Wenruo 
104ff2b64a2SQu Wenruo 	btrfs_put_bioc(rbio->bioc);
105797d74b7SQu Wenruo 	free_raid_bio_pointers(rbio);
106ff2b64a2SQu Wenruo 	kfree(rbio);
107ff2b64a2SQu Wenruo }
108ff2b64a2SQu Wenruo 
109385de0efSChristoph Hellwig static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
110ac638859SDavid Sterba {
111385de0efSChristoph Hellwig 	INIT_WORK(&rbio->work, work_func);
112385de0efSChristoph Hellwig 	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
113ac638859SDavid Sterba }
114ac638859SDavid Sterba 
11553b381b3SDavid Woodhouse /*
11653b381b3SDavid Woodhouse  * the stripe hash table is used for locking, and to collect
11753b381b3SDavid Woodhouse  * bios in hopes of making a full stripe
11853b381b3SDavid Woodhouse  */
11953b381b3SDavid Woodhouse int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
12053b381b3SDavid Woodhouse {
12153b381b3SDavid Woodhouse 	struct btrfs_stripe_hash_table *table;
12253b381b3SDavid Woodhouse 	struct btrfs_stripe_hash_table *x;
12353b381b3SDavid Woodhouse 	struct btrfs_stripe_hash *cur;
12453b381b3SDavid Woodhouse 	struct btrfs_stripe_hash *h;
12553b381b3SDavid Woodhouse 	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
12653b381b3SDavid Woodhouse 	int i;
12753b381b3SDavid Woodhouse 
12853b381b3SDavid Woodhouse 	if (info->stripe_hash_table)
12953b381b3SDavid Woodhouse 		return 0;
13053b381b3SDavid Woodhouse 
13183c8266aSDavid Sterba 	/*
13283c8266aSDavid Sterba 	 * The table is large, starting with order 4 and can go as high as
13383c8266aSDavid Sterba 	 * order 7 in case lock debugging is turned on.
13483c8266aSDavid Sterba 	 *
13583c8266aSDavid Sterba 	 * Try harder to allocate and fall back to vmalloc to lower the chance
13683c8266aSDavid Sterba 	 * of a failing mount.
13783c8266aSDavid Sterba 	 */
138ee787f95SDavid Sterba 	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
13953b381b3SDavid Woodhouse 	if (!table)
14053b381b3SDavid Woodhouse 		return -ENOMEM;
14153b381b3SDavid Woodhouse 
1424ae10b3aSChris Mason 	spin_lock_init(&table->cache_lock);
1434ae10b3aSChris Mason 	INIT_LIST_HEAD(&table->stripe_cache);
1444ae10b3aSChris Mason 
14553b381b3SDavid Woodhouse 	h = table->table;
14653b381b3SDavid Woodhouse 
14753b381b3SDavid Woodhouse 	for (i = 0; i < num_entries; i++) {
14853b381b3SDavid Woodhouse 		cur = h + i;
14953b381b3SDavid Woodhouse 		INIT_LIST_HEAD(&cur->hash_list);
15053b381b3SDavid Woodhouse 		spin_lock_init(&cur->lock);
15153b381b3SDavid Woodhouse 	}
15253b381b3SDavid Woodhouse 
15353b381b3SDavid Woodhouse 	x = cmpxchg(&info->stripe_hash_table, NULL, table);
154f749303bSWang Shilong 	kvfree(x);
15553b381b3SDavid Woodhouse 	return 0;
15653b381b3SDavid Woodhouse }
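
/*
 * Worked example of the sizing above: BTRFS_STRIPE_HASH_TABLE_BITS == 11
 * gives 2048 buckets, so struct_size() requests
 *
 *	sizeof(struct btrfs_stripe_hash_table) +
 *		2048 * sizeof(struct btrfs_stripe_hash)
 *
 * which is roughly 48K (order 4) on a typical 64-bit build; a debug
 * spinlock_t inflates each bucket, which is where the order-7 worst case
 * mentioned in the function comes from.  kvzalloc() tries kmalloc() first
 * and falls back to vmalloc() transparently, so the cmpxchg()/kvfree()
 * pair handles the losing racer for either kind of allocation.
 */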
15753b381b3SDavid Woodhouse 
15853b381b3SDavid Woodhouse /*
1594ae10b3aSChris Mason  * caching an rbio means to copy anything from the
160ac26df8bSQu Wenruo  * bio_sectors array into the stripe_pages array.  We
1614ae10b3aSChris Mason  * use the per-sector uptodate bit in the stripe_sectors array
1624ae10b3aSChris Mason  * to indicate whether it has valid data
1634ae10b3aSChris Mason  *
1644ae10b3aSChris Mason  * once the caching is done, we set the cache ready
1654ae10b3aSChris Mason  * bit.
1664ae10b3aSChris Mason  */
1674ae10b3aSChris Mason static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
1684ae10b3aSChris Mason {
1694ae10b3aSChris Mason 	int i;
1704ae10b3aSChris Mason 	int ret;
1714ae10b3aSChris Mason 
1724ae10b3aSChris Mason 	ret = alloc_rbio_pages(rbio);
1734ae10b3aSChris Mason 	if (ret)
1744ae10b3aSChris Mason 		return;
1754ae10b3aSChris Mason 
17600425dd9SQu Wenruo 	for (i = 0; i < rbio->nr_sectors; i++) {
17700425dd9SQu Wenruo 		/* Some range not covered by bio (partial write), skip it */
17888074c8bSQu Wenruo 		if (!rbio->bio_sectors[i].page) {
17988074c8bSQu Wenruo 			/*
18088074c8bSQu Wenruo 			 * Even if the sector is not covered by bio, if it is
18188074c8bSQu Wenruo 			 * a data sector it should still be uptodate as it is
18288074c8bSQu Wenruo 			 * read from disk.
18388074c8bSQu Wenruo 			 */
18488074c8bSQu Wenruo 			if (i < rbio->nr_data * rbio->stripe_nsectors)
18588074c8bSQu Wenruo 				ASSERT(rbio->stripe_sectors[i].uptodate);
18600425dd9SQu Wenruo 			continue;
18788074c8bSQu Wenruo 		}
18800425dd9SQu Wenruo 
18900425dd9SQu Wenruo 		ASSERT(rbio->stripe_sectors[i].page);
19000425dd9SQu Wenruo 		memcpy_page(rbio->stripe_sectors[i].page,
19100425dd9SQu Wenruo 			    rbio->stripe_sectors[i].pgoff,
19200425dd9SQu Wenruo 			    rbio->bio_sectors[i].page,
19300425dd9SQu Wenruo 			    rbio->bio_sectors[i].pgoff,
19400425dd9SQu Wenruo 			    rbio->bioc->fs_info->sectorsize);
19500425dd9SQu Wenruo 		rbio->stripe_sectors[i].uptodate = 1;
19600425dd9SQu Wenruo 	}
1974ae10b3aSChris Mason 	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1984ae10b3aSChris Mason }
1994ae10b3aSChris Mason 
2004ae10b3aSChris Mason /*
20153b381b3SDavid Woodhouse  * we hash on the first logical address of the stripe
20253b381b3SDavid Woodhouse  */
20353b381b3SDavid Woodhouse static int rbio_bucket(struct btrfs_raid_bio *rbio)
20453b381b3SDavid Woodhouse {
2054c664611SQu Wenruo 	u64 num = rbio->bioc->raid_map[0];
20653b381b3SDavid Woodhouse 
20753b381b3SDavid Woodhouse 	/*
20853b381b3SDavid Woodhouse 	 * we shift down quite a bit.  We're using byte
20953b381b3SDavid Woodhouse 	 * addressing, and most of the lower bits are zeros.
21053b381b3SDavid Woodhouse 	 * This tends to upset hash_64, and it consistently
21153b381b3SDavid Woodhouse 	 * returns just one or two different values.
21253b381b3SDavid Woodhouse 	 *
21353b381b3SDavid Woodhouse 	 * shifting off the lower bits fixes things.
21453b381b3SDavid Woodhouse 	 */
21553b381b3SDavid Woodhouse 	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
21653b381b3SDavid Woodhouse }
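
/*
 * Worked example: a full stripe at logical address 0x123400000 hashes as
 * hash_64(0x123400000 >> 16, 11) == hash_64(0x12340, 11), yielding a
 * bucket in [0, 2047].  Without the shift, 64K-aligned addresses would
 * hand hash_64() values whose low bits never vary, which is the clumping
 * the comment above describes.
 */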
21753b381b3SDavid Woodhouse 
218d4e28d9bSQu Wenruo static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
219d4e28d9bSQu Wenruo 				       unsigned int page_nr)
220d4e28d9bSQu Wenruo {
221d4e28d9bSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
222d4e28d9bSQu Wenruo 	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
223d4e28d9bSQu Wenruo 	int i;
224d4e28d9bSQu Wenruo 
225d4e28d9bSQu Wenruo 	ASSERT(page_nr < rbio->nr_pages);
226d4e28d9bSQu Wenruo 
227d4e28d9bSQu Wenruo 	for (i = sectors_per_page * page_nr;
228d4e28d9bSQu Wenruo 	     i < sectors_per_page * page_nr + sectors_per_page;
229d4e28d9bSQu Wenruo 	     i++) {
230d4e28d9bSQu Wenruo 		if (!rbio->stripe_sectors[i].uptodate)
231d4e28d9bSQu Wenruo 			return false;
232d4e28d9bSQu Wenruo 	}
233d4e28d9bSQu Wenruo 	return true;
234d4e28d9bSQu Wenruo }
235d4e28d9bSQu Wenruo 
23653b381b3SDavid Woodhouse /*
237eb357060SQu Wenruo  * Update the stripe_sectors[] array to use correct page and pgoff
238eb357060SQu Wenruo  *
239eb357060SQu Wenruo  * Should be called every time any page pointer in stripe_pages[] is modified.
240eb357060SQu Wenruo  */
241eb357060SQu Wenruo static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
242eb357060SQu Wenruo {
243eb357060SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
244eb357060SQu Wenruo 	u32 offset;
245eb357060SQu Wenruo 	int i;
246eb357060SQu Wenruo 
247eb357060SQu Wenruo 	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
248eb357060SQu Wenruo 		int page_index = offset >> PAGE_SHIFT;
249eb357060SQu Wenruo 
250eb357060SQu Wenruo 		ASSERT(page_index < rbio->nr_pages);
251eb357060SQu Wenruo 		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
252eb357060SQu Wenruo 		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
253eb357060SQu Wenruo 	}
254eb357060SQu Wenruo }
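
/*
 * Illustration of the resulting layout, assuming a 4K sectorsize: with 4K
 * pages every sector owns a full page (pgoff == 0 throughout), while with
 * 64K pages sectors 0-15 all reference stripe_pages[0] at pgoff 0, 4K,
 * ..., 60K, sectors 16-31 reference stripe_pages[1], and so on.
 */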
255eb357060SQu Wenruo 
2564d100466SQu Wenruo static void steal_rbio_page(struct btrfs_raid_bio *src,
2574d100466SQu Wenruo 			    struct btrfs_raid_bio *dest, int page_nr)
2584d100466SQu Wenruo {
2594d100466SQu Wenruo 	const u32 sectorsize = src->bioc->fs_info->sectorsize;
2604d100466SQu Wenruo 	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
2614d100466SQu Wenruo 	int i;
2624d100466SQu Wenruo 
2634d100466SQu Wenruo 	if (dest->stripe_pages[page_nr])
2644d100466SQu Wenruo 		__free_page(dest->stripe_pages[page_nr]);
2654d100466SQu Wenruo 	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
2664d100466SQu Wenruo 	src->stripe_pages[page_nr] = NULL;
2674d100466SQu Wenruo 
2684d100466SQu Wenruo 	/* Also update the sector->uptodate bits. */
2694d100466SQu Wenruo 	for (i = sectors_per_page * page_nr;
2704d100466SQu Wenruo 	     i < sectors_per_page * page_nr + sectors_per_page; i++)
2714d100466SQu Wenruo 		dest->stripe_sectors[i].uptodate = true;
2724d100466SQu Wenruo }
2734d100466SQu Wenruo 
27488074c8bSQu Wenruo static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
27588074c8bSQu Wenruo {
27688074c8bSQu Wenruo 	const int sector_nr = (page_nr << PAGE_SHIFT) >>
27788074c8bSQu Wenruo 			      rbio->bioc->fs_info->sectorsize_bits;
27888074c8bSQu Wenruo 
27988074c8bSQu Wenruo 	/*
28088074c8bSQu Wenruo 	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
28188074c8bSQu Wenruo 	 * we won't have a page which is half data half parity.
28288074c8bSQu Wenruo 	 *
28388074c8bSQu Wenruo 	 * Thus if the first sector of the page belongs to data stripes, then
28488074c8bSQu Wenruo 	 * the full page belongs to data stripes.
28588074c8bSQu Wenruo 	 */
28688074c8bSQu Wenruo 	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
28788074c8bSQu Wenruo }
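
/*
 * Example with hypothetical geometry: a 6-device RAID6 rbio with
 * nr_data == 4 and stripe_nsectors == 16 has 64 data sectors followed by
 * 32 P/Q sectors.  With 4K pages and sectors, page_nr equals sector_nr,
 * so pages 0-63 are data pages and pages 64-95 belong to parity.
 */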
28888074c8bSQu Wenruo 
289eb357060SQu Wenruo /*
290d4e28d9bSQu Wenruo  * Stealing an rbio means taking all the uptodate pages from the stripe array
291d4e28d9bSQu Wenruo  * in the source rbio and putting them into the destination rbio.
292d4e28d9bSQu Wenruo  *
293d4e28d9bSQu Wenruo  * This will also update the involved stripe_sectors[] which are referring to
294d4e28d9bSQu Wenruo  * the old pages.
2954ae10b3aSChris Mason  */
2964ae10b3aSChris Mason static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
2974ae10b3aSChris Mason {
2984ae10b3aSChris Mason 	int i;
2994ae10b3aSChris Mason 
3004ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
3014ae10b3aSChris Mason 		return;
3024ae10b3aSChris Mason 
3034ae10b3aSChris Mason 	for (i = 0; i < dest->nr_pages; i++) {
30488074c8bSQu Wenruo 		struct page *p = src->stripe_pages[i];
30588074c8bSQu Wenruo 
30688074c8bSQu Wenruo 		/*
30788074c8bSQu Wenruo 		 * We don't need to steal P/Q pages as they will always be
30888074c8bSQu Wenruo 		 * regenerated for RMW or full write anyway.
30988074c8bSQu Wenruo 		 */
31088074c8bSQu Wenruo 		if (!is_data_stripe_page(src, i))
3114ae10b3aSChris Mason 			continue;
3124ae10b3aSChris Mason 
31388074c8bSQu Wenruo 		/*
31488074c8bSQu Wenruo 		 * If @src already has RBIO_CACHE_READY_BIT, it should have
31588074c8bSQu Wenruo 		 * all data stripe pages present and uptodate.
31688074c8bSQu Wenruo 		 */
31788074c8bSQu Wenruo 		ASSERT(p);
31888074c8bSQu Wenruo 		ASSERT(full_page_sectors_uptodate(src, i));
3194d100466SQu Wenruo 		steal_rbio_page(src, dest, i);
3204ae10b3aSChris Mason 	}
321eb357060SQu Wenruo 	index_stripe_sectors(dest);
322eb357060SQu Wenruo 	index_stripe_sectors(src);
3234ae10b3aSChris Mason }
3244ae10b3aSChris Mason 
3254ae10b3aSChris Mason /*
32653b381b3SDavid Woodhouse  * merging means we take the bio_list from the victim and
32753b381b3SDavid Woodhouse  * splice it into the destination.  The victim should
32853b381b3SDavid Woodhouse  * be discarded afterwards.
32953b381b3SDavid Woodhouse  *
33053b381b3SDavid Woodhouse  * must be called with dest->rbio_list_lock held
33153b381b3SDavid Woodhouse  */
33253b381b3SDavid Woodhouse static void merge_rbio(struct btrfs_raid_bio *dest,
33353b381b3SDavid Woodhouse 		       struct btrfs_raid_bio *victim)
33453b381b3SDavid Woodhouse {
33553b381b3SDavid Woodhouse 	bio_list_merge(&dest->bio_list, &victim->bio_list);
33653b381b3SDavid Woodhouse 	dest->bio_list_bytes += victim->bio_list_bytes;
337bd8f7e62SQu Wenruo 	/* Also inherit the bitmaps from @victim. */
338bd8f7e62SQu Wenruo 	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
339bd8f7e62SQu Wenruo 		  dest->stripe_nsectors);
34053b381b3SDavid Woodhouse 	bio_list_init(&victim->bio_list);
34153b381b3SDavid Woodhouse }
34253b381b3SDavid Woodhouse 
34353b381b3SDavid Woodhouse /*
3444ae10b3aSChris Mason  * used to prune items that are in the cache.  The caller
3454ae10b3aSChris Mason  * must hold the hash table lock.
3464ae10b3aSChris Mason  */
3474ae10b3aSChris Mason static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
3484ae10b3aSChris Mason {
3494ae10b3aSChris Mason 	int bucket = rbio_bucket(rbio);
3504ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
3514ae10b3aSChris Mason 	struct btrfs_stripe_hash *h;
3524ae10b3aSChris Mason 	int freeit = 0;
3534ae10b3aSChris Mason 
3544ae10b3aSChris Mason 	/*
3554ae10b3aSChris Mason 	 * check the bit again under the hash table lock.
3564ae10b3aSChris Mason 	 */
3574ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
3584ae10b3aSChris Mason 		return;
3594ae10b3aSChris Mason 
3606a258d72SQu Wenruo 	table = rbio->bioc->fs_info->stripe_hash_table;
3614ae10b3aSChris Mason 	h = table->table + bucket;
3624ae10b3aSChris Mason 
3634ae10b3aSChris Mason 	/* hold the lock for the bucket because we may be
3644ae10b3aSChris Mason 	 * removing it from the hash table
3654ae10b3aSChris Mason 	 */
3664ae10b3aSChris Mason 	spin_lock(&h->lock);
3674ae10b3aSChris Mason 
3684ae10b3aSChris Mason 	/*
3694ae10b3aSChris Mason 	 * hold the lock for the bio list because we need
3704ae10b3aSChris Mason 	 * to make sure the bio list is empty
3714ae10b3aSChris Mason 	 */
3724ae10b3aSChris Mason 	spin_lock(&rbio->bio_list_lock);
3734ae10b3aSChris Mason 
3744ae10b3aSChris Mason 	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
3754ae10b3aSChris Mason 		list_del_init(&rbio->stripe_cache);
3764ae10b3aSChris Mason 		table->cache_size -= 1;
3774ae10b3aSChris Mason 		freeit = 1;
3784ae10b3aSChris Mason 
3794ae10b3aSChris Mason 		/* if the bio list isn't empty, this rbio is
3804ae10b3aSChris Mason 		 * still involved in an IO.  We take it out
3814ae10b3aSChris Mason 		 * of the cache list, and drop the ref that
3824ae10b3aSChris Mason 		 * was held for the list.
3834ae10b3aSChris Mason 		 *
3844ae10b3aSChris Mason 		 * If the bio_list was empty, we also remove
3854ae10b3aSChris Mason 		 * the rbio from the hash_table, and drop
3864ae10b3aSChris Mason 		 * the corresponding ref
3874ae10b3aSChris Mason 		 */
3884ae10b3aSChris Mason 		if (bio_list_empty(&rbio->bio_list)) {
3894ae10b3aSChris Mason 			if (!list_empty(&rbio->hash_list)) {
3904ae10b3aSChris Mason 				list_del_init(&rbio->hash_list);
391dec95574SElena Reshetova 				refcount_dec(&rbio->refs);
3924ae10b3aSChris Mason 				BUG_ON(!list_empty(&rbio->plug_list));
3934ae10b3aSChris Mason 			}
3944ae10b3aSChris Mason 		}
3954ae10b3aSChris Mason 	}
3964ae10b3aSChris Mason 
3974ae10b3aSChris Mason 	spin_unlock(&rbio->bio_list_lock);
3984ae10b3aSChris Mason 	spin_unlock(&h->lock);
3994ae10b3aSChris Mason 
4004ae10b3aSChris Mason 	if (freeit)
401ff2b64a2SQu Wenruo 		free_raid_bio(rbio);
4024ae10b3aSChris Mason }
4034ae10b3aSChris Mason 
4044ae10b3aSChris Mason /*
4054ae10b3aSChris Mason  * prune a given rbio from the cache
4064ae10b3aSChris Mason  */
4074ae10b3aSChris Mason static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
4084ae10b3aSChris Mason {
4094ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
4104ae10b3aSChris Mason 	unsigned long flags;
4114ae10b3aSChris Mason 
4124ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
4134ae10b3aSChris Mason 		return;
4144ae10b3aSChris Mason 
4156a258d72SQu Wenruo 	table = rbio->bioc->fs_info->stripe_hash_table;
4164ae10b3aSChris Mason 
4174ae10b3aSChris Mason 	spin_lock_irqsave(&table->cache_lock, flags);
4184ae10b3aSChris Mason 	__remove_rbio_from_cache(rbio);
4194ae10b3aSChris Mason 	spin_unlock_irqrestore(&table->cache_lock, flags);
4204ae10b3aSChris Mason }
4214ae10b3aSChris Mason 
4224ae10b3aSChris Mason /*
4234ae10b3aSChris Mason  * remove everything in the cache
4244ae10b3aSChris Mason  */
42548a3b636SEric Sandeen static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
4264ae10b3aSChris Mason {
4274ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
4284ae10b3aSChris Mason 	unsigned long flags;
4294ae10b3aSChris Mason 	struct btrfs_raid_bio *rbio;
4304ae10b3aSChris Mason 
4314ae10b3aSChris Mason 	table = info->stripe_hash_table;
4324ae10b3aSChris Mason 
4334ae10b3aSChris Mason 	spin_lock_irqsave(&table->cache_lock, flags);
4344ae10b3aSChris Mason 	while (!list_empty(&table->stripe_cache)) {
4354ae10b3aSChris Mason 		rbio = list_entry(table->stripe_cache.next,
4364ae10b3aSChris Mason 				  struct btrfs_raid_bio,
4374ae10b3aSChris Mason 				  stripe_cache);
4384ae10b3aSChris Mason 		__remove_rbio_from_cache(rbio);
4394ae10b3aSChris Mason 	}
4404ae10b3aSChris Mason 	spin_unlock_irqrestore(&table->cache_lock, flags);
4414ae10b3aSChris Mason }
4424ae10b3aSChris Mason 
4434ae10b3aSChris Mason /*
4444ae10b3aSChris Mason  * remove all cached entries and free the hash table
4454ae10b3aSChris Mason  * used by unmount
44653b381b3SDavid Woodhouse  */
44753b381b3SDavid Woodhouse void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
44853b381b3SDavid Woodhouse {
44953b381b3SDavid Woodhouse 	if (!info->stripe_hash_table)
45053b381b3SDavid Woodhouse 		return;
4514ae10b3aSChris Mason 	btrfs_clear_rbio_cache(info);
452f749303bSWang Shilong 	kvfree(info->stripe_hash_table);
45353b381b3SDavid Woodhouse 	info->stripe_hash_table = NULL;
45453b381b3SDavid Woodhouse }
45553b381b3SDavid Woodhouse 
45653b381b3SDavid Woodhouse /*
4574ae10b3aSChris Mason  * insert an rbio into the stripe cache.  It
4584ae10b3aSChris Mason  * must have already been prepared by calling
4594ae10b3aSChris Mason  * cache_rbio_pages
4604ae10b3aSChris Mason  *
4614ae10b3aSChris Mason  * If this rbio was already cached, it gets
4624ae10b3aSChris Mason  * moved to the front of the lru.
4634ae10b3aSChris Mason  *
4644ae10b3aSChris Mason  * If the size of the rbio cache is too big, we
4654ae10b3aSChris Mason  * prune an item.
4664ae10b3aSChris Mason  */
4674ae10b3aSChris Mason static void cache_rbio(struct btrfs_raid_bio *rbio)
4684ae10b3aSChris Mason {
4694ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
4704ae10b3aSChris Mason 	unsigned long flags;
4714ae10b3aSChris Mason 
4724ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
4734ae10b3aSChris Mason 		return;
4744ae10b3aSChris Mason 
4756a258d72SQu Wenruo 	table = rbio->bioc->fs_info->stripe_hash_table;
4764ae10b3aSChris Mason 
4774ae10b3aSChris Mason 	spin_lock_irqsave(&table->cache_lock, flags);
4784ae10b3aSChris Mason 	spin_lock(&rbio->bio_list_lock);
4794ae10b3aSChris Mason 
4804ae10b3aSChris Mason 	/* bump our ref if we were not in the list before */
4814ae10b3aSChris Mason 	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
482dec95574SElena Reshetova 		refcount_inc(&rbio->refs);
4834ae10b3aSChris Mason 
4844ae10b3aSChris Mason 	if (!list_empty(&rbio->stripe_cache)) {
4854ae10b3aSChris Mason 		list_move(&rbio->stripe_cache, &table->stripe_cache);
4864ae10b3aSChris Mason 	} else {
4874ae10b3aSChris Mason 		list_add(&rbio->stripe_cache, &table->stripe_cache);
4884ae10b3aSChris Mason 		table->cache_size += 1;
4894ae10b3aSChris Mason 	}
4904ae10b3aSChris Mason 
4914ae10b3aSChris Mason 	spin_unlock(&rbio->bio_list_lock);
4924ae10b3aSChris Mason 
4934ae10b3aSChris Mason 	if (table->cache_size > RBIO_CACHE_SIZE) {
4944ae10b3aSChris Mason 		struct btrfs_raid_bio *found;
4954ae10b3aSChris Mason 
4964ae10b3aSChris Mason 		found = list_entry(table->stripe_cache.prev,
4974ae10b3aSChris Mason 				  struct btrfs_raid_bio,
4984ae10b3aSChris Mason 				  stripe_cache);
4994ae10b3aSChris Mason 
5004ae10b3aSChris Mason 		if (found != rbio)
5014ae10b3aSChris Mason 			__remove_rbio_from_cache(found);
5024ae10b3aSChris Mason 	}
5034ae10b3aSChris Mason 
5044ae10b3aSChris Mason 	spin_unlock_irqrestore(&table->cache_lock, flags);
5054ae10b3aSChris Mason }
5064ae10b3aSChris Mason 
5074ae10b3aSChris Mason /*
50853b381b3SDavid Woodhouse  * helper function to run the xor_blocks api.  It is only
50953b381b3SDavid Woodhouse  * able to do MAX_XOR_BLOCKS at a time, so we need to
51053b381b3SDavid Woodhouse  * loop through.
51153b381b3SDavid Woodhouse  */
51253b381b3SDavid Woodhouse static void run_xor(void **pages, int src_cnt, ssize_t len)
51353b381b3SDavid Woodhouse {
51453b381b3SDavid Woodhouse 	int src_off = 0;
51553b381b3SDavid Woodhouse 	int xor_src_cnt = 0;
51653b381b3SDavid Woodhouse 	void *dest = pages[src_cnt];
51753b381b3SDavid Woodhouse 
51853b381b3SDavid Woodhouse 	while (src_cnt > 0) {
51953b381b3SDavid Woodhouse 		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
52053b381b3SDavid Woodhouse 		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
52153b381b3SDavid Woodhouse 
52253b381b3SDavid Woodhouse 		src_cnt -= xor_src_cnt;
52353b381b3SDavid Woodhouse 		src_off += xor_src_cnt;
52453b381b3SDavid Woodhouse 	}
52553b381b3SDavid Woodhouse }
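
/*
 * Semantically this is a plain byte-wise XOR of all sources into the last
 * pointer.  A naive (and much slower) equivalent, for illustration only:
 *
 *	u8 *dest = pages[src_cnt];
 *	int s;
 *	ssize_t i;
 *
 *	for (s = 0; s < src_cnt; s++)
 *		for (i = 0; i < len; i++)
 *			dest[i] ^= ((u8 *)pages[s])[i];
 *
 * xor_blocks() does the same work with SIMD-optimized routines but accepts
 * at most MAX_XOR_BLOCKS sources per call, hence the chunked loop above.
 */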
52653b381b3SDavid Woodhouse 
52753b381b3SDavid Woodhouse /*
528176571a1SDavid Sterba  * Returns true if the bio list inside this rbio covers an entire stripe (no
529176571a1SDavid Sterba  * rmw required).
53053b381b3SDavid Woodhouse  */
53153b381b3SDavid Woodhouse static int rbio_is_full(struct btrfs_raid_bio *rbio)
53253b381b3SDavid Woodhouse {
53353b381b3SDavid Woodhouse 	unsigned long flags;
534176571a1SDavid Sterba 	unsigned long size = rbio->bio_list_bytes;
535176571a1SDavid Sterba 	int ret = 1;
53653b381b3SDavid Woodhouse 
53753b381b3SDavid Woodhouse 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
538ff18a4afSChristoph Hellwig 	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
539176571a1SDavid Sterba 		ret = 0;
540ff18a4afSChristoph Hellwig 	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
54153b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
542176571a1SDavid Sterba 
54353b381b3SDavid Woodhouse 	return ret;
54453b381b3SDavid Woodhouse }
54553b381b3SDavid Woodhouse 
54653b381b3SDavid Woodhouse /*
54753b381b3SDavid Woodhouse  * returns 1 if it is safe to merge two rbios together.
54853b381b3SDavid Woodhouse  * The merging is safe if the two rbios correspond to
54953b381b3SDavid Woodhouse  * the same stripe and if they are both going in the same
55053b381b3SDavid Woodhouse  * direction (read vs write), and if neither one is
55153b381b3SDavid Woodhouse  * locked for final IO
55253b381b3SDavid Woodhouse  *
55353b381b3SDavid Woodhouse  * The caller is responsible for locking such that
55453b381b3SDavid Woodhouse  * rmw_locked is safe to test
55553b381b3SDavid Woodhouse  */
55653b381b3SDavid Woodhouse static int rbio_can_merge(struct btrfs_raid_bio *last,
55753b381b3SDavid Woodhouse 			  struct btrfs_raid_bio *cur)
55853b381b3SDavid Woodhouse {
55953b381b3SDavid Woodhouse 	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
56053b381b3SDavid Woodhouse 	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
56153b381b3SDavid Woodhouse 		return 0;
56253b381b3SDavid Woodhouse 
5634ae10b3aSChris Mason 	/*
5644ae10b3aSChris Mason 	 * we can't merge with cached rbios, since the
5654ae10b3aSChris Mason 	 * idea is that when we merge the destination
5664ae10b3aSChris Mason 	 * rbio is going to run our IO for us.  We can
56701327610SNicholas D Steeves 	 * steal from cached rbios though, other functions
5684ae10b3aSChris Mason 	 * handle that.
5694ae10b3aSChris Mason 	 */
5704ae10b3aSChris Mason 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
5714ae10b3aSChris Mason 	    test_bit(RBIO_CACHE_BIT, &cur->flags))
5724ae10b3aSChris Mason 		return 0;
5734ae10b3aSChris Mason 
5744c664611SQu Wenruo 	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
57553b381b3SDavid Woodhouse 		return 0;
57653b381b3SDavid Woodhouse 
5775a6ac9eaSMiao Xie 	/* we can't merge with different operations */
5785a6ac9eaSMiao Xie 	if (last->operation != cur->operation)
57953b381b3SDavid Woodhouse 		return 0;
5805a6ac9eaSMiao Xie 	/*
5815a6ac9eaSMiao Xie 	 * We've need read the full stripe from the drive.
5825a6ac9eaSMiao Xie 	 * We need to read the full stripe from the drive, then check
5835a6ac9eaSMiao Xie 	 * and repair the parity and write the new results.
5845a6ac9eaSMiao Xie 	 * We're not allowed to add any new bios to the
5855a6ac9eaSMiao Xie 	 * bio list here, anyone else that wants to
5865a6ac9eaSMiao Xie 	 * change this stripe needs to do their own rmw.
5875a6ac9eaSMiao Xie 	 */
588db34be19SLiu Bo 	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
5895a6ac9eaSMiao Xie 		return 0;
59053b381b3SDavid Woodhouse 
591db34be19SLiu Bo 	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
592b4ee1782SOmar Sandoval 		return 0;
593b4ee1782SOmar Sandoval 
594cc54ff62SLiu Bo 	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
595cc54ff62SLiu Bo 		int fa = last->faila;
596cc54ff62SLiu Bo 		int fb = last->failb;
597cc54ff62SLiu Bo 		int cur_fa = cur->faila;
598cc54ff62SLiu Bo 		int cur_fb = cur->failb;
599cc54ff62SLiu Bo 
600cc54ff62SLiu Bo 		if (last->faila >= last->failb) {
601cc54ff62SLiu Bo 			fa = last->failb;
602cc54ff62SLiu Bo 			fb = last->faila;
603cc54ff62SLiu Bo 		}
604cc54ff62SLiu Bo 
605cc54ff62SLiu Bo 		if (cur->faila >= cur->failb) {
606cc54ff62SLiu Bo 			cur_fa = cur->failb;
607cc54ff62SLiu Bo 			cur_fb = cur->faila;
608cc54ff62SLiu Bo 		}
609cc54ff62SLiu Bo 
610cc54ff62SLiu Bo 		if (fa != cur_fa || fb != cur_fb)
611cc54ff62SLiu Bo 			return 0;
612cc54ff62SLiu Bo 	}
61353b381b3SDavid Woodhouse 	return 1;
61453b381b3SDavid Woodhouse }
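
/*
 * The faila/failb normalization above makes the final comparison
 * order-independent: a last rbio with (faila == 2, failb == 0) and a cur
 * rbio with (faila == 0, failb == 2) both normalize to (0, 2), so they
 * may merge because they need the same two stripes rebuilt.
 */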
61553b381b3SDavid Woodhouse 
6163e77605dSQu Wenruo static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
6173e77605dSQu Wenruo 					     unsigned int stripe_nr,
6183e77605dSQu Wenruo 					     unsigned int sector_nr)
6193e77605dSQu Wenruo {
6203e77605dSQu Wenruo 	ASSERT(stripe_nr < rbio->real_stripes);
6213e77605dSQu Wenruo 	ASSERT(sector_nr < rbio->stripe_nsectors);
6223e77605dSQu Wenruo 
6233e77605dSQu Wenruo 	return stripe_nr * rbio->stripe_nsectors + sector_nr;
6243e77605dSQu Wenruo }
6253e77605dSQu Wenruo 
6263e77605dSQu Wenruo /* Return a sector from rbio->stripe_sectors, not from the bio list */
6273e77605dSQu Wenruo static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
6283e77605dSQu Wenruo 					     unsigned int stripe_nr,
6293e77605dSQu Wenruo 					     unsigned int sector_nr)
6303e77605dSQu Wenruo {
6313e77605dSQu Wenruo 	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
6323e77605dSQu Wenruo 							      sector_nr)];
6333e77605dSQu Wenruo }
6343e77605dSQu Wenruo 
6351145059aSQu Wenruo /* Grab a sector inside P stripe */
6361145059aSQu Wenruo static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
6371145059aSQu Wenruo 					      unsigned int sector_nr)
638b7178a5fSZhao Lei {
6391145059aSQu Wenruo 	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
640b7178a5fSZhao Lei }
641b7178a5fSZhao Lei 
6421145059aSQu Wenruo /* Grab a sector inside Q stripe, return NULL if not RAID6 */
6431145059aSQu Wenruo static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
6441145059aSQu Wenruo 					      unsigned int sector_nr)
64553b381b3SDavid Woodhouse {
6462c8cdd6eSMiao Xie 	if (rbio->nr_data + 1 == rbio->real_stripes)
64753b381b3SDavid Woodhouse 		return NULL;
6481145059aSQu Wenruo 	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
6491145059aSQu Wenruo }
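
/*
 * The helpers above treat stripe_sectors[] as a 2-D array of real_stripes
 * rows by stripe_nsectors columns.  For example, in a 6-device RAID6 with
 * 16 sectors per stripe, rows 0-3 hold data, row 4 (nr_data) holds P and
 * row 5 (nr_data + 1) holds Q, so the Q sector of column 3 lives at
 * stripe_sectors[5 * 16 + 3].
 */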
6501145059aSQu Wenruo 
65153b381b3SDavid Woodhouse /*
65253b381b3SDavid Woodhouse  * The first stripe in the table for a logical address
65353b381b3SDavid Woodhouse  * has the lock.  rbios are added in one of three ways:
65453b381b3SDavid Woodhouse  *
65553b381b3SDavid Woodhouse  * 1) Nobody has the stripe locked yet.  The rbio is given
65653b381b3SDavid Woodhouse  * the lock and 0 is returned.  The caller must start the IO
65753b381b3SDavid Woodhouse  * themselves.
65853b381b3SDavid Woodhouse  *
65953b381b3SDavid Woodhouse  * 2) Someone has the stripe locked, but we're able to merge
66053b381b3SDavid Woodhouse  * with the lock owner.  The rbio is freed and the IO will
66153b381b3SDavid Woodhouse  * start automatically along with the existing rbio.  1 is returned.
66253b381b3SDavid Woodhouse  *
66353b381b3SDavid Woodhouse  * 3) Someone has the stripe locked, but we're not able to merge.
66453b381b3SDavid Woodhouse  * The rbio is added to the lock owner's plug list, or merged into
66553b381b3SDavid Woodhouse  * an rbio already on the plug list.  When the lock owner unlocks,
66653b381b3SDavid Woodhouse  * the next rbio on the list is run and the IO is started automatically.
66753b381b3SDavid Woodhouse  * 1 is returned
66853b381b3SDavid Woodhouse  *
66953b381b3SDavid Woodhouse  * If we return 0, the caller still owns the rbio and must continue with
67053b381b3SDavid Woodhouse  * IO submission.  If we return 1, the caller must assume the rbio has
67153b381b3SDavid Woodhouse  * already been freed.
67253b381b3SDavid Woodhouse  */
67353b381b3SDavid Woodhouse static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
67453b381b3SDavid Woodhouse {
675721860d5SJohannes Thumshirn 	struct btrfs_stripe_hash *h;
67653b381b3SDavid Woodhouse 	struct btrfs_raid_bio *cur;
67753b381b3SDavid Woodhouse 	struct btrfs_raid_bio *pending;
67853b381b3SDavid Woodhouse 	unsigned long flags;
67953b381b3SDavid Woodhouse 	struct btrfs_raid_bio *freeit = NULL;
6804ae10b3aSChris Mason 	struct btrfs_raid_bio *cache_drop = NULL;
68153b381b3SDavid Woodhouse 	int ret = 0;
68253b381b3SDavid Woodhouse 
6836a258d72SQu Wenruo 	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
684721860d5SJohannes Thumshirn 
68553b381b3SDavid Woodhouse 	spin_lock_irqsave(&h->lock, flags);
68653b381b3SDavid Woodhouse 	list_for_each_entry(cur, &h->hash_list, hash_list) {
6874c664611SQu Wenruo 		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
6889d6cb1b0SJohannes Thumshirn 			continue;
6899d6cb1b0SJohannes Thumshirn 
69053b381b3SDavid Woodhouse 		spin_lock(&cur->bio_list_lock);
69153b381b3SDavid Woodhouse 
6929d6cb1b0SJohannes Thumshirn 		/* Can we steal this cached rbio's pages? */
6934ae10b3aSChris Mason 		if (bio_list_empty(&cur->bio_list) &&
6944ae10b3aSChris Mason 		    list_empty(&cur->plug_list) &&
6954ae10b3aSChris Mason 		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
6964ae10b3aSChris Mason 		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
6974ae10b3aSChris Mason 			list_del_init(&cur->hash_list);
698dec95574SElena Reshetova 			refcount_dec(&cur->refs);
6994ae10b3aSChris Mason 
7004ae10b3aSChris Mason 			steal_rbio(cur, rbio);
7014ae10b3aSChris Mason 			cache_drop = cur;
7024ae10b3aSChris Mason 			spin_unlock(&cur->bio_list_lock);
7034ae10b3aSChris Mason 
7044ae10b3aSChris Mason 			goto lockit;
7054ae10b3aSChris Mason 		}
7064ae10b3aSChris Mason 
7079d6cb1b0SJohannes Thumshirn 		/* Can we merge into the lock owner? */
70853b381b3SDavid Woodhouse 		if (rbio_can_merge(cur, rbio)) {
70953b381b3SDavid Woodhouse 			merge_rbio(cur, rbio);
71053b381b3SDavid Woodhouse 			spin_unlock(&cur->bio_list_lock);
71153b381b3SDavid Woodhouse 			freeit = rbio;
71253b381b3SDavid Woodhouse 			ret = 1;
71353b381b3SDavid Woodhouse 			goto out;
71453b381b3SDavid Woodhouse 		}
71553b381b3SDavid Woodhouse 
71753b381b3SDavid Woodhouse 		/*
7189d6cb1b0SJohannes Thumshirn 		 * We couldn't merge with the running rbio, see if we can merge
7199d6cb1b0SJohannes Thumshirn 		 * with the pending ones.  We don't have to check for rmw_locked
7209d6cb1b0SJohannes Thumshirn 		 * because there is no way they are inside finish_rmw right now
72153b381b3SDavid Woodhouse 		 */
7229d6cb1b0SJohannes Thumshirn 		list_for_each_entry(pending, &cur->plug_list, plug_list) {
72353b381b3SDavid Woodhouse 			if (rbio_can_merge(pending, rbio)) {
72453b381b3SDavid Woodhouse 				merge_rbio(pending, rbio);
72553b381b3SDavid Woodhouse 				spin_unlock(&cur->bio_list_lock);
72653b381b3SDavid Woodhouse 				freeit = rbio;
72753b381b3SDavid Woodhouse 				ret = 1;
72853b381b3SDavid Woodhouse 				goto out;
72953b381b3SDavid Woodhouse 			}
73053b381b3SDavid Woodhouse 		}
73153b381b3SDavid Woodhouse 
7329d6cb1b0SJohannes Thumshirn 		/*
7339d6cb1b0SJohannes Thumshirn 		 * No merging, put us on the tail of the plug list, our rbio
7349d6cb1b0SJohannes Thumshirn 		 * will be started when the currently running rbio unlocks
73553b381b3SDavid Woodhouse 		 */
73653b381b3SDavid Woodhouse 		list_add_tail(&rbio->plug_list, &cur->plug_list);
73753b381b3SDavid Woodhouse 		spin_unlock(&cur->bio_list_lock);
73853b381b3SDavid Woodhouse 		ret = 1;
73953b381b3SDavid Woodhouse 		goto out;
74053b381b3SDavid Woodhouse 	}
7414ae10b3aSChris Mason lockit:
742dec95574SElena Reshetova 	refcount_inc(&rbio->refs);
74353b381b3SDavid Woodhouse 	list_add(&rbio->hash_list, &h->hash_list);
74453b381b3SDavid Woodhouse out:
74553b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&h->lock, flags);
7464ae10b3aSChris Mason 	if (cache_drop)
7474ae10b3aSChris Mason 		remove_rbio_from_cache(cache_drop);
74853b381b3SDavid Woodhouse 	if (freeit)
749ff2b64a2SQu Wenruo 		free_raid_bio(freeit);
75053b381b3SDavid Woodhouse 	return ret;
75153b381b3SDavid Woodhouse }
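
/*
 * A typical caller pattern (sketch, not a verbatim call site): the return
 * value tells the caller whether it still owns the rbio.
 *
 *	if (!lock_stripe_add(rbio))
 *		start_async_work(rbio, rmw_rbio_work);
 *
 * On a return of 1 the rbio was merged or queued on the plug list and the
 * lock owner will run (or has already freed) it, so the caller must not
 * touch it again.
 */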
75253b381b3SDavid Woodhouse 
753d817ce35SQu Wenruo static void recover_rbio_work_locked(struct work_struct *work);
754d817ce35SQu Wenruo 
75553b381b3SDavid Woodhouse /*
75653b381b3SDavid Woodhouse  * called as rmw or parity rebuild is completed.  If the plug list has more
75753b381b3SDavid Woodhouse  * rbios waiting for this stripe, the next one on the list will be started
75853b381b3SDavid Woodhouse  */
75953b381b3SDavid Woodhouse static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
76053b381b3SDavid Woodhouse {
76153b381b3SDavid Woodhouse 	int bucket;
76253b381b3SDavid Woodhouse 	struct btrfs_stripe_hash *h;
76353b381b3SDavid Woodhouse 	unsigned long flags;
7644ae10b3aSChris Mason 	int keep_cache = 0;
76553b381b3SDavid Woodhouse 
76653b381b3SDavid Woodhouse 	bucket = rbio_bucket(rbio);
7676a258d72SQu Wenruo 	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;
76853b381b3SDavid Woodhouse 
7694ae10b3aSChris Mason 	if (list_empty(&rbio->plug_list))
7704ae10b3aSChris Mason 		cache_rbio(rbio);
7714ae10b3aSChris Mason 
77253b381b3SDavid Woodhouse 	spin_lock_irqsave(&h->lock, flags);
77353b381b3SDavid Woodhouse 	spin_lock(&rbio->bio_list_lock);
77453b381b3SDavid Woodhouse 
77553b381b3SDavid Woodhouse 	if (!list_empty(&rbio->hash_list)) {
7764ae10b3aSChris Mason 		/*
7774ae10b3aSChris Mason 		 * if we're still cached and there is no other IO
7784ae10b3aSChris Mason 		 * to perform, just leave this rbio here for others
7794ae10b3aSChris Mason 		 * to steal from later
7804ae10b3aSChris Mason 		 */
7814ae10b3aSChris Mason 		if (list_empty(&rbio->plug_list) &&
7824ae10b3aSChris Mason 		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
7834ae10b3aSChris Mason 			keep_cache = 1;
7844ae10b3aSChris Mason 			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
7854ae10b3aSChris Mason 			BUG_ON(!bio_list_empty(&rbio->bio_list));
7864ae10b3aSChris Mason 			goto done;
7874ae10b3aSChris Mason 		}
78853b381b3SDavid Woodhouse 
78953b381b3SDavid Woodhouse 		list_del_init(&rbio->hash_list);
790dec95574SElena Reshetova 		refcount_dec(&rbio->refs);
79153b381b3SDavid Woodhouse 
79253b381b3SDavid Woodhouse 		/*
79353b381b3SDavid Woodhouse 		 * we use the plug list to hold all the rbios
79453b381b3SDavid Woodhouse 		 * waiting for the chance to lock this stripe.
79553b381b3SDavid Woodhouse 		 * hand the lock over to one of them.
79653b381b3SDavid Woodhouse 		 */
79753b381b3SDavid Woodhouse 		if (!list_empty(&rbio->plug_list)) {
79853b381b3SDavid Woodhouse 			struct btrfs_raid_bio *next;
79953b381b3SDavid Woodhouse 			struct list_head *head = rbio->plug_list.next;
80053b381b3SDavid Woodhouse 
80153b381b3SDavid Woodhouse 			next = list_entry(head, struct btrfs_raid_bio,
80253b381b3SDavid Woodhouse 					  plug_list);
80353b381b3SDavid Woodhouse 
80453b381b3SDavid Woodhouse 			list_del_init(&rbio->plug_list);
80553b381b3SDavid Woodhouse 
80653b381b3SDavid Woodhouse 			list_add(&next->hash_list, &h->hash_list);
807dec95574SElena Reshetova 			refcount_inc(&next->refs);
80853b381b3SDavid Woodhouse 			spin_unlock(&rbio->bio_list_lock);
80953b381b3SDavid Woodhouse 			spin_unlock_irqrestore(&h->lock, flags);
81053b381b3SDavid Woodhouse 
8111b94b556SMiao Xie 			if (next->operation == BTRFS_RBIO_READ_REBUILD)
812d817ce35SQu Wenruo 				start_async_work(next, recover_rbio_work_locked);
813b4ee1782SOmar Sandoval 			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
814b4ee1782SOmar Sandoval 				steal_rbio(rbio, next);
815d817ce35SQu Wenruo 				start_async_work(next, recover_rbio_work_locked);
816b4ee1782SOmar Sandoval 			} else if (next->operation == BTRFS_RBIO_WRITE) {
8174ae10b3aSChris Mason 				steal_rbio(rbio, next);
81893723095SQu Wenruo 				start_async_work(next, rmw_rbio_work_locked);
8195a6ac9eaSMiao Xie 			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
8205a6ac9eaSMiao Xie 				steal_rbio(rbio, next);
8216bfd0133SQu Wenruo 				start_async_work(next, scrub_rbio_work_locked);
8224ae10b3aSChris Mason 			}
82353b381b3SDavid Woodhouse 
82453b381b3SDavid Woodhouse 			goto done_nolock;
82553b381b3SDavid Woodhouse 		}
82653b381b3SDavid Woodhouse 	}
8274ae10b3aSChris Mason done:
82853b381b3SDavid Woodhouse 	spin_unlock(&rbio->bio_list_lock);
82953b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&h->lock, flags);
83053b381b3SDavid Woodhouse 
83153b381b3SDavid Woodhouse done_nolock:
8324ae10b3aSChris Mason 	if (!keep_cache)
8334ae10b3aSChris Mason 		remove_rbio_from_cache(rbio);
83453b381b3SDavid Woodhouse }
83553b381b3SDavid Woodhouse 
8367583d8d0SLiu Bo static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
83753b381b3SDavid Woodhouse {
8387583d8d0SLiu Bo 	struct bio *next;
8397583d8d0SLiu Bo 
8407583d8d0SLiu Bo 	while (cur) {
8417583d8d0SLiu Bo 		next = cur->bi_next;
8427583d8d0SLiu Bo 		cur->bi_next = NULL;
8437583d8d0SLiu Bo 		cur->bi_status = err;
8447583d8d0SLiu Bo 		bio_endio(cur);
8457583d8d0SLiu Bo 		cur = next;
8467583d8d0SLiu Bo 	}
84753b381b3SDavid Woodhouse }
84853b381b3SDavid Woodhouse 
84953b381b3SDavid Woodhouse /*
85053b381b3SDavid Woodhouse  * this frees the rbio and runs through all the bios in the
85153b381b3SDavid Woodhouse  * bio_list and calls end_io on them
85253b381b3SDavid Woodhouse  */
8534e4cbee9SChristoph Hellwig static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
85453b381b3SDavid Woodhouse {
85553b381b3SDavid Woodhouse 	struct bio *cur = bio_list_get(&rbio->bio_list);
8567583d8d0SLiu Bo 	struct bio *extra;
8574245215dSMiao Xie 
858bd8f7e62SQu Wenruo 	/*
859bd8f7e62SQu Wenruo 	 * Clear the data bitmap, as the rbio may be cached for later usage.
860bd8f7e62SQu Wenruo 	 * Do this before unlock_stripe() so there will be no new bio
861bd8f7e62SQu Wenruo 	 * for this rbio.
862bd8f7e62SQu Wenruo 	 */
863bd8f7e62SQu Wenruo 	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);
8644245215dSMiao Xie 
8657583d8d0SLiu Bo 	/*
8667583d8d0SLiu Bo 	 * At this moment, rbio->bio_list is empty, however since rbio does not
8677583d8d0SLiu Bo 	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
8687583d8d0SLiu Bo 	 * hash list, rbio may be merged with others so that rbio->bio_list
8697583d8d0SLiu Bo 	 * becomes non-empty.
8707583d8d0SLiu Bo 	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
8717583d8d0SLiu Bo 	 * more and we can call bio_endio() on all queued bios.
8727583d8d0SLiu Bo 	 */
8737583d8d0SLiu Bo 	unlock_stripe(rbio);
8747583d8d0SLiu Bo 	extra = bio_list_get(&rbio->bio_list);
875ff2b64a2SQu Wenruo 	free_raid_bio(rbio);
87653b381b3SDavid Woodhouse 
8777583d8d0SLiu Bo 	rbio_endio_bio_list(cur, err);
8787583d8d0SLiu Bo 	if (extra)
8797583d8d0SLiu Bo 		rbio_endio_bio_list(extra, err);
88053b381b3SDavid Woodhouse }
88153b381b3SDavid Woodhouse 
88253b381b3SDavid Woodhouse /*
88343dd529aSDavid Sterba  * Get a sector pointer specified by its @stripe_nr and @sector_nr.
8843e77605dSQu Wenruo  *
8853e77605dSQu Wenruo  * @rbio:               The raid bio
8863e77605dSQu Wenruo  * @stripe_nr:          Stripe number, valid range [0, real_stripe)
8873e77605dSQu Wenruo  * @sector_nr:		Sector number inside the stripe,
8883e77605dSQu Wenruo  *			valid range [0, stripe_nsectors)
8893e77605dSQu Wenruo  * @bio_list_only:      Whether to use sectors inside the bio list only.
8903e77605dSQu Wenruo  *
8913e77605dSQu Wenruo  * The read/modify/write code wants to reuse the original bio pages as much
8923e77605dSQu Wenruo  * as possible, and only use stripe_sectors as fallback.
8933e77605dSQu Wenruo  */
8943e77605dSQu Wenruo static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
8953e77605dSQu Wenruo 					 int stripe_nr, int sector_nr,
8963e77605dSQu Wenruo 					 bool bio_list_only)
8973e77605dSQu Wenruo {
8983e77605dSQu Wenruo 	struct sector_ptr *sector;
8993e77605dSQu Wenruo 	int index;
9003e77605dSQu Wenruo 
9013e77605dSQu Wenruo 	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
9023e77605dSQu Wenruo 	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
9033e77605dSQu Wenruo 
9043e77605dSQu Wenruo 	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
9053e77605dSQu Wenruo 	ASSERT(index >= 0 && index < rbio->nr_sectors);
9063e77605dSQu Wenruo 
9073e77605dSQu Wenruo 	spin_lock_irq(&rbio->bio_list_lock);
9083e77605dSQu Wenruo 	sector = &rbio->bio_sectors[index];
9093e77605dSQu Wenruo 	if (sector->page || bio_list_only) {
9103e77605dSQu Wenruo 		/* Don't return sector without a valid page pointer */
9113e77605dSQu Wenruo 		if (!sector->page)
9123e77605dSQu Wenruo 			sector = NULL;
9133e77605dSQu Wenruo 		spin_unlock_irq(&rbio->bio_list_lock);
9143e77605dSQu Wenruo 		return sector;
9153e77605dSQu Wenruo 	}
9163e77605dSQu Wenruo 	spin_unlock_irq(&rbio->bio_list_lock);
9173e77605dSQu Wenruo 
9183e77605dSQu Wenruo 	return &rbio->stripe_sectors[index];
9193e77605dSQu Wenruo }
9203e77605dSQu Wenruo 
92153b381b3SDavid Woodhouse /*
92253b381b3SDavid Woodhouse  * Allocation and initial setup for the btrfs_raid_bio.  Note that
92353b381b3SDavid Woodhouse  * this does not allocate any pages for rbio->stripe_pages.
92453b381b3SDavid Woodhouse  */
9252ff7e61eSJeff Mahoney static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
926ff18a4afSChristoph Hellwig 					 struct btrfs_io_context *bioc)
92753b381b3SDavid Woodhouse {
928843de58bSQu Wenruo 	const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
929ff18a4afSChristoph Hellwig 	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
930843de58bSQu Wenruo 	const unsigned int num_pages = stripe_npages * real_stripes;
931ff18a4afSChristoph Hellwig 	const unsigned int stripe_nsectors =
932ff18a4afSChristoph Hellwig 		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
93394efbe19SQu Wenruo 	const unsigned int num_sectors = stripe_nsectors * real_stripes;
93453b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
93553b381b3SDavid Woodhouse 
93694efbe19SQu Wenruo 	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
93794efbe19SQu Wenruo 	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
938c67c68ebSQu Wenruo 	/*
939c67c68ebSQu Wenruo 	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
940c67c68ebSQu Wenruo 	 * (at most 16) should be no larger than BITS_PER_LONG.
941c67c68ebSQu Wenruo 	 */
942c67c68ebSQu Wenruo 	ASSERT(stripe_nsectors <= BITS_PER_LONG);
943843de58bSQu Wenruo 
944797d74b7SQu Wenruo 	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
945af8e2d1dSMiao Xie 	if (!rbio)
94653b381b3SDavid Woodhouse 		return ERR_PTR(-ENOMEM);
947797d74b7SQu Wenruo 	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
948797d74b7SQu Wenruo 				     GFP_NOFS);
949797d74b7SQu Wenruo 	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
950797d74b7SQu Wenruo 				    GFP_NOFS);
951797d74b7SQu Wenruo 	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
952797d74b7SQu Wenruo 				       GFP_NOFS);
953797d74b7SQu Wenruo 	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
9542942a50dSQu Wenruo 	rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
955797d74b7SQu Wenruo 
956797d74b7SQu Wenruo 	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
9572942a50dSQu Wenruo 	    !rbio->finish_pointers || !rbio->error_bitmap) {
958797d74b7SQu Wenruo 		free_raid_bio_pointers(rbio);
959797d74b7SQu Wenruo 		kfree(rbio);
960797d74b7SQu Wenruo 		return ERR_PTR(-ENOMEM);
961797d74b7SQu Wenruo 	}
96253b381b3SDavid Woodhouse 
96353b381b3SDavid Woodhouse 	bio_list_init(&rbio->bio_list);
964d817ce35SQu Wenruo 	init_waitqueue_head(&rbio->io_wait);
96553b381b3SDavid Woodhouse 	INIT_LIST_HEAD(&rbio->plug_list);
96653b381b3SDavid Woodhouse 	spin_lock_init(&rbio->bio_list_lock);
9674ae10b3aSChris Mason 	INIT_LIST_HEAD(&rbio->stripe_cache);
96853b381b3SDavid Woodhouse 	INIT_LIST_HEAD(&rbio->hash_list);
969f1c29379SChristoph Hellwig 	btrfs_get_bioc(bioc);
9704c664611SQu Wenruo 	rbio->bioc = bioc;
97153b381b3SDavid Woodhouse 	rbio->nr_pages = num_pages;
97294efbe19SQu Wenruo 	rbio->nr_sectors = num_sectors;
9732c8cdd6eSMiao Xie 	rbio->real_stripes = real_stripes;
9745a6ac9eaSMiao Xie 	rbio->stripe_npages = stripe_npages;
97594efbe19SQu Wenruo 	rbio->stripe_nsectors = stripe_nsectors;
97653b381b3SDavid Woodhouse 	rbio->faila = -1;
97753b381b3SDavid Woodhouse 	rbio->failb = -1;
978dec95574SElena Reshetova 	refcount_set(&rbio->refs, 1);
979b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
980b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, 0);
98153b381b3SDavid Woodhouse 
9820b30f719SQu Wenruo 	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
9830b30f719SQu Wenruo 	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);
98453b381b3SDavid Woodhouse 
98553b381b3SDavid Woodhouse 	return rbio;
98653b381b3SDavid Woodhouse }
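
/*
 * Worked example of the sizing math, assuming 4K pages/sectors and no
 * replace target: a 6-device RAID6 bioc gives real_stripes == 6,
 * stripe_npages == 64K / 4K == 16 and stripe_nsectors == 16, so the rbio
 * tracks num_pages == 96 and num_sectors == 96, of which the first
 * nr_data == 4 stripes are data and the last two are P/Q.
 */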
98753b381b3SDavid Woodhouse 
98853b381b3SDavid Woodhouse /* allocate pages for all the stripes in the bio, including parity */
98953b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
99053b381b3SDavid Woodhouse {
991eb357060SQu Wenruo 	int ret;
992eb357060SQu Wenruo 
993eb357060SQu Wenruo 	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
994eb357060SQu Wenruo 	if (ret < 0)
995eb357060SQu Wenruo 		return ret;
996eb357060SQu Wenruo 	/* Mapping all sectors */
997eb357060SQu Wenruo 	index_stripe_sectors(rbio);
998eb357060SQu Wenruo 	return 0;
99953b381b3SDavid Woodhouse }
100053b381b3SDavid Woodhouse 
1001b7178a5fSZhao Lei /* only allocate pages for p/q stripes */
100253b381b3SDavid Woodhouse static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
100353b381b3SDavid Woodhouse {
1004f77183dcSQu Wenruo 	const int data_pages = rbio->nr_data * rbio->stripe_npages;
1005eb357060SQu Wenruo 	int ret;
100653b381b3SDavid Woodhouse 
1007eb357060SQu Wenruo 	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
1008dd137dd1SSweet Tea Dorminy 				     rbio->stripe_pages + data_pages);
1009eb357060SQu Wenruo 	if (ret < 0)
1010eb357060SQu Wenruo 		return ret;
1011eb357060SQu Wenruo 
1012eb357060SQu Wenruo 	index_stripe_sectors(rbio);
1013eb357060SQu Wenruo 	return 0;
101453b381b3SDavid Woodhouse }
101553b381b3SDavid Woodhouse 
101653b381b3SDavid Woodhouse /*
1017*75b47033SQu Wenruo  * Return the total number of errors found in the vertical stripe of @sector_nr.
1018*75b47033SQu Wenruo  *
1019*75b47033SQu Wenruo  * @faila and @failb will also be updated to the first and second stripe
1020*75b47033SQu Wenruo  * number of the errors.
1021*75b47033SQu Wenruo  */
1022*75b47033SQu Wenruo static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
1023*75b47033SQu Wenruo 				     int *faila, int *failb)
1024*75b47033SQu Wenruo {
1025*75b47033SQu Wenruo 	int stripe_nr;
1026*75b47033SQu Wenruo 	int found_errors = 0;
1027*75b47033SQu Wenruo 
1028*75b47033SQu Wenruo 	ASSERT(faila && failb);
1029*75b47033SQu Wenruo 	*faila = -1;
1030*75b47033SQu Wenruo 	*failb = -1;
1031*75b47033SQu Wenruo 
1032*75b47033SQu Wenruo 	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1033*75b47033SQu Wenruo 		int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;
1034*75b47033SQu Wenruo 
1035*75b47033SQu Wenruo 		if (test_bit(total_sector_nr, rbio->error_bitmap)) {
1036*75b47033SQu Wenruo 			found_errors++;
1037*75b47033SQu Wenruo 			if (*faila < 0)
1038*75b47033SQu Wenruo 				*faila = stripe_nr;
1039*75b47033SQu Wenruo 			else if (*failb < 0)
1040*75b47033SQu Wenruo 				*failb = stripe_nr;
1041*75b47033SQu Wenruo 		}
1042*75b47033SQu Wenruo 	}
1043*75b47033SQu Wenruo 	return found_errors;
1044*75b47033SQu Wenruo }
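
/*
 * Usage sketch (hypothetical caller, not a verbatim call site): recovery
 * walks each vertical stripe and gives up once more sectors have failed
 * than the parity can rebuild.
 *
 *	int faila, failb;
 *	int found = get_rbio_veritical_errors(rbio, sector_nr, &faila, &failb);
 *
 *	if (found > btrfs_nr_parity_stripes(rbio->bioc->map_type))
 *		return -EIO;
 *
 * RAID5 tolerates one bad stripe per vertical stripe, RAID6 two.
 */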
1045*75b47033SQu Wenruo 
1046*75b47033SQu Wenruo /*
10473e77605dSQu Wenruo  * Add a single sector @sector into our list of bios for IO.
10483e77605dSQu Wenruo  *
10493e77605dSQu Wenruo  * Return 0 if everything went well.
10503e77605dSQu Wenruo  * Return <0 for error.
105153b381b3SDavid Woodhouse  */
10523e77605dSQu Wenruo static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
105353b381b3SDavid Woodhouse 			      struct bio_list *bio_list,
10543e77605dSQu Wenruo 			      struct sector_ptr *sector,
10553e77605dSQu Wenruo 			      unsigned int stripe_nr,
10563e77605dSQu Wenruo 			      unsigned int sector_nr,
1057bf9486d6SBart Van Assche 			      enum req_op op)
105853b381b3SDavid Woodhouse {
10593e77605dSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
106053b381b3SDavid Woodhouse 	struct bio *last = bio_list->tail;
106153b381b3SDavid Woodhouse 	int ret;
106253b381b3SDavid Woodhouse 	struct bio *bio;
10634c664611SQu Wenruo 	struct btrfs_io_stripe *stripe;
106453b381b3SDavid Woodhouse 	u64 disk_start;
106553b381b3SDavid Woodhouse 
10663e77605dSQu Wenruo 	/*
10673e77605dSQu Wenruo 	 * Note: here stripe_nr has taken device replace into consideration,
10683e77605dSQu Wenruo 	 * thus it can be larger than rbio->real_stripes.
10693e77605dSQu Wenruo 	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
10703e77605dSQu Wenruo 	 */
10713e77605dSQu Wenruo 	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
10723e77605dSQu Wenruo 	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
10733e77605dSQu Wenruo 	ASSERT(sector->page);
10743e77605dSQu Wenruo 
10754c664611SQu Wenruo 	stripe = &rbio->bioc->stripes[stripe_nr];
10763e77605dSQu Wenruo 	disk_start = stripe->physical + sector_nr * sectorsize;
107753b381b3SDavid Woodhouse 
107853b381b3SDavid Woodhouse 	/* if the device is missing, just fail this stripe */
10792942a50dSQu Wenruo 	if (!stripe->dev->bdev) {
10802942a50dSQu Wenruo 		set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
10812942a50dSQu Wenruo 			rbio->error_bitmap);
108253b381b3SDavid Woodhouse 		return fail_rbio_index(rbio, stripe_nr);
10832942a50dSQu Wenruo 	}
108453b381b3SDavid Woodhouse 
108553b381b3SDavid Woodhouse 	/* see if we can add this page onto our existing bio */
108653b381b3SDavid Woodhouse 	if (last) {
10871201b58bSDavid Sterba 		u64 last_end = last->bi_iter.bi_sector << 9;
10884f024f37SKent Overstreet 		last_end += last->bi_iter.bi_size;
108953b381b3SDavid Woodhouse 
109053b381b3SDavid Woodhouse 		/*
109153b381b3SDavid Woodhouse 		 * we can't merge these if they are from different
109253b381b3SDavid Woodhouse 		 * devices or if they are not contiguous
109353b381b3SDavid Woodhouse 		 */
1094f90ae76aSNikolay Borisov 		if (last_end == disk_start && !last->bi_status &&
1095309dca30SChristoph Hellwig 		    last->bi_bdev == stripe->dev->bdev) {
10963e77605dSQu Wenruo 			ret = bio_add_page(last, sector->page, sectorsize,
10973e77605dSQu Wenruo 					   sector->pgoff);
10983e77605dSQu Wenruo 			if (ret == sectorsize)
109953b381b3SDavid Woodhouse 				return 0;
110053b381b3SDavid Woodhouse 		}
110153b381b3SDavid Woodhouse 	}
110253b381b3SDavid Woodhouse 
110353b381b3SDavid Woodhouse 	/* put a new bio on the list */
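	/*
	 * Size the bio for a full stripe: with 4K pages and the 64K
	 * BTRFS_STRIPE_LEN this is 16 bio_vecs, clamped to at least one
	 * vec for larger page sizes.
	 */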
1104ff18a4afSChristoph Hellwig 	bio = bio_alloc(stripe->dev->bdev,
1105ff18a4afSChristoph Hellwig 			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
1106bf9486d6SBart Van Assche 			op, GFP_NOFS);
11074f024f37SKent Overstreet 	bio->bi_iter.bi_sector = disk_start >> 9;
1108e01bf588SChristoph Hellwig 	bio->bi_private = rbio;
110953b381b3SDavid Woodhouse 
11103e77605dSQu Wenruo 	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
111153b381b3SDavid Woodhouse 	bio_list_add(bio_list, bio);
111253b381b3SDavid Woodhouse 	return 0;
111353b381b3SDavid Woodhouse }
111453b381b3SDavid Woodhouse 
111500425dd9SQu Wenruo static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
111600425dd9SQu Wenruo {
111700425dd9SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
111800425dd9SQu Wenruo 	struct bio_vec bvec;
111900425dd9SQu Wenruo 	struct bvec_iter iter;
112000425dd9SQu Wenruo 	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
112100425dd9SQu Wenruo 		     rbio->bioc->raid_map[0];
112200425dd9SQu Wenruo 
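	/*
	 * E.g. (assuming a 4K sector size): a bio starting 8K past
	 * raid_map[0] has offset == 8192, so its first sector indexes
	 * bio_sectors[2], and each further 4K of the bio advances the
	 * index by one.
	 */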
112300425dd9SQu Wenruo 	bio_for_each_segment(bvec, bio, iter) {
112400425dd9SQu Wenruo 		u32 bvec_offset;
112500425dd9SQu Wenruo 
112600425dd9SQu Wenruo 		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
112700425dd9SQu Wenruo 		     bvec_offset += sectorsize, offset += sectorsize) {
112800425dd9SQu Wenruo 			int index = offset / sectorsize;
112900425dd9SQu Wenruo 			struct sector_ptr *sector = &rbio->bio_sectors[index];
113000425dd9SQu Wenruo 
113100425dd9SQu Wenruo 			sector->page = bvec.bv_page;
113200425dd9SQu Wenruo 			sector->pgoff = bvec.bv_offset + bvec_offset;
113300425dd9SQu Wenruo 			ASSERT(sector->pgoff < PAGE_SIZE);
113400425dd9SQu Wenruo 		}
113500425dd9SQu Wenruo 	}
113600425dd9SQu Wenruo }
113700425dd9SQu Wenruo 
113853b381b3SDavid Woodhouse /*
113953b381b3SDavid Woodhouse  * helper function to walk our bio list and populate the bio_sectors array
114053b381b3SDavid Woodhouse  * with the result.  This seems expensive, but it is faster than constantly
114153b381b3SDavid Woodhouse  * searching through the bio list as we set up the IO in finish_rmw or stripe
114253b381b3SDavid Woodhouse  * reconstruction.
114353b381b3SDavid Woodhouse  *
114453b381b3SDavid Woodhouse  * This must be called before you trust the answers from page_in_rbio
114553b381b3SDavid Woodhouse  */
114653b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio)
114753b381b3SDavid Woodhouse {
114853b381b3SDavid Woodhouse 	struct bio *bio;
114953b381b3SDavid Woodhouse 
115053b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
115100425dd9SQu Wenruo 	bio_list_for_each(bio, &rbio->bio_list)
115200425dd9SQu Wenruo 		index_one_bio(rbio, bio);
115300425dd9SQu Wenruo 
115453b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
115553b381b3SDavid Woodhouse }
115653b381b3SDavid Woodhouse 
1157b8bea09aSQu Wenruo static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
1158b8bea09aSQu Wenruo 			       struct raid56_bio_trace_info *trace_info)
1159b8bea09aSQu Wenruo {
1160b8bea09aSQu Wenruo 	const struct btrfs_io_context *bioc = rbio->bioc;
1161b8bea09aSQu Wenruo 	int i;
1162b8bea09aSQu Wenruo 
1163b8bea09aSQu Wenruo 	ASSERT(bioc);
1164b8bea09aSQu Wenruo 
1165b8bea09aSQu Wenruo 	/* We rely on bio->bi_bdev to find the stripe number. */
1166b8bea09aSQu Wenruo 	if (!bio->bi_bdev)
1167b8bea09aSQu Wenruo 		goto not_found;
1168b8bea09aSQu Wenruo 
1169b8bea09aSQu Wenruo 	for (i = 0; i < bioc->num_stripes; i++) {
1170b8bea09aSQu Wenruo 		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
1171b8bea09aSQu Wenruo 			continue;
1172b8bea09aSQu Wenruo 		trace_info->stripe_nr = i;
1173b8bea09aSQu Wenruo 		trace_info->devid = bioc->stripes[i].dev->devid;
1174b8bea09aSQu Wenruo 		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1175b8bea09aSQu Wenruo 				     bioc->stripes[i].physical;
1176b8bea09aSQu Wenruo 		return;
1177b8bea09aSQu Wenruo 	}
1178b8bea09aSQu Wenruo 
1179b8bea09aSQu Wenruo not_found:
1180b8bea09aSQu Wenruo 	trace_info->devid = -1;
1181b8bea09aSQu Wenruo 	trace_info->offset = -1;
1182b8bea09aSQu Wenruo 	trace_info->stripe_nr = -1;
1183b8bea09aSQu Wenruo }
1184b8bea09aSQu Wenruo 
118530e3c897SQu Wenruo /* Generate PQ for one vertical stripe. */
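/*
 * For RAID5 this computes P = D0 ^ D1 ^ ... ^ D(n-1).  For RAID6 the
 * raid6 library fills both P and the Q syndrome (the weighted sum of the
 * data blocks over GF(2^8)).
 */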
118630e3c897SQu Wenruo static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
118730e3c897SQu Wenruo {
118830e3c897SQu Wenruo 	void **pointers = rbio->finish_pointers;
118930e3c897SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
119030e3c897SQu Wenruo 	struct sector_ptr *sector;
119130e3c897SQu Wenruo 	int stripe;
119230e3c897SQu Wenruo 	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;
119330e3c897SQu Wenruo 
119430e3c897SQu Wenruo 	/* First collect one sector from each data stripe */
119530e3c897SQu Wenruo 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
119630e3c897SQu Wenruo 		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
119730e3c897SQu Wenruo 		pointers[stripe] = kmap_local_page(sector->page) +
119830e3c897SQu Wenruo 				   sector->pgoff;
119930e3c897SQu Wenruo 	}
120030e3c897SQu Wenruo 
120130e3c897SQu Wenruo 	/* Then add the parity stripe */
120230e3c897SQu Wenruo 	sector = rbio_pstripe_sector(rbio, sectornr);
120330e3c897SQu Wenruo 	sector->uptodate = 1;
120430e3c897SQu Wenruo 	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;
120530e3c897SQu Wenruo 
120630e3c897SQu Wenruo 	if (has_qstripe) {
120730e3c897SQu Wenruo 		/*
120830e3c897SQu Wenruo 		 * RAID6, add the qstripe and call the library function
120930e3c897SQu Wenruo 		 * to fill in our p/q
121030e3c897SQu Wenruo 		 */
121130e3c897SQu Wenruo 		sector = rbio_qstripe_sector(rbio, sectornr);
121230e3c897SQu Wenruo 		sector->uptodate = 1;
121330e3c897SQu Wenruo 		pointers[stripe++] = kmap_local_page(sector->page) +
121430e3c897SQu Wenruo 				     sector->pgoff;
121530e3c897SQu Wenruo 
121630e3c897SQu Wenruo 		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
121730e3c897SQu Wenruo 					pointers);
121830e3c897SQu Wenruo 	} else {
121930e3c897SQu Wenruo 		/* raid5 */
122030e3c897SQu Wenruo 		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
122130e3c897SQu Wenruo 		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
122230e3c897SQu Wenruo 	}
122330e3c897SQu Wenruo 	for (stripe = stripe - 1; stripe >= 0; stripe--)
122430e3c897SQu Wenruo 		kunmap_local(pointers[stripe]);
122530e3c897SQu Wenruo }
122630e3c897SQu Wenruo 
12276486d21cSQu Wenruo static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
12286486d21cSQu Wenruo 				   struct bio_list *bio_list)
12296486d21cSQu Wenruo {
12306486d21cSQu Wenruo 	struct bio *bio;
12316486d21cSQu Wenruo 	/* The total sector number inside the full stripe. */
12326486d21cSQu Wenruo 	int total_sector_nr;
12336486d21cSQu Wenruo 	int sectornr;
12346486d21cSQu Wenruo 	int stripe;
12356486d21cSQu Wenruo 	int ret;
12366486d21cSQu Wenruo 
12376486d21cSQu Wenruo 	ASSERT(bio_list_size(bio_list) == 0);
12386486d21cSQu Wenruo 
12396486d21cSQu Wenruo 	/* We should have at least one data sector. */
12406486d21cSQu Wenruo 	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));
12416486d21cSQu Wenruo 
12426486d21cSQu Wenruo 	/*
12435eb30ee2SQu Wenruo 	 * Reset errors, as we may have errors inherited from a degraded
12445eb30ee2SQu Wenruo 	 * write.
12455eb30ee2SQu Wenruo 	 */
12465eb30ee2SQu Wenruo 	atomic_set(&rbio->error, 0);
12472942a50dSQu Wenruo 	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
12485eb30ee2SQu Wenruo 	rbio->faila = -1;
12495eb30ee2SQu Wenruo 	rbio->failb = -1;
12505eb30ee2SQu Wenruo 
12515eb30ee2SQu Wenruo 	/*
12526486d21cSQu Wenruo 	 * Start assembly.  Make bios for everything from the higher layers (the
12536486d21cSQu Wenruo 	 * bio_list in our rbio) and our P/Q.  Ignore everything else.
12546486d21cSQu Wenruo 	 */
12556486d21cSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
12566486d21cSQu Wenruo 	     total_sector_nr++) {
12576486d21cSQu Wenruo 		struct sector_ptr *sector;
12586486d21cSQu Wenruo 
12596486d21cSQu Wenruo 		stripe = total_sector_nr / rbio->stripe_nsectors;
12606486d21cSQu Wenruo 		sectornr = total_sector_nr % rbio->stripe_nsectors;
12616486d21cSQu Wenruo 
12626486d21cSQu Wenruo 		/* This vertical stripe has no data, skip it. */
12636486d21cSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
12646486d21cSQu Wenruo 			continue;
12656486d21cSQu Wenruo 
12666486d21cSQu Wenruo 		if (stripe < rbio->nr_data) {
12676486d21cSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
12686486d21cSQu Wenruo 			if (!sector)
12696486d21cSQu Wenruo 				continue;
12706486d21cSQu Wenruo 		} else {
12716486d21cSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
12726486d21cSQu Wenruo 		}
12736486d21cSQu Wenruo 
12746486d21cSQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
12756486d21cSQu Wenruo 					 sectornr, REQ_OP_WRITE);
12766486d21cSQu Wenruo 		if (ret)
12776486d21cSQu Wenruo 			goto error;
12786486d21cSQu Wenruo 	}
12796486d21cSQu Wenruo 
12806486d21cSQu Wenruo 	if (likely(!rbio->bioc->num_tgtdevs))
12816486d21cSQu Wenruo 		return 0;
12826486d21cSQu Wenruo 
12836486d21cSQu Wenruo 	/* Make a copy for the replace target device. */
12846486d21cSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
12856486d21cSQu Wenruo 	     total_sector_nr++) {
12866486d21cSQu Wenruo 		struct sector_ptr *sector;
12876486d21cSQu Wenruo 
12886486d21cSQu Wenruo 		stripe = total_sector_nr / rbio->stripe_nsectors;
12896486d21cSQu Wenruo 		sectornr = total_sector_nr % rbio->stripe_nsectors;
12906486d21cSQu Wenruo 
12916486d21cSQu Wenruo 		if (!rbio->bioc->tgtdev_map[stripe]) {
12926486d21cSQu Wenruo 			/*
12936486d21cSQu Wenruo 			 * We can skip the whole stripe completely; note that
12946486d21cSQu Wenruo 			 * total_sector_nr will be increased by one by the loop anyway.
12956486d21cSQu Wenruo 			 */
12966486d21cSQu Wenruo 			ASSERT(sectornr == 0);
12976486d21cSQu Wenruo 			total_sector_nr += rbio->stripe_nsectors - 1;
12986486d21cSQu Wenruo 			continue;
12996486d21cSQu Wenruo 		}
13006486d21cSQu Wenruo 
13016486d21cSQu Wenruo 		/* This vertical stripe has no data, skip it. */
13026486d21cSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
13036486d21cSQu Wenruo 			continue;
13046486d21cSQu Wenruo 
13056486d21cSQu Wenruo 		if (stripe < rbio->nr_data) {
13066486d21cSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
13076486d21cSQu Wenruo 			if (!sector)
13086486d21cSQu Wenruo 				continue;
13096486d21cSQu Wenruo 		} else {
13106486d21cSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
13116486d21cSQu Wenruo 		}
13126486d21cSQu Wenruo 
13136486d21cSQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector,
13146486d21cSQu Wenruo 					 rbio->bioc->tgtdev_map[stripe],
13156486d21cSQu Wenruo 					 sectornr, REQ_OP_WRITE);
13166486d21cSQu Wenruo 		if (ret)
13176486d21cSQu Wenruo 			goto error;
13186486d21cSQu Wenruo 	}
13196486d21cSQu Wenruo 
13206486d21cSQu Wenruo 	return 0;
13216486d21cSQu Wenruo error:
13226486d21cSQu Wenruo 	while ((bio = bio_list_pop(bio_list)))
13236486d21cSQu Wenruo 		bio_put(bio);
13246486d21cSQu Wenruo 	return -EIO;
13256486d21cSQu Wenruo }
13266486d21cSQu Wenruo 
132753b381b3SDavid Woodhouse /*
132853b381b3SDavid Woodhouse  * helper to find the stripe number for a given bio.  Used to figure out which
132953b381b3SDavid Woodhouse  * stripe has failed.  This expects the bio to correspond to a physical disk,
133053b381b3SDavid Woodhouse  * so it looks up based on physical sector numbers.
133153b381b3SDavid Woodhouse  */
133253b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio,
133353b381b3SDavid Woodhouse 			   struct bio *bio)
133453b381b3SDavid Woodhouse {
13354f024f37SKent Overstreet 	u64 physical = bio->bi_iter.bi_sector;
133653b381b3SDavid Woodhouse 	int i;
13374c664611SQu Wenruo 	struct btrfs_io_stripe *stripe;
133853b381b3SDavid Woodhouse 
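	/* bi_sector is in 512-byte units; convert it to a byte offset. */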
133953b381b3SDavid Woodhouse 	physical <<= 9;
134053b381b3SDavid Woodhouse 
13414c664611SQu Wenruo 	for (i = 0; i < rbio->bioc->num_stripes; i++) {
13424c664611SQu Wenruo 		stripe = &rbio->bioc->stripes[i];
1343ff18a4afSChristoph Hellwig 		if (in_range(physical, stripe->physical, BTRFS_STRIPE_LEN) &&
1344309dca30SChristoph Hellwig 		    stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
134553b381b3SDavid Woodhouse 			return i;
134653b381b3SDavid Woodhouse 		}
134753b381b3SDavid Woodhouse 	}
134853b381b3SDavid Woodhouse 	return -1;
134953b381b3SDavid Woodhouse }
135053b381b3SDavid Woodhouse 
135153b381b3SDavid Woodhouse /*
135253b381b3SDavid Woodhouse  * helper to find the stripe number for a given
135353b381b3SDavid Woodhouse  * bio (before mapping).  Used to figure out which stripe has
135453b381b3SDavid Woodhouse  * failed.  This looks up based on logical block numbers.
135553b381b3SDavid Woodhouse  */
135653b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
135753b381b3SDavid Woodhouse 				   struct bio *bio)
135853b381b3SDavid Woodhouse {
13591201b58bSDavid Sterba 	u64 logical = bio->bi_iter.bi_sector << 9;
136053b381b3SDavid Woodhouse 	int i;
136153b381b3SDavid Woodhouse 
136253b381b3SDavid Woodhouse 	for (i = 0; i < rbio->nr_data; i++) {
13634c664611SQu Wenruo 		u64 stripe_start = rbio->bioc->raid_map[i];
136483025863SNikolay Borisov 
1365ff18a4afSChristoph Hellwig 		if (in_range(logical, stripe_start, BTRFS_STRIPE_LEN))
136653b381b3SDavid Woodhouse 			return i;
136753b381b3SDavid Woodhouse 	}
136853b381b3SDavid Woodhouse 	return -1;
136953b381b3SDavid Woodhouse }
137053b381b3SDavid Woodhouse 
13712942a50dSQu Wenruo static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
13722942a50dSQu Wenruo {
13732942a50dSQu Wenruo 	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
13742942a50dSQu Wenruo 	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
13752942a50dSQu Wenruo 		     rbio->bioc->raid_map[0];
13762942a50dSQu Wenruo 	int total_nr_sector = offset >> fs_info->sectorsize_bits;
13772942a50dSQu Wenruo 
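	/*
	 * Logical offsets map stripe-major onto the data stripes, so with
	 * 4K sectors and 64K stripes a bio starting 68K into the full
	 * stripe begins at error_bitmap bit 17 (stripe 1, sector 1).
	 */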
13782942a50dSQu Wenruo 	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);
13792942a50dSQu Wenruo 
13802942a50dSQu Wenruo 	bitmap_set(rbio->error_bitmap, total_nr_sector,
13812942a50dSQu Wenruo 		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
13822942a50dSQu Wenruo 
13832942a50dSQu Wenruo 	/*
13842942a50dSQu Wenruo 	 * Special handling for raid56_alloc_missing_rbio() used by
13852942a50dSQu Wenruo 	 * scrub/replace.  Unlike the call path in raid56_parity_recover(),
13862942a50dSQu Wenruo 	 * they pass an empty bio here.  Thus we have to find the missing
13872942a50dSQu Wenruo 	 * device and mark the stripe as errored instead.
13882942a50dSQu Wenruo 	 */
13892942a50dSQu Wenruo 	if (bio->bi_iter.bi_size == 0) {
13902942a50dSQu Wenruo 		bool found_missing = false;
13912942a50dSQu Wenruo 		int stripe_nr;
13922942a50dSQu Wenruo 
13932942a50dSQu Wenruo 		for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
13942942a50dSQu Wenruo 			if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
13952942a50dSQu Wenruo 				found_missing = true;
13962942a50dSQu Wenruo 				bitmap_set(rbio->error_bitmap,
13972942a50dSQu Wenruo 					   stripe_nr * rbio->stripe_nsectors,
13982942a50dSQu Wenruo 					   rbio->stripe_nsectors);
13992942a50dSQu Wenruo 			}
14002942a50dSQu Wenruo 		}
14012942a50dSQu Wenruo 		ASSERT(found_missing);
14022942a50dSQu Wenruo 	}
14032942a50dSQu Wenruo }
14042942a50dSQu Wenruo 
140553b381b3SDavid Woodhouse /*
140653b381b3SDavid Woodhouse  * returns -EIO if we had too many failures
140753b381b3SDavid Woodhouse  */
140853b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
140953b381b3SDavid Woodhouse {
141053b381b3SDavid Woodhouse 	unsigned long flags;
141153b381b3SDavid Woodhouse 	int ret = 0;
141253b381b3SDavid Woodhouse 
141353b381b3SDavid Woodhouse 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
141453b381b3SDavid Woodhouse 
141553b381b3SDavid Woodhouse 	/* we already know this stripe is bad, move on */
141653b381b3SDavid Woodhouse 	if (rbio->faila == failed || rbio->failb == failed)
141753b381b3SDavid Woodhouse 		goto out;
141853b381b3SDavid Woodhouse 
141953b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
142053b381b3SDavid Woodhouse 		/* first failure on this rbio */
142153b381b3SDavid Woodhouse 		rbio->faila = failed;
1422b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
142353b381b3SDavid Woodhouse 	} else if (rbio->failb == -1) {
142453b381b3SDavid Woodhouse 		/* second failure on this rbio */
142553b381b3SDavid Woodhouse 		rbio->failb = failed;
1426b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
142753b381b3SDavid Woodhouse 	} else {
142853b381b3SDavid Woodhouse 		ret = -EIO;
142953b381b3SDavid Woodhouse 	}
143053b381b3SDavid Woodhouse out:
143153b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
143253b381b3SDavid Woodhouse 
143353b381b3SDavid Woodhouse 	return ret;
143453b381b3SDavid Woodhouse }
143553b381b3SDavid Woodhouse 
143653b381b3SDavid Woodhouse /*
143753b381b3SDavid Woodhouse  * helper to fail a stripe based on a physical disk
143853b381b3SDavid Woodhouse  * bio.
143953b381b3SDavid Woodhouse  */
144053b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
144153b381b3SDavid Woodhouse 			   struct bio *bio)
144253b381b3SDavid Woodhouse {
144353b381b3SDavid Woodhouse 	int failed = find_bio_stripe(rbio, bio);
144453b381b3SDavid Woodhouse 
144553b381b3SDavid Woodhouse 	if (failed < 0)
144653b381b3SDavid Woodhouse 		return -EIO;
144753b381b3SDavid Woodhouse 
144853b381b3SDavid Woodhouse 	return fail_rbio_index(rbio, failed);
144953b381b3SDavid Woodhouse }
145053b381b3SDavid Woodhouse 
145153b381b3SDavid Woodhouse /*
14525fdb7afcSQu Wenruo  * For the subpage case, we can no longer set the page Uptodate directly for
14535fdb7afcSQu Wenruo  * stripe_pages[], thus we need to locate the sector.
14545fdb7afcSQu Wenruo  */
14555fdb7afcSQu Wenruo static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
14565fdb7afcSQu Wenruo 					     struct page *page,
14575fdb7afcSQu Wenruo 					     unsigned int pgoff)
14585fdb7afcSQu Wenruo {
14595fdb7afcSQu Wenruo 	int i;
14605fdb7afcSQu Wenruo 
14615fdb7afcSQu Wenruo 	for (i = 0; i < rbio->nr_sectors; i++) {
14625fdb7afcSQu Wenruo 		struct sector_ptr *sector = &rbio->stripe_sectors[i];
14635fdb7afcSQu Wenruo 
14645fdb7afcSQu Wenruo 		if (sector->page == page && sector->pgoff == pgoff)
14655fdb7afcSQu Wenruo 			return sector;
14665fdb7afcSQu Wenruo 	}
14675fdb7afcSQu Wenruo 	return NULL;
14685fdb7afcSQu Wenruo }
14695fdb7afcSQu Wenruo 
14705fdb7afcSQu Wenruo /*
147153b381b3SDavid Woodhouse  * this sets each page in the bio uptodate.  It should only be used on private
147253b381b3SDavid Woodhouse  * rbio pages, nothing that comes in from the higher layers
147353b381b3SDavid Woodhouse  */
14745fdb7afcSQu Wenruo static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
147553b381b3SDavid Woodhouse {
14765fdb7afcSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
14770198e5b7SLiu Bo 	struct bio_vec *bvec;
14786dc4f100SMing Lei 	struct bvec_iter_all iter_all;
147953b381b3SDavid Woodhouse 
14800198e5b7SLiu Bo 	ASSERT(!bio_flagged(bio, BIO_CLONED));
14816592e58cSFilipe Manana 
14825fdb7afcSQu Wenruo 	bio_for_each_segment_all(bvec, bio, iter_all) {
14835fdb7afcSQu Wenruo 		struct sector_ptr *sector;
14845fdb7afcSQu Wenruo 		int pgoff;
14855fdb7afcSQu Wenruo 
14865fdb7afcSQu Wenruo 		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
14875fdb7afcSQu Wenruo 		     pgoff += sectorsize) {
14885fdb7afcSQu Wenruo 			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
14895fdb7afcSQu Wenruo 			ASSERT(sector);
14905fdb7afcSQu Wenruo 			if (sector)
14915fdb7afcSQu Wenruo 				sector->uptodate = 1;
14925fdb7afcSQu Wenruo 		}
14935fdb7afcSQu Wenruo 	}
149453b381b3SDavid Woodhouse }
149553b381b3SDavid Woodhouse 
14962942a50dSQu Wenruo static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
14972942a50dSQu Wenruo {
14982942a50dSQu Wenruo 	struct bio_vec *bv = bio_first_bvec_all(bio);
14992942a50dSQu Wenruo 	int i;
15002942a50dSQu Wenruo 
15012942a50dSQu Wenruo 	for (i = 0; i < rbio->nr_sectors; i++) {
15022942a50dSQu Wenruo 		struct sector_ptr *sector;
15032942a50dSQu Wenruo 
15042942a50dSQu Wenruo 		sector = &rbio->stripe_sectors[i];
15052942a50dSQu Wenruo 		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
15062942a50dSQu Wenruo 			break;
15072942a50dSQu Wenruo 		sector = &rbio->bio_sectors[i];
15082942a50dSQu Wenruo 		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
15092942a50dSQu Wenruo 			break;
15102942a50dSQu Wenruo 	}
15112942a50dSQu Wenruo 	ASSERT(i < rbio->nr_sectors);
15122942a50dSQu Wenruo 	return i;
15132942a50dSQu Wenruo }
15142942a50dSQu Wenruo 
15152942a50dSQu Wenruo static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
15162942a50dSQu Wenruo {
15172942a50dSQu Wenruo 	int total_sector_nr = get_bio_sector_nr(rbio, bio);
15182942a50dSQu Wenruo 	u32 bio_size = 0;
15192942a50dSQu Wenruo 	struct bio_vec *bvec;
15202942a50dSQu Wenruo 	struct bvec_iter_all iter_all;
15212942a50dSQu Wenruo 
15222942a50dSQu Wenruo 	bio_for_each_segment_all(bvec, bio, iter_all)
15232942a50dSQu Wenruo 		bio_size += bvec->bv_len;
15242942a50dSQu Wenruo 
15252942a50dSQu Wenruo 	bitmap_set(rbio->error_bitmap, total_sector_nr,
15262942a50dSQu Wenruo 		   bio_size >> rbio->bioc->fs_info->sectorsize_bits);
15272942a50dSQu Wenruo }
15282942a50dSQu Wenruo 
1529d817ce35SQu Wenruo static void raid_wait_read_end_io(struct bio *bio)
1530d817ce35SQu Wenruo {
1531d817ce35SQu Wenruo 	struct btrfs_raid_bio *rbio = bio->bi_private;
1532d817ce35SQu Wenruo 
15332942a50dSQu Wenruo 	if (bio->bi_status) {
1534d817ce35SQu Wenruo 		fail_bio_stripe(rbio, bio);
15352942a50dSQu Wenruo 		rbio_update_error_bitmap(rbio, bio);
15362942a50dSQu Wenruo 	} else {
1537d817ce35SQu Wenruo 		set_bio_pages_uptodate(rbio, bio);
15382942a50dSQu Wenruo 	}
1539d817ce35SQu Wenruo 
1540d817ce35SQu Wenruo 	bio_put(bio);
1541d817ce35SQu Wenruo 	if (atomic_dec_and_test(&rbio->stripes_pending))
1542d817ce35SQu Wenruo 		wake_up(&rbio->io_wait);
1543d817ce35SQu Wenruo }
1544d817ce35SQu Wenruo 
1545d817ce35SQu Wenruo static void submit_read_bios(struct btrfs_raid_bio *rbio,
1546d817ce35SQu Wenruo 			     struct bio_list *bio_list)
1547d817ce35SQu Wenruo {
1548d817ce35SQu Wenruo 	struct bio *bio;
1549d817ce35SQu Wenruo 
1550d817ce35SQu Wenruo 	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
1551d817ce35SQu Wenruo 	while ((bio = bio_list_pop(bio_list))) {
1552d817ce35SQu Wenruo 		bio->bi_end_io = raid_wait_read_end_io;
1553d817ce35SQu Wenruo 
1554d817ce35SQu Wenruo 		if (trace_raid56_scrub_read_recover_enabled()) {
1555d817ce35SQu Wenruo 			struct raid56_bio_trace_info trace_info = { 0 };
1556d817ce35SQu Wenruo 
1557d817ce35SQu Wenruo 			bio_get_trace_info(rbio, bio, &trace_info);
1558d817ce35SQu Wenruo 			trace_raid56_scrub_read_recover(rbio, bio, &trace_info);
1559d817ce35SQu Wenruo 		}
1560d817ce35SQu Wenruo 		submit_bio(bio);
1561d817ce35SQu Wenruo 	}
1562d817ce35SQu Wenruo }
1563d817ce35SQu Wenruo 
1564509c27aaSQu Wenruo static int rmw_assemble_read_bios(struct btrfs_raid_bio *rbio,
1565509c27aaSQu Wenruo 				  struct bio_list *bio_list)
156653b381b3SDavid Woodhouse {
1567550cdeb3SQu Wenruo 	const int nr_data_sectors = rbio->stripe_nsectors * rbio->nr_data;
156853b381b3SDavid Woodhouse 	struct bio *bio;
1569509c27aaSQu Wenruo 	int total_sector_nr;
1570509c27aaSQu Wenruo 	int ret = 0;
157153b381b3SDavid Woodhouse 
1572509c27aaSQu Wenruo 	ASSERT(bio_list_size(bio_list) == 0);
157353b381b3SDavid Woodhouse 
1574550cdeb3SQu Wenruo 	/* Build a list of bios to read all the missing data sectors. */
1575550cdeb3SQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < nr_data_sectors;
1576550cdeb3SQu Wenruo 	     total_sector_nr++) {
15773e77605dSQu Wenruo 		struct sector_ptr *sector;
1578550cdeb3SQu Wenruo 		int stripe = total_sector_nr / rbio->stripe_nsectors;
1579550cdeb3SQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
15803e77605dSQu Wenruo 
158153b381b3SDavid Woodhouse 		/*
1582550cdeb3SQu Wenruo 		 * We want to find all the sectors missing from the rbio and
1583550cdeb3SQu Wenruo 		 * read them from the disk.  If sector_in_rbio() finds a page
1584550cdeb3SQu Wenruo 		 * in the bio list we don't need to read it off the stripe.
158553b381b3SDavid Woodhouse 		 */
15863e77605dSQu Wenruo 		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
15873e77605dSQu Wenruo 		if (sector)
158853b381b3SDavid Woodhouse 			continue;
158953b381b3SDavid Woodhouse 
15903e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, stripe, sectornr);
15914ae10b3aSChris Mason 		/*
1592550cdeb3SQu Wenruo 		 * The bio cache may have handed us an uptodate page.  If so,
1593550cdeb3SQu Wenruo 		 * use it.
15944ae10b3aSChris Mason 		 */
15953e77605dSQu Wenruo 		if (sector->uptodate)
15964ae10b3aSChris Mason 			continue;
15974ae10b3aSChris Mason 
1598509c27aaSQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector,
1599ff18a4afSChristoph Hellwig 			       stripe, sectornr, REQ_OP_READ);
160053b381b3SDavid Woodhouse 		if (ret)
160153b381b3SDavid Woodhouse 			goto cleanup;
160253b381b3SDavid Woodhouse 	}
1603509c27aaSQu Wenruo 	return 0;
1604509c27aaSQu Wenruo 
1605509c27aaSQu Wenruo cleanup:
1606509c27aaSQu Wenruo 	while ((bio = bio_list_pop(bio_list)))
1607509c27aaSQu Wenruo 		bio_put(bio);
1608509c27aaSQu Wenruo 	return ret;
1609509c27aaSQu Wenruo }
1610509c27aaSQu Wenruo 
16115eb30ee2SQu Wenruo static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
16125eb30ee2SQu Wenruo {
16135eb30ee2SQu Wenruo 	const int data_pages = rbio->nr_data * rbio->stripe_npages;
16145eb30ee2SQu Wenruo 	int ret;
16155eb30ee2SQu Wenruo 
16165eb30ee2SQu Wenruo 	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages);
16175eb30ee2SQu Wenruo 	if (ret < 0)
16185eb30ee2SQu Wenruo 		return ret;
16195eb30ee2SQu Wenruo 
16205eb30ee2SQu Wenruo 	index_stripe_sectors(rbio);
16215eb30ee2SQu Wenruo 	return 0;
16225eb30ee2SQu Wenruo }
16235eb30ee2SQu Wenruo 
1624509c27aaSQu Wenruo /*
16256ac0f488SChris Mason  * We use plugging callbacks to collect full stripes.
16266ac0f488SChris Mason  * Any time we get a partial stripe write while plugged
16276ac0f488SChris Mason  * we collect it into a list.  When the unplug comes down,
16286ac0f488SChris Mason  * we sort the list by logical block number and merge
16296ac0f488SChris Mason  * everything we can into the same rbios.
16306ac0f488SChris Mason  */
16316ac0f488SChris Mason struct btrfs_plug_cb {
16326ac0f488SChris Mason 	struct blk_plug_cb cb;
16336ac0f488SChris Mason 	struct btrfs_fs_info *info;
16346ac0f488SChris Mason 	struct list_head rbio_list;
1635385de0efSChristoph Hellwig 	struct work_struct work;
16366ac0f488SChris Mason };
16376ac0f488SChris Mason 
16386ac0f488SChris Mason /*
16396ac0f488SChris Mason  * rbios on the plug list are sorted for easier merging.
16406ac0f488SChris Mason  */
16414f0f586bSSami Tolvanen static int plug_cmp(void *priv, const struct list_head *a,
16424f0f586bSSami Tolvanen 		    const struct list_head *b)
16436ac0f488SChris Mason {
1644214cc184SDavid Sterba 	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
16456ac0f488SChris Mason 						       plug_list);
1646214cc184SDavid Sterba 	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
16476ac0f488SChris Mason 						       plug_list);
16484f024f37SKent Overstreet 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
16494f024f37SKent Overstreet 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
16506ac0f488SChris Mason 
16516ac0f488SChris Mason 	if (a_sector < b_sector)
16526ac0f488SChris Mason 		return -1;
16536ac0f488SChris Mason 	if (a_sector > b_sector)
16546ac0f488SChris Mason 		return 1;
16556ac0f488SChris Mason 	return 0;
16566ac0f488SChris Mason }
16576ac0f488SChris Mason 
165893723095SQu Wenruo static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
16596ac0f488SChris Mason {
166093723095SQu Wenruo 	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
16616ac0f488SChris Mason 	struct btrfs_raid_bio *cur;
16626ac0f488SChris Mason 	struct btrfs_raid_bio *last = NULL;
16636ac0f488SChris Mason 
16646ac0f488SChris Mason 	list_sort(NULL, &plug->rbio_list, plug_cmp);
166593723095SQu Wenruo 
16666ac0f488SChris Mason 	while (!list_empty(&plug->rbio_list)) {
16676ac0f488SChris Mason 		cur = list_entry(plug->rbio_list.next,
16686ac0f488SChris Mason 				 struct btrfs_raid_bio, plug_list);
16696ac0f488SChris Mason 		list_del_init(&cur->plug_list);
16706ac0f488SChris Mason 
16716ac0f488SChris Mason 		if (rbio_is_full(cur)) {
167293723095SQu Wenruo 			/* We have a full stripe, queue it down. */
167393723095SQu Wenruo 			start_async_work(cur, rmw_rbio_work);
16746ac0f488SChris Mason 			continue;
16756ac0f488SChris Mason 		}
16766ac0f488SChris Mason 		if (last) {
16776ac0f488SChris Mason 			if (rbio_can_merge(last, cur)) {
16786ac0f488SChris Mason 				merge_rbio(last, cur);
1679ff2b64a2SQu Wenruo 				free_raid_bio(cur);
16806ac0f488SChris Mason 				continue;
16816ac0f488SChris Mason 			}
168293723095SQu Wenruo 			start_async_work(last, rmw_rbio_work);
16836ac0f488SChris Mason 		}
16846ac0f488SChris Mason 		last = cur;
16856ac0f488SChris Mason 	}
168693723095SQu Wenruo 	if (last)
168793723095SQu Wenruo 		start_async_work(last, rmw_rbio_work);
16886ac0f488SChris Mason 	kfree(plug);
16896ac0f488SChris Mason }
16906ac0f488SChris Mason 
1691bd8f7e62SQu Wenruo /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1692bd8f7e62SQu Wenruo static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1693bd8f7e62SQu Wenruo {
1694bd8f7e62SQu Wenruo 	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1695bd8f7e62SQu Wenruo 	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1696bd8f7e62SQu Wenruo 	const u64 full_stripe_start = rbio->bioc->raid_map[0];
1697bd8f7e62SQu Wenruo 	const u32 orig_len = orig_bio->bi_iter.bi_size;
1698bd8f7e62SQu Wenruo 	const u32 sectorsize = fs_info->sectorsize;
1699bd8f7e62SQu Wenruo 	u64 cur_logical;
1700bd8f7e62SQu Wenruo 
1701bd8f7e62SQu Wenruo 	ASSERT(orig_logical >= full_stripe_start &&
1702bd8f7e62SQu Wenruo 	       orig_logical + orig_len <= full_stripe_start +
1703ff18a4afSChristoph Hellwig 	       rbio->nr_data * BTRFS_STRIPE_LEN);
1704bd8f7e62SQu Wenruo 
1705bd8f7e62SQu Wenruo 	bio_list_add(&rbio->bio_list, orig_bio);
1706bd8f7e62SQu Wenruo 	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1707bd8f7e62SQu Wenruo 
1708bd8f7e62SQu Wenruo 	/* Update the dbitmap. */
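	/*
	 * E.g. with a 4K sector size and 64K stripes (stripe_nsectors == 16),
	 * a 4K write starting 68K into the full stripe sets bit
	 * (68K / 4K) % 16 == 1.
	 */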
1709bd8f7e62SQu Wenruo 	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1710bd8f7e62SQu Wenruo 	     cur_logical += sectorsize) {
1711bd8f7e62SQu Wenruo 		int bit = ((u32)(cur_logical - full_stripe_start) >>
1712bd8f7e62SQu Wenruo 			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;
1713bd8f7e62SQu Wenruo 
1714bd8f7e62SQu Wenruo 		set_bit(bit, &rbio->dbitmap);
1715bd8f7e62SQu Wenruo 	}
1716bd8f7e62SQu Wenruo }
1717bd8f7e62SQu Wenruo 
17186ac0f488SChris Mason /*
171953b381b3SDavid Woodhouse  * our main entry point for writes from the rest of the FS.
172053b381b3SDavid Woodhouse  */
172131683f4aSChristoph Hellwig void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
172253b381b3SDavid Woodhouse {
17236a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
172453b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
17256ac0f488SChris Mason 	struct btrfs_plug_cb *plug = NULL;
17266ac0f488SChris Mason 	struct blk_plug_cb *cb;
172731683f4aSChristoph Hellwig 	int ret = 0;
172853b381b3SDavid Woodhouse 
1729ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
1730af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
173131683f4aSChristoph Hellwig 		ret = PTR_ERR(rbio);
1732f1c29379SChristoph Hellwig 		goto fail;
1733af8e2d1dSMiao Xie 	}
17341b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_WRITE;
1735bd8f7e62SQu Wenruo 	rbio_add_bio(rbio, bio);
17366ac0f488SChris Mason 
17376ac0f488SChris Mason 	/*
173893723095SQu Wenruo 	 * Don't plug on full rbios, just get them out the door
17396ac0f488SChris Mason 	 * as quickly as we can.
17406ac0f488SChris Mason 	 */
174193723095SQu Wenruo 	if (rbio_is_full(rbio))
174293723095SQu Wenruo 		goto queue_rbio;
17436ac0f488SChris Mason 
174493723095SQu Wenruo 	cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
17456ac0f488SChris Mason 	if (cb) {
17466ac0f488SChris Mason 		plug = container_of(cb, struct btrfs_plug_cb, cb);
17476ac0f488SChris Mason 		if (!plug->info) {
17480b246afaSJeff Mahoney 			plug->info = fs_info;
17496ac0f488SChris Mason 			INIT_LIST_HEAD(&plug->rbio_list);
17506ac0f488SChris Mason 		}
17516ac0f488SChris Mason 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
175293723095SQu Wenruo 		return;
175353b381b3SDavid Woodhouse 	}
175493723095SQu Wenruo queue_rbio:
175593723095SQu Wenruo 	/*
175693723095SQu Wenruo 	 * Either we don't have any existing plug, or we're doing a full stripe,
175793723095SQu Wenruo 	 * can queue the rmw work now.
175893723095SQu Wenruo 	 */
175993723095SQu Wenruo 	start_async_work(rbio, rmw_rbio_work);
176031683f4aSChristoph Hellwig 
176131683f4aSChristoph Hellwig 	return;
176231683f4aSChristoph Hellwig 
1763f1c29379SChristoph Hellwig fail:
176431683f4aSChristoph Hellwig 	bio->bi_status = errno_to_blk_status(ret);
176531683f4aSChristoph Hellwig 	bio_endio(bio);
17666ac0f488SChris Mason }
176753b381b3SDavid Woodhouse 
176853b381b3SDavid Woodhouse /*
17699c5ff9b4SQu Wenruo  * Recover a vertical stripe specified by @sector_nr.
17709c5ff9b4SQu Wenruo  * @pointers and @unmap_array are pre-allocated by the caller, so we don't
17719c5ff9b4SQu Wenruo  * need to allocate/free them again and again.
17729c5ff9b4SQu Wenruo  */
1773*75b47033SQu Wenruo static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
17749c5ff9b4SQu Wenruo 			    void **pointers, void **unmap_array)
17759c5ff9b4SQu Wenruo {
17769c5ff9b4SQu Wenruo 	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
17779c5ff9b4SQu Wenruo 	struct sector_ptr *sector;
17789c5ff9b4SQu Wenruo 	const u32 sectorsize = fs_info->sectorsize;
1779*75b47033SQu Wenruo 	int found_errors;
1780*75b47033SQu Wenruo 	int faila;
1781*75b47033SQu Wenruo 	int failb;
17829c5ff9b4SQu Wenruo 	int stripe_nr;
17839c5ff9b4SQu Wenruo 
17849c5ff9b4SQu Wenruo 	/*
17859c5ff9b4SQu Wenruo 	 * Now we just use a bitmap to mark the horizontal stripes in
17869c5ff9b4SQu Wenruo 	 * which we have data when doing parity scrub.
17879c5ff9b4SQu Wenruo 	 */
17889c5ff9b4SQu Wenruo 	if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
17899c5ff9b4SQu Wenruo 	    !test_bit(sector_nr, &rbio->dbitmap))
1790*75b47033SQu Wenruo 		return 0;
1791*75b47033SQu Wenruo 
1792*75b47033SQu Wenruo 	found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
1793*75b47033SQu Wenruo 						 &failb);
1794*75b47033SQu Wenruo 	/*
1795*75b47033SQu Wenruo 	 * No errors in the vertical stripe, skip it.  Can happen for recovery
1796*75b47033SQu Wenruo 	 * where only part of a stripe failed the csum check.
1797*75b47033SQu Wenruo 	 */
1798*75b47033SQu Wenruo 	if (!found_errors)
1799*75b47033SQu Wenruo 		return 0;
1800*75b47033SQu Wenruo 
1801*75b47033SQu Wenruo 	if (found_errors > rbio->bioc->max_errors)
1802*75b47033SQu Wenruo 		return -EIO;
18039c5ff9b4SQu Wenruo 
18049c5ff9b4SQu Wenruo 	/*
18059c5ff9b4SQu Wenruo 	 * Setup our array of pointers with sectors from each stripe
18069c5ff9b4SQu Wenruo 	 *
18079c5ff9b4SQu Wenruo 	 * NOTE: store a duplicate array of pointers to preserve the
18089c5ff9b4SQu Wenruo 	 * pointer order.
18099c5ff9b4SQu Wenruo 	 */
18109c5ff9b4SQu Wenruo 	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
18119c5ff9b4SQu Wenruo 		/*
1812*75b47033SQu Wenruo 		 * If we're rebuilding a read, we have to use pages from the
1813*75b47033SQu Wenruo 		 * bio list if possible.
18149c5ff9b4SQu Wenruo 		 */
18159c5ff9b4SQu Wenruo 		if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1816*75b47033SQu Wenruo 		     rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
18179c5ff9b4SQu Wenruo 			sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
18189c5ff9b4SQu Wenruo 		} else {
18199c5ff9b4SQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
18209c5ff9b4SQu Wenruo 		}
18219c5ff9b4SQu Wenruo 		ASSERT(sector->page);
18229c5ff9b4SQu Wenruo 		pointers[stripe_nr] = kmap_local_page(sector->page) +
18239c5ff9b4SQu Wenruo 				   sector->pgoff;
18249c5ff9b4SQu Wenruo 		unmap_array[stripe_nr] = pointers[stripe_nr];
18259c5ff9b4SQu Wenruo 	}
18269c5ff9b4SQu Wenruo 
18279c5ff9b4SQu Wenruo 	/* All raid6 handling here */
18289c5ff9b4SQu Wenruo 	if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
18299c5ff9b4SQu Wenruo 		/* Single failure, rebuild from parity raid5 style */
18309c5ff9b4SQu Wenruo 		if (failb < 0) {
18319c5ff9b4SQu Wenruo 			if (faila == rbio->nr_data)
18329c5ff9b4SQu Wenruo 				/*
18339c5ff9b4SQu Wenruo 				 * Just the P stripe has failed, without
18349c5ff9b4SQu Wenruo 				 * a bad data or Q stripe.
18359c5ff9b4SQu Wenruo 				 * We have nothing to do, just skip the
18369c5ff9b4SQu Wenruo 				 * recovery for this stripe.
18379c5ff9b4SQu Wenruo 				 */
18389c5ff9b4SQu Wenruo 				goto cleanup;
18399c5ff9b4SQu Wenruo 			/*
18409c5ff9b4SQu Wenruo 			 * a single failure in raid6 is rebuilt
18419c5ff9b4SQu Wenruo 			 * in the pstripe code below
18429c5ff9b4SQu Wenruo 			 */
18439c5ff9b4SQu Wenruo 			goto pstripe;
18449c5ff9b4SQu Wenruo 		}
18459c5ff9b4SQu Wenruo 
18469c5ff9b4SQu Wenruo 		/*
18479c5ff9b4SQu Wenruo 		 * If the q stripe is failed, do a pstripe reconstruction from
18489c5ff9b4SQu Wenruo 		 * the xors.
18499c5ff9b4SQu Wenruo 		 * If both the q stripe and the P stripe are failed, we're
18509c5ff9b4SQu Wenruo 		 * here due to a crc mismatch and we can't give them the
18519c5ff9b4SQu Wenruo 		 * data they want.
18529c5ff9b4SQu Wenruo 		 */
18539c5ff9b4SQu Wenruo 		if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
18549c5ff9b4SQu Wenruo 			if (rbio->bioc->raid_map[faila] ==
18559c5ff9b4SQu Wenruo 			    RAID5_P_STRIPE)
18569c5ff9b4SQu Wenruo 				/*
18579c5ff9b4SQu Wenruo 				 * Only P and Q are corrupted.
18589c5ff9b4SQu Wenruo 				 * We only care about data stripes recovery,
18599c5ff9b4SQu Wenruo 				 * can skip this vertical stripe.
18609c5ff9b4SQu Wenruo 				 */
18619c5ff9b4SQu Wenruo 				goto cleanup;
18629c5ff9b4SQu Wenruo 			/*
18639c5ff9b4SQu Wenruo 			 * Otherwise we have one bad data stripe and
18649c5ff9b4SQu Wenruo 			 * a good P stripe.  raid5!
18659c5ff9b4SQu Wenruo 			 */
18669c5ff9b4SQu Wenruo 			goto pstripe;
18679c5ff9b4SQu Wenruo 		}
18689c5ff9b4SQu Wenruo 
18699c5ff9b4SQu Wenruo 		if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
18709c5ff9b4SQu Wenruo 			raid6_datap_recov(rbio->real_stripes, sectorsize,
18719c5ff9b4SQu Wenruo 					  faila, pointers);
18729c5ff9b4SQu Wenruo 		} else {
18739c5ff9b4SQu Wenruo 			raid6_2data_recov(rbio->real_stripes, sectorsize,
18749c5ff9b4SQu Wenruo 					  faila, failb, pointers);
18759c5ff9b4SQu Wenruo 		}
18769c5ff9b4SQu Wenruo 	} else {
18779c5ff9b4SQu Wenruo 		void *p;
18789c5ff9b4SQu Wenruo 
18799c5ff9b4SQu Wenruo 		/* Rebuild from P stripe here (raid5 or raid6). */
18809c5ff9b4SQu Wenruo 		ASSERT(failb == -1);
18819c5ff9b4SQu Wenruo pstripe:
18829c5ff9b4SQu Wenruo 		/* Copy parity block into failed block to start with */
18839c5ff9b4SQu Wenruo 		memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
18849c5ff9b4SQu Wenruo 
18859c5ff9b4SQu Wenruo 		/* Rearrange the pointer array */
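		/*
		 * After the shift the repaired block sits at index
		 * nr_data - 1, so run_xor() below xors every remaining data
		 * block into it, reconstructing the lost data.
		 */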
18869c5ff9b4SQu Wenruo 		p = pointers[faila];
18879c5ff9b4SQu Wenruo 		for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
18889c5ff9b4SQu Wenruo 		     stripe_nr++)
18899c5ff9b4SQu Wenruo 			pointers[stripe_nr] = pointers[stripe_nr + 1];
18909c5ff9b4SQu Wenruo 		pointers[rbio->nr_data - 1] = p;
18919c5ff9b4SQu Wenruo 
18929c5ff9b4SQu Wenruo 		/* Xor in the rest */
18939c5ff9b4SQu Wenruo 		run_xor(pointers, rbio->nr_data - 1, sectorsize);
18949c5ff9b4SQu Wenruo 
18959c5ff9b4SQu Wenruo 	}
18969c5ff9b4SQu Wenruo 
18979c5ff9b4SQu Wenruo 	/*
18989c5ff9b4SQu Wenruo 	 * No matter if this is a RMW or recovery, we should have all
18999c5ff9b4SQu Wenruo 	 * failed sectors repaired in the vertical stripe, thus they are now
19009c5ff9b4SQu Wenruo 	 * uptodate.
19019c5ff9b4SQu Wenruo 	 * Especially if we determine to cache the rbio, we need to
19029c5ff9b4SQu Wenruo 	 * have at least all data sectors uptodate.
19039c5ff9b4SQu Wenruo 	 */
1904*75b47033SQu Wenruo 	if (faila >= 0) {
1905*75b47033SQu Wenruo 		sector = rbio_stripe_sector(rbio, faila, sector_nr);
19069c5ff9b4SQu Wenruo 		sector->uptodate = 1;
19079c5ff9b4SQu Wenruo 	}
1908*75b47033SQu Wenruo 	if (failb >= 0) {
1909*75b47033SQu Wenruo 		sector = rbio_stripe_sector(rbio, failb, sector_nr);
19109c5ff9b4SQu Wenruo 		sector->uptodate = 1;
19119c5ff9b4SQu Wenruo 	}
19129c5ff9b4SQu Wenruo 
19139c5ff9b4SQu Wenruo cleanup:
19149c5ff9b4SQu Wenruo 	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
19159c5ff9b4SQu Wenruo 		kunmap_local(unmap_array[stripe_nr]);
1916*75b47033SQu Wenruo 	return 0;
19179c5ff9b4SQu Wenruo }
19189c5ff9b4SQu Wenruo 
1919ec936b03SQu Wenruo static int recover_sectors(struct btrfs_raid_bio *rbio)
192053b381b3SDavid Woodhouse {
19219c5ff9b4SQu Wenruo 	void **pointers = NULL;
19229c5ff9b4SQu Wenruo 	void **unmap_array = NULL;
1923ec936b03SQu Wenruo 	int sectornr;
1924ec936b03SQu Wenruo 	int ret = 0;
192553b381b3SDavid Woodhouse 
192607e4d380SQu Wenruo 	/*
1927ec936b03SQu Wenruo 	 * @pointers array stores the pointer for each sector.
1928ec936b03SQu Wenruo 	 *
1929ec936b03SQu Wenruo 	 * @unmap_array stores copy of pointers that does not get reordered
1930ec936b03SQu Wenruo 	 * during reconstruction so that kunmap_local works.
193107e4d380SQu Wenruo 	 */
193231e818feSDavid Sterba 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
193394a0b58dSIra Weiny 	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1934ec936b03SQu Wenruo 	if (!pointers || !unmap_array) {
1935ec936b03SQu Wenruo 		ret = -ENOMEM;
1936ec936b03SQu Wenruo 		goto out;
193794a0b58dSIra Weiny 	}
193894a0b58dSIra Weiny 
1939b4ee1782SOmar Sandoval 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1940b4ee1782SOmar Sandoval 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
194153b381b3SDavid Woodhouse 		spin_lock_irq(&rbio->bio_list_lock);
194253b381b3SDavid Woodhouse 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
194353b381b3SDavid Woodhouse 		spin_unlock_irq(&rbio->bio_list_lock);
194453b381b3SDavid Woodhouse 	}
194553b381b3SDavid Woodhouse 
194653b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
194753b381b3SDavid Woodhouse 
1948*75b47033SQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
1949*75b47033SQu Wenruo 		ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
1950*75b47033SQu Wenruo 		if (ret < 0)
1951*75b47033SQu Wenruo 			break;
1952*75b47033SQu Wenruo 	}
195353b381b3SDavid Woodhouse 
1954ec936b03SQu Wenruo out:
195553b381b3SDavid Woodhouse 	kfree(pointers);
1956ec936b03SQu Wenruo 	kfree(unmap_array);
1957ec936b03SQu Wenruo 	return ret;
1958ec936b03SQu Wenruo }
1959ec936b03SQu Wenruo 
1960d31968d9SQu Wenruo static int recover_assemble_read_bios(struct btrfs_raid_bio *rbio,
1961d31968d9SQu Wenruo 				      struct bio_list *bio_list)
196253b381b3SDavid Woodhouse {
196353b381b3SDavid Woodhouse 	struct bio *bio;
1964d31968d9SQu Wenruo 	int total_sector_nr;
1965d31968d9SQu Wenruo 	int ret = 0;
196653b381b3SDavid Woodhouse 
1967d31968d9SQu Wenruo 	ASSERT(bio_list_size(bio_list) == 0);
196853b381b3SDavid Woodhouse 	/*
1969f6065f8eSQu Wenruo 	 * Read everything that hasn't failed.  However, this time we will
1970f6065f8eSQu Wenruo 	 * not trust any cached sector, as the cache may hold stale data in
1971f6065f8eSQu Wenruo 	 * parts of the stripe that the higher layer is not reading.
1972f6065f8eSQu Wenruo 	 *
1973f6065f8eSQu Wenruo 	 * So here we always re-read everything in the recovery path.
197553b381b3SDavid Woodhouse 	 */
1976ef340fccSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1977ef340fccSQu Wenruo 	     total_sector_nr++) {
1978ef340fccSQu Wenruo 		int stripe = total_sector_nr / rbio->stripe_nsectors;
1979ef340fccSQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
19803e77605dSQu Wenruo 		struct sector_ptr *sector;
198153b381b3SDavid Woodhouse 
1982*75b47033SQu Wenruo 		/*
1983*75b47033SQu Wenruo 		 * Skip the range which has error.  It can be a range which is
1984*75b47033SQu Wenruo 		 * marked error (for csum mismatch), or it can be a missing
1985*75b47033SQu Wenruo 		 * device.
1986*75b47033SQu Wenruo 		 */
1987*75b47033SQu Wenruo 		if (!rbio->bioc->stripes[stripe].dev->bdev ||
1988*75b47033SQu Wenruo 		    test_bit(total_sector_nr, rbio->error_bitmap)) {
1989*75b47033SQu Wenruo 			/*
1990*75b47033SQu Wenruo 			 * Also set the error bit for missing device, which
1991*75b47033SQu Wenruo 			 * may not yet have its error bit set.
1992*75b47033SQu Wenruo 			 */
1993*75b47033SQu Wenruo 			set_bit(total_sector_nr, rbio->error_bitmap);
199453b381b3SDavid Woodhouse 			continue;
1995ef340fccSQu Wenruo 		}
1996*75b47033SQu Wenruo 
199753b381b3SDavid Woodhouse 		sector = rbio_stripe_sector(rbio, stripe, sectornr);
1998d31968d9SQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
1999ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_READ);
200053b381b3SDavid Woodhouse 		if (ret < 0)
2001d31968d9SQu Wenruo 			goto error;
200253b381b3SDavid Woodhouse 	}
2003d31968d9SQu Wenruo 	return 0;
2004d31968d9SQu Wenruo error:
2005d31968d9SQu Wenruo 	while ((bio = bio_list_pop(bio_list)))
2006d31968d9SQu Wenruo 		bio_put(bio);
2007d31968d9SQu Wenruo 
2008d31968d9SQu Wenruo 	return -EIO;
2009d31968d9SQu Wenruo }
2010d31968d9SQu Wenruo 
2011d817ce35SQu Wenruo static int recover_rbio(struct btrfs_raid_bio *rbio)
2012d817ce35SQu Wenruo {
2013d817ce35SQu Wenruo 	struct bio_list bio_list;
2014d817ce35SQu Wenruo 	struct bio *bio;
2015d817ce35SQu Wenruo 	int ret;
2016d817ce35SQu Wenruo 
2017d817ce35SQu Wenruo 	/*
2018d817ce35SQu Wenruo 	 * Either we're doing recovery for a read failure or a degraded write;
2019*75b47033SQu Wenruo 	 * the caller should have set the error bitmap correctly.
2020d817ce35SQu Wenruo 	 */
20212942a50dSQu Wenruo 	ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));
2022d817ce35SQu Wenruo 	bio_list_init(&bio_list);
2023d817ce35SQu Wenruo 
2024d817ce35SQu Wenruo 	/*
2025d817ce35SQu Wenruo 	 * Reset error to 0, as we will later increase error for missing
2026d817ce35SQu Wenruo 	 * devices.
2027d817ce35SQu Wenruo 	 */
2028d817ce35SQu Wenruo 	atomic_set(&rbio->error, 0);
2029d817ce35SQu Wenruo 
2030d817ce35SQu Wenruo 	/* For recovery, we need to read all sectors including P/Q. */
2031d817ce35SQu Wenruo 	ret = alloc_rbio_pages(rbio);
2032d817ce35SQu Wenruo 	if (ret < 0)
2033d817ce35SQu Wenruo 		goto out;
2034d817ce35SQu Wenruo 
2035d817ce35SQu Wenruo 	index_rbio_pages(rbio);
2036d817ce35SQu Wenruo 
2037d817ce35SQu Wenruo 	ret = recover_assemble_read_bios(rbio, &bio_list);
2038d817ce35SQu Wenruo 	if (ret < 0)
2039d817ce35SQu Wenruo 		goto out;
2040d817ce35SQu Wenruo 
2041d817ce35SQu Wenruo 	submit_read_bios(rbio, &bio_list);
2042d817ce35SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2043d817ce35SQu Wenruo 
2044d817ce35SQu Wenruo 	ret = recover_sectors(rbio);
2045d817ce35SQu Wenruo 
2046d817ce35SQu Wenruo out:
2047d817ce35SQu Wenruo 	while ((bio = bio_list_pop(&bio_list)))
2048d817ce35SQu Wenruo 		bio_put(bio);
2049d817ce35SQu Wenruo 
2050d817ce35SQu Wenruo 	return ret;
2051d817ce35SQu Wenruo }
2052d817ce35SQu Wenruo 
2053d817ce35SQu Wenruo static void recover_rbio_work(struct work_struct *work)
2054d817ce35SQu Wenruo {
2055d817ce35SQu Wenruo 	struct btrfs_raid_bio *rbio;
2056d817ce35SQu Wenruo 	int ret;
2057d817ce35SQu Wenruo 
2058d817ce35SQu Wenruo 	rbio = container_of(work, struct btrfs_raid_bio, work);
2059d817ce35SQu Wenruo 
2060d817ce35SQu Wenruo 	ret = lock_stripe_add(rbio);
2061d817ce35SQu Wenruo 	if (ret == 0) {
2062d817ce35SQu Wenruo 		ret = recover_rbio(rbio);
2063d817ce35SQu Wenruo 		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2064d817ce35SQu Wenruo 	}
2065d817ce35SQu Wenruo }
2066d817ce35SQu Wenruo 
2067d817ce35SQu Wenruo static void recover_rbio_work_locked(struct work_struct *work)
2068d817ce35SQu Wenruo {
2069d817ce35SQu Wenruo 	struct btrfs_raid_bio *rbio;
2070d817ce35SQu Wenruo 	int ret;
2071d817ce35SQu Wenruo 
2072d817ce35SQu Wenruo 	rbio = container_of(work, struct btrfs_raid_bio, work);
2073d817ce35SQu Wenruo 
2074d817ce35SQu Wenruo 	ret = recover_rbio(rbio);
2075d817ce35SQu Wenruo 	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2076d817ce35SQu Wenruo }
2077d817ce35SQu Wenruo 
2078*75b47033SQu Wenruo static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
2079*75b47033SQu Wenruo {
2080*75b47033SQu Wenruo 	bool found = false;
2081*75b47033SQu Wenruo 	int sector_nr;
2082*75b47033SQu Wenruo 
2083*75b47033SQu Wenruo 	/*
2084*75b47033SQu Wenruo 	 * This is for RAID6 extra recovery tries, thus the mirror number
2085*75b47033SQu Wenruo 	 * should be larger than 2.
2086*75b47033SQu Wenruo 	 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
2087*75b47033SQu Wenruo 	 * RAID5 methods.
2088*75b47033SQu Wenruo 	 */
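	/*
	 * E.g. a 6-disk RAID6 (4 data + P + Q) read with mirror_num == 3
	 * gives failb = 6 - 2 = 4, additionally failing the P stripe
	 * (shifted down by one if it would collide with faila).
	 */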
2089*75b47033SQu Wenruo 	ASSERT(mirror_num > 2);
2090*75b47033SQu Wenruo 	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2091*75b47033SQu Wenruo 		int found_errors;
2092*75b47033SQu Wenruo 		int faila;
2093*75b47033SQu Wenruo 		int failb;
2094*75b47033SQu Wenruo 
2095*75b47033SQu Wenruo 		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2096*75b47033SQu Wenruo 							 &faila, &failb);
2097*75b47033SQu Wenruo 		/* This vertical stripe doesn't have errors. */
2098*75b47033SQu Wenruo 		if (!found_errors)
2099*75b47033SQu Wenruo 			continue;
2100*75b47033SQu Wenruo 
2101*75b47033SQu Wenruo 		/*
2102*75b47033SQu Wenruo 		 * If we found errors, there should be only one error marked
2103*75b47033SQu Wenruo 		 * by previous set_rbio_range_error().
2104*75b47033SQu Wenruo 		 */
2105*75b47033SQu Wenruo 		ASSERT(found_errors == 1);
2106*75b47033SQu Wenruo 		found = true;
2107*75b47033SQu Wenruo 
2108*75b47033SQu Wenruo 		/* Now select another stripe to mark as error. */
2109*75b47033SQu Wenruo 		failb = rbio->real_stripes - (mirror_num - 1);
2110*75b47033SQu Wenruo 		if (failb <= faila)
2111*75b47033SQu Wenruo 			failb--;
2112*75b47033SQu Wenruo 
2113*75b47033SQu Wenruo 		/* Set the extra bit in error bitmap. */
2114*75b47033SQu Wenruo 		if (failb >= 0)
2115*75b47033SQu Wenruo 			set_bit(failb * rbio->stripe_nsectors + sector_nr,
2116*75b47033SQu Wenruo 				rbio->error_bitmap);
2117*75b47033SQu Wenruo 	}
2118*75b47033SQu Wenruo 
2119*75b47033SQu Wenruo 	/* We should have found at least one vertical stripe with an error. */
2120*75b47033SQu Wenruo 	ASSERT(found);
2121*75b47033SQu Wenruo }
2122*75b47033SQu Wenruo 
2123d31968d9SQu Wenruo /*
212453b381b3SDavid Woodhouse  * the main entry point for reads from the higher layers.  This
212553b381b3SDavid Woodhouse  * is really only called when the normal read path had a failure,
212653b381b3SDavid Woodhouse  * so we assume the bio they send down corresponds to a failed part
212753b381b3SDavid Woodhouse  * of the drive.
212853b381b3SDavid Woodhouse  */
21296065fd95SChristoph Hellwig void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
2130f1c29379SChristoph Hellwig 			   int mirror_num)
213153b381b3SDavid Woodhouse {
21326a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
213353b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
213453b381b3SDavid Woodhouse 
2135ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
2136af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
21376065fd95SChristoph Hellwig 		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
2138d817ce35SQu Wenruo 		bio_endio(bio);
2139d817ce35SQu Wenruo 		return;
2140af8e2d1dSMiao Xie 	}
214153b381b3SDavid Woodhouse 
21421b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2143bd8f7e62SQu Wenruo 	rbio_add_bio(rbio, bio);
214453b381b3SDavid Woodhouse 
21452942a50dSQu Wenruo 	set_rbio_range_error(rbio, bio);
21462942a50dSQu Wenruo 
214753b381b3SDavid Woodhouse 	rbio->faila = find_logical_bio_stripe(rbio, bio);
214853b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
21490b246afaSJeff Mahoney 		btrfs_warn(fs_info,
21504c664611SQu Wenruo "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bioc has map_type %llu)",
21511201b58bSDavid Sterba 			   __func__, bio->bi_iter.bi_sector << 9,
21524c664611SQu Wenruo 			   (u64)bio->bi_iter.bi_size, bioc->map_type);
2153ff2b64a2SQu Wenruo 		free_raid_bio(rbio);
21546065fd95SChristoph Hellwig 		bio->bi_status = BLK_STS_IOERR;
2155d817ce35SQu Wenruo 		bio_endio(bio);
2156d817ce35SQu Wenruo 		return;
215753b381b3SDavid Woodhouse 	}
215853b381b3SDavid Woodhouse 
215953b381b3SDavid Woodhouse 	/*
21608810f751SLiu Bo 	 * Loop retry:
21618810f751SLiu Bo 	 * for 'mirror == 2', reconstruct from all other stripes.
21628810f751SLiu Bo 	 * for 'mirror_num > 2', select a stripe to fail on every retry.
216353b381b3SDavid Woodhouse 	 */
21648810f751SLiu Bo 	if (mirror_num > 2) {
2165*75b47033SQu Wenruo 		set_rbio_raid6_extra_error(rbio, mirror_num);
21668810f751SLiu Bo 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
21678810f751SLiu Bo 		ASSERT(rbio->failb > 0);
21688810f751SLiu Bo 		if (rbio->failb <= rbio->faila)
21698810f751SLiu Bo 			rbio->failb--;
21708810f751SLiu Bo 	}
217153b381b3SDavid Woodhouse 
2172d817ce35SQu Wenruo 	start_async_work(rbio, recover_rbio_work);
217353b381b3SDavid Woodhouse }
217453b381b3SDavid Woodhouse 
21755eb30ee2SQu Wenruo static int rmw_read_and_wait(struct btrfs_raid_bio *rbio)
21765eb30ee2SQu Wenruo {
21775eb30ee2SQu Wenruo 	struct bio_list bio_list;
21785eb30ee2SQu Wenruo 	struct bio *bio;
21795eb30ee2SQu Wenruo 	int ret;
21805eb30ee2SQu Wenruo 
21815eb30ee2SQu Wenruo 	bio_list_init(&bio_list);
21825eb30ee2SQu Wenruo 	atomic_set(&rbio->error, 0);
21835eb30ee2SQu Wenruo 
21845eb30ee2SQu Wenruo 	ret = rmw_assemble_read_bios(rbio, &bio_list);
21855eb30ee2SQu Wenruo 	if (ret < 0)
21865eb30ee2SQu Wenruo 		goto out;
21875eb30ee2SQu Wenruo 
21885eb30ee2SQu Wenruo 	submit_read_bios(rbio, &bio_list);
21895eb30ee2SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
21905eb30ee2SQu Wenruo 	return ret;
21915eb30ee2SQu Wenruo out:
21925eb30ee2SQu Wenruo 	while ((bio = bio_list_pop(&bio_list)))
21935eb30ee2SQu Wenruo 		bio_put(bio);
21945eb30ee2SQu Wenruo 
21955eb30ee2SQu Wenruo 	return ret;
21965eb30ee2SQu Wenruo }
21975eb30ee2SQu Wenruo 
21985eb30ee2SQu Wenruo static void raid_wait_write_end_io(struct bio *bio)
21995eb30ee2SQu Wenruo {
22005eb30ee2SQu Wenruo 	struct btrfs_raid_bio *rbio = bio->bi_private;
22015eb30ee2SQu Wenruo 	blk_status_t err = bio->bi_status;
22025eb30ee2SQu Wenruo 
22032942a50dSQu Wenruo 	if (err) {
22045eb30ee2SQu Wenruo 		fail_bio_stripe(rbio, bio);
22052942a50dSQu Wenruo 		rbio_update_error_bitmap(rbio, bio);
22062942a50dSQu Wenruo 	}
22075eb30ee2SQu Wenruo 	bio_put(bio);
22085eb30ee2SQu Wenruo 	if (atomic_dec_and_test(&rbio->stripes_pending))
22095eb30ee2SQu Wenruo 		wake_up(&rbio->io_wait);
22105eb30ee2SQu Wenruo }
22115eb30ee2SQu Wenruo 
22125eb30ee2SQu Wenruo static void submit_write_bios(struct btrfs_raid_bio *rbio,
22135eb30ee2SQu Wenruo 			      struct bio_list *bio_list)
22145eb30ee2SQu Wenruo {
22155eb30ee2SQu Wenruo 	struct bio *bio;
22165eb30ee2SQu Wenruo 
22175eb30ee2SQu Wenruo 	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
22185eb30ee2SQu Wenruo 	while ((bio = bio_list_pop(bio_list))) {
22195eb30ee2SQu Wenruo 		bio->bi_end_io = raid_wait_write_end_io;
22205eb30ee2SQu Wenruo 
22215eb30ee2SQu Wenruo 		if (trace_raid56_write_stripe_enabled()) {
22225eb30ee2SQu Wenruo 			struct raid56_bio_trace_info trace_info = { 0 };
22235eb30ee2SQu Wenruo 
22245eb30ee2SQu Wenruo 			bio_get_trace_info(rbio, bio, &trace_info);
22255eb30ee2SQu Wenruo 			trace_raid56_write_stripe(rbio, bio, &trace_info);
22265eb30ee2SQu Wenruo 		}
22275eb30ee2SQu Wenruo 		submit_bio(bio);
22285eb30ee2SQu Wenruo 	}
22295eb30ee2SQu Wenruo }
22305eb30ee2SQu Wenruo 
223193723095SQu Wenruo static int rmw_rbio(struct btrfs_raid_bio *rbio)
22325eb30ee2SQu Wenruo {
22335eb30ee2SQu Wenruo 	struct bio_list bio_list;
22345eb30ee2SQu Wenruo 	int sectornr;
22355eb30ee2SQu Wenruo 	int ret = 0;
22365eb30ee2SQu Wenruo 
22375eb30ee2SQu Wenruo 	/*
22385eb30ee2SQu Wenruo 	 * Allocate the pages for parity first, as P/Q pages will always be
22395eb30ee2SQu Wenruo 	 * needed for both full-stripe and sub-stripe writes.
22405eb30ee2SQu Wenruo 	 */
22415eb30ee2SQu Wenruo 	ret = alloc_rbio_parity_pages(rbio);
22425eb30ee2SQu Wenruo 	if (ret < 0)
22435eb30ee2SQu Wenruo 		return ret;
22445eb30ee2SQu Wenruo 
22455eb30ee2SQu Wenruo 	/* Full stripe write, can write the full stripe right now. */
22465eb30ee2SQu Wenruo 	if (rbio_is_full(rbio))
22475eb30ee2SQu Wenruo 		goto write;
22485eb30ee2SQu Wenruo 	/*
22495eb30ee2SQu Wenruo 	 * This is a sub-stripe write, so we also need all the data stripes
22505eb30ee2SQu Wenruo 	 * to do the full RMW.
22515eb30ee2SQu Wenruo 	 */
22525eb30ee2SQu Wenruo 	ret = alloc_rbio_data_pages(rbio);
22535eb30ee2SQu Wenruo 	if (ret < 0)
22545eb30ee2SQu Wenruo 		return ret;
22555eb30ee2SQu Wenruo 
22565eb30ee2SQu Wenruo 	atomic_set(&rbio->error, 0);
22575eb30ee2SQu Wenruo 	index_rbio_pages(rbio);
22585eb30ee2SQu Wenruo 
22595eb30ee2SQu Wenruo 	ret = rmw_read_and_wait(rbio);
22605eb30ee2SQu Wenruo 	if (ret < 0)
22615eb30ee2SQu Wenruo 		return ret;
22625eb30ee2SQu Wenruo 
22635eb30ee2SQu Wenruo 	/* Too many read errors, beyond our tolerance. */
22645eb30ee2SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
22655eb30ee2SQu Wenruo 		return -EIO;
22665eb30ee2SQu Wenruo 
22675eb30ee2SQu Wenruo 	/* Have read failures but under tolerance, needs recovery. */
22685eb30ee2SQu Wenruo 	if (rbio->faila >= 0 || rbio->failb >= 0) {
22695eb30ee2SQu Wenruo 		ret = recover_rbio(rbio);
22705eb30ee2SQu Wenruo 		if (ret < 0)
22715eb30ee2SQu Wenruo 			return ret;
22725eb30ee2SQu Wenruo 	}
22735eb30ee2SQu Wenruo write:
22745eb30ee2SQu Wenruo 	/*
22755eb30ee2SQu Wenruo 	 * At this stage we're not allowed to add any new bios to the
22765eb30ee2SQu Wenruo 	 * bio list any more; anyone else who wants to change this stripe
22775eb30ee2SQu Wenruo 	 * needs to do their own RMW.
22785eb30ee2SQu Wenruo 	 */
22795eb30ee2SQu Wenruo 	spin_lock_irq(&rbio->bio_list_lock);
22805eb30ee2SQu Wenruo 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
22815eb30ee2SQu Wenruo 	spin_unlock_irq(&rbio->bio_list_lock);
22825eb30ee2SQu Wenruo 
22835eb30ee2SQu Wenruo 	atomic_set(&rbio->error, 0);
22842942a50dSQu Wenruo 	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
22855eb30ee2SQu Wenruo 
22865eb30ee2SQu Wenruo 	index_rbio_pages(rbio);
22875eb30ee2SQu Wenruo 
22885eb30ee2SQu Wenruo 	/*
22895eb30ee2SQu Wenruo 	 * We don't cache full rbios because we're assuming
22905eb30ee2SQu Wenruo 	 * the higher layers are unlikely to use this area of
22915eb30ee2SQu Wenruo 	 * the disk again soon.  If they do use it again,
22925eb30ee2SQu Wenruo 	 * hopefully they will send another full bio.
22935eb30ee2SQu Wenruo 	 */
22945eb30ee2SQu Wenruo 	if (!rbio_is_full(rbio))
22955eb30ee2SQu Wenruo 		cache_rbio_pages(rbio);
22965eb30ee2SQu Wenruo 	else
22975eb30ee2SQu Wenruo 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
22985eb30ee2SQu Wenruo 
22995eb30ee2SQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
23005eb30ee2SQu Wenruo 		generate_pq_vertical(rbio, sectornr);
23015eb30ee2SQu Wenruo 
23025eb30ee2SQu Wenruo 	bio_list_init(&bio_list);
23035eb30ee2SQu Wenruo 	ret = rmw_assemble_write_bios(rbio, &bio_list);
23045eb30ee2SQu Wenruo 	if (ret < 0)
23055eb30ee2SQu Wenruo 		return ret;
23065eb30ee2SQu Wenruo 
23075eb30ee2SQu Wenruo 	/* We should have at least one bio assembled. */
23085eb30ee2SQu Wenruo 	ASSERT(bio_list_size(&bio_list));
23095eb30ee2SQu Wenruo 	submit_write_bios(rbio, &bio_list);
23105eb30ee2SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
23115eb30ee2SQu Wenruo 
23125eb30ee2SQu Wenruo 	/* We have more errors than our tolerance during the write. */
23135eb30ee2SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
23145eb30ee2SQu Wenruo 		ret = -EIO;
23155eb30ee2SQu Wenruo 	return ret;
23165eb30ee2SQu Wenruo }
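
/*
 * For reference, the per-sector parity step above (generate_pq_vertical())
 * reduces, for RAID5, to a sketch like the following, assuming pointers[]
 * holds one mapped sector from each stripe (illustrative only, names as in
 * finish_parity_scrub() below):
 *
 *	memcpy(pointers[nr_data], pointers[0], sectorsize);
 *	run_xor(pointers + 1, nr_data - 1, sectorsize);
 *
 * i.e. P = D0 ^ D1 ^ ... ^ D(nr_data - 1).  The RAID6 variant fills both
 * P and Q via raid6_call.gen_syndrome() instead.
 */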
23175eb30ee2SQu Wenruo 
231893723095SQu Wenruo static void rmw_rbio_work(struct work_struct *work)
231953b381b3SDavid Woodhouse {
232053b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
232193723095SQu Wenruo 	int ret;
232253b381b3SDavid Woodhouse 
232353b381b3SDavid Woodhouse 	rbio = container_of(work, struct btrfs_raid_bio, work);
232493723095SQu Wenruo 
232593723095SQu Wenruo 	ret = lock_stripe_add(rbio);
232693723095SQu Wenruo 	if (ret == 0) {
232793723095SQu Wenruo 		ret = rmw_rbio(rbio);
232893723095SQu Wenruo 		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
232993723095SQu Wenruo 	}
233093723095SQu Wenruo }
233193723095SQu Wenruo 
233293723095SQu Wenruo static void rmw_rbio_work_locked(struct work_struct *work)
233393723095SQu Wenruo {
233493723095SQu Wenruo 	struct btrfs_raid_bio *rbio;
233593723095SQu Wenruo 	int ret;
233693723095SQu Wenruo 
233793723095SQu Wenruo 	rbio = container_of(work, struct btrfs_raid_bio, work);
233893723095SQu Wenruo 
233993723095SQu Wenruo 	ret = rmw_rbio(rbio);
234093723095SQu Wenruo 	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
234153b381b3SDavid Woodhouse }
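
/*
 * The two work handlers above differ only in locking: rmw_rbio_work()
 * still has to win the full stripe lock (lock_stripe_add() returning 0
 * means we own the stripe; non-zero means the rbio was merged or queued
 * behind the current holder and will be started later), while
 * rmw_rbio_work_locked() is only ever scheduled with the lock already
 * held.
 */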
234253b381b3SDavid Woodhouse 
23435a6ac9eaSMiao Xie /*
23445a6ac9eaSMiao Xie  * The following code is used to scrub/replace the parity stripe
23455a6ac9eaSMiao Xie  *
23464c664611SQu Wenruo  * Caller must have already increased bio_counter for getting @bioc.
2347ae6529c3SQu Wenruo  *
23485a6ac9eaSMiao Xie  * Note: We must make sure all the pages added to the scrub/replace raid
23495a6ac9eaSMiao Xie  * bio are correct and will not be changed during the scrub/replace.  That
23505a6ac9eaSMiao Xie  * is, those pages hold only metadata or file data with checksum.
23515a6ac9eaSMiao Xie  */
23525a6ac9eaSMiao Xie 
23536a258d72SQu Wenruo struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
23546a258d72SQu Wenruo 				struct btrfs_io_context *bioc,
2355ff18a4afSChristoph Hellwig 				struct btrfs_device *scrub_dev,
23565a6ac9eaSMiao Xie 				unsigned long *dbitmap, int stripe_nsectors)
23575a6ac9eaSMiao Xie {
23586a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
23595a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
23605a6ac9eaSMiao Xie 	int i;
23615a6ac9eaSMiao Xie 
2362ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
23635a6ac9eaSMiao Xie 	if (IS_ERR(rbio))
23645a6ac9eaSMiao Xie 		return NULL;
23655a6ac9eaSMiao Xie 	bio_list_add(&rbio->bio_list, bio);
23665a6ac9eaSMiao Xie 	/*
23675a6ac9eaSMiao Xie 	 * This is a special bio which is used to hold the completion handler
23685a6ac9eaSMiao Xie 	 * and make the scrub rbio similar to the other types.
23695a6ac9eaSMiao Xie 	 */
23705a6ac9eaSMiao Xie 	ASSERT(!bio->bi_iter.bi_size);
23715a6ac9eaSMiao Xie 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
23725a6ac9eaSMiao Xie 
23739cd3a7ebSLiu Bo 	/*
23744c664611SQu Wenruo 	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
23759cd3a7ebSLiu Bo 	 * to the end position, so this search can start from the first parity
23769cd3a7ebSLiu Bo 	 * stripe.
23779cd3a7ebSLiu Bo 	 */
23789cd3a7ebSLiu Bo 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
23794c664611SQu Wenruo 		if (bioc->stripes[i].dev == scrub_dev) {
23805a6ac9eaSMiao Xie 			rbio->scrubp = i;
23815a6ac9eaSMiao Xie 			break;
23825a6ac9eaSMiao Xie 		}
23835a6ac9eaSMiao Xie 	}
23849cd3a7ebSLiu Bo 	ASSERT(i < rbio->real_stripes);
23855a6ac9eaSMiao Xie 
2386c67c68ebSQu Wenruo 	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
23875a6ac9eaSMiao Xie 	return rbio;
23885a6ac9eaSMiao Xie }
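
/*
 * Example for @dbitmap (hypothetical numbers): with stripe_nsectors == 16,
 * a dbitmap of 0x00ff selects only the first 8 sector positions of the
 * stripe for this scrub; sectors outside the bitmap are neither read nor
 * rewritten.
 */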
23895a6ac9eaSMiao Xie 
2390b4ee1782SOmar Sandoval /* Used for both parity scrub and missing-device rebuild. */
2391b4ee1782SOmar Sandoval void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
23926346f6bfSQu Wenruo 			    unsigned int pgoff, u64 logical)
23935a6ac9eaSMiao Xie {
23946346f6bfSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
23955a6ac9eaSMiao Xie 	int stripe_offset;
23965a6ac9eaSMiao Xie 	int index;
23975a6ac9eaSMiao Xie 
23984c664611SQu Wenruo 	ASSERT(logical >= rbio->bioc->raid_map[0]);
23996346f6bfSQu Wenruo 	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
2400ff18a4afSChristoph Hellwig 				       BTRFS_STRIPE_LEN * rbio->nr_data);
24014c664611SQu Wenruo 	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
24026346f6bfSQu Wenruo 	index = stripe_offset / sectorsize;
24036346f6bfSQu Wenruo 	rbio->bio_sectors[index].page = page;
24046346f6bfSQu Wenruo 	rbio->bio_sectors[index].pgoff = pgoff;
24055a6ac9eaSMiao Xie }
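
/*
 * Index arithmetic example (hypothetical numbers): with a 4K sectorsize
 * and raid_map[0] == 1M, a page holding the sector at logical 1M + 40K
 * gets stripe_offset == 40K and lands in bio_sectors[10].  The ASSERTs
 * bound the logical address to the data portion of the full stripe,
 * nr_data * BTRFS_STRIPE_LEN bytes starting at raid_map[0].
 */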
24065a6ac9eaSMiao Xie 
24075a6ac9eaSMiao Xie /*
24085a6ac9eaSMiao Xie  * We only scrub the parity for which we have correct data on the same
24095a6ac9eaSMiao Xie  * horizontal stripe, so we don't need to allocate pages for all the stripes.
24105a6ac9eaSMiao Xie  */
24115a6ac9eaSMiao Xie static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
24125a6ac9eaSMiao Xie {
24133907ce29SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2414aee35e4bSQu Wenruo 	int total_sector_nr;
24155a6ac9eaSMiao Xie 
2416aee35e4bSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2417aee35e4bSQu Wenruo 	     total_sector_nr++) {
24183907ce29SQu Wenruo 		struct page *page;
2419aee35e4bSQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
2420aee35e4bSQu Wenruo 		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
24213907ce29SQu Wenruo 
2422aee35e4bSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
2423aee35e4bSQu Wenruo 			continue;
24245a6ac9eaSMiao Xie 		if (rbio->stripe_pages[index])
24255a6ac9eaSMiao Xie 			continue;
2426b0ee5e1eSDavid Sterba 		page = alloc_page(GFP_NOFS);
24275a6ac9eaSMiao Xie 		if (!page)
24285a6ac9eaSMiao Xie 			return -ENOMEM;
24295a6ac9eaSMiao Xie 		rbio->stripe_pages[index] = page;
24305a6ac9eaSMiao Xie 	}
2431eb357060SQu Wenruo 	index_stripe_sectors(rbio);
24325a6ac9eaSMiao Xie 	return 0;
24335a6ac9eaSMiao Xie }
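
/*
 * Decomposition example for the loop above (hypothetical numbers): with
 * 4K sectors and 64K stripes (stripe_nsectors == 16), total_sector_nr 37
 * is sectornr 37 % 16 == 5 of stripe 37 / 16 == 2, and with 4K pages maps
 * to stripe_pages[(37 * 4096) >> 12] == stripe_pages[37].  Only on larger
 * page sizes do several sectors share one stripe page.
 */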
24345a6ac9eaSMiao Xie 
24356bfd0133SQu Wenruo static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check)
24365a6ac9eaSMiao Xie {
24374c664611SQu Wenruo 	struct btrfs_io_context *bioc = rbio->bioc;
243846900662SQu Wenruo 	const u32 sectorsize = bioc->fs_info->sectorsize;
24391389053eSKees Cook 	void **pointers = rbio->finish_pointers;
2440c67c68ebSQu Wenruo 	unsigned long *pbitmap = &rbio->finish_pbitmap;
24415a6ac9eaSMiao Xie 	int nr_data = rbio->nr_data;
24425a6ac9eaSMiao Xie 	int stripe;
24433e77605dSQu Wenruo 	int sectornr;
2444c17af965SDavid Sterba 	bool has_qstripe;
244546900662SQu Wenruo 	struct sector_ptr p_sector = { 0 };
244646900662SQu Wenruo 	struct sector_ptr q_sector = { 0 };
24475a6ac9eaSMiao Xie 	struct bio_list bio_list;
24485a6ac9eaSMiao Xie 	struct bio *bio;
244976035976SMiao Xie 	int is_replace = 0;
24505a6ac9eaSMiao Xie 	int ret;
24515a6ac9eaSMiao Xie 
24525a6ac9eaSMiao Xie 	bio_list_init(&bio_list);
24535a6ac9eaSMiao Xie 
2454c17af965SDavid Sterba 	if (rbio->real_stripes - rbio->nr_data == 1)
2455c17af965SDavid Sterba 		has_qstripe = false;
2456c17af965SDavid Sterba 	else if (rbio->real_stripes - rbio->nr_data == 2)
2457c17af965SDavid Sterba 		has_qstripe = true;
2458c17af965SDavid Sterba 	else
24595a6ac9eaSMiao Xie 		BUG();
24605a6ac9eaSMiao Xie 
24614c664611SQu Wenruo 	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
246276035976SMiao Xie 		is_replace = 1;
2463c67c68ebSQu Wenruo 		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
246476035976SMiao Xie 	}
246576035976SMiao Xie 
24665a6ac9eaSMiao Xie 	/*
24675a6ac9eaSMiao Xie 	 * The higher layers (the scrubber) are unlikely to use this
24685a6ac9eaSMiao Xie 	 * area of the disk again soon, so don't cache it.
24705a6ac9eaSMiao Xie 	 */
24715a6ac9eaSMiao Xie 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
24725a6ac9eaSMiao Xie 
24735a6ac9eaSMiao Xie 	if (!need_check)
24745a6ac9eaSMiao Xie 		goto writeback;
24755a6ac9eaSMiao Xie 
247646900662SQu Wenruo 	p_sector.page = alloc_page(GFP_NOFS);
247746900662SQu Wenruo 	if (!p_sector.page)
24786bfd0133SQu Wenruo 		return -ENOMEM;
247946900662SQu Wenruo 	p_sector.pgoff = 0;
248046900662SQu Wenruo 	p_sector.uptodate = 1;
24815a6ac9eaSMiao Xie 
2482c17af965SDavid Sterba 	if (has_qstripe) {
2483d70cef0dSIra Weiny 		/* RAID6, allocate and map temp space for the Q stripe */
248446900662SQu Wenruo 		q_sector.page = alloc_page(GFP_NOFS);
248546900662SQu Wenruo 		if (!q_sector.page) {
248646900662SQu Wenruo 			__free_page(p_sector.page);
248746900662SQu Wenruo 			p_sector.page = NULL;
24886bfd0133SQu Wenruo 			return -ENOMEM;
24895a6ac9eaSMiao Xie 		}
249046900662SQu Wenruo 		q_sector.pgoff = 0;
249146900662SQu Wenruo 		q_sector.uptodate = 1;
249246900662SQu Wenruo 		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
24935a6ac9eaSMiao Xie 	}
24945a6ac9eaSMiao Xie 
24955a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
24962942a50dSQu Wenruo 	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
24975a6ac9eaSMiao Xie 
2498d70cef0dSIra Weiny 	/* Map the parity stripe just once */
249946900662SQu Wenruo 	pointers[nr_data] = kmap_local_page(p_sector.page);
2500d70cef0dSIra Weiny 
2501c67c68ebSQu Wenruo 	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
250246900662SQu Wenruo 		struct sector_ptr *sector;
25035a6ac9eaSMiao Xie 		void *parity;
250446900662SQu Wenruo 
25055a6ac9eaSMiao Xie 		/* First collect one sector from each data stripe. */
25065a6ac9eaSMiao Xie 		for (stripe = 0; stripe < nr_data; stripe++) {
250746900662SQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
250846900662SQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
250946900662SQu Wenruo 					   sector->pgoff;
25105a6ac9eaSMiao Xie 		}
25115a6ac9eaSMiao Xie 
2512c17af965SDavid Sterba 		if (has_qstripe) {
2513d70cef0dSIra Weiny 			/* RAID6, call the library function to fill in our P/Q */
251446900662SQu Wenruo 			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
25155a6ac9eaSMiao Xie 						pointers);
25165a6ac9eaSMiao Xie 		} else {
25175a6ac9eaSMiao Xie 			/* raid5 */
251846900662SQu Wenruo 			memcpy(pointers[nr_data], pointers[0], sectorsize);
251946900662SQu Wenruo 			run_xor(pointers + 1, nr_data - 1, sectorsize);
25205a6ac9eaSMiao Xie 		}
25215a6ac9eaSMiao Xie 
252201327610SNicholas D Steeves 		/* Check scrubbing parity and repair it */
252346900662SQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
252446900662SQu Wenruo 		parity = kmap_local_page(sector->page) + sector->pgoff;
252546900662SQu Wenruo 		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
252646900662SQu Wenruo 			memcpy(parity, pointers[rbio->scrubp], sectorsize);
25275a6ac9eaSMiao Xie 		else
25285a6ac9eaSMiao Xie 			/* Parity is right, no writeback needed. */
2529c67c68ebSQu Wenruo 			bitmap_clear(&rbio->dbitmap, sectornr, 1);
253058c1a35cSIra Weiny 		kunmap_local(parity);
25315a6ac9eaSMiao Xie 
253294a0b58dSIra Weiny 		for (stripe = nr_data - 1; stripe >= 0; stripe--)
253394a0b58dSIra Weiny 			kunmap_local(pointers[stripe]);
25345a6ac9eaSMiao Xie 	}
25355a6ac9eaSMiao Xie 
253694a0b58dSIra Weiny 	kunmap_local(pointers[nr_data]);
253746900662SQu Wenruo 	__free_page(p_sector.page);
253846900662SQu Wenruo 	p_sector.page = NULL;
253946900662SQu Wenruo 	if (q_sector.page) {
254094a0b58dSIra Weiny 		kunmap_local(pointers[rbio->real_stripes - 1]);
254146900662SQu Wenruo 		__free_page(q_sector.page);
254246900662SQu Wenruo 		q_sector.page = NULL;
2543d70cef0dSIra Weiny 	}
25445a6ac9eaSMiao Xie 
25455a6ac9eaSMiao Xie writeback:
25465a6ac9eaSMiao Xie 	/*
25475a6ac9eaSMiao Xie 	 * Time to start writing.  Make bios for everything from the
25485a6ac9eaSMiao Xie 	 * higher layers (the bio_list in our rbio) and our P/Q.  Ignore
25495a6ac9eaSMiao Xie 	 * everything else.
25505a6ac9eaSMiao Xie 	 */
2551c67c68ebSQu Wenruo 	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
25523e77605dSQu Wenruo 		struct sector_ptr *sector;
25535a6ac9eaSMiao Xie 
25543e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25553e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
2556ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_WRITE);
25575a6ac9eaSMiao Xie 		if (ret)
25585a6ac9eaSMiao Xie 			goto cleanup;
25595a6ac9eaSMiao Xie 	}
25605a6ac9eaSMiao Xie 
256176035976SMiao Xie 	if (!is_replace)
256276035976SMiao Xie 		goto submit_write;
256376035976SMiao Xie 
25643e77605dSQu Wenruo 	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
25653e77605dSQu Wenruo 		struct sector_ptr *sector;
256676035976SMiao Xie 
25673e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25683e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector,
25694c664611SQu Wenruo 				       bioc->tgtdev_map[rbio->scrubp],
2570ff18a4afSChristoph Hellwig 				       sectornr, REQ_OP_WRITE);
257176035976SMiao Xie 		if (ret)
257276035976SMiao Xie 			goto cleanup;
257376035976SMiao Xie 	}
257476035976SMiao Xie 
257576035976SMiao Xie submit_write:
25766bfd0133SQu Wenruo 	submit_write_bios(rbio, &bio_list);
25776bfd0133SQu Wenruo 	return 0;
25785a6ac9eaSMiao Xie 
25795a6ac9eaSMiao Xie cleanup:
2580785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2581785884fcSLiu Bo 		bio_put(bio);
25826bfd0133SQu Wenruo 	return ret;
25835a6ac9eaSMiao Xie }
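
/*
 * The check-and-repair step above reduces, for RAID5, to recomputing P
 * from the data sectors and rewriting it only on mismatch.  A minimal
 * sketch (illustrative, assuming three mapped 4K data sectors d0-d2 and
 * the on-disk parity old_p):
 *
 *	void *srcs[] = { d1, d2, new_p };
 *
 *	memcpy(new_p, d0, 4096);
 *	run_xor(srcs, 2, 4096);
 *	if (memcmp(old_p, new_p, 4096) != 0)
 *		memcpy(old_p, new_p, 4096);	(stale parity, rewrite it)
 *	else
 *		bitmap_clear(&rbio->dbitmap, sectornr, 1);	(skip writeback)
 */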
25845a6ac9eaSMiao Xie 
25855a6ac9eaSMiao Xie static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
25865a6ac9eaSMiao Xie {
25875a6ac9eaSMiao Xie 	if (stripe >= 0 && stripe < rbio->nr_data)
25885a6ac9eaSMiao Xie 		return 1;
25895a6ac9eaSMiao Xie 	return 0;
25905a6ac9eaSMiao Xie }
25915a6ac9eaSMiao Xie 
25926bfd0133SQu Wenruo static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
25935a6ac9eaSMiao Xie {
2594*75b47033SQu Wenruo 	void **pointers = NULL;
2595*75b47033SQu Wenruo 	void **unmap_array = NULL;
2596*75b47033SQu Wenruo 	int sector_nr;
25976bfd0133SQu Wenruo 	int ret;
25986bfd0133SQu Wenruo 
25995a6ac9eaSMiao Xie 	/*
2600*75b47033SQu Wenruo 	 * @pointers array stores the pointer for each sector.
2601*75b47033SQu Wenruo 	 *
2602*75b47033SQu Wenruo 	 * @unmap_array stores a copy of the pointers that does not get reordered
2603*75b47033SQu Wenruo 	 * during reconstruction so that kunmap_local works.
26045a6ac9eaSMiao Xie 	 */
2605*75b47033SQu Wenruo 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2606*75b47033SQu Wenruo 	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2607*75b47033SQu Wenruo 	if (!pointers || !unmap_array) {
2608*75b47033SQu Wenruo 		ret = -ENOMEM;
2609*75b47033SQu Wenruo 		goto out;
2610*75b47033SQu Wenruo 	}
26115a6ac9eaSMiao Xie 
2612*75b47033SQu Wenruo 	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2613*75b47033SQu Wenruo 		int dfail = 0, failp = -1;
2614*75b47033SQu Wenruo 		int faila;
2615*75b47033SQu Wenruo 		int failb;
2616*75b47033SQu Wenruo 		int found_errors;
2617*75b47033SQu Wenruo 
2618*75b47033SQu Wenruo 		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2619*75b47033SQu Wenruo 							 &faila, &failb);
2620*75b47033SQu Wenruo 		if (found_errors > rbio->bioc->max_errors) {
2621*75b47033SQu Wenruo 			ret = -EIO;
2622*75b47033SQu Wenruo 			goto out;
2623*75b47033SQu Wenruo 		}
2624*75b47033SQu Wenruo 		if (found_errors == 0)
2625*75b47033SQu Wenruo 			continue;
2626*75b47033SQu Wenruo 
2627*75b47033SQu Wenruo 		/* We should have at least one error here. */
2628*75b47033SQu Wenruo 		ASSERT(faila >= 0 || failb >= 0);
2629*75b47033SQu Wenruo 
2630*75b47033SQu Wenruo 		if (is_data_stripe(rbio, faila))
2631*75b47033SQu Wenruo 			dfail++;
2632*75b47033SQu Wenruo 		else if (is_parity_stripe(faila))
2633*75b47033SQu Wenruo 			failp = faila;
2634*75b47033SQu Wenruo 
2635*75b47033SQu Wenruo 		if (is_data_stripe(rbio, failb))
2636*75b47033SQu Wenruo 			dfail++;
2637*75b47033SQu Wenruo 		else if (is_parity_stripe(failb))
2638*75b47033SQu Wenruo 			failp = failb;
26395a6ac9eaSMiao Xie 		/*
2640*75b47033SQu Wenruo 		 * Because we cannot use the parity being scrubbed to repair
2641*75b47033SQu Wenruo 		 * the data, our repair capability is reduced by one.  (In the
2642*75b47033SQu Wenruo 		 * case of RAID5, we cannot repair anything.)
2643*75b47033SQu Wenruo 		 */
2644*75b47033SQu Wenruo 		if (dfail > rbio->bioc->max_errors - 1) {
2645*75b47033SQu Wenruo 			ret = -EIO;
2646*75b47033SQu Wenruo 			goto out;
2647*75b47033SQu Wenruo 		}
2648*75b47033SQu Wenruo 		/*
2649*75b47033SQu Wenruo 		 * If all the data is good and only the parity is bad, just
2650*75b47033SQu Wenruo 		 * repair the parity; there is no need to recover data stripes.
26515a6ac9eaSMiao Xie 		 */
26526bfd0133SQu Wenruo 		if (dfail == 0)
2653*75b47033SQu Wenruo 			continue;
26545a6ac9eaSMiao Xie 
26555a6ac9eaSMiao Xie 		/*
26565a6ac9eaSMiao Xie 		 * Here we have one corrupted data stripe and one corrupted
2657*75b47033SQu Wenruo 		 * parity on RAID6.  If the corrupted parity is the one being
2658*75b47033SQu Wenruo 		 * scrubbed, we can luckily use the other parity to repair the
2659*75b47033SQu Wenruo 		 * data; otherwise we cannot repair the data stripe.
26605a6ac9eaSMiao Xie 		 */
2661*75b47033SQu Wenruo 		if (failp != rbio->scrubp) {
2662*75b47033SQu Wenruo 			ret = -EIO;
2663*75b47033SQu Wenruo 			goto out;
2664*75b47033SQu Wenruo 		}
26655a6ac9eaSMiao Xie 
2666*75b47033SQu Wenruo 		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
2667*75b47033SQu Wenruo 		if (ret < 0)
2668*75b47033SQu Wenruo 			goto out;
2669*75b47033SQu Wenruo 	}
2670*75b47033SQu Wenruo out:
2671*75b47033SQu Wenruo 	kfree(pointers);
2672*75b47033SQu Wenruo 	kfree(unmap_array);
26736bfd0133SQu Wenruo 	return ret;
26745a6ac9eaSMiao Xie }
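
/*
 * Per-sector decision summary for the loop above:
 *
 *	found_errors > max_errors  -> -EIO, beyond any tolerance
 *	found_errors == 0          -> nothing to do for this sector
 *	dfail > max_errors - 1     -> -EIO, the parity being scrubbed cannot
 *	                              help with repair, so one tolerance slot
 *	                              is already spent
 *	dfail == 0                 -> parity-only damage, the parity will be
 *	                              rewritten by finish_parity_scrub()
 *	failp != rbio->scrubp      -> -EIO, the trustworthy parity is the
 *	                              corrupted one
 *	otherwise                  -> recover_vertical() rebuilds the sector
 */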
26755a6ac9eaSMiao Xie 
2676cb3450b7SQu Wenruo static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio,
2677cb3450b7SQu Wenruo 				    struct bio_list *bio_list)
26785a6ac9eaSMiao Xie {
26795a6ac9eaSMiao Xie 	struct bio *bio;
2680cb3450b7SQu Wenruo 	int total_sector_nr;
2681cb3450b7SQu Wenruo 	int ret = 0;
26825a6ac9eaSMiao Xie 
2683cb3450b7SQu Wenruo 	ASSERT(bio_list_size(bio_list) == 0);
2684785884fcSLiu Bo 
26851c10702eSQu Wenruo 	/* Build a list of bios to read all the missing parts. */
26861c10702eSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
26871c10702eSQu Wenruo 	     total_sector_nr++) {
26881c10702eSQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
26891c10702eSQu Wenruo 		int stripe = total_sector_nr / rbio->stripe_nsectors;
26903e77605dSQu Wenruo 		struct sector_ptr *sector;
26911c10702eSQu Wenruo 
26921c10702eSQu Wenruo 		/* No data in the vertical stripe, no need to read. */
26931c10702eSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
26941c10702eSQu Wenruo 			continue;
26951c10702eSQu Wenruo 
26965a6ac9eaSMiao Xie 		/*
26971c10702eSQu Wenruo 		 * We want to find all the sectors missing from the rbio and
26981c10702eSQu Wenruo 		 * read them from the disk. If sector_in_rbio() finds a sector
26991c10702eSQu Wenruo 		 * in the bio list we don't need to read it off the stripe.
27005a6ac9eaSMiao Xie 		 */
27013e77605dSQu Wenruo 		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
27023e77605dSQu Wenruo 		if (sector)
27035a6ac9eaSMiao Xie 			continue;
27045a6ac9eaSMiao Xie 
27053e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, stripe, sectornr);
27065a6ac9eaSMiao Xie 		/*
27071c10702eSQu Wenruo 		 * The bio cache may have handed us an uptodate sector.  If so,
27081c10702eSQu Wenruo 		 * use it.
27095a6ac9eaSMiao Xie 		 */
27103e77605dSQu Wenruo 		if (sector->uptodate)
27115a6ac9eaSMiao Xie 			continue;
27125a6ac9eaSMiao Xie 
2713cb3450b7SQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
2714ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_READ);
27155a6ac9eaSMiao Xie 		if (ret)
2716cb3450b7SQu Wenruo 			goto error;
27175a6ac9eaSMiao Xie 	}
2718cb3450b7SQu Wenruo 	return 0;
2719cb3450b7SQu Wenruo error:
2720cb3450b7SQu Wenruo 	while ((bio = bio_list_pop(bio_list)))
2721cb3450b7SQu Wenruo 		bio_put(bio);
2722cb3450b7SQu Wenruo 	return ret;
2723cb3450b7SQu Wenruo }
2724cb3450b7SQu Wenruo 
27256bfd0133SQu Wenruo static int scrub_rbio(struct btrfs_raid_bio *rbio)
2726cb3450b7SQu Wenruo {
27276bfd0133SQu Wenruo 	bool need_check = false;
2728cb3450b7SQu Wenruo 	struct bio_list bio_list;
2729cb3450b7SQu Wenruo 	int ret;
2730cb3450b7SQu Wenruo 	struct bio *bio;
2731cb3450b7SQu Wenruo 
2732cb3450b7SQu Wenruo 	bio_list_init(&bio_list);
2733cb3450b7SQu Wenruo 
2734cb3450b7SQu Wenruo 	ret = alloc_rbio_essential_pages(rbio);
2735cb3450b7SQu Wenruo 	if (ret)
2736cb3450b7SQu Wenruo 		goto cleanup;
2737cb3450b7SQu Wenruo 
2738cb3450b7SQu Wenruo 	atomic_set(&rbio->error, 0);
27392942a50dSQu Wenruo 	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
27402942a50dSQu Wenruo 
2741cb3450b7SQu Wenruo 	ret = scrub_assemble_read_bios(rbio, &bio_list);
2742cb3450b7SQu Wenruo 	if (ret < 0)
2743cb3450b7SQu Wenruo 		goto cleanup;
27445a6ac9eaSMiao Xie 
27456bfd0133SQu Wenruo 	submit_read_bios(rbio, &bio_list);
27466bfd0133SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
27476bfd0133SQu Wenruo 
2748*75b47033SQu Wenruo 	/* We may have some failures, recover the failed sectors first. */
27496bfd0133SQu Wenruo 	ret = recover_scrub_rbio(rbio);
27506bfd0133SQu Wenruo 	if (ret < 0)
27516bfd0133SQu Wenruo 		goto cleanup;
27526bfd0133SQu Wenruo 
27535a6ac9eaSMiao Xie 	/*
27546bfd0133SQu Wenruo 	 * We have every sector properly prepared.  We can now finish the
27556bfd0133SQu Wenruo 	 * scrub and write back the good content.
27565a6ac9eaSMiao Xie 	 */
27576bfd0133SQu Wenruo 	ret = finish_parity_scrub(rbio, need_check);
27586bfd0133SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
27596bfd0133SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
27606bfd0133SQu Wenruo 		ret = -EIO;
27616bfd0133SQu Wenruo 	return ret;
27625a6ac9eaSMiao Xie 
27635a6ac9eaSMiao Xie cleanup:
2764785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2765785884fcSLiu Bo 		bio_put(bio);
2766785884fcSLiu Bo 
27676bfd0133SQu Wenruo 	return ret;
27685a6ac9eaSMiao Xie }
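
/*
 * End to end, scrub_rbio() therefore: allocates pages for the sectors on
 * the dbitmap, reads every sector missing from the rbio, repairs any
 * vertical stripe whose errors stay within tolerance, then recomputes the
 * scrubbed parity and writes it back only where it differs from what is
 * on disk.
 */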
27695a6ac9eaSMiao Xie 
27706bfd0133SQu Wenruo static void scrub_rbio_work_locked(struct work_struct *work)
27715a6ac9eaSMiao Xie {
27725a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
27736bfd0133SQu Wenruo 	int ret;
27745a6ac9eaSMiao Xie 
27755a6ac9eaSMiao Xie 	rbio = container_of(work, struct btrfs_raid_bio, work);
27766bfd0133SQu Wenruo 	ret = scrub_rbio(rbio);
27776bfd0133SQu Wenruo 	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
27785a6ac9eaSMiao Xie }
27795a6ac9eaSMiao Xie 
27805a6ac9eaSMiao Xie void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
27815a6ac9eaSMiao Xie {
27825a6ac9eaSMiao Xie 	if (!lock_stripe_add(rbio))
27836bfd0133SQu Wenruo 		start_async_work(rbio, scrub_rbio_work_locked);
27845a6ac9eaSMiao Xie }
2785b4ee1782SOmar Sandoval 
2786b4ee1782SOmar Sandoval /* The following code is used for dev replace of a missing RAID 5/6 device. */
2787b4ee1782SOmar Sandoval 
2788b4ee1782SOmar Sandoval struct btrfs_raid_bio *
2789ff18a4afSChristoph Hellwig raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc)
2790b4ee1782SOmar Sandoval {
27916a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
2792b4ee1782SOmar Sandoval 	struct btrfs_raid_bio *rbio;
2793b4ee1782SOmar Sandoval 
2794ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
2795b4ee1782SOmar Sandoval 	if (IS_ERR(rbio))
2796b4ee1782SOmar Sandoval 		return NULL;
2797b4ee1782SOmar Sandoval 
2798b4ee1782SOmar Sandoval 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2799b4ee1782SOmar Sandoval 	bio_list_add(&rbio->bio_list, bio);
2800b4ee1782SOmar Sandoval 	/*
2801b4ee1782SOmar Sandoval 	 * This is a special bio which is used to hold the completion handler
2802b4ee1782SOmar Sandoval 	 * and make the scrub rbio similar to the other types.
2803b4ee1782SOmar Sandoval 	 */
2804b4ee1782SOmar Sandoval 	ASSERT(!bio->bi_iter.bi_size);
2805b4ee1782SOmar Sandoval 
28062942a50dSQu Wenruo 	set_rbio_range_error(rbio, bio);
2807b4ee1782SOmar Sandoval 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2808b4ee1782SOmar Sandoval 	if (rbio->faila == -1) {
2809f15fb2cdSQu Wenruo 		btrfs_warn_rl(fs_info,
2810f15fb2cdSQu Wenruo 	"cannot determine the failed stripe number for full stripe %llu",
2811f15fb2cdSQu Wenruo 			      bioc->raid_map[0]);
2812ff2b64a2SQu Wenruo 		free_raid_bio(rbio);
2813b4ee1782SOmar Sandoval 		return NULL;
2814b4ee1782SOmar Sandoval 	}
2815b4ee1782SOmar Sandoval 
2816b4ee1782SOmar Sandoval 	return rbio;
2817b4ee1782SOmar Sandoval }
2818b4ee1782SOmar Sandoval 
2819b4ee1782SOmar Sandoval void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2820b4ee1782SOmar Sandoval {
2821d817ce35SQu Wenruo 	start_async_work(rbio, recover_rbio_work);
2822b4ee1782SOmar Sandoval }
2823