xref: /linux/fs/btrfs/raid56.c (revision 1faf3885067d5be65597d5dc682f0da505822104)
1c1d7c514SDavid Sterba // SPDX-License-Identifier: GPL-2.0
253b381b3SDavid Woodhouse /*
353b381b3SDavid Woodhouse  * Copyright (C) 2012 Fusion-io  All rights reserved.
453b381b3SDavid Woodhouse  * Copyright (C) 2012 Intel Corp. All rights reserved.
553b381b3SDavid Woodhouse  */
6c1d7c514SDavid Sterba 
753b381b3SDavid Woodhouse #include <linux/sched.h>
853b381b3SDavid Woodhouse #include <linux/bio.h>
953b381b3SDavid Woodhouse #include <linux/slab.h>
1053b381b3SDavid Woodhouse #include <linux/blkdev.h>
1153b381b3SDavid Woodhouse #include <linux/raid/pq.h>
1253b381b3SDavid Woodhouse #include <linux/hash.h>
1353b381b3SDavid Woodhouse #include <linux/list_sort.h>
1453b381b3SDavid Woodhouse #include <linux/raid/xor.h>
15818e010bSDavid Sterba #include <linux/mm.h>
169b569ea0SJosef Bacik #include "messages.h"
17cea62800SJohannes Thumshirn #include "misc.h"
1853b381b3SDavid Woodhouse #include "ctree.h"
1953b381b3SDavid Woodhouse #include "disk-io.h"
2053b381b3SDavid Woodhouse #include "volumes.h"
2153b381b3SDavid Woodhouse #include "raid56.h"
2253b381b3SDavid Woodhouse #include "async-thread.h"
23c5a41562SQu Wenruo #include "file-item.h"
247a315072SQu Wenruo #include "btrfs_inode.h"
2553b381b3SDavid Woodhouse 
2653b381b3SDavid Woodhouse /* set when additional merges to this rbio are not allowed */
2753b381b3SDavid Woodhouse #define RBIO_RMW_LOCKED_BIT	1
2853b381b3SDavid Woodhouse 
294ae10b3aSChris Mason /*
304ae10b3aSChris Mason  * set when this rbio is sitting in the hash, but it is just a cache
314ae10b3aSChris Mason  * of past RMW
324ae10b3aSChris Mason  */
334ae10b3aSChris Mason #define RBIO_CACHE_BIT		2
344ae10b3aSChris Mason 
354ae10b3aSChris Mason /*
364ae10b3aSChris Mason  * set when it is safe to trust the stripe_pages for caching
374ae10b3aSChris Mason  */
384ae10b3aSChris Mason #define RBIO_CACHE_READY_BIT	3
394ae10b3aSChris Mason 
404ae10b3aSChris Mason #define RBIO_CACHE_SIZE 1024
414ae10b3aSChris Mason 
428a953348SDavid Sterba #define BTRFS_STRIPE_HASH_TABLE_BITS				11
438a953348SDavid Sterba 
448a953348SDavid Sterba /* Used by the raid56 code to lock stripes for read/modify/write */
458a953348SDavid Sterba struct btrfs_stripe_hash {
468a953348SDavid Sterba 	struct list_head hash_list;
478a953348SDavid Sterba 	spinlock_t lock;
488a953348SDavid Sterba };
498a953348SDavid Sterba 
508a953348SDavid Sterba /* Used by the raid56 code to lock stripes for read/modify/write */
518a953348SDavid Sterba struct btrfs_stripe_hash_table {
528a953348SDavid Sterba 	struct list_head stripe_cache;
538a953348SDavid Sterba 	spinlock_t cache_lock;
548a953348SDavid Sterba 	int cache_size;
558a953348SDavid Sterba 	struct btrfs_stripe_hash table[];
568a953348SDavid Sterba };
578a953348SDavid Sterba 
58eb357060SQu Wenruo /*
59eb357060SQu Wenruo  * A bvec-like structure to represent a sector inside a page.
60eb357060SQu Wenruo  *
61eb357060SQu Wenruo  * Unlike bvec, we don't need bv_len, as it's fixed to sectorsize.
62eb357060SQu Wenruo  */
63eb357060SQu Wenruo struct sector_ptr {
64eb357060SQu Wenruo 	struct page *page;
6500425dd9SQu Wenruo 	unsigned int pgoff:24;
6600425dd9SQu Wenruo 	unsigned int uptodate:8;
67eb357060SQu Wenruo };
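
/*
 * Example, assuming a 4K sectorsize: the fixed 64K stripe length gives 16
 * sectors per stripe.  With 4K pages each sector_ptr points at its own
 * page with pgoff == 0, while with 64K pages all 16 sectors of a stripe
 * share one page and pgoff selects the 4K slice (0, 4096, 8192, ...).
 */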
68eb357060SQu Wenruo 
6993723095SQu Wenruo static void rmw_rbio_work(struct work_struct *work);
7093723095SQu Wenruo static void rmw_rbio_work_locked(struct work_struct *work);
7153b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio);
7253b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
7353b381b3SDavid Woodhouse 
746bfd0133SQu Wenruo static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check);
756bfd0133SQu Wenruo static void scrub_rbio_work_locked(struct work_struct *work);
765a6ac9eaSMiao Xie 
77797d74b7SQu Wenruo static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
78797d74b7SQu Wenruo {
792942a50dSQu Wenruo 	bitmap_free(rbio->error_bitmap);
80797d74b7SQu Wenruo 	kfree(rbio->stripe_pages);
81797d74b7SQu Wenruo 	kfree(rbio->bio_sectors);
82797d74b7SQu Wenruo 	kfree(rbio->stripe_sectors);
83797d74b7SQu Wenruo 	kfree(rbio->finish_pointers);
84797d74b7SQu Wenruo }
85797d74b7SQu Wenruo 
86ff2b64a2SQu Wenruo static void free_raid_bio(struct btrfs_raid_bio *rbio)
87ff2b64a2SQu Wenruo {
88ff2b64a2SQu Wenruo 	int i;
89ff2b64a2SQu Wenruo 
90ff2b64a2SQu Wenruo 	if (!refcount_dec_and_test(&rbio->refs))
91ff2b64a2SQu Wenruo 		return;
92ff2b64a2SQu Wenruo 
93ff2b64a2SQu Wenruo 	WARN_ON(!list_empty(&rbio->stripe_cache));
94ff2b64a2SQu Wenruo 	WARN_ON(!list_empty(&rbio->hash_list));
95ff2b64a2SQu Wenruo 	WARN_ON(!bio_list_empty(&rbio->bio_list));
96ff2b64a2SQu Wenruo 
97ff2b64a2SQu Wenruo 	for (i = 0; i < rbio->nr_pages; i++) {
98ff2b64a2SQu Wenruo 		if (rbio->stripe_pages[i]) {
99ff2b64a2SQu Wenruo 			__free_page(rbio->stripe_pages[i]);
100ff2b64a2SQu Wenruo 			rbio->stripe_pages[i] = NULL;
101ff2b64a2SQu Wenruo 		}
102ff2b64a2SQu Wenruo 	}
103ff2b64a2SQu Wenruo 
104ff2b64a2SQu Wenruo 	btrfs_put_bioc(rbio->bioc);
105797d74b7SQu Wenruo 	free_raid_bio_pointers(rbio);
106ff2b64a2SQu Wenruo 	kfree(rbio);
107ff2b64a2SQu Wenruo }
108ff2b64a2SQu Wenruo 
109385de0efSChristoph Hellwig static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
110ac638859SDavid Sterba {
111385de0efSChristoph Hellwig 	INIT_WORK(&rbio->work, work_func);
112385de0efSChristoph Hellwig 	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
113ac638859SDavid Sterba }
114ac638859SDavid Sterba 
11553b381b3SDavid Woodhouse /*
11653b381b3SDavid Woodhouse  * the stripe hash table is used for locking, and to collect
11753b381b3SDavid Woodhouse  * bios in hopes of making a full stripe
11853b381b3SDavid Woodhouse  */
11953b381b3SDavid Woodhouse int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
12053b381b3SDavid Woodhouse {
12153b381b3SDavid Woodhouse 	struct btrfs_stripe_hash_table *table;
12253b381b3SDavid Woodhouse 	struct btrfs_stripe_hash_table *x;
12353b381b3SDavid Woodhouse 	struct btrfs_stripe_hash *cur;
12453b381b3SDavid Woodhouse 	struct btrfs_stripe_hash *h;
12553b381b3SDavid Woodhouse 	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
12653b381b3SDavid Woodhouse 	int i;
12753b381b3SDavid Woodhouse 
12853b381b3SDavid Woodhouse 	if (info->stripe_hash_table)
12953b381b3SDavid Woodhouse 		return 0;
13053b381b3SDavid Woodhouse 
13183c8266aSDavid Sterba 	/*
13283c8266aSDavid Sterba 	 * The table is large, starting with order 4 and can go as high as
13383c8266aSDavid Sterba 	 * order 7 in case lock debugging is turned on.
13483c8266aSDavid Sterba 	 *
13583c8266aSDavid Sterba 	 * Try harder to allocate and fallback to vmalloc to lower the chance
13683c8266aSDavid Sterba 	 * of a failing mount.
13783c8266aSDavid Sterba 	 */
138ee787f95SDavid Sterba 	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
13953b381b3SDavid Woodhouse 	if (!table)
14053b381b3SDavid Woodhouse 		return -ENOMEM;
14153b381b3SDavid Woodhouse 
1424ae10b3aSChris Mason 	spin_lock_init(&table->cache_lock);
1434ae10b3aSChris Mason 	INIT_LIST_HEAD(&table->stripe_cache);
1444ae10b3aSChris Mason 
14553b381b3SDavid Woodhouse 	h = table->table;
14653b381b3SDavid Woodhouse 
14753b381b3SDavid Woodhouse 	for (i = 0; i < num_entries; i++) {
14853b381b3SDavid Woodhouse 		cur = h + i;
14953b381b3SDavid Woodhouse 		INIT_LIST_HEAD(&cur->hash_list);
15053b381b3SDavid Woodhouse 		spin_lock_init(&cur->lock);
15153b381b3SDavid Woodhouse 	}
15253b381b3SDavid Woodhouse 
15353b381b3SDavid Woodhouse 	x = cmpxchg(&info->stripe_hash_table, NULL, table);
154f749303bSWang Shilong 	kvfree(x);
15553b381b3SDavid Woodhouse 	return 0;
15653b381b3SDavid Woodhouse }
15753b381b3SDavid Woodhouse 
15853b381b3SDavid Woodhouse /*
1594ae10b3aSChris Mason  * caching an rbio means copying everything from the
160ac26df8bSQu Wenruo  * bio_sectors array into the stripe_pages array.  We
1614ae10b3aSChris Mason  * use the sector uptodate bit in the stripe cache array
1624ae10b3aSChris Mason  * to indicate whether it has valid data
1634ae10b3aSChris Mason  *
1644ae10b3aSChris Mason  * once the caching is done, we set the cache ready
1654ae10b3aSChris Mason  * bit.
1664ae10b3aSChris Mason  */
1674ae10b3aSChris Mason static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
1684ae10b3aSChris Mason {
1694ae10b3aSChris Mason 	int i;
1704ae10b3aSChris Mason 	int ret;
1714ae10b3aSChris Mason 
1724ae10b3aSChris Mason 	ret = alloc_rbio_pages(rbio);
1734ae10b3aSChris Mason 	if (ret)
1744ae10b3aSChris Mason 		return;
1754ae10b3aSChris Mason 
17600425dd9SQu Wenruo 	for (i = 0; i < rbio->nr_sectors; i++) {
17700425dd9SQu Wenruo 		/* Some range not covered by bio (partial write), skip it */
17888074c8bSQu Wenruo 		if (!rbio->bio_sectors[i].page) {
17988074c8bSQu Wenruo 			/*
18088074c8bSQu Wenruo 			 * Even if the sector is not covered by bio, if it is
18188074c8bSQu Wenruo 			 * a data sector it should still be uptodate as it is
18288074c8bSQu Wenruo 			 * read from disk.
18388074c8bSQu Wenruo 			 */
18488074c8bSQu Wenruo 			if (i < rbio->nr_data * rbio->stripe_nsectors)
18588074c8bSQu Wenruo 				ASSERT(rbio->stripe_sectors[i].uptodate);
18600425dd9SQu Wenruo 			continue;
18788074c8bSQu Wenruo 		}
18800425dd9SQu Wenruo 
18900425dd9SQu Wenruo 		ASSERT(rbio->stripe_sectors[i].page);
19000425dd9SQu Wenruo 		memcpy_page(rbio->stripe_sectors[i].page,
19100425dd9SQu Wenruo 			    rbio->stripe_sectors[i].pgoff,
19200425dd9SQu Wenruo 			    rbio->bio_sectors[i].page,
19300425dd9SQu Wenruo 			    rbio->bio_sectors[i].pgoff,
19400425dd9SQu Wenruo 			    rbio->bioc->fs_info->sectorsize);
19500425dd9SQu Wenruo 		rbio->stripe_sectors[i].uptodate = 1;
19600425dd9SQu Wenruo 	}
1974ae10b3aSChris Mason 	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1984ae10b3aSChris Mason }
1994ae10b3aSChris Mason 
2004ae10b3aSChris Mason /*
20153b381b3SDavid Woodhouse  * we hash on the first logical address of the stripe
20253b381b3SDavid Woodhouse  */
20353b381b3SDavid Woodhouse static int rbio_bucket(struct btrfs_raid_bio *rbio)
20453b381b3SDavid Woodhouse {
2054c664611SQu Wenruo 	u64 num = rbio->bioc->raid_map[0];
20653b381b3SDavid Woodhouse 
20753b381b3SDavid Woodhouse 	/*
20853b381b3SDavid Woodhouse 	 * we shift down quite a bit.  We're using byte
20953b381b3SDavid Woodhouse 	 * addressing, and most of the lower bits are zeros.
21053b381b3SDavid Woodhouse 	 * This tends to upset hash_64, and it consistently
21153b381b3SDavid Woodhouse 	 * returns just one or two different values.
21253b381b3SDavid Woodhouse 	 *
21353b381b3SDavid Woodhouse 	 * shifting off the lower bits fixes things.
21453b381b3SDavid Woodhouse 	 */
21553b381b3SDavid Woodhouse 	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
21653b381b3SDavid Woodhouse }
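
/*
 * Worked example of the shift above: full stripe starts advance in
 * multiples of the 64K stripe length (64K == 1 << 16), so the low 16
 * bits of raid_map[0] are almost always zero.  Dropping them hands
 * hash_64() the bits that actually vary between stripes.
 */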
21753b381b3SDavid Woodhouse 
218d4e28d9bSQu Wenruo static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
219d4e28d9bSQu Wenruo 				       unsigned int page_nr)
220d4e28d9bSQu Wenruo {
221d4e28d9bSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
222d4e28d9bSQu Wenruo 	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
223d4e28d9bSQu Wenruo 	int i;
224d4e28d9bSQu Wenruo 
225d4e28d9bSQu Wenruo 	ASSERT(page_nr < rbio->nr_pages);
226d4e28d9bSQu Wenruo 
227d4e28d9bSQu Wenruo 	for (i = sectors_per_page * page_nr;
228d4e28d9bSQu Wenruo 	     i < sectors_per_page * page_nr + sectors_per_page;
229d4e28d9bSQu Wenruo 	     i++) {
230d4e28d9bSQu Wenruo 		if (!rbio->stripe_sectors[i].uptodate)
231d4e28d9bSQu Wenruo 			return false;
232d4e28d9bSQu Wenruo 	}
233d4e28d9bSQu Wenruo 	return true;
234d4e28d9bSQu Wenruo }
235d4e28d9bSQu Wenruo 
23653b381b3SDavid Woodhouse /*
237eb357060SQu Wenruo  * Update the stripe_sectors[] array to use the correct page and pgoff
238eb357060SQu Wenruo  *
239eb357060SQu Wenruo  * Should be called every time any page pointer in stripe_pages[] is modified.
240eb357060SQu Wenruo  */
241eb357060SQu Wenruo static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
242eb357060SQu Wenruo {
243eb357060SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
244eb357060SQu Wenruo 	u32 offset;
245eb357060SQu Wenruo 	int i;
246eb357060SQu Wenruo 
247eb357060SQu Wenruo 	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
248eb357060SQu Wenruo 		int page_index = offset >> PAGE_SHIFT;
249eb357060SQu Wenruo 
250eb357060SQu Wenruo 		ASSERT(page_index < rbio->nr_pages);
251eb357060SQu Wenruo 		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
252eb357060SQu Wenruo 		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
253eb357060SQu Wenruo 	}
254eb357060SQu Wenruo }
255eb357060SQu Wenruo 
2564d100466SQu Wenruo static void steal_rbio_page(struct btrfs_raid_bio *src,
2574d100466SQu Wenruo 			    struct btrfs_raid_bio *dest, int page_nr)
2584d100466SQu Wenruo {
2594d100466SQu Wenruo 	const u32 sectorsize = src->bioc->fs_info->sectorsize;
2604d100466SQu Wenruo 	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
2614d100466SQu Wenruo 	int i;
2624d100466SQu Wenruo 
2634d100466SQu Wenruo 	if (dest->stripe_pages[page_nr])
2644d100466SQu Wenruo 		__free_page(dest->stripe_pages[page_nr]);
2654d100466SQu Wenruo 	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
2664d100466SQu Wenruo 	src->stripe_pages[page_nr] = NULL;
2674d100466SQu Wenruo 
2684d100466SQu Wenruo 	/* Also update the sector->uptodate bits. */
2694d100466SQu Wenruo 	for (i = sectors_per_page * page_nr;
2704d100466SQu Wenruo 	     i < sectors_per_page * page_nr + sectors_per_page; i++)
2714d100466SQu Wenruo 		dest->stripe_sectors[i].uptodate = true;
2724d100466SQu Wenruo }
2734d100466SQu Wenruo 
27488074c8bSQu Wenruo static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
27588074c8bSQu Wenruo {
27688074c8bSQu Wenruo 	const int sector_nr = (page_nr << PAGE_SHIFT) >>
27788074c8bSQu Wenruo 			      rbio->bioc->fs_info->sectorsize_bits;
27888074c8bSQu Wenruo 
27988074c8bSQu Wenruo 	/*
28088074c8bSQu Wenruo 	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
28188074c8bSQu Wenruo 	 * we won't have a page which is half data half parity.
28288074c8bSQu Wenruo 	 *
28388074c8bSQu Wenruo 	 * Thus if the first sector of the page belongs to data stripes, then
28488074c8bSQu Wenruo 	 * the full page belongs to data stripes.
28588074c8bSQu Wenruo 	 */
28688074c8bSQu Wenruo 	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
28788074c8bSQu Wenruo }
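
/*
 * Example: with 2 data stripes and 16 sectors per stripe, a page whose
 * first sector index is below 2 * 16 = 32 is a data page; anything at or
 * above that belongs to P (or Q on RAID6).
 */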
28888074c8bSQu Wenruo 
289eb357060SQu Wenruo /*
290d4e28d9bSQu Wenruo  * Stealing an rbio means taking all the uptodate pages from the stripe array
291d4e28d9bSQu Wenruo  * in the source rbio and putting them into the destination rbio.
292d4e28d9bSQu Wenruo  *
293d4e28d9bSQu Wenruo  * This will also update the involved stripe_sectors[] which are referring to
294d4e28d9bSQu Wenruo  * the old pages.
2954ae10b3aSChris Mason  */
2964ae10b3aSChris Mason static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
2974ae10b3aSChris Mason {
2984ae10b3aSChris Mason 	int i;
2994ae10b3aSChris Mason 
3004ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
3014ae10b3aSChris Mason 		return;
3024ae10b3aSChris Mason 
3034ae10b3aSChris Mason 	for (i = 0; i < dest->nr_pages; i++) {
30488074c8bSQu Wenruo 		struct page *p = src->stripe_pages[i];
30588074c8bSQu Wenruo 
30688074c8bSQu Wenruo 		/*
30788074c8bSQu Wenruo 		 * We don't need to steal P/Q pages as they will always be
30888074c8bSQu Wenruo 		 * regenerated for RMW or full write anyway.
30988074c8bSQu Wenruo 		 */
31088074c8bSQu Wenruo 		if (!is_data_stripe_page(src, i))
3114ae10b3aSChris Mason 			continue;
3124ae10b3aSChris Mason 
31388074c8bSQu Wenruo 		/*
31488074c8bSQu Wenruo 		 * If @src already has RBIO_CACHE_READY_BIT, it should have
31588074c8bSQu Wenruo 		 * all data stripe pages present and uptodate.
31688074c8bSQu Wenruo 		 */
31788074c8bSQu Wenruo 		ASSERT(p);
31888074c8bSQu Wenruo 		ASSERT(full_page_sectors_uptodate(src, i));
3194d100466SQu Wenruo 		steal_rbio_page(src, dest, i);
3204ae10b3aSChris Mason 	}
321eb357060SQu Wenruo 	index_stripe_sectors(dest);
322eb357060SQu Wenruo 	index_stripe_sectors(src);
3234ae10b3aSChris Mason }
3244ae10b3aSChris Mason 
3254ae10b3aSChris Mason /*
32653b381b3SDavid Woodhouse  * merging means we take the bio_list from the victim and
32753b381b3SDavid Woodhouse  * splice it into the destination.  The victim should
32853b381b3SDavid Woodhouse  * be discarded afterwards.
32953b381b3SDavid Woodhouse  *
33053b381b3SDavid Woodhouse  * must be called with dest->rbio_list_lock held
33153b381b3SDavid Woodhouse  */
33253b381b3SDavid Woodhouse static void merge_rbio(struct btrfs_raid_bio *dest,
33353b381b3SDavid Woodhouse 		       struct btrfs_raid_bio *victim)
33453b381b3SDavid Woodhouse {
33553b381b3SDavid Woodhouse 	bio_list_merge(&dest->bio_list, &victim->bio_list);
33653b381b3SDavid Woodhouse 	dest->bio_list_bytes += victim->bio_list_bytes;
337bd8f7e62SQu Wenruo 	/* Also inherit the bitmaps from @victim. */
338bd8f7e62SQu Wenruo 	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
339bd8f7e62SQu Wenruo 		  dest->stripe_nsectors);
34053b381b3SDavid Woodhouse 	bio_list_init(&victim->bio_list);
34153b381b3SDavid Woodhouse }
34253b381b3SDavid Woodhouse 
34353b381b3SDavid Woodhouse /*
3444ae10b3aSChris Mason  * used to prune items that are in the cache.  The caller
3454ae10b3aSChris Mason  * must hold the hash table lock.
3464ae10b3aSChris Mason  */
3474ae10b3aSChris Mason static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
3484ae10b3aSChris Mason {
3494ae10b3aSChris Mason 	int bucket = rbio_bucket(rbio);
3504ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
3514ae10b3aSChris Mason 	struct btrfs_stripe_hash *h;
3524ae10b3aSChris Mason 	int freeit = 0;
3534ae10b3aSChris Mason 
3544ae10b3aSChris Mason 	/*
3554ae10b3aSChris Mason 	 * check the bit again under the hash table lock.
3564ae10b3aSChris Mason 	 */
3574ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
3584ae10b3aSChris Mason 		return;
3594ae10b3aSChris Mason 
3606a258d72SQu Wenruo 	table = rbio->bioc->fs_info->stripe_hash_table;
3614ae10b3aSChris Mason 	h = table->table + bucket;
3624ae10b3aSChris Mason 
3634ae10b3aSChris Mason 	/* hold the lock for the bucket because we may be
3644ae10b3aSChris Mason 	 * removing it from the hash table
3654ae10b3aSChris Mason 	 */
3664ae10b3aSChris Mason 	spin_lock(&h->lock);
3674ae10b3aSChris Mason 
3684ae10b3aSChris Mason 	/*
3694ae10b3aSChris Mason 	 * hold the lock for the bio list because we need
3704ae10b3aSChris Mason 	 * to make sure the bio list is empty
3714ae10b3aSChris Mason 	 */
3724ae10b3aSChris Mason 	spin_lock(&rbio->bio_list_lock);
3734ae10b3aSChris Mason 
3744ae10b3aSChris Mason 	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
3754ae10b3aSChris Mason 		list_del_init(&rbio->stripe_cache);
3764ae10b3aSChris Mason 		table->cache_size -= 1;
3774ae10b3aSChris Mason 		freeit = 1;
3784ae10b3aSChris Mason 
3794ae10b3aSChris Mason 		/* if the bio list isn't empty, this rbio is
3804ae10b3aSChris Mason 		 * still involved in an IO.  We take it out
3814ae10b3aSChris Mason 		 * of the cache list, and drop the ref that
3824ae10b3aSChris Mason 		 * was held for the list.
3834ae10b3aSChris Mason 		 *
3844ae10b3aSChris Mason 		 * If the bio_list was empty, we also remove
3854ae10b3aSChris Mason 		 * the rbio from the hash_table, and drop
3864ae10b3aSChris Mason 		 * the corresponding ref
3874ae10b3aSChris Mason 		 */
3884ae10b3aSChris Mason 		if (bio_list_empty(&rbio->bio_list)) {
3894ae10b3aSChris Mason 			if (!list_empty(&rbio->hash_list)) {
3904ae10b3aSChris Mason 				list_del_init(&rbio->hash_list);
391dec95574SElena Reshetova 				refcount_dec(&rbio->refs);
3924ae10b3aSChris Mason 				BUG_ON(!list_empty(&rbio->plug_list));
3934ae10b3aSChris Mason 			}
3944ae10b3aSChris Mason 		}
3954ae10b3aSChris Mason 	}
3964ae10b3aSChris Mason 
3974ae10b3aSChris Mason 	spin_unlock(&rbio->bio_list_lock);
3984ae10b3aSChris Mason 	spin_unlock(&h->lock);
3994ae10b3aSChris Mason 
4004ae10b3aSChris Mason 	if (freeit)
401ff2b64a2SQu Wenruo 		free_raid_bio(rbio);
4024ae10b3aSChris Mason }
4034ae10b3aSChris Mason 
4044ae10b3aSChris Mason /*
4054ae10b3aSChris Mason  * prune a given rbio from the cache
4064ae10b3aSChris Mason  */
4074ae10b3aSChris Mason static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
4084ae10b3aSChris Mason {
4094ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
4104ae10b3aSChris Mason 
4114ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
4124ae10b3aSChris Mason 		return;
4134ae10b3aSChris Mason 
4146a258d72SQu Wenruo 	table = rbio->bioc->fs_info->stripe_hash_table;
4154ae10b3aSChris Mason 
41674cc3600SChristoph Hellwig 	spin_lock(&table->cache_lock);
4174ae10b3aSChris Mason 	__remove_rbio_from_cache(rbio);
41874cc3600SChristoph Hellwig 	spin_unlock(&table->cache_lock);
4194ae10b3aSChris Mason }
4204ae10b3aSChris Mason 
4214ae10b3aSChris Mason /*
4224ae10b3aSChris Mason  * remove everything in the cache
4234ae10b3aSChris Mason  */
42448a3b636SEric Sandeen static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
4254ae10b3aSChris Mason {
4264ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
4274ae10b3aSChris Mason 	struct btrfs_raid_bio *rbio;
4284ae10b3aSChris Mason 
4294ae10b3aSChris Mason 	table = info->stripe_hash_table;
4304ae10b3aSChris Mason 
43174cc3600SChristoph Hellwig 	spin_lock(&table->cache_lock);
4324ae10b3aSChris Mason 	while (!list_empty(&table->stripe_cache)) {
4334ae10b3aSChris Mason 		rbio = list_entry(table->stripe_cache.next,
4344ae10b3aSChris Mason 				  struct btrfs_raid_bio,
4354ae10b3aSChris Mason 				  stripe_cache);
4364ae10b3aSChris Mason 		__remove_rbio_from_cache(rbio);
4374ae10b3aSChris Mason 	}
43874cc3600SChristoph Hellwig 	spin_unlock(&table->cache_lock);
4394ae10b3aSChris Mason }
4404ae10b3aSChris Mason 
4414ae10b3aSChris Mason /*
4424ae10b3aSChris Mason  * remove all cached entries and free the hash table
4434ae10b3aSChris Mason  * used by unmount
44453b381b3SDavid Woodhouse  */
44553b381b3SDavid Woodhouse void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
44653b381b3SDavid Woodhouse {
44753b381b3SDavid Woodhouse 	if (!info->stripe_hash_table)
44853b381b3SDavid Woodhouse 		return;
4494ae10b3aSChris Mason 	btrfs_clear_rbio_cache(info);
450f749303bSWang Shilong 	kvfree(info->stripe_hash_table);
45153b381b3SDavid Woodhouse 	info->stripe_hash_table = NULL;
45253b381b3SDavid Woodhouse }
45353b381b3SDavid Woodhouse 
45453b381b3SDavid Woodhouse /*
4554ae10b3aSChris Mason  * insert an rbio into the stripe cache.  It
4564ae10b3aSChris Mason  * must have already been prepared by calling
4574ae10b3aSChris Mason  * cache_rbio_pages
4584ae10b3aSChris Mason  *
4594ae10b3aSChris Mason  * If this rbio was already cached, it gets
4604ae10b3aSChris Mason  * moved to the front of the lru.
4614ae10b3aSChris Mason  *
4624ae10b3aSChris Mason  * If the size of the rbio cache is too big, we
4634ae10b3aSChris Mason  * prune an item.
4644ae10b3aSChris Mason  */
4654ae10b3aSChris Mason static void cache_rbio(struct btrfs_raid_bio *rbio)
4664ae10b3aSChris Mason {
4674ae10b3aSChris Mason 	struct btrfs_stripe_hash_table *table;
4684ae10b3aSChris Mason 
4694ae10b3aSChris Mason 	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
4704ae10b3aSChris Mason 		return;
4714ae10b3aSChris Mason 
4726a258d72SQu Wenruo 	table = rbio->bioc->fs_info->stripe_hash_table;
4734ae10b3aSChris Mason 
47474cc3600SChristoph Hellwig 	spin_lock(&table->cache_lock);
4754ae10b3aSChris Mason 	spin_lock(&rbio->bio_list_lock);
4764ae10b3aSChris Mason 
4774ae10b3aSChris Mason 	/* bump our ref if we were not in the list before */
4784ae10b3aSChris Mason 	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
479dec95574SElena Reshetova 		refcount_inc(&rbio->refs);
4804ae10b3aSChris Mason 
4814ae10b3aSChris Mason 	if (!list_empty(&rbio->stripe_cache)) {
4824ae10b3aSChris Mason 		list_move(&rbio->stripe_cache, &table->stripe_cache);
4834ae10b3aSChris Mason 	} else {
4844ae10b3aSChris Mason 		list_add(&rbio->stripe_cache, &table->stripe_cache);
4854ae10b3aSChris Mason 		table->cache_size += 1;
4864ae10b3aSChris Mason 	}
4874ae10b3aSChris Mason 
4884ae10b3aSChris Mason 	spin_unlock(&rbio->bio_list_lock);
4894ae10b3aSChris Mason 
4904ae10b3aSChris Mason 	if (table->cache_size > RBIO_CACHE_SIZE) {
4914ae10b3aSChris Mason 		struct btrfs_raid_bio *found;
4924ae10b3aSChris Mason 
4934ae10b3aSChris Mason 		found = list_entry(table->stripe_cache.prev,
4944ae10b3aSChris Mason 				  struct btrfs_raid_bio,
4954ae10b3aSChris Mason 				  stripe_cache);
4964ae10b3aSChris Mason 
4974ae10b3aSChris Mason 		if (found != rbio)
4984ae10b3aSChris Mason 			__remove_rbio_from_cache(found);
4994ae10b3aSChris Mason 	}
5004ae10b3aSChris Mason 
50174cc3600SChristoph Hellwig 	spin_unlock(&table->cache_lock);
5024ae10b3aSChris Mason }
5034ae10b3aSChris Mason 
5044ae10b3aSChris Mason /*
50553b381b3SDavid Woodhouse  * helper function to run the xor_blocks api.  It is only
50653b381b3SDavid Woodhouse  * able to do MAX_XOR_BLOCKS at a time, so we need to
50753b381b3SDavid Woodhouse  * loop through.
50853b381b3SDavid Woodhouse  */
50953b381b3SDavid Woodhouse static void run_xor(void **pages, int src_cnt, ssize_t len)
51053b381b3SDavid Woodhouse {
51153b381b3SDavid Woodhouse 	int src_off = 0;
51253b381b3SDavid Woodhouse 	int xor_src_cnt = 0;
51353b381b3SDavid Woodhouse 	void *dest = pages[src_cnt];
51453b381b3SDavid Woodhouse 
51553b381b3SDavid Woodhouse 	while (src_cnt > 0) {
51653b381b3SDavid Woodhouse 		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
51753b381b3SDavid Woodhouse 		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
51853b381b3SDavid Woodhouse 
51953b381b3SDavid Woodhouse 		src_cnt -= xor_src_cnt;
52053b381b3SDavid Woodhouse 		src_off += xor_src_cnt;
52153b381b3SDavid Woodhouse 	}
52253b381b3SDavid Woodhouse }
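
/*
 * Usage sketch (how the RAID5 parity generation path typically drives
 * this): xor_blocks() XORs the sources into the destination in place, so
 * the caller seeds the parity buffer with the first data block and xors
 * in the rest:
 *
 *	memcpy(pointers[nr_data], pointers[0], sectorsize);
 *	run_xor(pointers + 1, nr_data - 1, sectorsize);
 *
 * which leaves pointers[nr_data] == d0 ^ d1 ^ ... ^ d(nr_data - 1).
 */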
52353b381b3SDavid Woodhouse 
52453b381b3SDavid Woodhouse /*
525176571a1SDavid Sterba  * Returns true if the bio list inside this rbio covers an entire stripe (no
526176571a1SDavid Sterba  * rmw required).
52753b381b3SDavid Woodhouse  */
52853b381b3SDavid Woodhouse static int rbio_is_full(struct btrfs_raid_bio *rbio)
52953b381b3SDavid Woodhouse {
530176571a1SDavid Sterba 	unsigned long size = rbio->bio_list_bytes;
531176571a1SDavid Sterba 	int ret = 1;
53253b381b3SDavid Woodhouse 
53374cc3600SChristoph Hellwig 	spin_lock(&rbio->bio_list_lock);
534ff18a4afSChristoph Hellwig 	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
535176571a1SDavid Sterba 		ret = 0;
536ff18a4afSChristoph Hellwig 	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
53774cc3600SChristoph Hellwig 	spin_unlock(&rbio->bio_list_lock);
538176571a1SDavid Sterba 
53953b381b3SDavid Woodhouse 	return ret;
54053b381b3SDavid Woodhouse }
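
/*
 * Example: with 2 data stripes and the 64K stripe length, a full stripe
 * means the bio list carries 2 * 64K == 128K of data; anything less and
 * the RMW path has to read the untouched sectors from disk first.
 */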
54153b381b3SDavid Woodhouse 
54253b381b3SDavid Woodhouse /*
54353b381b3SDavid Woodhouse  * returns 1 if it is safe to merge two rbios together.
54453b381b3SDavid Woodhouse  * The merging is safe if the two rbios correspond to
54553b381b3SDavid Woodhouse  * the same stripe and if they are both going in the same
54653b381b3SDavid Woodhouse  * direction (read vs write), and if neither one is
54753b381b3SDavid Woodhouse  * locked for final IO
54853b381b3SDavid Woodhouse  *
54953b381b3SDavid Woodhouse  * The caller is responsible for locking such that
55053b381b3SDavid Woodhouse  * rmw_locked is safe to test
55153b381b3SDavid Woodhouse  */
55253b381b3SDavid Woodhouse static int rbio_can_merge(struct btrfs_raid_bio *last,
55353b381b3SDavid Woodhouse 			  struct btrfs_raid_bio *cur)
55453b381b3SDavid Woodhouse {
55553b381b3SDavid Woodhouse 	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
55653b381b3SDavid Woodhouse 	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
55753b381b3SDavid Woodhouse 		return 0;
55853b381b3SDavid Woodhouse 
5594ae10b3aSChris Mason 	/*
5604ae10b3aSChris Mason 	 * we can't merge with cached rbios, since the
5614ae10b3aSChris Mason 	 * idea is that when we merge the destination
5624ae10b3aSChris Mason 	 * rbio is going to run our IO for us.  We can
56301327610SNicholas D Steeves 	 * steal from cached rbios though, other functions
5644ae10b3aSChris Mason 	 * handle that.
5654ae10b3aSChris Mason 	 */
5664ae10b3aSChris Mason 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
5674ae10b3aSChris Mason 	    test_bit(RBIO_CACHE_BIT, &cur->flags))
5684ae10b3aSChris Mason 		return 0;
5694ae10b3aSChris Mason 
5704c664611SQu Wenruo 	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
57153b381b3SDavid Woodhouse 		return 0;
57253b381b3SDavid Woodhouse 
5735a6ac9eaSMiao Xie 	/* we can't merge with different operations */
5745a6ac9eaSMiao Xie 	if (last->operation != cur->operation)
57553b381b3SDavid Woodhouse 		return 0;
5765a6ac9eaSMiao Xie 	/*
5775a6ac9eaSMiao Xie 	 * For parity scrub we've read the full stripe from the drive,
5785a6ac9eaSMiao Xie 	 * then check and repair the parity and write the new results.
5795a6ac9eaSMiao Xie 	 *
5805a6ac9eaSMiao Xie 	 * We're not allowed to add any new bios to the
5815a6ac9eaSMiao Xie 	 * bio list here, anyone else that wants to
5825a6ac9eaSMiao Xie 	 * change this stripe needs to do their own rmw.
5835a6ac9eaSMiao Xie 	 */
584db34be19SLiu Bo 	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
5855a6ac9eaSMiao Xie 		return 0;
58653b381b3SDavid Woodhouse 
587ad3daf1cSQu Wenruo 	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
588ad3daf1cSQu Wenruo 	    last->operation == BTRFS_RBIO_READ_REBUILD)
589b4ee1782SOmar Sandoval 		return 0;
590b4ee1782SOmar Sandoval 
59153b381b3SDavid Woodhouse 	return 1;
59253b381b3SDavid Woodhouse }
59353b381b3SDavid Woodhouse 
5943e77605dSQu Wenruo static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
5953e77605dSQu Wenruo 					     unsigned int stripe_nr,
5963e77605dSQu Wenruo 					     unsigned int sector_nr)
5973e77605dSQu Wenruo {
5983e77605dSQu Wenruo 	ASSERT(stripe_nr < rbio->real_stripes);
5993e77605dSQu Wenruo 	ASSERT(sector_nr < rbio->stripe_nsectors);
6003e77605dSQu Wenruo 
6013e77605dSQu Wenruo 	return stripe_nr * rbio->stripe_nsectors + sector_nr;
6023e77605dSQu Wenruo }
6033e77605dSQu Wenruo 
6043e77605dSQu Wenruo /* Return a sector from rbio->stripe_sectors, not from the bio list */
6053e77605dSQu Wenruo static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
6063e77605dSQu Wenruo 					     unsigned int stripe_nr,
6073e77605dSQu Wenruo 					     unsigned int sector_nr)
6083e77605dSQu Wenruo {
6093e77605dSQu Wenruo 	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
6103e77605dSQu Wenruo 							      sector_nr)];
6113e77605dSQu Wenruo }
6123e77605dSQu Wenruo 
6131145059aSQu Wenruo /* Grab a sector inside P stripe */
6141145059aSQu Wenruo static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
6151145059aSQu Wenruo 					      unsigned int sector_nr)
616b7178a5fSZhao Lei {
6171145059aSQu Wenruo 	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
618b7178a5fSZhao Lei }
619b7178a5fSZhao Lei 
6201145059aSQu Wenruo /* Grab a sector inside Q stripe, return NULL if not RAID6 */
6211145059aSQu Wenruo static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
6221145059aSQu Wenruo 					      unsigned int sector_nr)
62353b381b3SDavid Woodhouse {
6242c8cdd6eSMiao Xie 	if (rbio->nr_data + 1 == rbio->real_stripes)
62553b381b3SDavid Woodhouse 		return NULL;
6261145059aSQu Wenruo 	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
6271145059aSQu Wenruo }
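
/*
 * The stripe_sectors[] index space is stripe-major: data stripes occupy
 * indexes [0, nr_data * stripe_nsectors), followed by P at stripe number
 * nr_data and, for RAID6, Q at nr_data + 1.  E.g. with 2 data stripes
 * and 16 sectors per stripe, P sector 3 sits at stripe_sectors[2 * 16 + 3].
 */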
6281145059aSQu Wenruo 
62953b381b3SDavid Woodhouse /*
63053b381b3SDavid Woodhouse  * The first stripe in the table for a logical address
63153b381b3SDavid Woodhouse  * has the lock.  rbios are added in one of three ways:
63253b381b3SDavid Woodhouse  *
63353b381b3SDavid Woodhouse  * 1) Nobody has the stripe locked yet.  The rbio is given
63453b381b3SDavid Woodhouse  * the lock and 0 is returned.  The caller must start the IO
63553b381b3SDavid Woodhouse  * themselves.
63653b381b3SDavid Woodhouse  *
63753b381b3SDavid Woodhouse  * 2) Someone has the stripe locked, but we're able to merge
63853b381b3SDavid Woodhouse  * with the lock owner.  The rbio is freed and the IO will
63953b381b3SDavid Woodhouse  * start automatically along with the existing rbio.  1 is returned.
64053b381b3SDavid Woodhouse  *
64153b381b3SDavid Woodhouse  * 3) Someone has the stripe locked, but we're not able to merge.
64253b381b3SDavid Woodhouse  * The rbio is added to the lock owner's plug list, or merged into
64353b381b3SDavid Woodhouse  * an rbio already on the plug list.  When the lock owner unlocks,
64453b381b3SDavid Woodhouse  * the next rbio on the list is run and the IO is started automatically.
64553b381b3SDavid Woodhouse  * 1 is returned
64653b381b3SDavid Woodhouse  *
64753b381b3SDavid Woodhouse  * If we return 0, the caller still owns the rbio and must continue with
64853b381b3SDavid Woodhouse  * IO submission.  If we return 1, the caller must assume the rbio has
64953b381b3SDavid Woodhouse  * already been freed.
65053b381b3SDavid Woodhouse  */
65153b381b3SDavid Woodhouse static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
65253b381b3SDavid Woodhouse {
653721860d5SJohannes Thumshirn 	struct btrfs_stripe_hash *h;
65453b381b3SDavid Woodhouse 	struct btrfs_raid_bio *cur;
65553b381b3SDavid Woodhouse 	struct btrfs_raid_bio *pending;
65653b381b3SDavid Woodhouse 	struct btrfs_raid_bio *freeit = NULL;
6574ae10b3aSChris Mason 	struct btrfs_raid_bio *cache_drop = NULL;
65853b381b3SDavid Woodhouse 	int ret = 0;
65953b381b3SDavid Woodhouse 
6606a258d72SQu Wenruo 	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
661721860d5SJohannes Thumshirn 
66274cc3600SChristoph Hellwig 	spin_lock(&h->lock);
66353b381b3SDavid Woodhouse 	list_for_each_entry(cur, &h->hash_list, hash_list) {
6644c664611SQu Wenruo 		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
6659d6cb1b0SJohannes Thumshirn 			continue;
6669d6cb1b0SJohannes Thumshirn 
66753b381b3SDavid Woodhouse 		spin_lock(&cur->bio_list_lock);
66853b381b3SDavid Woodhouse 
6699d6cb1b0SJohannes Thumshirn 		/* Can we steal this cached rbio's pages? */
6704ae10b3aSChris Mason 		if (bio_list_empty(&cur->bio_list) &&
6714ae10b3aSChris Mason 		    list_empty(&cur->plug_list) &&
6724ae10b3aSChris Mason 		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
6734ae10b3aSChris Mason 		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
6744ae10b3aSChris Mason 			list_del_init(&cur->hash_list);
675dec95574SElena Reshetova 			refcount_dec(&cur->refs);
6764ae10b3aSChris Mason 
6774ae10b3aSChris Mason 			steal_rbio(cur, rbio);
6784ae10b3aSChris Mason 			cache_drop = cur;
6794ae10b3aSChris Mason 			spin_unlock(&cur->bio_list_lock);
6804ae10b3aSChris Mason 
6814ae10b3aSChris Mason 			goto lockit;
6824ae10b3aSChris Mason 		}
6834ae10b3aSChris Mason 
6849d6cb1b0SJohannes Thumshirn 		/* Can we merge into the lock owner? */
68553b381b3SDavid Woodhouse 		if (rbio_can_merge(cur, rbio)) {
68653b381b3SDavid Woodhouse 			merge_rbio(cur, rbio);
68753b381b3SDavid Woodhouse 			spin_unlock(&cur->bio_list_lock);
68853b381b3SDavid Woodhouse 			freeit = rbio;
68953b381b3SDavid Woodhouse 			ret = 1;
69053b381b3SDavid Woodhouse 			goto out;
69153b381b3SDavid Woodhouse 		}
69253b381b3SDavid Woodhouse 
6934ae10b3aSChris Mason 
69453b381b3SDavid Woodhouse 		/*
6959d6cb1b0SJohannes Thumshirn 		 * We couldn't merge with the running rbio, see if we can merge
6969d6cb1b0SJohannes Thumshirn 		 * with the pending ones.  We don't have to check for rmw_locked
6979d6cb1b0SJohannes Thumshirn 		 * because there is no way they are inside finish_rmw right now
69853b381b3SDavid Woodhouse 		 */
6999d6cb1b0SJohannes Thumshirn 		list_for_each_entry(pending, &cur->plug_list, plug_list) {
70053b381b3SDavid Woodhouse 			if (rbio_can_merge(pending, rbio)) {
70153b381b3SDavid Woodhouse 				merge_rbio(pending, rbio);
70253b381b3SDavid Woodhouse 				spin_unlock(&cur->bio_list_lock);
70353b381b3SDavid Woodhouse 				freeit = rbio;
70453b381b3SDavid Woodhouse 				ret = 1;
70553b381b3SDavid Woodhouse 				goto out;
70653b381b3SDavid Woodhouse 			}
70753b381b3SDavid Woodhouse 		}
70853b381b3SDavid Woodhouse 
7099d6cb1b0SJohannes Thumshirn 		/*
7109d6cb1b0SJohannes Thumshirn 		 * No merging, put us on the tail of the plug list, our rbio
7119d6cb1b0SJohannes Thumshirn 		 * will be started when the currently running rbio unlocks
71253b381b3SDavid Woodhouse 		 */
71353b381b3SDavid Woodhouse 		list_add_tail(&rbio->plug_list, &cur->plug_list);
71453b381b3SDavid Woodhouse 		spin_unlock(&cur->bio_list_lock);
71553b381b3SDavid Woodhouse 		ret = 1;
71653b381b3SDavid Woodhouse 		goto out;
71753b381b3SDavid Woodhouse 	}
7184ae10b3aSChris Mason lockit:
719dec95574SElena Reshetova 	refcount_inc(&rbio->refs);
72053b381b3SDavid Woodhouse 	list_add(&rbio->hash_list, &h->hash_list);
72153b381b3SDavid Woodhouse out:
72274cc3600SChristoph Hellwig 	spin_unlock(&h->lock);
7234ae10b3aSChris Mason 	if (cache_drop)
7244ae10b3aSChris Mason 		remove_rbio_from_cache(cache_drop);
72553b381b3SDavid Woodhouse 	if (freeit)
726ff2b64a2SQu Wenruo 		free_raid_bio(freeit);
72753b381b3SDavid Woodhouse 	return ret;
72853b381b3SDavid Woodhouse }
72953b381b3SDavid Woodhouse 
730d817ce35SQu Wenruo static void recover_rbio_work_locked(struct work_struct *work);
731d817ce35SQu Wenruo 
73253b381b3SDavid Woodhouse /*
73353b381b3SDavid Woodhouse  * called when rmw or parity rebuild has completed.  If the plug list has more
73453b381b3SDavid Woodhouse  * rbios waiting for this stripe, the next one on the list will be started
73553b381b3SDavid Woodhouse  */
73653b381b3SDavid Woodhouse static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
73753b381b3SDavid Woodhouse {
73853b381b3SDavid Woodhouse 	int bucket;
73953b381b3SDavid Woodhouse 	struct btrfs_stripe_hash *h;
7404ae10b3aSChris Mason 	int keep_cache = 0;
74153b381b3SDavid Woodhouse 
74253b381b3SDavid Woodhouse 	bucket = rbio_bucket(rbio);
7436a258d72SQu Wenruo 	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;
74453b381b3SDavid Woodhouse 
7454ae10b3aSChris Mason 	if (list_empty(&rbio->plug_list))
7464ae10b3aSChris Mason 		cache_rbio(rbio);
7474ae10b3aSChris Mason 
74874cc3600SChristoph Hellwig 	spin_lock(&h->lock);
74953b381b3SDavid Woodhouse 	spin_lock(&rbio->bio_list_lock);
75053b381b3SDavid Woodhouse 
75153b381b3SDavid Woodhouse 	if (!list_empty(&rbio->hash_list)) {
7524ae10b3aSChris Mason 		/*
7534ae10b3aSChris Mason 		 * if we're still cached and there is no other IO
7544ae10b3aSChris Mason 		 * to perform, just leave this rbio here for others
7554ae10b3aSChris Mason 		 * to steal from later
7564ae10b3aSChris Mason 		 */
7574ae10b3aSChris Mason 		if (list_empty(&rbio->plug_list) &&
7584ae10b3aSChris Mason 		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
7594ae10b3aSChris Mason 			keep_cache = 1;
7604ae10b3aSChris Mason 			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
7614ae10b3aSChris Mason 			BUG_ON(!bio_list_empty(&rbio->bio_list));
7624ae10b3aSChris Mason 			goto done;
7634ae10b3aSChris Mason 		}
76453b381b3SDavid Woodhouse 
76553b381b3SDavid Woodhouse 		list_del_init(&rbio->hash_list);
766dec95574SElena Reshetova 		refcount_dec(&rbio->refs);
76753b381b3SDavid Woodhouse 
76853b381b3SDavid Woodhouse 		/*
76953b381b3SDavid Woodhouse 		 * we use the plug list to hold all the rbios
77053b381b3SDavid Woodhouse 		 * waiting for the chance to lock this stripe.
77153b381b3SDavid Woodhouse 		 * hand the lock over to one of them.
77253b381b3SDavid Woodhouse 		 */
77353b381b3SDavid Woodhouse 		if (!list_empty(&rbio->plug_list)) {
77453b381b3SDavid Woodhouse 			struct btrfs_raid_bio *next;
77553b381b3SDavid Woodhouse 			struct list_head *head = rbio->plug_list.next;
77653b381b3SDavid Woodhouse 
77753b381b3SDavid Woodhouse 			next = list_entry(head, struct btrfs_raid_bio,
77853b381b3SDavid Woodhouse 					  plug_list);
77953b381b3SDavid Woodhouse 
78053b381b3SDavid Woodhouse 			list_del_init(&rbio->plug_list);
78153b381b3SDavid Woodhouse 
78253b381b3SDavid Woodhouse 			list_add(&next->hash_list, &h->hash_list);
783dec95574SElena Reshetova 			refcount_inc(&next->refs);
78453b381b3SDavid Woodhouse 			spin_unlock(&rbio->bio_list_lock);
78574cc3600SChristoph Hellwig 			spin_unlock(&h->lock);
78653b381b3SDavid Woodhouse 
7871b94b556SMiao Xie 			if (next->operation == BTRFS_RBIO_READ_REBUILD)
788d817ce35SQu Wenruo 				start_async_work(next, recover_rbio_work_locked);
789b4ee1782SOmar Sandoval 			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
790b4ee1782SOmar Sandoval 				steal_rbio(rbio, next);
791d817ce35SQu Wenruo 				start_async_work(next, recover_rbio_work_locked);
792b4ee1782SOmar Sandoval 			} else if (next->operation == BTRFS_RBIO_WRITE) {
7934ae10b3aSChris Mason 				steal_rbio(rbio, next);
79493723095SQu Wenruo 				start_async_work(next, rmw_rbio_work_locked);
7955a6ac9eaSMiao Xie 			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
7965a6ac9eaSMiao Xie 				steal_rbio(rbio, next);
7976bfd0133SQu Wenruo 				start_async_work(next, scrub_rbio_work_locked);
7984ae10b3aSChris Mason 			}
79953b381b3SDavid Woodhouse 
80053b381b3SDavid Woodhouse 			goto done_nolock;
80153b381b3SDavid Woodhouse 		}
80253b381b3SDavid Woodhouse 	}
8034ae10b3aSChris Mason done:
80453b381b3SDavid Woodhouse 	spin_unlock(&rbio->bio_list_lock);
80574cc3600SChristoph Hellwig 	spin_unlock(&h->lock);
80653b381b3SDavid Woodhouse 
80753b381b3SDavid Woodhouse done_nolock:
8084ae10b3aSChris Mason 	if (!keep_cache)
8094ae10b3aSChris Mason 		remove_rbio_from_cache(rbio);
81053b381b3SDavid Woodhouse }
81153b381b3SDavid Woodhouse 
8127583d8d0SLiu Bo static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
81353b381b3SDavid Woodhouse {
8147583d8d0SLiu Bo 	struct bio *next;
8157583d8d0SLiu Bo 
8167583d8d0SLiu Bo 	while (cur) {
8177583d8d0SLiu Bo 		next = cur->bi_next;
8187583d8d0SLiu Bo 		cur->bi_next = NULL;
8197583d8d0SLiu Bo 		cur->bi_status = err;
8207583d8d0SLiu Bo 		bio_endio(cur);
8217583d8d0SLiu Bo 		cur = next;
8227583d8d0SLiu Bo 	}
82353b381b3SDavid Woodhouse }
82453b381b3SDavid Woodhouse 
82553b381b3SDavid Woodhouse /*
82653b381b3SDavid Woodhouse  * this frees the rbio and runs through all the bios in the
82753b381b3SDavid Woodhouse  * bio_list and calls end_io on them
82853b381b3SDavid Woodhouse  */
8294e4cbee9SChristoph Hellwig static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
83053b381b3SDavid Woodhouse {
83153b381b3SDavid Woodhouse 	struct bio *cur = bio_list_get(&rbio->bio_list);
8327583d8d0SLiu Bo 	struct bio *extra;
8334245215dSMiao Xie 
834c5a41562SQu Wenruo 	kfree(rbio->csum_buf);
835c5a41562SQu Wenruo 	bitmap_free(rbio->csum_bitmap);
836c5a41562SQu Wenruo 	rbio->csum_buf = NULL;
837c5a41562SQu Wenruo 	rbio->csum_bitmap = NULL;
838c5a41562SQu Wenruo 
839bd8f7e62SQu Wenruo 	/*
840bd8f7e62SQu Wenruo 	 * Clear the data bitmap, as the rbio may be cached for later usage.
841bd8f7e62SQu Wenruo 	 * Do this before unlock_stripe() so no new bios can be added
842bd8f7e62SQu Wenruo 	 * to this rbio.
843bd8f7e62SQu Wenruo 	 */
844bd8f7e62SQu Wenruo 	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);
8454245215dSMiao Xie 
8467583d8d0SLiu Bo 	/*
8477583d8d0SLiu Bo 	 * At this moment, rbio->bio_list is empty, however since rbio does not
8487583d8d0SLiu Bo 	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
8497583d8d0SLiu Bo 	 * hash list, rbio may be merged with others so that rbio->bio_list
8507583d8d0SLiu Bo 	 * becomes non-empty.
8517583d8d0SLiu Bo 	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
8527583d8d0SLiu Bo 	 * more and we can call bio_endio() on all queued bios.
8537583d8d0SLiu Bo 	 */
8547583d8d0SLiu Bo 	unlock_stripe(rbio);
8557583d8d0SLiu Bo 	extra = bio_list_get(&rbio->bio_list);
856ff2b64a2SQu Wenruo 	free_raid_bio(rbio);
85753b381b3SDavid Woodhouse 
8587583d8d0SLiu Bo 	rbio_endio_bio_list(cur, err);
8597583d8d0SLiu Bo 	if (extra)
8607583d8d0SLiu Bo 		rbio_endio_bio_list(extra, err);
86153b381b3SDavid Woodhouse }
86253b381b3SDavid Woodhouse 
86353b381b3SDavid Woodhouse /*
86443dd529aSDavid Sterba  * Get a sector pointer specified by its @stripe_nr and @sector_nr.
8653e77605dSQu Wenruo  *
8663e77605dSQu Wenruo  * @rbio:               The raid bio
8673e77605dSQu Wenruo  * @stripe_nr:          Stripe number, valid range [0, real_stripes)
8683e77605dSQu Wenruo  * @sector_nr:		Sector number inside the stripe,
8693e77605dSQu Wenruo  *			valid range [0, stripe_nsectors)
8703e77605dSQu Wenruo  * @bio_list_only:      Whether to use sectors inside the bio list only.
8713e77605dSQu Wenruo  *
8723e77605dSQu Wenruo  * The read/modify/write code wants to reuse the original bio page as much
8733e77605dSQu Wenruo  * as possible, and only use stripe_sectors as fallback.
8743e77605dSQu Wenruo  */
8753e77605dSQu Wenruo static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
8763e77605dSQu Wenruo 					 int stripe_nr, int sector_nr,
8773e77605dSQu Wenruo 					 bool bio_list_only)
8783e77605dSQu Wenruo {
8793e77605dSQu Wenruo 	struct sector_ptr *sector;
8803e77605dSQu Wenruo 	int index;
8813e77605dSQu Wenruo 
8823e77605dSQu Wenruo 	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
8833e77605dSQu Wenruo 	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
8843e77605dSQu Wenruo 
8853e77605dSQu Wenruo 	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
8863e77605dSQu Wenruo 	ASSERT(index >= 0 && index < rbio->nr_sectors);
8873e77605dSQu Wenruo 
88874cc3600SChristoph Hellwig 	spin_lock(&rbio->bio_list_lock);
8893e77605dSQu Wenruo 	sector = &rbio->bio_sectors[index];
8903e77605dSQu Wenruo 	if (sector->page || bio_list_only) {
8913e77605dSQu Wenruo 		/* Don't return sector without a valid page pointer */
8923e77605dSQu Wenruo 		if (!sector->page)
8933e77605dSQu Wenruo 			sector = NULL;
89474cc3600SChristoph Hellwig 		spin_unlock(&rbio->bio_list_lock);
8953e77605dSQu Wenruo 		return sector;
8963e77605dSQu Wenruo 	}
89774cc3600SChristoph Hellwig 	spin_unlock(&rbio->bio_list_lock);
8983e77605dSQu Wenruo 
8993e77605dSQu Wenruo 	return &rbio->stripe_sectors[index];
9003e77605dSQu Wenruo }
9013e77605dSQu Wenruo 
90253b381b3SDavid Woodhouse /*
90353b381b3SDavid Woodhouse  * allocation and initial setup for the btrfs_raid_bio.  Note that
90453b381b3SDavid Woodhouse  * this does not allocate any pages for rbio->stripe_pages.
90553b381b3SDavid Woodhouse  */
9062ff7e61eSJeff Mahoney static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
907ff18a4afSChristoph Hellwig 					 struct btrfs_io_context *bioc)
90853b381b3SDavid Woodhouse {
909*1faf3885SQu Wenruo 	const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes;
910ff18a4afSChristoph Hellwig 	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
911843de58bSQu Wenruo 	const unsigned int num_pages = stripe_npages * real_stripes;
912ff18a4afSChristoph Hellwig 	const unsigned int stripe_nsectors =
913ff18a4afSChristoph Hellwig 		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
91494efbe19SQu Wenruo 	const unsigned int num_sectors = stripe_nsectors * real_stripes;
91553b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
91653b381b3SDavid Woodhouse 
91794efbe19SQu Wenruo 	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
91894efbe19SQu Wenruo 	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
919c67c68ebSQu Wenruo 	/*
920c67c68ebSQu Wenruo 	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
921c67c68ebSQu Wenruo 	 * (at most 16) should be no larger than BITS_PER_LONG.
922c67c68ebSQu Wenruo 	 */
923c67c68ebSQu Wenruo 	ASSERT(stripe_nsectors <= BITS_PER_LONG);
924843de58bSQu Wenruo 
925797d74b7SQu Wenruo 	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
926af8e2d1dSMiao Xie 	if (!rbio)
92753b381b3SDavid Woodhouse 		return ERR_PTR(-ENOMEM);
928797d74b7SQu Wenruo 	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
929797d74b7SQu Wenruo 				     GFP_NOFS);
930797d74b7SQu Wenruo 	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
931797d74b7SQu Wenruo 				    GFP_NOFS);
932797d74b7SQu Wenruo 	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
933797d74b7SQu Wenruo 				       GFP_NOFS);
934797d74b7SQu Wenruo 	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
9352942a50dSQu Wenruo 	rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
936797d74b7SQu Wenruo 
937797d74b7SQu Wenruo 	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
9382942a50dSQu Wenruo 	    !rbio->finish_pointers || !rbio->error_bitmap) {
939797d74b7SQu Wenruo 		free_raid_bio_pointers(rbio);
940797d74b7SQu Wenruo 		kfree(rbio);
941797d74b7SQu Wenruo 		return ERR_PTR(-ENOMEM);
942797d74b7SQu Wenruo 	}
94353b381b3SDavid Woodhouse 
94453b381b3SDavid Woodhouse 	bio_list_init(&rbio->bio_list);
945d817ce35SQu Wenruo 	init_waitqueue_head(&rbio->io_wait);
94653b381b3SDavid Woodhouse 	INIT_LIST_HEAD(&rbio->plug_list);
94753b381b3SDavid Woodhouse 	spin_lock_init(&rbio->bio_list_lock);
9484ae10b3aSChris Mason 	INIT_LIST_HEAD(&rbio->stripe_cache);
94953b381b3SDavid Woodhouse 	INIT_LIST_HEAD(&rbio->hash_list);
950f1c29379SChristoph Hellwig 	btrfs_get_bioc(bioc);
9514c664611SQu Wenruo 	rbio->bioc = bioc;
95253b381b3SDavid Woodhouse 	rbio->nr_pages = num_pages;
95394efbe19SQu Wenruo 	rbio->nr_sectors = num_sectors;
9542c8cdd6eSMiao Xie 	rbio->real_stripes = real_stripes;
9555a6ac9eaSMiao Xie 	rbio->stripe_npages = stripe_npages;
95694efbe19SQu Wenruo 	rbio->stripe_nsectors = stripe_nsectors;
957dec95574SElena Reshetova 	refcount_set(&rbio->refs, 1);
958b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, 0);
95953b381b3SDavid Woodhouse 
9600b30f719SQu Wenruo 	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
9610b30f719SQu Wenruo 	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);
96253b381b3SDavid Woodhouse 
96353b381b3SDavid Woodhouse 	return rbio;
96453b381b3SDavid Woodhouse }
96553b381b3SDavid Woodhouse 
96653b381b3SDavid Woodhouse /* allocate pages for all the stripes in the bio, including parity */
96753b381b3SDavid Woodhouse static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
96853b381b3SDavid Woodhouse {
969eb357060SQu Wenruo 	int ret;
970eb357060SQu Wenruo 
971eb357060SQu Wenruo 	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
972eb357060SQu Wenruo 	if (ret < 0)
973eb357060SQu Wenruo 		return ret;
974eb357060SQu Wenruo 	/* Mapping all sectors */
975eb357060SQu Wenruo 	index_stripe_sectors(rbio);
976eb357060SQu Wenruo 	return 0;
97753b381b3SDavid Woodhouse }
97853b381b3SDavid Woodhouse 
979b7178a5fSZhao Lei /* only allocate pages for p/q stripes */
98053b381b3SDavid Woodhouse static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
98153b381b3SDavid Woodhouse {
982f77183dcSQu Wenruo 	const int data_pages = rbio->nr_data * rbio->stripe_npages;
983eb357060SQu Wenruo 	int ret;
98453b381b3SDavid Woodhouse 
985eb357060SQu Wenruo 	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
986dd137dd1SSweet Tea Dorminy 				     rbio->stripe_pages + data_pages);
987eb357060SQu Wenruo 	if (ret < 0)
988eb357060SQu Wenruo 		return ret;
989eb357060SQu Wenruo 
990eb357060SQu Wenruo 	index_stripe_sectors(rbio);
991eb357060SQu Wenruo 	return 0;
99253b381b3SDavid Woodhouse }
99353b381b3SDavid Woodhouse 
99453b381b3SDavid Woodhouse /*
99567da05b3SColin Ian King  * Return the total number of errors found in the vertical stripe of @sector_nr.
99675b47033SQu Wenruo  *
99775b47033SQu Wenruo  * @faila and @failb will also be updated to the first and second stripe
99875b47033SQu Wenruo  * number of the errors.
99975b47033SQu Wenruo  */
100075b47033SQu Wenruo static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
100175b47033SQu Wenruo 				     int *faila, int *failb)
100275b47033SQu Wenruo {
100375b47033SQu Wenruo 	int stripe_nr;
100475b47033SQu Wenruo 	int found_errors = 0;
100575b47033SQu Wenruo 
1006ad3daf1cSQu Wenruo 	if (faila || failb) {
1007ad3daf1cSQu Wenruo 		/*
1008ad3daf1cSQu Wenruo 		 * Both @faila and @failb should be valid pointers if any of
1009ad3daf1cSQu Wenruo 		 * them is specified.
1010ad3daf1cSQu Wenruo 		 */
101175b47033SQu Wenruo 		ASSERT(faila && failb);
101275b47033SQu Wenruo 		*faila = -1;
101375b47033SQu Wenruo 		*failb = -1;
1014ad3daf1cSQu Wenruo 	}
101575b47033SQu Wenruo 
101675b47033SQu Wenruo 	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
101775b47033SQu Wenruo 		int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;
101875b47033SQu Wenruo 
101975b47033SQu Wenruo 		if (test_bit(total_sector_nr, rbio->error_bitmap)) {
102075b47033SQu Wenruo 			found_errors++;
1021ad3daf1cSQu Wenruo 			if (faila) {
1022ad3daf1cSQu Wenruo 				/* Update faila and failb. */
102375b47033SQu Wenruo 				if (*faila < 0)
102475b47033SQu Wenruo 					*faila = stripe_nr;
102575b47033SQu Wenruo 				else if (*failb < 0)
102675b47033SQu Wenruo 					*failb = stripe_nr;
102775b47033SQu Wenruo 			}
102875b47033SQu Wenruo 		}
1029ad3daf1cSQu Wenruo 	}
103075b47033SQu Wenruo 	return found_errors;
103175b47033SQu Wenruo }
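
/*
 * Example of how callers use the count: max_errors reflects the parity
 * redundancy (1 for RAID5, 2 for RAID6), so a vertical stripe with
 * found_errors > rbio->bioc->max_errors has lost more stripes than the
 * parity can rebuild, and e.g. rbio_add_io_sector() fails it with -EIO.
 */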
103275b47033SQu Wenruo 
103375b47033SQu Wenruo /*
10343e77605dSQu Wenruo  * Add a single sector @sector into our list of bios for IO.
10353e77605dSQu Wenruo  *
10363e77605dSQu Wenruo  * Return 0 if everything went well.
10373e77605dSQu Wenruo  * Return <0 for error.
103853b381b3SDavid Woodhouse  */
10393e77605dSQu Wenruo static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
104053b381b3SDavid Woodhouse 			      struct bio_list *bio_list,
10413e77605dSQu Wenruo 			      struct sector_ptr *sector,
10423e77605dSQu Wenruo 			      unsigned int stripe_nr,
10433e77605dSQu Wenruo 			      unsigned int sector_nr,
1044bf9486d6SBart Van Assche 			      enum req_op op)
104553b381b3SDavid Woodhouse {
10463e77605dSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
104753b381b3SDavid Woodhouse 	struct bio *last = bio_list->tail;
104853b381b3SDavid Woodhouse 	int ret;
104953b381b3SDavid Woodhouse 	struct bio *bio;
10504c664611SQu Wenruo 	struct btrfs_io_stripe *stripe;
105153b381b3SDavid Woodhouse 	u64 disk_start;
105253b381b3SDavid Woodhouse 
10533e77605dSQu Wenruo 	/*
10543e77605dSQu Wenruo 	 * Note: here stripe_nr has taken device replace into consideration,
10553e77605dSQu Wenruo 	 * thus it can be larger than rbio->real_stripes.
10563e77605dSQu Wenruo 	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
10573e77605dSQu Wenruo 	 */
10583e77605dSQu Wenruo 	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
10593e77605dSQu Wenruo 	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
10603e77605dSQu Wenruo 	ASSERT(sector->page);
10613e77605dSQu Wenruo 
10624c664611SQu Wenruo 	stripe = &rbio->bioc->stripes[stripe_nr];
10633e77605dSQu Wenruo 	disk_start = stripe->physical + sector_nr * sectorsize;
106453b381b3SDavid Woodhouse 
106553b381b3SDavid Woodhouse 	/* if the device is missing, just fail this stripe */
10662942a50dSQu Wenruo 	if (!stripe->dev->bdev) {
1067ad3daf1cSQu Wenruo 		int found_errors;
1068ad3daf1cSQu Wenruo 
10692942a50dSQu Wenruo 		set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
10702942a50dSQu Wenruo 			rbio->error_bitmap);
1071ad3daf1cSQu Wenruo 
1072ad3daf1cSQu Wenruo 		/* Check if we have reached tolerance early. */
1073ad3daf1cSQu Wenruo 		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
1074ad3daf1cSQu Wenruo 							 NULL, NULL);
1075ad3daf1cSQu Wenruo 		if (found_errors > rbio->bioc->max_errors)
1076ad3daf1cSQu Wenruo 			return -EIO;
1077ad3daf1cSQu Wenruo 		return 0;
10782942a50dSQu Wenruo 	}
107953b381b3SDavid Woodhouse 
108053b381b3SDavid Woodhouse 	/* see if we can add this page onto our existing bio */
108153b381b3SDavid Woodhouse 	if (last) {
10821201b58bSDavid Sterba 		u64 last_end = last->bi_iter.bi_sector << 9;
10834f024f37SKent Overstreet 		last_end += last->bi_iter.bi_size;
108453b381b3SDavid Woodhouse 
108553b381b3SDavid Woodhouse 		/*
108653b381b3SDavid Woodhouse 		 * we can't merge these if they are from different
108753b381b3SDavid Woodhouse 		 * devices or if they are not contiguous
108853b381b3SDavid Woodhouse 		 */
1089f90ae76aSNikolay Borisov 		if (last_end == disk_start && !last->bi_status &&
1090309dca30SChristoph Hellwig 		    last->bi_bdev == stripe->dev->bdev) {
10913e77605dSQu Wenruo 			ret = bio_add_page(last, sector->page, sectorsize,
10923e77605dSQu Wenruo 					   sector->pgoff);
10933e77605dSQu Wenruo 			if (ret == sectorsize)
109453b381b3SDavid Woodhouse 				return 0;
109553b381b3SDavid Woodhouse 		}
109653b381b3SDavid Woodhouse 	}
109753b381b3SDavid Woodhouse 
109853b381b3SDavid Woodhouse 	/* put a new bio on the list */
1099ff18a4afSChristoph Hellwig 	bio = bio_alloc(stripe->dev->bdev,
1100ff18a4afSChristoph Hellwig 			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
1101bf9486d6SBart Van Assche 			op, GFP_NOFS);
11024f024f37SKent Overstreet 	bio->bi_iter.bi_sector = disk_start >> 9;
1103e01bf588SChristoph Hellwig 	bio->bi_private = rbio;
110453b381b3SDavid Woodhouse 
11053e77605dSQu Wenruo 	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
110653b381b3SDavid Woodhouse 	bio_list_add(bio_list, bio);
110753b381b3SDavid Woodhouse 	return 0;
110853b381b3SDavid Woodhouse }
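
/*
 * Illustration (not part of the original file, hypothetical numbers): with a
 * 4K sectorsize, a stripe at physical 1 MiB and sector_nr 2, the target above
 * is disk_start = 1048576 + 2 * 4096 = 1056768.  The tail bio can absorb the
 * sector only when it ends at exactly that byte on the same device:
 *
 *	u64 last_end = (last->bi_iter.bi_sector << 9) + last->bi_iter.bi_size;
 *	bool can_merge = last_end == disk_start && !last->bi_status &&
 *			 last->bi_bdev == stripe->dev->bdev;
 *
 * otherwise a fresh bio sized for one full stripe is allocated.
 */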
110953b381b3SDavid Woodhouse 
111000425dd9SQu Wenruo static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
111100425dd9SQu Wenruo {
111200425dd9SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
111300425dd9SQu Wenruo 	struct bio_vec bvec;
111400425dd9SQu Wenruo 	struct bvec_iter iter;
111500425dd9SQu Wenruo 	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
111600425dd9SQu Wenruo 		     rbio->bioc->raid_map[0];
111700425dd9SQu Wenruo 
111800425dd9SQu Wenruo 	bio_for_each_segment(bvec, bio, iter) {
111900425dd9SQu Wenruo 		u32 bvec_offset;
112000425dd9SQu Wenruo 
112100425dd9SQu Wenruo 		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
112200425dd9SQu Wenruo 		     bvec_offset += sectorsize, offset += sectorsize) {
112300425dd9SQu Wenruo 			int index = offset / sectorsize;
112400425dd9SQu Wenruo 			struct sector_ptr *sector = &rbio->bio_sectors[index];
112500425dd9SQu Wenruo 
112600425dd9SQu Wenruo 			sector->page = bvec.bv_page;
112700425dd9SQu Wenruo 			sector->pgoff = bvec.bv_offset + bvec_offset;
112800425dd9SQu Wenruo 			ASSERT(sector->pgoff < PAGE_SIZE);
112900425dd9SQu Wenruo 		}
113000425dd9SQu Wenruo 	}
113100425dd9SQu Wenruo }
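
/*
 * Worked example (illustration only, hypothetical values): with
 * sectorsize = 4096 and raid_map[0] = 1048576, a bio starting at logical byte
 * 1060864 begins at offset 12288, so its first sector is indexed at
 * bio_sectors[12288 / 4096] = bio_sectors[3]; every further sectorsize chunk
 * of each bvec fills the next index.
 */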
113200425dd9SQu Wenruo 
113353b381b3SDavid Woodhouse /*
113453b381b3SDavid Woodhouse  * helper function to walk our bio list and populate the bio_sectors array
113553b381b3SDavid Woodhouse  * with the result.  This seems expensive, but it is faster than constantly
113653b381b3SDavid Woodhouse  * searching through the bio list as we set up the IO in the RMW path or
113753b381b3SDavid Woodhouse  * stripe reconstruction.
113853b381b3SDavid Woodhouse  *
113953b381b3SDavid Woodhouse  * This must be called before you trust the answers from page_in_rbio.
114053b381b3SDavid Woodhouse  */
114153b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio)
114253b381b3SDavid Woodhouse {
114353b381b3SDavid Woodhouse 	struct bio *bio;
114453b381b3SDavid Woodhouse 
114574cc3600SChristoph Hellwig 	spin_lock(&rbio->bio_list_lock);
114600425dd9SQu Wenruo 	bio_list_for_each(bio, &rbio->bio_list)
114700425dd9SQu Wenruo 		index_one_bio(rbio, bio);
114800425dd9SQu Wenruo 
114974cc3600SChristoph Hellwig 	spin_unlock(&rbio->bio_list_lock);
115053b381b3SDavid Woodhouse }
115153b381b3SDavid Woodhouse 
1152b8bea09aSQu Wenruo static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
1153b8bea09aSQu Wenruo 			       struct raid56_bio_trace_info *trace_info)
1154b8bea09aSQu Wenruo {
1155b8bea09aSQu Wenruo 	const struct btrfs_io_context *bioc = rbio->bioc;
1156b8bea09aSQu Wenruo 	int i;
1157b8bea09aSQu Wenruo 
1158b8bea09aSQu Wenruo 	ASSERT(bioc);
1159b8bea09aSQu Wenruo 
1160b8bea09aSQu Wenruo 	/* We rely on bio->bi_bdev to find the stripe number. */
1161b8bea09aSQu Wenruo 	if (!bio->bi_bdev)
1162b8bea09aSQu Wenruo 		goto not_found;
1163b8bea09aSQu Wenruo 
1164b8bea09aSQu Wenruo 	for (i = 0; i < bioc->num_stripes; i++) {
1165b8bea09aSQu Wenruo 		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
1166b8bea09aSQu Wenruo 			continue;
1167b8bea09aSQu Wenruo 		trace_info->stripe_nr = i;
1168b8bea09aSQu Wenruo 		trace_info->devid = bioc->stripes[i].dev->devid;
1169b8bea09aSQu Wenruo 		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1170b8bea09aSQu Wenruo 				     bioc->stripes[i].physical;
1171b8bea09aSQu Wenruo 		return;
1172b8bea09aSQu Wenruo 	}
1173b8bea09aSQu Wenruo 
1174b8bea09aSQu Wenruo not_found:
1175b8bea09aSQu Wenruo 	trace_info->devid = -1;
1176b8bea09aSQu Wenruo 	trace_info->offset = -1;
1177b8bea09aSQu Wenruo 	trace_info->stripe_nr = -1;
1178b8bea09aSQu Wenruo }
1179b8bea09aSQu Wenruo 
1180801fcfc5SChristoph Hellwig static inline void bio_list_put(struct bio_list *bio_list)
1181801fcfc5SChristoph Hellwig {
1182801fcfc5SChristoph Hellwig 	struct bio *bio;
1183801fcfc5SChristoph Hellwig 
1184801fcfc5SChristoph Hellwig 	while ((bio = bio_list_pop(bio_list)))
1185801fcfc5SChristoph Hellwig 		bio_put(bio);
1186801fcfc5SChristoph Hellwig }
1187801fcfc5SChristoph Hellwig 
118867da05b3SColin Ian King /* Generate PQ for one vertical stripe. */
118930e3c897SQu Wenruo static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
119030e3c897SQu Wenruo {
119130e3c897SQu Wenruo 	void **pointers = rbio->finish_pointers;
119230e3c897SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
119330e3c897SQu Wenruo 	struct sector_ptr *sector;
119430e3c897SQu Wenruo 	int stripe;
119530e3c897SQu Wenruo 	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;
119630e3c897SQu Wenruo 
119730e3c897SQu Wenruo 	/* First collect one sector from each data stripe */
119830e3c897SQu Wenruo 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
119930e3c897SQu Wenruo 		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
120030e3c897SQu Wenruo 		pointers[stripe] = kmap_local_page(sector->page) +
120130e3c897SQu Wenruo 				   sector->pgoff;
120230e3c897SQu Wenruo 	}
120330e3c897SQu Wenruo 
120430e3c897SQu Wenruo 	/* Then add the parity stripe */
120530e3c897SQu Wenruo 	sector = rbio_pstripe_sector(rbio, sectornr);
120630e3c897SQu Wenruo 	sector->uptodate = 1;
120730e3c897SQu Wenruo 	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;
120830e3c897SQu Wenruo 
120930e3c897SQu Wenruo 	if (has_qstripe) {
121030e3c897SQu Wenruo 		/*
121130e3c897SQu Wenruo 		 * RAID6, add the qstripe and call the library function
121230e3c897SQu Wenruo 		 * to fill in our p/q
121330e3c897SQu Wenruo 		 */
121430e3c897SQu Wenruo 		sector = rbio_qstripe_sector(rbio, sectornr);
121530e3c897SQu Wenruo 		sector->uptodate = 1;
121630e3c897SQu Wenruo 		pointers[stripe++] = kmap_local_page(sector->page) +
121730e3c897SQu Wenruo 				     sector->pgoff;
121830e3c897SQu Wenruo 
121930e3c897SQu Wenruo 		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
122030e3c897SQu Wenruo 					pointers);
122130e3c897SQu Wenruo 	} else {
122230e3c897SQu Wenruo 		/* raid5 */
122330e3c897SQu Wenruo 		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
122430e3c897SQu Wenruo 		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
122530e3c897SQu Wenruo 	}
122630e3c897SQu Wenruo 	for (stripe = stripe - 1; stripe >= 0; stripe--)
122730e3c897SQu Wenruo 		kunmap_local(pointers[stripe]);
122830e3c897SQu Wenruo }
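
/*
 * Sketch of the RAID5 branch above (illustration only, hypothetical byte
 * values): parity is the XOR of one sector from every data stripe.  With
 * nr_data = 3 and the first data bytes 0x01, 0x02 and 0x04, the matching
 * parity byte is 0x01 ^ 0x02 ^ 0x04 = 0x07.  The memcpy() seeds the parity
 * buffer with data stripe 0, then run_xor() folds in the remaining
 * nr_data - 1 stripes, which computes exactly that.
 */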
122930e3c897SQu Wenruo 
12306486d21cSQu Wenruo static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
12316486d21cSQu Wenruo 				   struct bio_list *bio_list)
12326486d21cSQu Wenruo {
12336486d21cSQu Wenruo 	/* The total sector number inside the full stripe. */
12346486d21cSQu Wenruo 	int total_sector_nr;
12356486d21cSQu Wenruo 	int sectornr;
12366486d21cSQu Wenruo 	int stripe;
12376486d21cSQu Wenruo 	int ret;
12386486d21cSQu Wenruo 
12396486d21cSQu Wenruo 	ASSERT(bio_list_size(bio_list) == 0);
12406486d21cSQu Wenruo 
12416486d21cSQu Wenruo 	/* We should have at least one data sector. */
12426486d21cSQu Wenruo 	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));
12436486d21cSQu Wenruo 
12446486d21cSQu Wenruo 	/*
12455eb30ee2SQu Wenruo 	 * Reset errors, as we may have errors inherited from degraded
12465eb30ee2SQu Wenruo 	 * write.
12475eb30ee2SQu Wenruo 	 */
12482942a50dSQu Wenruo 	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
12495eb30ee2SQu Wenruo 
12505eb30ee2SQu Wenruo 	/*
12516486d21cSQu Wenruo 	 * Start assembly.  Make bios for everything from the higher layers (the
12526486d21cSQu Wenruo 	 * bio_list in our rbio) and our P/Q.  Ignore everything else.
12536486d21cSQu Wenruo 	 */
12546486d21cSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
12556486d21cSQu Wenruo 	     total_sector_nr++) {
12566486d21cSQu Wenruo 		struct sector_ptr *sector;
12576486d21cSQu Wenruo 
12586486d21cSQu Wenruo 		stripe = total_sector_nr / rbio->stripe_nsectors;
12596486d21cSQu Wenruo 		sectornr = total_sector_nr % rbio->stripe_nsectors;
12606486d21cSQu Wenruo 
12616486d21cSQu Wenruo 		/* This vertical stripe has no data, skip it. */
12626486d21cSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
12636486d21cSQu Wenruo 			continue;
12646486d21cSQu Wenruo 
12656486d21cSQu Wenruo 		if (stripe < rbio->nr_data) {
12666486d21cSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
12676486d21cSQu Wenruo 			if (!sector)
12686486d21cSQu Wenruo 				continue;
12696486d21cSQu Wenruo 		} else {
12706486d21cSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
12716486d21cSQu Wenruo 		}
12726486d21cSQu Wenruo 
12736486d21cSQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
12746486d21cSQu Wenruo 					 sectornr, REQ_OP_WRITE);
12756486d21cSQu Wenruo 		if (ret)
12766486d21cSQu Wenruo 			goto error;
12776486d21cSQu Wenruo 	}
12786486d21cSQu Wenruo 
1279*1faf3885SQu Wenruo 	if (likely(!rbio->bioc->replace_nr_stripes))
12806486d21cSQu Wenruo 		return 0;
12816486d21cSQu Wenruo 
1282*1faf3885SQu Wenruo 	/*
1283*1faf3885SQu Wenruo 	 * Make a copy for the replace target device.
1284*1faf3885SQu Wenruo 	 *
1285*1faf3885SQu Wenruo 	 * Thus the source stripe number (in replace_stripe_src) should be valid.
1286*1faf3885SQu Wenruo 	 */
1287*1faf3885SQu Wenruo 	ASSERT(rbio->bioc->replace_stripe_src >= 0);
1288*1faf3885SQu Wenruo 
12896486d21cSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
12906486d21cSQu Wenruo 	     total_sector_nr++) {
12916486d21cSQu Wenruo 		struct sector_ptr *sector;
12926486d21cSQu Wenruo 
12936486d21cSQu Wenruo 		stripe = total_sector_nr / rbio->stripe_nsectors;
12946486d21cSQu Wenruo 		sectornr = total_sector_nr % rbio->stripe_nsectors;
12956486d21cSQu Wenruo 
1296*1faf3885SQu Wenruo 		/*
1297*1faf3885SQu Wenruo 		 * For RAID56, there is only one device that can be replaced,
1298*1faf3885SQu Wenruo 		 * and replace_stripe_src[0] indicates the stripe number we
1299*1faf3885SQu Wenruo 		 * need to copy from.
1300*1faf3885SQu Wenruo 		 */
1301*1faf3885SQu Wenruo 		if (stripe != rbio->bioc->replace_stripe_src) {
13026486d21cSQu Wenruo 			/*
13036486d21cSQu Wenruo 			 * We can skip the whole stripe completely, note
13046486d21cSQu Wenruo 			 * total_sector_nr will be increased by one anyway.
13056486d21cSQu Wenruo 			 */
13066486d21cSQu Wenruo 			ASSERT(sectornr == 0);
13076486d21cSQu Wenruo 			total_sector_nr += rbio->stripe_nsectors - 1;
13086486d21cSQu Wenruo 			continue;
13096486d21cSQu Wenruo 		}
13106486d21cSQu Wenruo 
13116486d21cSQu Wenruo 		/* This vertical stripe has no data, skip it. */
13126486d21cSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
13136486d21cSQu Wenruo 			continue;
13146486d21cSQu Wenruo 
13156486d21cSQu Wenruo 		if (stripe < rbio->nr_data) {
13166486d21cSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
13176486d21cSQu Wenruo 			if (!sector)
13186486d21cSQu Wenruo 				continue;
13196486d21cSQu Wenruo 		} else {
13206486d21cSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
13216486d21cSQu Wenruo 		}
13226486d21cSQu Wenruo 
13236486d21cSQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector,
1324*1faf3885SQu Wenruo 					 rbio->real_stripes,
13256486d21cSQu Wenruo 					 sectornr, REQ_OP_WRITE);
13266486d21cSQu Wenruo 		if (ret)
13276486d21cSQu Wenruo 			goto error;
13286486d21cSQu Wenruo 	}
13296486d21cSQu Wenruo 
13306486d21cSQu Wenruo 	return 0;
13316486d21cSQu Wenruo error:
1332801fcfc5SChristoph Hellwig 	bio_list_put(bio_list);
13336486d21cSQu Wenruo 	return -EIO;
13346486d21cSQu Wenruo }
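
/*
 * Illustration (hypothetical numbers): with stripe_nsectors = 16, the linear
 * iteration above maps total_sector_nr = 35 to stripe = 35 / 16 = 2 and
 * sectornr = 35 % 16 = 3, i.e. the fourth sector of the third stripe.  This
 * layout is also why a stripe that is not the replace source can be skipped
 * wholesale by adding stripe_nsectors - 1 to total_sector_nr.
 */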
13356486d21cSQu Wenruo 
13362942a50dSQu Wenruo static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
13372942a50dSQu Wenruo {
13382942a50dSQu Wenruo 	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
13392942a50dSQu Wenruo 	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
13402942a50dSQu Wenruo 		     rbio->bioc->raid_map[0];
13412942a50dSQu Wenruo 	int total_nr_sector = offset >> fs_info->sectorsize_bits;
13422942a50dSQu Wenruo 
13432942a50dSQu Wenruo 	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);
13442942a50dSQu Wenruo 
13452942a50dSQu Wenruo 	bitmap_set(rbio->error_bitmap, total_nr_sector,
13462942a50dSQu Wenruo 		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
13472942a50dSQu Wenruo 
13482942a50dSQu Wenruo 	/*
13492942a50dSQu Wenruo 	 * Special handling for raid56_alloc_missing_rbio() used by
13502942a50dSQu Wenruo 	 * scrub/replace.  Unlike the call path in raid56_parity_recover(), they
13512942a50dSQu Wenruo 	 * pass an empty bio here.  Thus we have to find out the missing device
13522942a50dSQu Wenruo 	 * and mark the stripe error instead.
13532942a50dSQu Wenruo 	 */
13542942a50dSQu Wenruo 	if (bio->bi_iter.bi_size == 0) {
13552942a50dSQu Wenruo 		bool found_missing = false;
13562942a50dSQu Wenruo 		int stripe_nr;
13572942a50dSQu Wenruo 
13582942a50dSQu Wenruo 		for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
13592942a50dSQu Wenruo 			if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
13602942a50dSQu Wenruo 				found_missing = true;
13612942a50dSQu Wenruo 				bitmap_set(rbio->error_bitmap,
13622942a50dSQu Wenruo 					   stripe_nr * rbio->stripe_nsectors,
13632942a50dSQu Wenruo 					   rbio->stripe_nsectors);
13642942a50dSQu Wenruo 			}
13652942a50dSQu Wenruo 		}
13662942a50dSQu Wenruo 		ASSERT(found_missing);
13672942a50dSQu Wenruo 	}
13682942a50dSQu Wenruo }
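
/*
 * Worked example (illustration only, hypothetical values): with
 * sectorsize_bits = 12 and raid_map[0] at 1 MiB, a failed 8K bio at logical
 * 1 MiB + 16K sets bits starting at 16384 >> 12 = 4 for 8192 >> 12 = 2
 * sectors, i.e. error_bitmap bits 4 and 5.
 */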
13692942a50dSQu Wenruo 
137053b381b3SDavid Woodhouse /*
137167da05b3SColin Ian King  * For the subpage case, we can no longer set a page Up-to-date directly for
13725fdb7afcSQu Wenruo  * stripe_pages[], thus we need to locate the sector.
13735fdb7afcSQu Wenruo  */
13745fdb7afcSQu Wenruo static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
13755fdb7afcSQu Wenruo 					     struct page *page,
13765fdb7afcSQu Wenruo 					     unsigned int pgoff)
13775fdb7afcSQu Wenruo {
13785fdb7afcSQu Wenruo 	int i;
13795fdb7afcSQu Wenruo 
13805fdb7afcSQu Wenruo 	for (i = 0; i < rbio->nr_sectors; i++) {
13815fdb7afcSQu Wenruo 		struct sector_ptr *sector = &rbio->stripe_sectors[i];
13825fdb7afcSQu Wenruo 
13835fdb7afcSQu Wenruo 		if (sector->page == page && sector->pgoff == pgoff)
13845fdb7afcSQu Wenruo 			return sector;
13855fdb7afcSQu Wenruo 	}
13865fdb7afcSQu Wenruo 	return NULL;
13875fdb7afcSQu Wenruo }
13885fdb7afcSQu Wenruo 
13895fdb7afcSQu Wenruo /*
139053b381b3SDavid Woodhouse  * this sets each page in the bio uptodate.  It should only be used on private
139153b381b3SDavid Woodhouse  * rbio pages, nothing that comes in from the higher layers
139253b381b3SDavid Woodhouse  */
13935fdb7afcSQu Wenruo static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
139453b381b3SDavid Woodhouse {
13955fdb7afcSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
13960198e5b7SLiu Bo 	struct bio_vec *bvec;
13976dc4f100SMing Lei 	struct bvec_iter_all iter_all;
139853b381b3SDavid Woodhouse 
13990198e5b7SLiu Bo 	ASSERT(!bio_flagged(bio, BIO_CLONED));
14006592e58cSFilipe Manana 
14015fdb7afcSQu Wenruo 	bio_for_each_segment_all(bvec, bio, iter_all) {
14025fdb7afcSQu Wenruo 		struct sector_ptr *sector;
14035fdb7afcSQu Wenruo 		int pgoff;
14045fdb7afcSQu Wenruo 
14055fdb7afcSQu Wenruo 		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
14065fdb7afcSQu Wenruo 		     pgoff += sectorsize) {
14075fdb7afcSQu Wenruo 			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
14085fdb7afcSQu Wenruo 			ASSERT(sector);
14095fdb7afcSQu Wenruo 			if (sector)
14105fdb7afcSQu Wenruo 				sector->uptodate = 1;
14115fdb7afcSQu Wenruo 		}
14125fdb7afcSQu Wenruo 	}
141353b381b3SDavid Woodhouse }
141453b381b3SDavid Woodhouse 
14152942a50dSQu Wenruo static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
14162942a50dSQu Wenruo {
14172942a50dSQu Wenruo 	struct bio_vec *bv = bio_first_bvec_all(bio);
14182942a50dSQu Wenruo 	int i;
14192942a50dSQu Wenruo 
14202942a50dSQu Wenruo 	for (i = 0; i < rbio->nr_sectors; i++) {
14212942a50dSQu Wenruo 		struct sector_ptr *sector;
14222942a50dSQu Wenruo 
14232942a50dSQu Wenruo 		sector = &rbio->stripe_sectors[i];
14242942a50dSQu Wenruo 		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
14252942a50dSQu Wenruo 			break;
14262942a50dSQu Wenruo 		sector = &rbio->bio_sectors[i];
14272942a50dSQu Wenruo 		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
14282942a50dSQu Wenruo 			break;
14292942a50dSQu Wenruo 	}
14302942a50dSQu Wenruo 	ASSERT(i < rbio->nr_sectors);
14312942a50dSQu Wenruo 	return i;
14322942a50dSQu Wenruo }
14332942a50dSQu Wenruo 
14342942a50dSQu Wenruo static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
14352942a50dSQu Wenruo {
14362942a50dSQu Wenruo 	int total_sector_nr = get_bio_sector_nr(rbio, bio);
14372942a50dSQu Wenruo 	u32 bio_size = 0;
14382942a50dSQu Wenruo 	struct bio_vec *bvec;
1439a9ad4d87SQu Wenruo 	int i;
14402942a50dSQu Wenruo 
1441c9a43aafSQu Wenruo 	bio_for_each_bvec_all(bvec, bio, i)
14422942a50dSQu Wenruo 		bio_size += bvec->bv_len;
14432942a50dSQu Wenruo 
1444a9ad4d87SQu Wenruo 	/*
1445a9ad4d87SQu Wenruo 	 * Since we can have multiple bios touching the error_bitmap, we cannot
1446a9ad4d87SQu Wenruo 	 * call bitmap_set() without protection.
1447a9ad4d87SQu Wenruo 	 *
1448a9ad4d87SQu Wenruo 	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
1449a9ad4d87SQu Wenruo 	 */
1450a9ad4d87SQu Wenruo 	for (i = total_sector_nr; i < total_sector_nr +
1451a9ad4d87SQu Wenruo 	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
1452a9ad4d87SQu Wenruo 		set_bit(i, rbio->error_bitmap);
14532942a50dSQu Wenruo }
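
/*
 * Note (illustration only): the per-bit loop above is the atomic equivalent
 * of:
 *
 *	bitmap_set(rbio->error_bitmap, total_sector_nr,
 *		   bio_size >> sectorsize_bits);
 *
 * bitmap_set() does non-atomic read-modify-write on whole words, so two bios
 * completing on different CPUs could lose updates to bits that share an
 * unsigned long; per-bit set_bit() cannot.
 */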
14542942a50dSQu Wenruo 
14557a315072SQu Wenruo /* Verify the data sectors at read time. */
14567a315072SQu Wenruo static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
14577a315072SQu Wenruo 				    struct bio *bio)
14587a315072SQu Wenruo {
14597a315072SQu Wenruo 	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
14607a315072SQu Wenruo 	int total_sector_nr = get_bio_sector_nr(rbio, bio);
14617a315072SQu Wenruo 	struct bio_vec *bvec;
14627a315072SQu Wenruo 	struct bvec_iter_all iter_all;
14637a315072SQu Wenruo 
14647a315072SQu Wenruo 	/* No data csum for the whole stripe, no need to verify. */
14657a315072SQu Wenruo 	if (!rbio->csum_bitmap || !rbio->csum_buf)
14667a315072SQu Wenruo 		return;
14677a315072SQu Wenruo 
14687a315072SQu Wenruo 	/* P/Q stripes, they have no data csum to verify against. */
14697a315072SQu Wenruo 	if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
14707a315072SQu Wenruo 		return;
14717a315072SQu Wenruo 
14727a315072SQu Wenruo 	bio_for_each_segment_all(bvec, bio, iter_all) {
14737a315072SQu Wenruo 		int bv_offset;
14747a315072SQu Wenruo 
14757a315072SQu Wenruo 		for (bv_offset = bvec->bv_offset;
14767a315072SQu Wenruo 		     bv_offset < bvec->bv_offset + bvec->bv_len;
14777a315072SQu Wenruo 		     bv_offset += fs_info->sectorsize, total_sector_nr++) {
14787a315072SQu Wenruo 			u8 csum_buf[BTRFS_CSUM_SIZE];
14797a315072SQu Wenruo 			u8 *expected_csum = rbio->csum_buf +
14807a315072SQu Wenruo 					    total_sector_nr * fs_info->csum_size;
14817a315072SQu Wenruo 			int ret;
14827a315072SQu Wenruo 
14837a315072SQu Wenruo 			/* No csum for this sector, skip to the next sector. */
14847a315072SQu Wenruo 			if (!test_bit(total_sector_nr, rbio->csum_bitmap))
14857a315072SQu Wenruo 				continue;
14867a315072SQu Wenruo 
14877a315072SQu Wenruo 			ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
14887a315072SQu Wenruo 				bv_offset, csum_buf, expected_csum);
14897a315072SQu Wenruo 			if (ret < 0)
14907a315072SQu Wenruo 				set_bit(total_sector_nr, rbio->error_bitmap);
14917a315072SQu Wenruo 		}
14927a315072SQu Wenruo 	}
14937a315072SQu Wenruo }
14947a315072SQu Wenruo 
1495d817ce35SQu Wenruo static void raid_wait_read_end_io(struct bio *bio)
1496d817ce35SQu Wenruo {
1497d817ce35SQu Wenruo 	struct btrfs_raid_bio *rbio = bio->bi_private;
1498d817ce35SQu Wenruo 
14997a315072SQu Wenruo 	if (bio->bi_status) {
15002942a50dSQu Wenruo 		rbio_update_error_bitmap(rbio, bio);
15017a315072SQu Wenruo 	} else {
1502d817ce35SQu Wenruo 		set_bio_pages_uptodate(rbio, bio);
15037a315072SQu Wenruo 		verify_bio_data_sectors(rbio, bio);
15047a315072SQu Wenruo 	}
1505d817ce35SQu Wenruo 
1506d817ce35SQu Wenruo 	bio_put(bio);
1507d817ce35SQu Wenruo 	if (atomic_dec_and_test(&rbio->stripes_pending))
1508d817ce35SQu Wenruo 		wake_up(&rbio->io_wait);
1509d817ce35SQu Wenruo }
1510d817ce35SQu Wenruo 
15111c76fb7bSChristoph Hellwig static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
1512d817ce35SQu Wenruo 			     struct bio_list *bio_list)
1513d817ce35SQu Wenruo {
1514d817ce35SQu Wenruo 	struct bio *bio;
1515d817ce35SQu Wenruo 
1516d817ce35SQu Wenruo 	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
1517d817ce35SQu Wenruo 	while ((bio = bio_list_pop(bio_list))) {
1518d817ce35SQu Wenruo 		bio->bi_end_io = raid_wait_read_end_io;
1519d817ce35SQu Wenruo 
1520d817ce35SQu Wenruo 		if (trace_raid56_scrub_read_recover_enabled()) {
1521d817ce35SQu Wenruo 			struct raid56_bio_trace_info trace_info = { 0 };
1522d817ce35SQu Wenruo 
1523d817ce35SQu Wenruo 			bio_get_trace_info(rbio, bio, &trace_info);
1524d817ce35SQu Wenruo 			trace_raid56_scrub_read_recover(rbio, bio, &trace_info);
1525d817ce35SQu Wenruo 		}
1526d817ce35SQu Wenruo 		submit_bio(bio);
1527d817ce35SQu Wenruo 	}
15281c76fb7bSChristoph Hellwig 
15291c76fb7bSChristoph Hellwig 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
1530d817ce35SQu Wenruo }
1531d817ce35SQu Wenruo 
15325eb30ee2SQu Wenruo static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
15335eb30ee2SQu Wenruo {
15345eb30ee2SQu Wenruo 	const int data_pages = rbio->nr_data * rbio->stripe_npages;
15355eb30ee2SQu Wenruo 	int ret;
15365eb30ee2SQu Wenruo 
15375eb30ee2SQu Wenruo 	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages);
15385eb30ee2SQu Wenruo 	if (ret < 0)
15395eb30ee2SQu Wenruo 		return ret;
15405eb30ee2SQu Wenruo 
15415eb30ee2SQu Wenruo 	index_stripe_sectors(rbio);
15425eb30ee2SQu Wenruo 	return 0;
15435eb30ee2SQu Wenruo }
15445eb30ee2SQu Wenruo 
1545509c27aaSQu Wenruo /*
15466ac0f488SChris Mason  * We use plugging callbacks to collect full stripes.
15476ac0f488SChris Mason  * Any time we get a partial stripe write while plugged
15486ac0f488SChris Mason  * we collect it into a list.  When the unplug comes down,
15496ac0f488SChris Mason  * we sort the list by logical block number and merge
15506ac0f488SChris Mason  * everything we can into the same rbios
15516ac0f488SChris Mason  */
15526ac0f488SChris Mason struct btrfs_plug_cb {
15536ac0f488SChris Mason 	struct blk_plug_cb cb;
15546ac0f488SChris Mason 	struct btrfs_fs_info *info;
15556ac0f488SChris Mason 	struct list_head rbio_list;
1556385de0efSChristoph Hellwig 	struct work_struct work;
15576ac0f488SChris Mason };
15586ac0f488SChris Mason 
15596ac0f488SChris Mason /*
15606ac0f488SChris Mason  * rbios on the plug list are sorted for easier merging.
15616ac0f488SChris Mason  */
15624f0f586bSSami Tolvanen static int plug_cmp(void *priv, const struct list_head *a,
15634f0f586bSSami Tolvanen 		    const struct list_head *b)
15646ac0f488SChris Mason {
1565214cc184SDavid Sterba 	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
15666ac0f488SChris Mason 						       plug_list);
1567214cc184SDavid Sterba 	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
15686ac0f488SChris Mason 						       plug_list);
15694f024f37SKent Overstreet 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
15704f024f37SKent Overstreet 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
15716ac0f488SChris Mason 
15726ac0f488SChris Mason 	if (a_sector < b_sector)
15736ac0f488SChris Mason 		return -1;
15746ac0f488SChris Mason 	if (a_sector > b_sector)
15756ac0f488SChris Mason 		return 1;
15766ac0f488SChris Mason 	return 0;
15776ac0f488SChris Mason }
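
/*
 * Usage note (illustration only): list_sort() invokes the comparator on the
 * embedded plug_list members, so after
 *
 *	list_sort(NULL, &plug->rbio_list, plug_cmp);
 *
 * the rbios are ordered by the logical sector of their first bio, and the
 * unplug handler below only has to test adjacent entries for merging.
 */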
15786ac0f488SChris Mason 
157993723095SQu Wenruo static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
15806ac0f488SChris Mason {
158193723095SQu Wenruo 	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
15826ac0f488SChris Mason 	struct btrfs_raid_bio *cur;
15836ac0f488SChris Mason 	struct btrfs_raid_bio *last = NULL;
15846ac0f488SChris Mason 
15856ac0f488SChris Mason 	list_sort(NULL, &plug->rbio_list, plug_cmp);
158693723095SQu Wenruo 
15876ac0f488SChris Mason 	while (!list_empty(&plug->rbio_list)) {
15886ac0f488SChris Mason 		cur = list_entry(plug->rbio_list.next,
15896ac0f488SChris Mason 				 struct btrfs_raid_bio, plug_list);
15906ac0f488SChris Mason 		list_del_init(&cur->plug_list);
15916ac0f488SChris Mason 
15926ac0f488SChris Mason 		if (rbio_is_full(cur)) {
159393723095SQu Wenruo 			/* We have a full stripe, queue it down. */
159493723095SQu Wenruo 			start_async_work(cur, rmw_rbio_work);
15956ac0f488SChris Mason 			continue;
15966ac0f488SChris Mason 		}
15976ac0f488SChris Mason 		if (last) {
15986ac0f488SChris Mason 			if (rbio_can_merge(last, cur)) {
15996ac0f488SChris Mason 				merge_rbio(last, cur);
1600ff2b64a2SQu Wenruo 				free_raid_bio(cur);
16016ac0f488SChris Mason 				continue;
16026ac0f488SChris Mason 			}
160393723095SQu Wenruo 			start_async_work(last, rmw_rbio_work);
16046ac0f488SChris Mason 		}
16056ac0f488SChris Mason 		last = cur;
16066ac0f488SChris Mason 	}
160793723095SQu Wenruo 	if (last)
160893723095SQu Wenruo 		start_async_work(last, rmw_rbio_work);
16096ac0f488SChris Mason 	kfree(plug);
16106ac0f488SChris Mason }
16116ac0f488SChris Mason 
1612bd8f7e62SQu Wenruo /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1613bd8f7e62SQu Wenruo static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1614bd8f7e62SQu Wenruo {
1615bd8f7e62SQu Wenruo 	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1616bd8f7e62SQu Wenruo 	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1617bd8f7e62SQu Wenruo 	const u64 full_stripe_start = rbio->bioc->raid_map[0];
1618bd8f7e62SQu Wenruo 	const u32 orig_len = orig_bio->bi_iter.bi_size;
1619bd8f7e62SQu Wenruo 	const u32 sectorsize = fs_info->sectorsize;
1620bd8f7e62SQu Wenruo 	u64 cur_logical;
1621bd8f7e62SQu Wenruo 
1622bd8f7e62SQu Wenruo 	ASSERT(orig_logical >= full_stripe_start &&
1623bd8f7e62SQu Wenruo 	       orig_logical + orig_len <= full_stripe_start +
1624ff18a4afSChristoph Hellwig 	       rbio->nr_data * BTRFS_STRIPE_LEN);
1625bd8f7e62SQu Wenruo 
1626bd8f7e62SQu Wenruo 	bio_list_add(&rbio->bio_list, orig_bio);
1627bd8f7e62SQu Wenruo 	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1628bd8f7e62SQu Wenruo 
1629bd8f7e62SQu Wenruo 	/* Update the dbitmap. */
1630bd8f7e62SQu Wenruo 	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1631bd8f7e62SQu Wenruo 	     cur_logical += sectorsize) {
1632bd8f7e62SQu Wenruo 		int bit = ((u32)(cur_logical - full_stripe_start) >>
1633bd8f7e62SQu Wenruo 			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;
1634bd8f7e62SQu Wenruo 
1635bd8f7e62SQu Wenruo 		set_bit(bit, &rbio->dbitmap);
1636bd8f7e62SQu Wenruo 	}
1637bd8f7e62SQu Wenruo }
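
/*
 * Worked example (hypothetical values): with sectorsize_bits = 12,
 * stripe_nsectors = 16 and full_stripe_start = 0, a bio covering logical
 * bytes [65536, 73728) sets bit (65536 >> 12) % 16 = 0 and bit
 * (69632 >> 12) % 16 = 1 in dbitmap.  The modulo folds all data stripes
 * together: dbitmap tracks vertical stripes, not individual sectors.
 */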
1638bd8f7e62SQu Wenruo 
16396ac0f488SChris Mason /*
164053b381b3SDavid Woodhouse  * our main entry point for writes from the rest of the FS.
164153b381b3SDavid Woodhouse  */
164231683f4aSChristoph Hellwig void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
164353b381b3SDavid Woodhouse {
16446a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
164553b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
16466ac0f488SChris Mason 	struct btrfs_plug_cb *plug = NULL;
16476ac0f488SChris Mason 	struct blk_plug_cb *cb;
164853b381b3SDavid Woodhouse 
1649ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
1650af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
1651abb49e87SChristoph Hellwig 		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
1652abb49e87SChristoph Hellwig 		bio_endio(bio);
1653abb49e87SChristoph Hellwig 		return;
1654af8e2d1dSMiao Xie 	}
16551b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_WRITE;
1656bd8f7e62SQu Wenruo 	rbio_add_bio(rbio, bio);
16576ac0f488SChris Mason 
16586ac0f488SChris Mason 	/*
165993723095SQu Wenruo 	 * Don't plug on full rbios, just get them out the door
16606ac0f488SChris Mason 	 * as quickly as we can
16616ac0f488SChris Mason 	 */
1662abb49e87SChristoph Hellwig 	if (!rbio_is_full(rbio)) {
166393723095SQu Wenruo 		cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
16646ac0f488SChris Mason 		if (cb) {
16656ac0f488SChris Mason 			plug = container_of(cb, struct btrfs_plug_cb, cb);
16666ac0f488SChris Mason 			if (!plug->info) {
16670b246afaSJeff Mahoney 				plug->info = fs_info;
16686ac0f488SChris Mason 				INIT_LIST_HEAD(&plug->rbio_list);
16696ac0f488SChris Mason 			}
16706ac0f488SChris Mason 			list_add_tail(&rbio->plug_list, &plug->rbio_list);
167193723095SQu Wenruo 			return;
167253b381b3SDavid Woodhouse 		}
1673abb49e87SChristoph Hellwig 	}
1674abb49e87SChristoph Hellwig 
167593723095SQu Wenruo 	/*
167693723095SQu Wenruo 	 * Either we don't have any existing plug, or we're doing a full stripe,
1677abb49e87SChristoph Hellwig 	 * queue the rmw work now.
167893723095SQu Wenruo 	 */
167993723095SQu Wenruo 	start_async_work(rbio, rmw_rbio_work);
16806ac0f488SChris Mason }
168153b381b3SDavid Woodhouse 
16827a315072SQu Wenruo static int verify_one_sector(struct btrfs_raid_bio *rbio,
16837a315072SQu Wenruo 			     int stripe_nr, int sector_nr)
16847a315072SQu Wenruo {
16857a315072SQu Wenruo 	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
16867a315072SQu Wenruo 	struct sector_ptr *sector;
16877a315072SQu Wenruo 	u8 csum_buf[BTRFS_CSUM_SIZE];
16887a315072SQu Wenruo 	u8 *csum_expected;
16897a315072SQu Wenruo 	int ret;
16907a315072SQu Wenruo 
16917a315072SQu Wenruo 	if (!rbio->csum_bitmap || !rbio->csum_buf)
16927a315072SQu Wenruo 		return 0;
16937a315072SQu Wenruo 
16947a315072SQu Wenruo 	/* No way to verify P/Q as they are not covered by data csum. */
16957a315072SQu Wenruo 	if (stripe_nr >= rbio->nr_data)
16967a315072SQu Wenruo 		return 0;
16977a315072SQu Wenruo 	/*
16987a315072SQu Wenruo 	 * If we're rebuilding a read, we have to use pages from the
16997a315072SQu Wenruo 	 * bio list if possible.
17007a315072SQu Wenruo 	 */
17017a315072SQu Wenruo 	if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
17027a315072SQu Wenruo 	     rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
17037a315072SQu Wenruo 		sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
17047a315072SQu Wenruo 	} else {
17057a315072SQu Wenruo 		sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
17067a315072SQu Wenruo 	}
17077a315072SQu Wenruo 
17087a315072SQu Wenruo 	ASSERT(sector->page);
17097a315072SQu Wenruo 
17107a315072SQu Wenruo 	csum_expected = rbio->csum_buf +
17117a315072SQu Wenruo 			(stripe_nr * rbio->stripe_nsectors + sector_nr) *
17127a315072SQu Wenruo 			fs_info->csum_size;
17137a315072SQu Wenruo 	ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
17147a315072SQu Wenruo 				      csum_buf, csum_expected);
17157a315072SQu Wenruo 	return ret;
17167a315072SQu Wenruo }
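
/*
 * Illustration (hypothetical values): with csum_size = 4 (crc32c) and
 * stripe_nsectors = 16, the expected csum for stripe_nr = 1 and
 * sector_nr = 3 lives at csum_buf + (1 * 16 + 3) * 4 = csum_buf + 76.
 */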
17177a315072SQu Wenruo 
171853b381b3SDavid Woodhouse /*
17199c5ff9b4SQu Wenruo  * Recover a vertical stripe specified by @sector_nr.
17209c5ff9b4SQu Wenruo  * @*pointers are the pre-allocated pointers by the caller, so we don't
17219c5ff9b4SQu Wenruo  * need to allocate/free the pointers again and again.
17229c5ff9b4SQu Wenruo  */
172375b47033SQu Wenruo static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
17249c5ff9b4SQu Wenruo 			    void **pointers, void **unmap_array)
17259c5ff9b4SQu Wenruo {
17269c5ff9b4SQu Wenruo 	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
17279c5ff9b4SQu Wenruo 	struct sector_ptr *sector;
17289c5ff9b4SQu Wenruo 	const u32 sectorsize = fs_info->sectorsize;
172975b47033SQu Wenruo 	int found_errors;
173075b47033SQu Wenruo 	int faila;
173175b47033SQu Wenruo 	int failb;
17329c5ff9b4SQu Wenruo 	int stripe_nr;
17337a315072SQu Wenruo 	int ret = 0;
17349c5ff9b4SQu Wenruo 
17359c5ff9b4SQu Wenruo 	/*
17369c5ff9b4SQu Wenruo 	 * Now we just use bitmap to mark the horizontal stripes in
17379c5ff9b4SQu Wenruo 	 * which we have data when doing parity scrub.
17389c5ff9b4SQu Wenruo 	 */
17399c5ff9b4SQu Wenruo 	if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
17409c5ff9b4SQu Wenruo 	    !test_bit(sector_nr, &rbio->dbitmap))
174175b47033SQu Wenruo 		return 0;
174275b47033SQu Wenruo 
174375b47033SQu Wenruo 	found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
174475b47033SQu Wenruo 						 &failb);
174575b47033SQu Wenruo 	/*
174667da05b3SColin Ian King 	 * No errors in the vertical stripe, skip it.  Can happen for recovery
174775b47033SQu Wenruo 	 * in which only part of a stripe failed the csum check.
174875b47033SQu Wenruo 	 */
174975b47033SQu Wenruo 	if (!found_errors)
175075b47033SQu Wenruo 		return 0;
175175b47033SQu Wenruo 
175275b47033SQu Wenruo 	if (found_errors > rbio->bioc->max_errors)
175375b47033SQu Wenruo 		return -EIO;
17549c5ff9b4SQu Wenruo 
17559c5ff9b4SQu Wenruo 	/*
17569c5ff9b4SQu Wenruo 	 * Setup our array of pointers with sectors from each stripe
17579c5ff9b4SQu Wenruo 	 *
17589c5ff9b4SQu Wenruo 	 * NOTE: store a duplicate array of pointers to preserve the
17599c5ff9b4SQu Wenruo 	 * pointer order.
17609c5ff9b4SQu Wenruo 	 */
17619c5ff9b4SQu Wenruo 	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
17629c5ff9b4SQu Wenruo 		/*
176375b47033SQu Wenruo 		 * If we're rebuilding a read, we have to use pages from the
176475b47033SQu Wenruo 		 * bio list if possible.
17659c5ff9b4SQu Wenruo 		 */
17669c5ff9b4SQu Wenruo 		if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
176775b47033SQu Wenruo 		     rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
17689c5ff9b4SQu Wenruo 			sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
17699c5ff9b4SQu Wenruo 		} else {
17709c5ff9b4SQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
17719c5ff9b4SQu Wenruo 		}
17729c5ff9b4SQu Wenruo 		ASSERT(sector->page);
17739c5ff9b4SQu Wenruo 		pointers[stripe_nr] = kmap_local_page(sector->page) +
17749c5ff9b4SQu Wenruo 				   sector->pgoff;
17759c5ff9b4SQu Wenruo 		unmap_array[stripe_nr] = pointers[stripe_nr];
17769c5ff9b4SQu Wenruo 	}
17779c5ff9b4SQu Wenruo 
17789c5ff9b4SQu Wenruo 	/* All raid6 handling here */
17799c5ff9b4SQu Wenruo 	if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
17809c5ff9b4SQu Wenruo 		/* Single failure, rebuild from parity raid5 style */
17819c5ff9b4SQu Wenruo 		if (failb < 0) {
17829c5ff9b4SQu Wenruo 			if (faila == rbio->nr_data)
17839c5ff9b4SQu Wenruo 				/*
17849c5ff9b4SQu Wenruo 				 * Just the P stripe has failed, without
17859c5ff9b4SQu Wenruo 				 * a bad data or Q stripe.
17869c5ff9b4SQu Wenruo 				 * We have nothing to do, just skip the
17879c5ff9b4SQu Wenruo 				 * recovery for this stripe.
17889c5ff9b4SQu Wenruo 				 */
17899c5ff9b4SQu Wenruo 				goto cleanup;
17909c5ff9b4SQu Wenruo 			/*
17919c5ff9b4SQu Wenruo 			 * a single failure in raid6 is rebuilt
17929c5ff9b4SQu Wenruo 			 * in the pstripe code below
17939c5ff9b4SQu Wenruo 			 */
17949c5ff9b4SQu Wenruo 			goto pstripe;
17959c5ff9b4SQu Wenruo 		}
17969c5ff9b4SQu Wenruo 
17979c5ff9b4SQu Wenruo 		/*
17989c5ff9b4SQu Wenruo 		 * If the q stripe is failed, do a pstripe reconstruction from
17999c5ff9b4SQu Wenruo 		 * the xors.
18009c5ff9b4SQu Wenruo 		 * If both the q stripe and the P stripe are failed, we're
18019c5ff9b4SQu Wenruo 		 * here due to a crc mismatch and we can't give them the
18029c5ff9b4SQu Wenruo 		 * data they want.
18039c5ff9b4SQu Wenruo 		 */
18049c5ff9b4SQu Wenruo 		if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
18059c5ff9b4SQu Wenruo 			if (rbio->bioc->raid_map[faila] ==
18069c5ff9b4SQu Wenruo 			    RAID5_P_STRIPE)
18079c5ff9b4SQu Wenruo 				/*
18089c5ff9b4SQu Wenruo 				 * Only P and Q are corrupted.
18099c5ff9b4SQu Wenruo 				 * We only care about data stripes recovery,
18109c5ff9b4SQu Wenruo 				 * can skip this vertical stripe.
18119c5ff9b4SQu Wenruo 				 */
18129c5ff9b4SQu Wenruo 				goto cleanup;
18139c5ff9b4SQu Wenruo 			/*
18149c5ff9b4SQu Wenruo 			 * Otherwise we have one bad data stripe and
18159c5ff9b4SQu Wenruo 			 * a good P stripe.  raid5!
18169c5ff9b4SQu Wenruo 			 */
18179c5ff9b4SQu Wenruo 			goto pstripe;
18189c5ff9b4SQu Wenruo 		}
18199c5ff9b4SQu Wenruo 
18209c5ff9b4SQu Wenruo 		if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
18219c5ff9b4SQu Wenruo 			raid6_datap_recov(rbio->real_stripes, sectorsize,
18229c5ff9b4SQu Wenruo 					  faila, pointers);
18239c5ff9b4SQu Wenruo 		} else {
18249c5ff9b4SQu Wenruo 			raid6_2data_recov(rbio->real_stripes, sectorsize,
18259c5ff9b4SQu Wenruo 					  faila, failb, pointers);
18269c5ff9b4SQu Wenruo 		}
18279c5ff9b4SQu Wenruo 	} else {
18289c5ff9b4SQu Wenruo 		void *p;
18299c5ff9b4SQu Wenruo 
18309c5ff9b4SQu Wenruo 		/* Rebuild from P stripe here (raid5 or raid6). */
18319c5ff9b4SQu Wenruo 		ASSERT(failb == -1);
18329c5ff9b4SQu Wenruo pstripe:
18339c5ff9b4SQu Wenruo 		/* Copy parity block into failed block to start with */
18349c5ff9b4SQu Wenruo 		memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
18359c5ff9b4SQu Wenruo 
18369c5ff9b4SQu Wenruo 		/* Rearrange the pointer array */
18379c5ff9b4SQu Wenruo 		p = pointers[faila];
18389c5ff9b4SQu Wenruo 		for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
18399c5ff9b4SQu Wenruo 		     stripe_nr++)
18409c5ff9b4SQu Wenruo 			pointers[stripe_nr] = pointers[stripe_nr + 1];
18419c5ff9b4SQu Wenruo 		pointers[rbio->nr_data - 1] = p;
18429c5ff9b4SQu Wenruo 
18439c5ff9b4SQu Wenruo 		/* Xor in the rest */
18449c5ff9b4SQu Wenruo 		run_xor(pointers, rbio->nr_data - 1, sectorsize);
18459c5ff9b4SQu Wenruo 
18469c5ff9b4SQu Wenruo 	}
18479c5ff9b4SQu Wenruo 
18489c5ff9b4SQu Wenruo 	/*
18499c5ff9b4SQu Wenruo 	 * No matter if this is a RMW or recovery, we should have all
18509c5ff9b4SQu Wenruo 	 * failed sectors repaired in the vertical stripe, thus they are now
18519c5ff9b4SQu Wenruo 	 * uptodate.
18529c5ff9b4SQu Wenruo 	 * Especially if we determine to cache the rbio, we need to
18539c5ff9b4SQu Wenruo 	 * have at least all data sectors uptodate.
18547a315072SQu Wenruo 	 *
18557a315072SQu Wenruo 	 * If possible, also check if the repaired sector matches its data
18567a315072SQu Wenruo 	 * checksum.
18579c5ff9b4SQu Wenruo 	 */
185875b47033SQu Wenruo 	if (faila >= 0) {
18597a315072SQu Wenruo 		ret = verify_one_sector(rbio, faila, sector_nr);
18607a315072SQu Wenruo 		if (ret < 0)
18617a315072SQu Wenruo 			goto cleanup;
18627a315072SQu Wenruo 
186375b47033SQu Wenruo 		sector = rbio_stripe_sector(rbio, faila, sector_nr);
18649c5ff9b4SQu Wenruo 		sector->uptodate = 1;
18659c5ff9b4SQu Wenruo 	}
186675b47033SQu Wenruo 	if (failb >= 0) {
1867f7c11affSTanmay Bhushan 		ret = verify_one_sector(rbio, failb, sector_nr);
18687a315072SQu Wenruo 		if (ret < 0)
18697a315072SQu Wenruo 			goto cleanup;
18707a315072SQu Wenruo 
187175b47033SQu Wenruo 		sector = rbio_stripe_sector(rbio, failb, sector_nr);
18729c5ff9b4SQu Wenruo 		sector->uptodate = 1;
18739c5ff9b4SQu Wenruo 	}
18749c5ff9b4SQu Wenruo 
18759c5ff9b4SQu Wenruo cleanup:
18769c5ff9b4SQu Wenruo 	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
18779c5ff9b4SQu Wenruo 		kunmap_local(unmap_array[stripe_nr]);
18787a315072SQu Wenruo 	return ret;
18799c5ff9b4SQu Wenruo }
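
/*
 * Decision table for the RAID6 handling above (illustration only;
 * faila < failb by construction):
 *
 *	failb < 0, faila == P stripe  -> only P lost, nothing to rebuild
 *	failb < 0, faila is data      -> single failure, raid5 xor rebuild
 *	failb == Q, faila == P        -> only P/Q lost, no data to recover
 *	failb == Q, faila is data     -> rebuild the data raid5 style
 *	failb == P, faila is data     -> raid6_datap_recov()
 *	faila and failb both data     -> raid6_2data_recov()
 */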
18809c5ff9b4SQu Wenruo 
1881ec936b03SQu Wenruo static int recover_sectors(struct btrfs_raid_bio *rbio)
188253b381b3SDavid Woodhouse {
18839c5ff9b4SQu Wenruo 	void **pointers = NULL;
18849c5ff9b4SQu Wenruo 	void **unmap_array = NULL;
1885ec936b03SQu Wenruo 	int sectornr;
1886ec936b03SQu Wenruo 	int ret = 0;
188753b381b3SDavid Woodhouse 
188807e4d380SQu Wenruo 	/*
1889ec936b03SQu Wenruo 	 * @pointers array stores the pointer for each sector.
1890ec936b03SQu Wenruo 	 *
1891ec936b03SQu Wenruo 	 * @unmap_array stores copy of pointers that does not get reordered
1892ec936b03SQu Wenruo 	 * during reconstruction so that kunmap_local works.
189307e4d380SQu Wenruo 	 */
189431e818feSDavid Sterba 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
189594a0b58dSIra Weiny 	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1896ec936b03SQu Wenruo 	if (!pointers || !unmap_array) {
1897ec936b03SQu Wenruo 		ret = -ENOMEM;
1898ec936b03SQu Wenruo 		goto out;
189994a0b58dSIra Weiny 	}
190094a0b58dSIra Weiny 
1901b4ee1782SOmar Sandoval 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1902b4ee1782SOmar Sandoval 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
190374cc3600SChristoph Hellwig 		spin_lock(&rbio->bio_list_lock);
190453b381b3SDavid Woodhouse 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
190574cc3600SChristoph Hellwig 		spin_unlock(&rbio->bio_list_lock);
190653b381b3SDavid Woodhouse 	}
190753b381b3SDavid Woodhouse 
190853b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
190953b381b3SDavid Woodhouse 
191075b47033SQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
191175b47033SQu Wenruo 		ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
191275b47033SQu Wenruo 		if (ret < 0)
191375b47033SQu Wenruo 			break;
191475b47033SQu Wenruo 	}
191553b381b3SDavid Woodhouse 
1916ec936b03SQu Wenruo out:
191753b381b3SDavid Woodhouse 	kfree(pointers);
1918ec936b03SQu Wenruo 	kfree(unmap_array);
1919ec936b03SQu Wenruo 	return ret;
1920ec936b03SQu Wenruo }
1921ec936b03SQu Wenruo 
192240f87ddbSChristoph Hellwig static void recover_rbio(struct btrfs_raid_bio *rbio)
192353b381b3SDavid Woodhouse {
1924d838d05eSChristoph Hellwig 	struct bio_list bio_list = BIO_EMPTY_LIST;
1925d31968d9SQu Wenruo 	int total_sector_nr;
1926d31968d9SQu Wenruo 	int ret = 0;
192753b381b3SDavid Woodhouse 
1928d838d05eSChristoph Hellwig 	/*
1929d838d05eSChristoph Hellwig 	 * Either we're doing recovery for a read failure or a degraded write;
1930d838d05eSChristoph Hellwig 	 * the caller should have set the error bitmap correctly.
1931d838d05eSChristoph Hellwig 	 */
1932d838d05eSChristoph Hellwig 	ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));
1933d838d05eSChristoph Hellwig 
1934d838d05eSChristoph Hellwig 	/* For recovery, we need to read all sectors including P/Q. */
1935d838d05eSChristoph Hellwig 	ret = alloc_rbio_pages(rbio);
1936d838d05eSChristoph Hellwig 	if (ret < 0)
193740f87ddbSChristoph Hellwig 		goto out;
1938d838d05eSChristoph Hellwig 
1939d838d05eSChristoph Hellwig 	index_rbio_pages(rbio);
1940d838d05eSChristoph Hellwig 
194153b381b3SDavid Woodhouse 	/*
1942f6065f8eSQu Wenruo 	 * Read everything that hasn't failed.  However this time we will
1943f6065f8eSQu Wenruo 	 * not trust any cached sector, as we may read out some stale data
1944f6065f8eSQu Wenruo 	 * that the higher layer is not going to consume.
1945f6065f8eSQu Wenruo 	 *
1946f6065f8eSQu Wenruo 	 * So here we always re-read everything in the recovery path.
194853b381b3SDavid Woodhouse 	 */
1949ef340fccSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1950ef340fccSQu Wenruo 	     total_sector_nr++) {
1951ef340fccSQu Wenruo 		int stripe = total_sector_nr / rbio->stripe_nsectors;
1952ef340fccSQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
19533e77605dSQu Wenruo 		struct sector_ptr *sector;
195453b381b3SDavid Woodhouse 
195575b47033SQu Wenruo 		/*
195675b47033SQu Wenruo 		 * Skip the range which has an error.  It can be a range which is
195775b47033SQu Wenruo 		 * marked error (for csum mismatch), or it can be a missing
195875b47033SQu Wenruo 		 * device.
195975b47033SQu Wenruo 		 */
196075b47033SQu Wenruo 		if (!rbio->bioc->stripes[stripe].dev->bdev ||
196175b47033SQu Wenruo 		    test_bit(total_sector_nr, rbio->error_bitmap)) {
196275b47033SQu Wenruo 			/*
196375b47033SQu Wenruo 			 * Also set the error bit for missing device, which
196475b47033SQu Wenruo 			 * may not yet have its error bit set.
196575b47033SQu Wenruo 			 */
196675b47033SQu Wenruo 			set_bit(total_sector_nr, rbio->error_bitmap);
196753b381b3SDavid Woodhouse 			continue;
1968ef340fccSQu Wenruo 		}
196975b47033SQu Wenruo 
197053b381b3SDavid Woodhouse 		sector = rbio_stripe_sector(rbio, stripe, sectornr);
1971d838d05eSChristoph Hellwig 		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
1972ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_READ);
1973d838d05eSChristoph Hellwig 		if (ret < 0) {
1974801fcfc5SChristoph Hellwig 			bio_list_put(&bio_list);
197540f87ddbSChristoph Hellwig 			goto out;
1976d817ce35SQu Wenruo 		}
1977d838d05eSChristoph Hellwig 	}
1978d838d05eSChristoph Hellwig 
1979d838d05eSChristoph Hellwig 	submit_read_wait_bio_list(rbio, &bio_list);
198040f87ddbSChristoph Hellwig 	ret = recover_sectors(rbio);
198140f87ddbSChristoph Hellwig out:
198240f87ddbSChristoph Hellwig 	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
1983d838d05eSChristoph Hellwig }
1984d817ce35SQu Wenruo 
1985d817ce35SQu Wenruo static void recover_rbio_work(struct work_struct *work)
1986d817ce35SQu Wenruo {
1987d817ce35SQu Wenruo 	struct btrfs_raid_bio *rbio;
1988d817ce35SQu Wenruo 
1989d817ce35SQu Wenruo 	rbio = container_of(work, struct btrfs_raid_bio, work);
199040f87ddbSChristoph Hellwig 	if (!lock_stripe_add(rbio))
199140f87ddbSChristoph Hellwig 		recover_rbio(rbio);
1992d817ce35SQu Wenruo }
1993d817ce35SQu Wenruo 
1994d817ce35SQu Wenruo static void recover_rbio_work_locked(struct work_struct *work)
1995d817ce35SQu Wenruo {
199640f87ddbSChristoph Hellwig 	recover_rbio(container_of(work, struct btrfs_raid_bio, work));
1997d817ce35SQu Wenruo }
1998d817ce35SQu Wenruo 
199975b47033SQu Wenruo static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
200075b47033SQu Wenruo {
200175b47033SQu Wenruo 	bool found = false;
200275b47033SQu Wenruo 	int sector_nr;
200375b47033SQu Wenruo 
200475b47033SQu Wenruo 	/*
200575b47033SQu Wenruo 	 * This is for RAID6 extra recovery tries, thus mirror number should
200675b47033SQu Wenruo 	 * be larger than 2.
200775b47033SQu Wenruo 	 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
200875b47033SQu Wenruo 	 * RAID5 methods.
200975b47033SQu Wenruo 	 */
201075b47033SQu Wenruo 	ASSERT(mirror_num > 2);
201175b47033SQu Wenruo 	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
201275b47033SQu Wenruo 		int found_errors;
201375b47033SQu Wenruo 		int faila;
201475b47033SQu Wenruo 		int failb;
201575b47033SQu Wenruo 
201675b47033SQu Wenruo 		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
201775b47033SQu Wenruo 							 &faila, &failb);
201875b47033SQu Wenruo 		/* This vertical stripe doesn't have errors. */
201975b47033SQu Wenruo 		if (!found_errors)
202075b47033SQu Wenruo 			continue;
202175b47033SQu Wenruo 
202275b47033SQu Wenruo 		/*
202375b47033SQu Wenruo 		 * If we found errors, there should be only one error marked
202475b47033SQu Wenruo 		 * by previous set_rbio_range_error().
202575b47033SQu Wenruo 		 */
202675b47033SQu Wenruo 		ASSERT(found_errors == 1);
202775b47033SQu Wenruo 		found = true;
202875b47033SQu Wenruo 
202975b47033SQu Wenruo 		/* Now select another stripe to mark as error. */
203075b47033SQu Wenruo 		failb = rbio->real_stripes - (mirror_num - 1);
203175b47033SQu Wenruo 		if (failb <= faila)
203275b47033SQu Wenruo 			failb--;
203375b47033SQu Wenruo 
203475b47033SQu Wenruo 		/* Set the extra bit in error bitmap. */
203575b47033SQu Wenruo 		if (failb >= 0)
203675b47033SQu Wenruo 			set_bit(failb * rbio->stripe_nsectors + sector_nr,
203775b47033SQu Wenruo 				rbio->error_bitmap);
203875b47033SQu Wenruo 	}
203975b47033SQu Wenruo 
204075b47033SQu Wenruo 	/* We should have found at least one vertical stripe with an error. */
204175b47033SQu Wenruo 	ASSERT(found);
204275b47033SQu Wenruo }
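
/*
 * Worked example (hypothetical layout): with real_stripes = 4 and the
 * original error marked on stripe faila = 2, mirror_num = 3 computes
 * failb = 4 - (3 - 1) = 2; as failb <= faila it is decremented to 1, so
 * every extra mirror retry fails one additional, distinct stripe.
 */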
204375b47033SQu Wenruo 
2044d31968d9SQu Wenruo /*
204553b381b3SDavid Woodhouse  * the main entry point for reads from the higher layers.  This
204653b381b3SDavid Woodhouse  * is really only called when the normal read path had a failure,
204753b381b3SDavid Woodhouse  * so we assume the bio they send down corresponds to a failed part
204853b381b3SDavid Woodhouse  * of the drive.
204953b381b3SDavid Woodhouse  */
20506065fd95SChristoph Hellwig void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
2051f1c29379SChristoph Hellwig 			   int mirror_num)
205253b381b3SDavid Woodhouse {
20536a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
205453b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
205553b381b3SDavid Woodhouse 
2056ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
2057af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
20586065fd95SChristoph Hellwig 		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
2059d817ce35SQu Wenruo 		bio_endio(bio);
2060d817ce35SQu Wenruo 		return;
2061af8e2d1dSMiao Xie 	}
206253b381b3SDavid Woodhouse 
20631b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2064bd8f7e62SQu Wenruo 	rbio_add_bio(rbio, bio);
206553b381b3SDavid Woodhouse 
20662942a50dSQu Wenruo 	set_rbio_range_error(rbio, bio);
20672942a50dSQu Wenruo 
206853b381b3SDavid Woodhouse 	/*
20698810f751SLiu Bo 	 * Loop retry:
20708810f751SLiu Bo 	 * for 'mirror_num == 2', reconstruct from all other stripes.
20718810f751SLiu Bo 	 * for 'mirror_num > 2', select a stripe to fail on every retry.
207253b381b3SDavid Woodhouse 	 */
2073ad3daf1cSQu Wenruo 	if (mirror_num > 2)
207475b47033SQu Wenruo 		set_rbio_raid6_extra_error(rbio, mirror_num);
207553b381b3SDavid Woodhouse 
2076d817ce35SQu Wenruo 	start_async_work(rbio, recover_rbio_work);
207753b381b3SDavid Woodhouse }
207853b381b3SDavid Woodhouse 
2079c5a41562SQu Wenruo static void fill_data_csums(struct btrfs_raid_bio *rbio)
2080c5a41562SQu Wenruo {
2081c5a41562SQu Wenruo 	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
2082c5a41562SQu Wenruo 	struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
2083c5a41562SQu Wenruo 						       rbio->bioc->raid_map[0]);
2084c5a41562SQu Wenruo 	const u64 start = rbio->bioc->raid_map[0];
2085c5a41562SQu Wenruo 	const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
2086c5a41562SQu Wenruo 			fs_info->sectorsize_bits;
2087c5a41562SQu Wenruo 	int ret;
2088c5a41562SQu Wenruo 
2089c5a41562SQu Wenruo 	/* The rbio should not have its csum buffer initialized. */
2090c5a41562SQu Wenruo 	ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);
2091c5a41562SQu Wenruo 
2092c5a41562SQu Wenruo 	/*
2093c5a41562SQu Wenruo 	 * Skip the csum search if:
2094c5a41562SQu Wenruo 	 *
2095c5a41562SQu Wenruo 	 * - The rbio doesn't belong to data block groups
2096c5a41562SQu Wenruo 	 *   Then we are doing IO for tree blocks, no need to search csums.
2097c5a41562SQu Wenruo 	 *
2098c5a41562SQu Wenruo 	 * - The rbio belongs to mixed block groups
2099c5a41562SQu Wenruo 	 *   This is to avoid deadlock: we're already holding the full
2100c5a41562SQu Wenruo 	 *   stripe lock, so if we trigger a metadata read that needs to do
2101c5a41562SQu Wenruo 	 *   raid56 recovery, we will deadlock.
2102c5a41562SQu Wenruo 	 */
2103c5a41562SQu Wenruo 	if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
2104c5a41562SQu Wenruo 	    rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
2105c5a41562SQu Wenruo 		return;
2106c5a41562SQu Wenruo 
2107c5a41562SQu Wenruo 	rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
2108c5a41562SQu Wenruo 				 fs_info->csum_size, GFP_NOFS);
2109c5a41562SQu Wenruo 	rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
2110c5a41562SQu Wenruo 					  GFP_NOFS);
2111c5a41562SQu Wenruo 	if (!rbio->csum_buf || !rbio->csum_bitmap) {
2112c5a41562SQu Wenruo 		ret = -ENOMEM;
2113c5a41562SQu Wenruo 		goto error;
2114c5a41562SQu Wenruo 	}
2115c5a41562SQu Wenruo 
2116c5a41562SQu Wenruo 	ret = btrfs_lookup_csums_bitmap(csum_root, start, start + len - 1,
2117c5a41562SQu Wenruo 					rbio->csum_buf, rbio->csum_bitmap);
2118c5a41562SQu Wenruo 	if (ret < 0)
2119c5a41562SQu Wenruo 		goto error;
2120c5a41562SQu Wenruo 	if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
2121c5a41562SQu Wenruo 		goto no_csum;
2122c5a41562SQu Wenruo 	return;
2123c5a41562SQu Wenruo 
2124c5a41562SQu Wenruo error:
2125c5a41562SQu Wenruo 	/*
2126c5a41562SQu Wenruo 	 * We failed to allocate memory or grab the csum, but it's not fatal,
2127c5a41562SQu Wenruo 	 * we can still continue.  But better to warn users that RMW is no
2128c5a41562SQu Wenruo 	 * longer safe for this particular sub-stripe write.
2129c5a41562SQu Wenruo 	 */
2130c5a41562SQu Wenruo 	btrfs_warn_rl(fs_info,
2131c5a41562SQu Wenruo "sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
2132c5a41562SQu Wenruo 			rbio->bioc->raid_map[0], ret);
2133c5a41562SQu Wenruo no_csum:
2134c5a41562SQu Wenruo 	kfree(rbio->csum_buf);
2135c5a41562SQu Wenruo 	bitmap_free(rbio->csum_bitmap);
2136c5a41562SQu Wenruo 	rbio->csum_buf = NULL;
2137c5a41562SQu Wenruo 	rbio->csum_bitmap = NULL;
2138c5a41562SQu Wenruo }
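
/*
 * Illustration (hypothetical geometry): with nr_data = 2,
 * stripe_nsectors = 16 and sectorsize_bits = 12, the lookup above scans
 * csums for len = (2 * 16) << 12 = 128 KiB of logical data starting at
 * raid_map[0].
 */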
2139c5a41562SQu Wenruo 
21407a315072SQu Wenruo static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
21415eb30ee2SQu Wenruo {
214202efa3a6SChristoph Hellwig 	struct bio_list bio_list = BIO_EMPTY_LIST;
214302efa3a6SChristoph Hellwig 	int total_sector_nr;
214402efa3a6SChristoph Hellwig 	int ret = 0;
21455eb30ee2SQu Wenruo 
2146c5a41562SQu Wenruo 	/*
2147c5a41562SQu Wenruo 	 * Fill the data csums we need for data verification.  We need to fill
2148c5a41562SQu Wenruo 	 * the csum_bitmap/csum_buf first, as our endio function will try to
2149c5a41562SQu Wenruo 	 * verify the data sectors.
2150c5a41562SQu Wenruo 	 */
2151c5a41562SQu Wenruo 	fill_data_csums(rbio);
2152c5a41562SQu Wenruo 
215302efa3a6SChristoph Hellwig 	/*
215402efa3a6SChristoph Hellwig 	 * Build a list of bios to read all sectors (including data and P/Q).
215502efa3a6SChristoph Hellwig 	 *
215602efa3a6SChristoph Hellwig 	 * This behavior is to compensate the later csum verification and recovery.
215702efa3a6SChristoph Hellwig 	 * This behavior is to compensate for the later csum verification and recovery.
215802efa3a6SChristoph Hellwig 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
215902efa3a6SChristoph Hellwig 	     total_sector_nr++) {
216002efa3a6SChristoph Hellwig 		struct sector_ptr *sector;
216102efa3a6SChristoph Hellwig 		int stripe = total_sector_nr / rbio->stripe_nsectors;
216202efa3a6SChristoph Hellwig 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
21635eb30ee2SQu Wenruo 
216402efa3a6SChristoph Hellwig 		sector = rbio_stripe_sector(rbio, stripe, sectornr);
216502efa3a6SChristoph Hellwig 		ret = rbio_add_io_sector(rbio, &bio_list, sector,
216602efa3a6SChristoph Hellwig 			       stripe, sectornr, REQ_OP_READ);
216702efa3a6SChristoph Hellwig 		if (ret) {
216802efa3a6SChristoph Hellwig 			bio_list_put(&bio_list);
216902efa3a6SChristoph Hellwig 			return ret;
217002efa3a6SChristoph Hellwig 		}
217102efa3a6SChristoph Hellwig 	}
21727a315072SQu Wenruo 
21737a315072SQu Wenruo 	/*
21747a315072SQu Wenruo 	 * We may or may not have any corrupted sectors (including missing dev
21757a315072SQu Wenruo 	 * and csum mismatch), just let recover_sectors() handle them all.
21767a315072SQu Wenruo 	 */
217702efa3a6SChristoph Hellwig 	submit_read_wait_bio_list(rbio, &bio_list);
217802efa3a6SChristoph Hellwig 	return recover_sectors(rbio);
21795eb30ee2SQu Wenruo }
21805eb30ee2SQu Wenruo 
21815eb30ee2SQu Wenruo static void raid_wait_write_end_io(struct bio *bio)
21825eb30ee2SQu Wenruo {
21835eb30ee2SQu Wenruo 	struct btrfs_raid_bio *rbio = bio->bi_private;
21845eb30ee2SQu Wenruo 	blk_status_t err = bio->bi_status;
21855eb30ee2SQu Wenruo 
2186ad3daf1cSQu Wenruo 	if (err)
21872942a50dSQu Wenruo 		rbio_update_error_bitmap(rbio, bio);
21885eb30ee2SQu Wenruo 	bio_put(bio);
21895eb30ee2SQu Wenruo 	if (atomic_dec_and_test(&rbio->stripes_pending))
21905eb30ee2SQu Wenruo 		wake_up(&rbio->io_wait);
21915eb30ee2SQu Wenruo }
21925eb30ee2SQu Wenruo 
21935eb30ee2SQu Wenruo static void submit_write_bios(struct btrfs_raid_bio *rbio,
21945eb30ee2SQu Wenruo 			      struct bio_list *bio_list)
21955eb30ee2SQu Wenruo {
21965eb30ee2SQu Wenruo 	struct bio *bio;
21975eb30ee2SQu Wenruo 
21985eb30ee2SQu Wenruo 	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
21995eb30ee2SQu Wenruo 	while ((bio = bio_list_pop(bio_list))) {
22005eb30ee2SQu Wenruo 		bio->bi_end_io = raid_wait_write_end_io;
22015eb30ee2SQu Wenruo 
22025eb30ee2SQu Wenruo 		if (trace_raid56_write_stripe_enabled()) {
22035eb30ee2SQu Wenruo 			struct raid56_bio_trace_info trace_info = { 0 };
22045eb30ee2SQu Wenruo 
22055eb30ee2SQu Wenruo 			bio_get_trace_info(rbio, bio, &trace_info);
22065eb30ee2SQu Wenruo 			trace_raid56_write_stripe(rbio, bio, &trace_info);
22075eb30ee2SQu Wenruo 		}
22085eb30ee2SQu Wenruo 		submit_bio(bio);
22095eb30ee2SQu Wenruo 	}
22105eb30ee2SQu Wenruo }
22115eb30ee2SQu Wenruo 
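/*
 * A minimal sketch of the submit-and-wait pattern the callers of
 * submit_write_bios() follow (see rmw_rbio() and scrub_rbio()):
 *
 *	submit_write_bios(rbio, &bio_list);
 *	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
 *
 * stripes_pending is set to the full bio count before anything is
 * submitted, so raid_wait_write_end_io() above cannot drop it to zero and
 * wake the waiter until every write bio has completed.
 */
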
22127a315072SQu Wenruo /*
22137a315072SQu Wenruo  * Determine if we need to read any sector from the disk.
22147a315072SQu Wenruo  * Should only be used in the RMW path, to skip a cached rbio.
22157a315072SQu Wenruo  */
22167a315072SQu Wenruo static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
22177a315072SQu Wenruo {
22187a315072SQu Wenruo 	int i;
22197a315072SQu Wenruo 
22207a315072SQu Wenruo 	for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
22217a315072SQu Wenruo 		struct sector_ptr *sector = &rbio->stripe_sectors[i];
22227a315072SQu Wenruo 
22237a315072SQu Wenruo 		/*
22247a315072SQu Wenruo 		 * We have a sector which has no page or is not uptodate, thus
22257a315072SQu Wenruo 		 * this rbio cannot be a cached one, as a cached rbio must have
22267a315072SQu Wenruo 		 * all its data sectors present and uptodate.
22277a315072SQu Wenruo 		 */
22287a315072SQu Wenruo 		if (!sector->page || !sector->uptodate)
22297a315072SQu Wenruo 			return true;
22307a315072SQu Wenruo 	}
22317a315072SQu Wenruo 	return false;
22327a315072SQu Wenruo }
22337a315072SQu Wenruo 
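/*
 * Example of the fast path this check enables: if a previous RMW left every
 * data sector present and uptodate (see the cache_rbio_pages() call below),
 * a later sub-stripe write can regenerate P/Q from the cached data and skip
 * the read phase entirely.
 */
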
22341d0ef1caSChristoph Hellwig static void rmw_rbio(struct btrfs_raid_bio *rbio)
22355eb30ee2SQu Wenruo {
22365eb30ee2SQu Wenruo 	struct bio_list bio_list;
22375eb30ee2SQu Wenruo 	int sectornr;
22385eb30ee2SQu Wenruo 	int ret = 0;
22395eb30ee2SQu Wenruo 
22405eb30ee2SQu Wenruo 	/*
22415eb30ee2SQu Wenruo 	 * Allocate the pages for parity first, as P/Q pages will always be
22425eb30ee2SQu Wenruo 	 * needed for both full-stripe and sub-stripe writes.
22435eb30ee2SQu Wenruo 	 */
22445eb30ee2SQu Wenruo 	ret = alloc_rbio_parity_pages(rbio);
22455eb30ee2SQu Wenruo 	if (ret < 0)
22461d0ef1caSChristoph Hellwig 		goto out;
22475eb30ee2SQu Wenruo 
22487a315072SQu Wenruo 	/*
22497a315072SQu Wenruo 	 * For a full stripe write, or when every data sector is already
22507a315072SQu Wenruo 	 * cached, we can go to the write path immediately.
22517a315072SQu Wenruo 	 */
22524d762701SChristoph Hellwig 	if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) {
22535eb30ee2SQu Wenruo 		/*
22544d762701SChristoph Hellwig 		 * Now we're doing a sub-stripe write, so we also need all the
22554d762701SChristoph Hellwig 		 * data stripes to do the full RMW.
22565eb30ee2SQu Wenruo 		 */
22575eb30ee2SQu Wenruo 		ret = alloc_rbio_data_pages(rbio);
22585eb30ee2SQu Wenruo 		if (ret < 0)
22591d0ef1caSChristoph Hellwig 			goto out;
22605eb30ee2SQu Wenruo 
22615eb30ee2SQu Wenruo 		index_rbio_pages(rbio);
22625eb30ee2SQu Wenruo 
22637a315072SQu Wenruo 		ret = rmw_read_wait_recover(rbio);
22645eb30ee2SQu Wenruo 		if (ret < 0)
22651d0ef1caSChristoph Hellwig 			goto out;
22664d762701SChristoph Hellwig 	}
22675eb30ee2SQu Wenruo 
22685eb30ee2SQu Wenruo 	/*
22695eb30ee2SQu Wenruo 	 * At this stage we're not allowed to add any new bios to the
22705eb30ee2SQu Wenruo 	 * bio list any more; anyone else that wants to change this stripe
22715eb30ee2SQu Wenruo 	 * needs to do their own rmw.
22725eb30ee2SQu Wenruo 	 */
227374cc3600SChristoph Hellwig 	spin_lock(&rbio->bio_list_lock);
22745eb30ee2SQu Wenruo 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
227574cc3600SChristoph Hellwig 	spin_unlock(&rbio->bio_list_lock);
22765eb30ee2SQu Wenruo 
22772942a50dSQu Wenruo 	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
22785eb30ee2SQu Wenruo 
22795eb30ee2SQu Wenruo 	index_rbio_pages(rbio);
22805eb30ee2SQu Wenruo 
22815eb30ee2SQu Wenruo 	/*
22825eb30ee2SQu Wenruo 	 * We don't cache full rbios because we're assuming
22835eb30ee2SQu Wenruo 	 * the higher layers are unlikely to use this area of
22845eb30ee2SQu Wenruo 	 * the disk again soon.  If they do use it again,
22855eb30ee2SQu Wenruo 	 * hopefully they will send another full bio.
22865eb30ee2SQu Wenruo 	 */
22875eb30ee2SQu Wenruo 	if (!rbio_is_full(rbio))
22885eb30ee2SQu Wenruo 		cache_rbio_pages(rbio);
22895eb30ee2SQu Wenruo 	else
22905eb30ee2SQu Wenruo 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
22915eb30ee2SQu Wenruo 
22925eb30ee2SQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
22935eb30ee2SQu Wenruo 		generate_pq_vertical(rbio, sectornr);
22945eb30ee2SQu Wenruo 
22955eb30ee2SQu Wenruo 	bio_list_init(&bio_list);
22965eb30ee2SQu Wenruo 	ret = rmw_assemble_write_bios(rbio, &bio_list);
22975eb30ee2SQu Wenruo 	if (ret < 0)
22981d0ef1caSChristoph Hellwig 		goto out;
22995eb30ee2SQu Wenruo 
23005eb30ee2SQu Wenruo 	/* We should have at least one bio assembled. */
23015eb30ee2SQu Wenruo 	ASSERT(bio_list_size(&bio_list));
23025eb30ee2SQu Wenruo 	submit_write_bios(rbio, &bio_list);
23035eb30ee2SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
23045eb30ee2SQu Wenruo 
2305ad3daf1cSQu Wenruo 	/* We may have more errors than our tolerance during the write. */
2306ad3daf1cSQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
2307ad3daf1cSQu Wenruo 		int found_errors;
2308ad3daf1cSQu Wenruo 
2309ad3daf1cSQu Wenruo 		found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
2310ad3daf1cSQu Wenruo 		if (found_errors > rbio->bioc->max_errors) {
23115eb30ee2SQu Wenruo 			ret = -EIO;
2312ad3daf1cSQu Wenruo 			break;
2313ad3daf1cSQu Wenruo 		}
2314ad3daf1cSQu Wenruo 	}
23151d0ef1caSChristoph Hellwig out:
23161d0ef1caSChristoph Hellwig 	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
23175eb30ee2SQu Wenruo }
23185eb30ee2SQu Wenruo 
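/*
 * Rough outline of rmw_rbio() above (steps, not code):
 *
 *	1) Allocate the P/Q pages, always needed.
 *	2) For a sub-stripe write without fully cached data: allocate the
 *	   data pages, read and csum-verify all sectors, recover failures.
 *	3) Set RBIO_RMW_LOCKED_BIT to stop further bio merges.
 *	4) Generate P/Q vertically for every sector column.
 *	5) Assemble the write bios, submit them and wait for completion.
 *	6) Fail the rbio with -EIO if any column exceeds max_errors.
 */
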
231993723095SQu Wenruo static void rmw_rbio_work(struct work_struct *work)
232053b381b3SDavid Woodhouse {
232153b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
232253b381b3SDavid Woodhouse 
232353b381b3SDavid Woodhouse 	rbio = container_of(work, struct btrfs_raid_bio, work);
23241d0ef1caSChristoph Hellwig 	if (lock_stripe_add(rbio) == 0)
23251d0ef1caSChristoph Hellwig 		rmw_rbio(rbio);
232693723095SQu Wenruo }
232793723095SQu Wenruo 
232893723095SQu Wenruo static void rmw_rbio_work_locked(struct work_struct *work)
232993723095SQu Wenruo {
23301d0ef1caSChristoph Hellwig 	rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
233153b381b3SDavid Woodhouse }
233253b381b3SDavid Woodhouse 
23335a6ac9eaSMiao Xie /*
23345a6ac9eaSMiao Xie  * The following code is used to scrub/replace the parity stripe
23355a6ac9eaSMiao Xie  *
23364c664611SQu Wenruo  * Caller must have already increased bio_counter for getting @bioc.
2337ae6529c3SQu Wenruo  *
23385a6ac9eaSMiao Xie  * Note: We need to make sure all the pages added into the scrub/replace
23395a6ac9eaSMiao Xie  * raid bio are correct and will not be changed during the scrub/replace.
23405a6ac9eaSMiao Xie  * That is, those pages just hold metadata or file data with checksums.
23415a6ac9eaSMiao Xie  */
23425a6ac9eaSMiao Xie 
23436a258d72SQu Wenruo struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
23446a258d72SQu Wenruo 				struct btrfs_io_context *bioc,
2345ff18a4afSChristoph Hellwig 				struct btrfs_device *scrub_dev,
23465a6ac9eaSMiao Xie 				unsigned long *dbitmap, int stripe_nsectors)
23475a6ac9eaSMiao Xie {
23486a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
23495a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
23505a6ac9eaSMiao Xie 	int i;
23515a6ac9eaSMiao Xie 
2352ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
23535a6ac9eaSMiao Xie 	if (IS_ERR(rbio))
23545a6ac9eaSMiao Xie 		return NULL;
23555a6ac9eaSMiao Xie 	bio_list_add(&rbio->bio_list, bio);
23565a6ac9eaSMiao Xie 	/*
23575a6ac9eaSMiao Xie 	 * This is a special bio which is used to hold the completion handler
23585a6ac9eaSMiao Xie 	 * and make the scrub rbio similar to the other types.
23595a6ac9eaSMiao Xie 	 */
23605a6ac9eaSMiao Xie 	ASSERT(!bio->bi_iter.bi_size);
23615a6ac9eaSMiao Xie 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
23625a6ac9eaSMiao Xie 
23639cd3a7ebSLiu Bo 	/*
23644c664611SQu Wenruo 	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
23659cd3a7ebSLiu Bo 	 * to the end position, so this search can start from the first parity
23669cd3a7ebSLiu Bo 	 * stripe.
23679cd3a7ebSLiu Bo 	 */
23689cd3a7ebSLiu Bo 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
23694c664611SQu Wenruo 		if (bioc->stripes[i].dev == scrub_dev) {
23705a6ac9eaSMiao Xie 			rbio->scrubp = i;
23715a6ac9eaSMiao Xie 			break;
23725a6ac9eaSMiao Xie 		}
23735a6ac9eaSMiao Xie 	}
23749cd3a7ebSLiu Bo 	ASSERT(i < rbio->real_stripes);
23755a6ac9eaSMiao Xie 
2376c67c68ebSQu Wenruo 	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
23775a6ac9eaSMiao Xie 	return rbio;
23785a6ac9eaSMiao Xie }
23795a6ac9eaSMiao Xie 
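/*
 * A minimal usage sketch for the scrub entry points in this file (an
 * assumption-laden sketch, not the verbatim scrub code; it presumes the
 * caller already holds a bio_counter reference for @bioc and passes an
 * empty @bio that only carries the completion handler):
 *
 *	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev,
 *					      &dbitmap, stripe_nsectors);
 *	if (!rbio)
 *		return -ENOMEM;
 *	// for each sector to check:
 *	raid56_add_scrub_pages(rbio, page, pgoff, logical);
 *	raid56_parity_submit_scrub_rbio(rbio);
 *
 * The result is reported through the end_io handler of @bio.
 */
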
2380b4ee1782SOmar Sandoval /* Used for both parity scrub and missing. */
2381b4ee1782SOmar Sandoval void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
23826346f6bfSQu Wenruo 			    unsigned int pgoff, u64 logical)
23835a6ac9eaSMiao Xie {
23846346f6bfSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
23855a6ac9eaSMiao Xie 	int stripe_offset;
23865a6ac9eaSMiao Xie 	int index;
23875a6ac9eaSMiao Xie 
23884c664611SQu Wenruo 	ASSERT(logical >= rbio->bioc->raid_map[0]);
23896346f6bfSQu Wenruo 	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
2390ff18a4afSChristoph Hellwig 				       BTRFS_STRIPE_LEN * rbio->nr_data);
23914c664611SQu Wenruo 	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
23926346f6bfSQu Wenruo 	index = stripe_offset / sectorsize;
23936346f6bfSQu Wenruo 	rbio->bio_sectors[index].page = page;
23946346f6bfSQu Wenruo 	rbio->bio_sectors[index].pgoff = pgoff;
23955a6ac9eaSMiao Xie }
23965a6ac9eaSMiao Xie 
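/*
 * Worked example of the index math above, assuming a hypothetical 4K
 * sectorsize: a @logical sitting 64K past raid_map[0] gives
 * stripe_offset = 64K and index = 64K / 4K = 16, i.e. the 17th bio_sector
 * of the rbio, counting through the data stripes in raid_map (logical
 * address) order.
 */
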
23975a6ac9eaSMiao Xie /*
23985a6ac9eaSMiao Xie  * We only scrub the parity for which we have correct data on the same
23995a6ac9eaSMiao Xie  * horizontal stripe, so we don't need to allocate pages for all the stripes.
24005a6ac9eaSMiao Xie  */
24015a6ac9eaSMiao Xie static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
24025a6ac9eaSMiao Xie {
24033907ce29SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2404aee35e4bSQu Wenruo 	int total_sector_nr;
24055a6ac9eaSMiao Xie 
2406aee35e4bSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2407aee35e4bSQu Wenruo 	     total_sector_nr++) {
24083907ce29SQu Wenruo 		struct page *page;
2409aee35e4bSQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
2410aee35e4bSQu Wenruo 		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
24113907ce29SQu Wenruo 
2412aee35e4bSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
2413aee35e4bSQu Wenruo 			continue;
24145a6ac9eaSMiao Xie 		if (rbio->stripe_pages[index])
24155a6ac9eaSMiao Xie 			continue;
2416b0ee5e1eSDavid Sterba 		page = alloc_page(GFP_NOFS);
24175a6ac9eaSMiao Xie 		if (!page)
24185a6ac9eaSMiao Xie 			return -ENOMEM;
24195a6ac9eaSMiao Xie 		rbio->stripe_pages[index] = page;
24205a6ac9eaSMiao Xie 	}
2421eb357060SQu Wenruo 	index_stripe_sectors(rbio);
24225a6ac9eaSMiao Xie 	return 0;
24235a6ac9eaSMiao Xie }
24245a6ac9eaSMiao Xie 
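/*
 * Note on the page index math above, with assumed sizes for the sake of
 * example: with 4K pages and a 4K sectorsize,
 * index = (total_sector_nr * 4K) >> PAGE_SHIFT == total_sector_nr, so every
 * sector owns its own page.  On 64K pages (the subpage case) sixteen
 * consecutive 4K sectors map to the same stripe_pages[] slot, and the
 * "if (rbio->stripe_pages[index])" check ensures the shared page is only
 * allocated once.
 */
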
24256bfd0133SQu Wenruo static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check)
24265a6ac9eaSMiao Xie {
24274c664611SQu Wenruo 	struct btrfs_io_context *bioc = rbio->bioc;
242846900662SQu Wenruo 	const u32 sectorsize = bioc->fs_info->sectorsize;
24291389053eSKees Cook 	void **pointers = rbio->finish_pointers;
2430c67c68ebSQu Wenruo 	unsigned long *pbitmap = &rbio->finish_pbitmap;
24315a6ac9eaSMiao Xie 	int nr_data = rbio->nr_data;
24325a6ac9eaSMiao Xie 	int stripe;
24333e77605dSQu Wenruo 	int sectornr;
2434c17af965SDavid Sterba 	bool has_qstripe;
243546900662SQu Wenruo 	struct sector_ptr p_sector = { 0 };
243646900662SQu Wenruo 	struct sector_ptr q_sector = { 0 };
24375a6ac9eaSMiao Xie 	struct bio_list bio_list;
243876035976SMiao Xie 	int is_replace = 0;
24395a6ac9eaSMiao Xie 	int ret;
24405a6ac9eaSMiao Xie 
24415a6ac9eaSMiao Xie 	bio_list_init(&bio_list);
24425a6ac9eaSMiao Xie 
2443c17af965SDavid Sterba 	if (rbio->real_stripes - rbio->nr_data == 1)
2444c17af965SDavid Sterba 		has_qstripe = false;
2445c17af965SDavid Sterba 	else if (rbio->real_stripes - rbio->nr_data == 2)
2446c17af965SDavid Sterba 		has_qstripe = true;
2447c17af965SDavid Sterba 	else
24485a6ac9eaSMiao Xie 		BUG();
24495a6ac9eaSMiao Xie 
2450*1faf3885SQu Wenruo 	/*
2451*1faf3885SQu Wenruo 	 * If replace is running and our P/Q stripe is being replaced, we
2452*1faf3885SQu Wenruo 	 * need to duplicate the final write to the replace target.
2453*1faf3885SQu Wenruo 	 */
2454*1faf3885SQu Wenruo 	if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) {
245576035976SMiao Xie 		is_replace = 1;
2456c67c68ebSQu Wenruo 		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
245776035976SMiao Xie 	}
245876035976SMiao Xie 
24595a6ac9eaSMiao Xie 	/*
24605a6ac9eaSMiao Xie 	 * The higher layers (scrubber) are unlikely to use this
24615a6ac9eaSMiao Xie 	 * area of the disk again soon, so don't cache it.
24635a6ac9eaSMiao Xie 	 */
24645a6ac9eaSMiao Xie 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
24655a6ac9eaSMiao Xie 
24665a6ac9eaSMiao Xie 	if (!need_check)
24675a6ac9eaSMiao Xie 		goto writeback;
24685a6ac9eaSMiao Xie 
246946900662SQu Wenruo 	p_sector.page = alloc_page(GFP_NOFS);
247046900662SQu Wenruo 	if (!p_sector.page)
24716bfd0133SQu Wenruo 		return -ENOMEM;
247246900662SQu Wenruo 	p_sector.pgoff = 0;
247346900662SQu Wenruo 	p_sector.uptodate = 1;
24745a6ac9eaSMiao Xie 
2475c17af965SDavid Sterba 	if (has_qstripe) {
2476d70cef0dSIra Weiny 		/* RAID6, allocate and map temp space for the Q stripe */
247746900662SQu Wenruo 		q_sector.page = alloc_page(GFP_NOFS);
247846900662SQu Wenruo 		if (!q_sector.page) {
247946900662SQu Wenruo 			__free_page(p_sector.page);
248046900662SQu Wenruo 			p_sector.page = NULL;
24816bfd0133SQu Wenruo 			return -ENOMEM;
24825a6ac9eaSMiao Xie 		}
248346900662SQu Wenruo 		q_sector.pgoff = 0;
248446900662SQu Wenruo 		q_sector.uptodate = 1;
248546900662SQu Wenruo 		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
24865a6ac9eaSMiao Xie 	}
24875a6ac9eaSMiao Xie 
24882942a50dSQu Wenruo 	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
24895a6ac9eaSMiao Xie 
2490d70cef0dSIra Weiny 	/* Map the parity stripe just once */
249146900662SQu Wenruo 	pointers[nr_data] = kmap_local_page(p_sector.page);
2492d70cef0dSIra Weiny 
2493c67c68ebSQu Wenruo 	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
249446900662SQu Wenruo 		struct sector_ptr *sector;
24955a6ac9eaSMiao Xie 		void *parity;
249646900662SQu Wenruo 
24975a6ac9eaSMiao Xie 		/* first collect one page from each data stripe */
24985a6ac9eaSMiao Xie 		for (stripe = 0; stripe < nr_data; stripe++) {
249946900662SQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
250046900662SQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
250146900662SQu Wenruo 					   sector->pgoff;
25025a6ac9eaSMiao Xie 		}
25035a6ac9eaSMiao Xie 
2504c17af965SDavid Sterba 		if (has_qstripe) {
2505d70cef0dSIra Weiny 			/* RAID6, call the library function to fill in our P/Q */
250646900662SQu Wenruo 			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
25075a6ac9eaSMiao Xie 						pointers);
25085a6ac9eaSMiao Xie 		} else {
25095a6ac9eaSMiao Xie 			/* raid5 */
251046900662SQu Wenruo 			memcpy(pointers[nr_data], pointers[0], sectorsize);
251146900662SQu Wenruo 			run_xor(pointers + 1, nr_data - 1, sectorsize);
25125a6ac9eaSMiao Xie 		}
25135a6ac9eaSMiao Xie 
251401327610SNicholas D Steeves 		/* Check scrubbing parity and repair it */
251546900662SQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
251646900662SQu Wenruo 		parity = kmap_local_page(sector->page) + sector->pgoff;
251746900662SQu Wenruo 		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
251846900662SQu Wenruo 			memcpy(parity, pointers[rbio->scrubp], sectorsize);
25195a6ac9eaSMiao Xie 		else
25205a6ac9eaSMiao Xie 			/* Parity is right, no writeback needed. */
2521c67c68ebSQu Wenruo 			bitmap_clear(&rbio->dbitmap, sectornr, 1);
252258c1a35cSIra Weiny 		kunmap_local(parity);
25235a6ac9eaSMiao Xie 
252494a0b58dSIra Weiny 		for (stripe = nr_data - 1; stripe >= 0; stripe--)
252594a0b58dSIra Weiny 			kunmap_local(pointers[stripe]);
25265a6ac9eaSMiao Xie 	}
25275a6ac9eaSMiao Xie 
252894a0b58dSIra Weiny 	kunmap_local(pointers[nr_data]);
252946900662SQu Wenruo 	__free_page(p_sector.page);
253046900662SQu Wenruo 	p_sector.page = NULL;
253146900662SQu Wenruo 	if (q_sector.page) {
253294a0b58dSIra Weiny 		kunmap_local(pointers[rbio->real_stripes - 1]);
253346900662SQu Wenruo 		__free_page(q_sector.page);
253446900662SQu Wenruo 		q_sector.page = NULL;
2535d70cef0dSIra Weiny 	}
25365a6ac9eaSMiao Xie 
25375a6ac9eaSMiao Xie writeback:
25385a6ac9eaSMiao Xie 	/*
25395a6ac9eaSMiao Xie 	 * Time to start writing.  Make bios for everything from the
25405a6ac9eaSMiao Xie 	 * higher layers (the bio_list in our rbio) and our P/Q.  Ignore
25415a6ac9eaSMiao Xie 	 * everything else.
25425a6ac9eaSMiao Xie 	 */
2543c67c68ebSQu Wenruo 	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
25443e77605dSQu Wenruo 		struct sector_ptr *sector;
25455a6ac9eaSMiao Xie 
25463e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25473e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
2548ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_WRITE);
25495a6ac9eaSMiao Xie 		if (ret)
25505a6ac9eaSMiao Xie 			goto cleanup;
25515a6ac9eaSMiao Xie 	}
25525a6ac9eaSMiao Xie 
255376035976SMiao Xie 	if (!is_replace)
255476035976SMiao Xie 		goto submit_write;
255576035976SMiao Xie 
2556*1faf3885SQu Wenruo 	/*
2557*1faf3885SQu Wenruo 	 * Replace is running and our parity stripe needs to be duplicated to
2558*1faf3885SQu Wenruo 	 * the target device.  Check we have a valid source stripe number.
2559*1faf3885SQu Wenruo 	 */
2560*1faf3885SQu Wenruo 	ASSERT(rbio->bioc->replace_stripe_src >= 0);
25613e77605dSQu Wenruo 	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
25623e77605dSQu Wenruo 		struct sector_ptr *sector;
256376035976SMiao Xie 
25643e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25653e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector,
2566*1faf3885SQu Wenruo 					 rbio->real_stripes,
2567ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_WRITE);
256876035976SMiao Xie 		if (ret)
256976035976SMiao Xie 			goto cleanup;
257076035976SMiao Xie 	}
257176035976SMiao Xie 
257276035976SMiao Xie submit_write:
25736bfd0133SQu Wenruo 	submit_write_bios(rbio, &bio_list);
25746bfd0133SQu Wenruo 	return 0;
25755a6ac9eaSMiao Xie 
25765a6ac9eaSMiao Xie cleanup:
2577801fcfc5SChristoph Hellwig 	bio_list_put(&bio_list);
25786bfd0133SQu Wenruo 	return ret;
25795a6ac9eaSMiao Xie }
25805a6ac9eaSMiao Xie 
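/*
 * For reference, the RAID5 branch of finish_parity_scrub() reduces to a
 * plain XOR check.  An illustrative sketch for two data sectors d0 and d1
 * (the real code works on sectorsize buffers via memcpy + run_xor):
 *
 *	expected = d0 ^ d1;
 *	if (memcmp(parity, expected, sectorsize) != 0)
 *		memcpy(parity, expected, sectorsize);	// repaired, written back
 *	else
 *		clear the sector in dbitmap;		// nothing to write
 */
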
25815a6ac9eaSMiao Xie static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
25825a6ac9eaSMiao Xie {
25835a6ac9eaSMiao Xie 	if (stripe >= 0 && stripe < rbio->nr_data)
25845a6ac9eaSMiao Xie 		return 1;
25855a6ac9eaSMiao Xie 	return 0;
25865a6ac9eaSMiao Xie }
25875a6ac9eaSMiao Xie 
25886bfd0133SQu Wenruo static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
25895a6ac9eaSMiao Xie {
259075b47033SQu Wenruo 	void **pointers = NULL;
259175b47033SQu Wenruo 	void **unmap_array = NULL;
259275b47033SQu Wenruo 	int sector_nr;
2593e7fc357eSJosef Bacik 	int ret = 0;
25946bfd0133SQu Wenruo 
25955a6ac9eaSMiao Xie 	/*
259675b47033SQu Wenruo 	 * @pointers array stores the pointer for each sector.
259775b47033SQu Wenruo 	 *
259875b47033SQu Wenruo 	 * @unmap_array stores a copy of the pointers that does not get
259975b47033SQu Wenruo 	 * reordered during reconstruction, so that kunmap_local() works.
26005a6ac9eaSMiao Xie 	 */
260175b47033SQu Wenruo 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
260275b47033SQu Wenruo 	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
260375b47033SQu Wenruo 	if (!pointers || !unmap_array) {
260475b47033SQu Wenruo 		ret = -ENOMEM;
260575b47033SQu Wenruo 		goto out;
260675b47033SQu Wenruo 	}
26075a6ac9eaSMiao Xie 
260875b47033SQu Wenruo 	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
260975b47033SQu Wenruo 		int dfail = 0, failp = -1;
261075b47033SQu Wenruo 		int faila;
261175b47033SQu Wenruo 		int failb;
261275b47033SQu Wenruo 		int found_errors;
261375b47033SQu Wenruo 
261475b47033SQu Wenruo 		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
261575b47033SQu Wenruo 							 &faila, &failb);
261675b47033SQu Wenruo 		if (found_errors > rbio->bioc->max_errors) {
261775b47033SQu Wenruo 			ret = -EIO;
261875b47033SQu Wenruo 			goto out;
261975b47033SQu Wenruo 		}
262075b47033SQu Wenruo 		if (found_errors == 0)
262175b47033SQu Wenruo 			continue;
262275b47033SQu Wenruo 
262375b47033SQu Wenruo 		/* We should have at least one error here. */
262475b47033SQu Wenruo 		ASSERT(faila >= 0 || failb >= 0);
262575b47033SQu Wenruo 
262675b47033SQu Wenruo 		if (is_data_stripe(rbio, faila))
262775b47033SQu Wenruo 			dfail++;
262875b47033SQu Wenruo 		else if (is_parity_stripe(faila))
262975b47033SQu Wenruo 			failp = faila;
263075b47033SQu Wenruo 
263175b47033SQu Wenruo 		if (is_data_stripe(rbio, failb))
263275b47033SQu Wenruo 			dfail++;
263375b47033SQu Wenruo 		else if (is_parity_stripe(failb))
263475b47033SQu Wenruo 			failp = failb;
26355a6ac9eaSMiao Xie 		/*
263675b47033SQu Wenruo 		 * Because we cannot use the scrubbing parity to repair the
263775b47033SQu Wenruo 		 * data, the repair capability is reduced.  (In the case of
263875b47033SQu Wenruo 		 * RAID5, we cannot repair anything.)
263975b47033SQu Wenruo 		 */
264075b47033SQu Wenruo 		if (dfail > rbio->bioc->max_errors - 1) {
264175b47033SQu Wenruo 			ret = -EIO;
264275b47033SQu Wenruo 			goto out;
264375b47033SQu Wenruo 		}
264475b47033SQu Wenruo 		/*
264575b47033SQu Wenruo 		 * If all the data is good, then only the parity is wrong; just
264675b47033SQu Wenruo 		 * repair the parity, no need to recover data stripes.
26475a6ac9eaSMiao Xie 		 */
26486bfd0133SQu Wenruo 		if (dfail == 0)
264975b47033SQu Wenruo 			continue;
26505a6ac9eaSMiao Xie 
26515a6ac9eaSMiao Xie 		/*
26525a6ac9eaSMiao Xie 		 * Here we have one corrupted data stripe and one corrupted
265375b47033SQu Wenruo 		 * parity on RAID6.  If the corrupted parity is the scrubbing
265475b47033SQu Wenruo 		 * parity, we can luckily use the other parity to repair the
265575b47033SQu Wenruo 		 * data; otherwise we cannot repair the data stripe.
26565a6ac9eaSMiao Xie 		 */
265775b47033SQu Wenruo 		if (failp != rbio->scrubp) {
265875b47033SQu Wenruo 			ret = -EIO;
265975b47033SQu Wenruo 			goto out;
266075b47033SQu Wenruo 		}
26615a6ac9eaSMiao Xie 
266275b47033SQu Wenruo 		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
266375b47033SQu Wenruo 		if (ret < 0)
266475b47033SQu Wenruo 			goto out;
266575b47033SQu Wenruo 	}
266675b47033SQu Wenruo out:
266775b47033SQu Wenruo 	kfree(pointers);
266875b47033SQu Wenruo 	kfree(unmap_array);
26696bfd0133SQu Wenruo 	return ret;
26705a6ac9eaSMiao Xie }
26715a6ac9eaSMiao Xie 
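/*
 * Illustrative summary of the per-column decisions above, assuming RAID6
 * with max_errors == 2:
 *
 *	failed sectors in the column	outcome
 *	----------------------------	-------
 *	none				column skipped
 *	parity sector(s) only		skipped; the writeback repairs parity
 *	one data + the scrubbed parity	data rebuilt by recover_vertical()
 *	one data otherwise		-EIO, scrubbed parity can't be trusted
 *	two data or three failures	-EIO, beyond the repair capability
 */
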
267252f0c198SChristoph Hellwig static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
26735a6ac9eaSMiao Xie {
267452f0c198SChristoph Hellwig 	struct bio_list bio_list = BIO_EMPTY_LIST;
2675cb3450b7SQu Wenruo 	int total_sector_nr;
2676cb3450b7SQu Wenruo 	int ret = 0;
26775a6ac9eaSMiao Xie 
26781c10702eSQu Wenruo 	/* Build a list of bios to read all the missing parts. */
26791c10702eSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
26801c10702eSQu Wenruo 	     total_sector_nr++) {
26811c10702eSQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
26821c10702eSQu Wenruo 		int stripe = total_sector_nr / rbio->stripe_nsectors;
26833e77605dSQu Wenruo 		struct sector_ptr *sector;
26841c10702eSQu Wenruo 
26851c10702eSQu Wenruo 		/* No data in the vertical stripe, no need to read. */
26861c10702eSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
26871c10702eSQu Wenruo 			continue;
26881c10702eSQu Wenruo 
26895a6ac9eaSMiao Xie 		/*
26901c10702eSQu Wenruo 		 * We want to find all the sectors missing from the rbio and
26911c10702eSQu Wenruo 		 * read them from the disk. If sector_in_rbio() finds a sector
26921c10702eSQu Wenruo 		 * in the bio list we don't need to read it off the stripe.
26935a6ac9eaSMiao Xie 		 */
26943e77605dSQu Wenruo 		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
26953e77605dSQu Wenruo 		if (sector)
26965a6ac9eaSMiao Xie 			continue;
26975a6ac9eaSMiao Xie 
26983e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, stripe, sectornr);
26995a6ac9eaSMiao Xie 		/*
27001c10702eSQu Wenruo 		 * The bio cache may have handed us an uptodate sector.  If so,
27011c10702eSQu Wenruo 		 * use it.
27025a6ac9eaSMiao Xie 		 */
27033e77605dSQu Wenruo 		if (sector->uptodate)
27045a6ac9eaSMiao Xie 			continue;
27055a6ac9eaSMiao Xie 
270652f0c198SChristoph Hellwig 		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
2707ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_READ);
270852f0c198SChristoph Hellwig 		if (ret) {
270952f0c198SChristoph Hellwig 			bio_list_put(&bio_list);
2710cb3450b7SQu Wenruo 			return ret;
2711cb3450b7SQu Wenruo 		}
271252f0c198SChristoph Hellwig 	}
271352f0c198SChristoph Hellwig 
271452f0c198SChristoph Hellwig 	submit_read_wait_bio_list(rbio, &bio_list);
271552f0c198SChristoph Hellwig 	return 0;
271652f0c198SChristoph Hellwig }
2717cb3450b7SQu Wenruo 
271808241d3cSChristoph Hellwig static void scrub_rbio(struct btrfs_raid_bio *rbio)
2719cb3450b7SQu Wenruo {
27206bfd0133SQu Wenruo 	bool need_check = false;
2721ad3daf1cSQu Wenruo 	int sector_nr;
2722cb3450b7SQu Wenruo 	int ret;
2723cb3450b7SQu Wenruo 
2724cb3450b7SQu Wenruo 	ret = alloc_rbio_essential_pages(rbio);
2725cb3450b7SQu Wenruo 	if (ret)
272608241d3cSChristoph Hellwig 		goto out;
2727cb3450b7SQu Wenruo 
27282942a50dSQu Wenruo 	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
27292942a50dSQu Wenruo 
273052f0c198SChristoph Hellwig 	ret = scrub_assemble_read_bios(rbio);
2731cb3450b7SQu Wenruo 	if (ret < 0)
273208241d3cSChristoph Hellwig 		goto out;
27336bfd0133SQu Wenruo 
273475b47033SQu Wenruo 	/* We may have some failures, recover the failed sectors first. */
27356bfd0133SQu Wenruo 	ret = recover_scrub_rbio(rbio);
27366bfd0133SQu Wenruo 	if (ret < 0)
273708241d3cSChristoph Hellwig 		goto out;
27386bfd0133SQu Wenruo 
27395a6ac9eaSMiao Xie 	/*
27406bfd0133SQu Wenruo 	 * We have every sector properly prepared, so we can finish the
27416bfd0133SQu Wenruo 	 * scrub and write back the good content.
27425a6ac9eaSMiao Xie 	 */
27436bfd0133SQu Wenruo 	ret = finish_parity_scrub(rbio, need_check);
27446bfd0133SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2745ad3daf1cSQu Wenruo 	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2746ad3daf1cSQu Wenruo 		int found_errors;
2747ad3daf1cSQu Wenruo 
2748ad3daf1cSQu Wenruo 		found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
2749ad3daf1cSQu Wenruo 		if (found_errors > rbio->bioc->max_errors) {
27506bfd0133SQu Wenruo 			ret = -EIO;
2751ad3daf1cSQu Wenruo 			break;
2752ad3daf1cSQu Wenruo 		}
2753ad3daf1cSQu Wenruo 	}
275408241d3cSChristoph Hellwig out:
275508241d3cSChristoph Hellwig 	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
27565a6ac9eaSMiao Xie }
27575a6ac9eaSMiao Xie 
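/*
 * Rough outline of scrub_rbio() above (steps, not code):
 *
 *	1) Allocate stripe pages only for the columns marked in dbitmap.
 *	2) Read every sector of those columns that is not already uptodate
 *	   or present in the bio list.
 *	3) Recover any failed sectors (recover_scrub_rbio()).
 *	4) Recompute the parity, compare it with what is on disk and write
 *	   back only the sectors that differ (finish_parity_scrub()).
 *	5) Fail with -EIO if any column still exceeds max_errors.
 */
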
27586bfd0133SQu Wenruo static void scrub_rbio_work_locked(struct work_struct *work)
27595a6ac9eaSMiao Xie {
276008241d3cSChristoph Hellwig 	scrub_rbio(container_of(work, struct btrfs_raid_bio, work));
27615a6ac9eaSMiao Xie }
27625a6ac9eaSMiao Xie 
27635a6ac9eaSMiao Xie void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
27645a6ac9eaSMiao Xie {
27655a6ac9eaSMiao Xie 	if (!lock_stripe_add(rbio))
27666bfd0133SQu Wenruo 		start_async_work(rbio, scrub_rbio_work_locked);
27675a6ac9eaSMiao Xie }
2768b4ee1782SOmar Sandoval 
2769b4ee1782SOmar Sandoval /* The following code is used for dev replace of a missing RAID 5/6 device. */
2770b4ee1782SOmar Sandoval 
2771b4ee1782SOmar Sandoval struct btrfs_raid_bio *
2772ff18a4afSChristoph Hellwig raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc)
2773b4ee1782SOmar Sandoval {
27746a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
2775b4ee1782SOmar Sandoval 	struct btrfs_raid_bio *rbio;
2776b4ee1782SOmar Sandoval 
2777ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
2778b4ee1782SOmar Sandoval 	if (IS_ERR(rbio))
2779b4ee1782SOmar Sandoval 		return NULL;
2780b4ee1782SOmar Sandoval 
2781b4ee1782SOmar Sandoval 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2782b4ee1782SOmar Sandoval 	bio_list_add(&rbio->bio_list, bio);
2783b4ee1782SOmar Sandoval 	/*
2784b4ee1782SOmar Sandoval 	 * This is a special bio which is used to hold the completion handler
2785b4ee1782SOmar Sandoval 	 * and make the missing rbio similar to the other types.
2786b4ee1782SOmar Sandoval 	 */
2787b4ee1782SOmar Sandoval 	ASSERT(!bio->bi_iter.bi_size);
2788b4ee1782SOmar Sandoval 
27892942a50dSQu Wenruo 	set_rbio_range_error(rbio, bio);
2790b4ee1782SOmar Sandoval 
2791b4ee1782SOmar Sandoval 	return rbio;
2792b4ee1782SOmar Sandoval }
2793b4ee1782SOmar Sandoval 
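/*
 * Minimal usage sketch for the missing-device path (assumptions only, not
 * the verbatim caller): raid56_alloc_missing_rbio() above and
 * raid56_submit_missing_rbio() below are used as a pair, with
 * raid56_add_scrub_pages() filling in the sectors:
 *
 *	rbio = raid56_alloc_missing_rbio(bio, bioc);
 *	if (!rbio)
 *		return -ENOMEM;
 *	// for each sector backing the missing device:
 *	raid56_add_scrub_pages(rbio, page, pgoff, logical);
 *	raid56_submit_missing_rbio(rbio);
 */
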
2794b4ee1782SOmar Sandoval void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2795b4ee1782SOmar Sandoval {
2796d817ce35SQu Wenruo 	start_async_work(rbio, recover_rbio_work);
2797b4ee1782SOmar Sandoval }
2798