// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS				11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};
/*
 * A bvec-like structure to present a sector inside a page.
 *
 * Unlike a bvec we don't need bv_len, as it's fixed to sectorsize.
 */
struct sector_ptr {
	struct page *page;
	unsigned int pgoff:24;
	unsigned int uptodate:8;
};
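
/*
 * For example, assuming a 4K sectorsize on a system with 64K pages:
 * each page holds 16 sectors and pgoff takes one of the values
 * 0, 4096, ..., 61440, which fits comfortably in the 24-bit field.
 * uptodate only ever holds 0 or 1.
 */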

static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_rbio_work(struct work_struct *work);
static void rmw_rbio_work_locked(struct work_struct *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct work_struct *work);

static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
{
	kfree(rbio->stripe_pages);
	kfree(rbio->bio_sectors);
	kfree(rbio->stripe_sectors);
	kfree(rbio->finish_pointers);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bioc(rbio->bioc);
	free_raid_bio_pointers(rbio);
	kfree(rbio);
}

static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
{
	INIT_WORK(&rbio->work, work_func);
	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fall back to vmalloc to lower the
	 * chance of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	kvfree(x);
	return 0;
}
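
/*
 * A quick sizing sketch (illustrative numbers): with
 * BTRFS_STRIPE_HASH_TABLE_BITS == 11 there are 2048 buckets, so
 * struct_size(table, table, num_entries) works out to
 * sizeof(struct btrfs_stripe_hash_table) +
 * 2048 * sizeof(struct btrfs_stripe_hash).  That is why the comment
 * above talks about order-4 (and, with lock debugging, order-7)
 * allocations and why kvzalloc() is used as a fallback.
 */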

/*
 * Caching an rbio means copying everything from the bio_sectors array
 * into the stripe_pages array.  We use the sector uptodate bit in the
 * stripe cache array to indicate if it has valid data.
 *
 * Once the caching is done, we set the cache ready bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_sectors; i++) {
		/* Some range not covered by bio (partial write), skip it */
		if (!rbio->bio_sectors[i].page) {
			/*
			 * Even if the sector is not covered by bio, if it is
			 * a data sector it should still be uptodate as it is
			 * read from disk.
			 */
			if (i < rbio->nr_data * rbio->stripe_nsectors)
				ASSERT(rbio->stripe_sectors[i].uptodate);
			continue;
		}

		ASSERT(rbio->stripe_sectors[i].page);
		memcpy_page(rbio->stripe_sectors[i].page,
			    rbio->stripe_sectors[i].pgoff,
			    rbio->bio_sectors[i].page,
			    rbio->bio_sectors[i].pgoff,
			    rbio->bioc->fs_info->sectorsize);
		rbio->stripe_sectors[i].uptodate = 1;
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bioc->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
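
/*
 * A worked example, assuming the usual 64K stripe length: full stripe
 * start addresses in raid_map[0] are 64K-aligned, so their low 16 bits
 * are all zero and hash_64() on the raw value clusters badly.  After
 * ">> 16" the values differ in their low bits again and hash_64(num,
 * 11) spreads them across the 2048 buckets.
 */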

static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
				       unsigned int page_nr)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	ASSERT(page_nr < rbio->nr_pages);

	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page;
	     i++) {
		if (!rbio->stripe_sectors[i].uptodate)
			return false;
	}
	return true;
}

/*
 * Update the stripe_sectors[] array to use correct page and pgoff
 *
 * Should be called every time any page pointer in stripe_pages[] is modified.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	u32 offset;
	int i;

	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
	}
}
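
/*
 * A worked example, assuming a 4K sectorsize: sector i sits at byte
 * offset i * 4096.  With 4K pages, sector 17 maps to page_index 17
 * with pgoff 0; with 64K pages the same sector maps to page_index 1
 * (69632 >> 16) with pgoff 0x1000.
 */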

static void steal_rbio_page(struct btrfs_raid_bio *src,
			    struct btrfs_raid_bio *dest, int page_nr)
{
	const u32 sectorsize = src->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	if (dest->stripe_pages[page_nr])
		__free_page(dest->stripe_pages[page_nr]);
	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
	src->stripe_pages[page_nr] = NULL;

	/* Also update the sector->uptodate bits. */
	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page; i++)
		dest->stripe_sectors[i].uptodate = true;
}

static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
{
	const int sector_nr = (page_nr << PAGE_SHIFT) >>
			      rbio->bioc->fs_info->sectorsize_bits;

	/*
	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
	 * we won't have a page which is half data half parity.
	 *
	 * Thus if the first sector of the page belongs to data stripes, then
	 * the full page belongs to data stripes.
	 */
	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
}

/*
 * Stealing an rbio means taking all the uptodate pages from the stripe array
 * in the source rbio and putting them into the destination rbio.
 *
 * This will also update the involved stripe_sectors[] which are referring to
 * the old pages.
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		struct page *p = src->stripe_pages[i];

		/*
		 * We don't need to steal P/Q pages as they will always be
		 * regenerated for RMW or full write anyway.
		 */
		if (!is_data_stripe_page(src, i))
			continue;

		/*
		 * If @src already has RBIO_CACHE_READY_BIT, it should have
		 * all data stripe pages present and uptodate.
		 */
		ASSERT(p);
		ASSERT(full_page_sectors_uptodate(src, i));
		steal_rbio_page(src, dest, i);
	}
	index_stripe_sectors(dest);
	index_stripe_sectors(src);
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	/* Also inherit the bitmaps from @victim. */
	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
		  dest->stripe_nsectors);
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table,
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
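
/*
 * Sketch of the loop above, assuming MAX_XOR_BLOCKS is 4 as in
 * linux/raid/xor.h: with src_cnt == 6, the first xor_blocks() call
 * folds pages[0..3] into dest and the second folds pages[4..5], which
 * is equivalent to XOR-ing all six sources into dest in one pass.
 */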

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
		ret = 0;
	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
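
/*
 * For example, on a 3-device RAID5 (nr_data == 2) with the fixed 64K
 * BTRFS_STRIPE_LEN, the rbio is "full" only when the queued bios cover
 * exactly 128K of data, in which case parity can be computed without
 * first reading the old data back from disk.
 */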

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though; other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * A parity scrub reads the full stripe from the drive, checks
	 * and repairs the parity, and writes the new results back.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}
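
/*
 * The faila/failb normalization above makes the comparison order
 * independent.  For example, last with (faila = 2, failb = 0) and cur
 * with (faila = 0, failb = 2) both normalize to the pair (0, 2), so
 * two read-rebuilds recovering the same failed stripes can still merge.
 */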

static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	ASSERT(stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr < rbio->stripe_nsectors);

	return stripe_nr * rbio->stripe_nsectors + sector_nr;
}
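
/*
 * The stripe_sectors[] array is laid out stripe by stripe.  With
 * stripe_nsectors == 16 (64K stripes, 4K sectors), stripe 2 sector 5
 * lands at index 2 * 16 + 5 = 37, for example.
 */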

/* Return a sector from rbio->stripe_sectors, not from the bio list */
static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
							      sector_nr)];
}

/* Grab a sector inside P stripe */
static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
}

/* Grab a sector inside Q stripe, return NULL if not RAID6 */
static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
}
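
/*
 * In other words, the data stripes occupy slots [0, nr_data), P is the
 * stripe at nr_data, and Q (RAID6 only) is the stripe at nr_data + 1.
 * On RAID5, real_stripes == nr_data + 1, so rbio_qstripe_sector()
 * returns NULL.
 */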

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones.  We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}
		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		free_raid_bio(freeit);
	return ret;
}

static void recover_rbio_work_locked(struct work_struct *work);

/*
 * called when rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, recover_rbio_work_locked);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, recover_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	/*
	 * Clear the data bitmap, as the rbio may be cached for later usage.
	 * Do this before unlock_stripe() so there will be no new bio for
	 * this rbio.
	 */
	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have written all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bioc->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/*
 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
 *
 * @rbio:               The raid bio
 * @stripe_nr:          Stripe number, valid range [0, real_stripes)
 * @sector_nr:		Sector number inside the stripe,
 *			valid range [0, stripe_nsectors)
 * @bio_list_only:      Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only use stripe_sectors as fallback.
 */
static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
					 int stripe_nr, int sector_nr,
					 bool bio_list_only)
{
	struct sector_ptr *sector;
	int index;

	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);

	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
	ASSERT(index >= 0 && index < rbio->nr_sectors);

	spin_lock_irq(&rbio->bio_list_lock);
	sector = &rbio->bio_sectors[index];
	if (sector->page || bio_list_only) {
		/* Don't return sector without a valid page pointer */
		if (!sector->page)
			sector = NULL;
		spin_unlock_irq(&rbio->bio_list_lock);
		return sector;
	}
	spin_unlock_irq(&rbio->bio_list_lock);

	return &rbio->stripe_sectors[index];
}
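
/*
 * The lookup order matters for RMW: a sector freshly queued by the
 * caller (present in bio_sectors) always wins over the possibly stale
 * copy in stripe_sectors.  With bio_list_only == true the fallback is
 * skipped entirely and NULL means "not covered by any queued bio".
 */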

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that this
 * does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_io_context *bioc)
{
	const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
	const unsigned int num_pages = stripe_npages * real_stripes;
	const unsigned int stripe_nsectors =
		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	const unsigned int num_sectors = stripe_nsectors * real_stripes;
	struct btrfs_raid_bio *rbio;

	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
	/*
	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
	 * (at most 16) should be no larger than BITS_PER_LONG.
	 */
	ASSERT(stripe_nsectors <= BITS_PER_LONG);

	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);
	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
				     GFP_NOFS);
	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				    GFP_NOFS);
	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				       GFP_NOFS);
	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);

	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
	    !rbio->finish_pointers) {
		free_raid_bio_pointers(rbio);
		kfree(rbio);
		return ERR_PTR(-ENOMEM);
	}

	bio_list_init(&rbio->bio_list);
	init_waitqueue_head(&rbio->io_wait);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	btrfs_get_bioc(bioc);
	rbio->bioc = bioc;
	rbio->nr_pages = num_pages;
	rbio->nr_sectors = num_sectors;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->stripe_nsectors = stripe_nsectors;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);

	return rbio;
}
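
/*
 * Sizing example with illustrative numbers: a 4-device RAID6 profile
 * with 4K pages and 4K sectorsize gives real_stripes = 4 and
 * stripe_npages = stripe_nsectors = 16 (64K / 4K), so num_pages =
 * num_sectors = 64 and nr_data = 4 - 2 = 2.
 */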

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
	if (ret < 0)
		return ret;
	/* Mapping all sectors */
	index_stripe_sectors(rbio);
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
				     rbio->stripe_pages + data_pages);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * Add a single sector @sector into our list of bios for IO.
 *
 * Return 0 if everything went well.
 * Return <0 for error.
 */
static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list,
			      struct sector_ptr *sector,
			      unsigned int stripe_nr,
			      unsigned int sector_nr,
			      enum req_op op)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_io_stripe *stripe;
	u64 disk_start;

	/*
	 * Note: here stripe_nr has taken device replace into consideration,
	 * thus it can be larger than rbio->real_stripes.
	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
	 */
	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
	ASSERT(sector->page);

	stripe = &rbio->bioc->stripes[stripe_nr];
	disk_start = stripe->physical + sector_nr * sectorsize;

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, sector->page, sectorsize,
					   sector->pgoff);
			if (ret == sectorsize)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = bio_alloc(stripe->dev->bdev,
			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
			op, GFP_NOFS);
	bio->bi_iter.bi_sector = disk_start >> 9;
	bio->bi_private = rbio;

	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
	bio_list_add(bio_list, bio);
	return 0;
}
111053b381b3SDavid Woodhouse 
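/*
 * Editor's note (an illustration, not part of the original source): the
 * merge fast path above only extends the tail bio when the new sector is
 * physically contiguous on the same bdev.  E.g. with a 4KiB sectorsize, a
 * tail bio covering disk bytes [X, X + 8KiB) absorbs a sector whose
 * disk_start == X + 8KiB; anything else falls through to bio_alloc(),
 * sized for up to a full BTRFS_STRIPE_LEN worth of pages.
 */
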
111100425dd9SQu Wenruo static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
111200425dd9SQu Wenruo {
111300425dd9SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
111400425dd9SQu Wenruo 	struct bio_vec bvec;
111500425dd9SQu Wenruo 	struct bvec_iter iter;
111600425dd9SQu Wenruo 	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
111700425dd9SQu Wenruo 		     rbio->bioc->raid_map[0];
111800425dd9SQu Wenruo 
111900425dd9SQu Wenruo 	bio_for_each_segment(bvec, bio, iter) {
112000425dd9SQu Wenruo 		u32 bvec_offset;
112100425dd9SQu Wenruo 
112200425dd9SQu Wenruo 		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
112300425dd9SQu Wenruo 		     bvec_offset += sectorsize, offset += sectorsize) {
112400425dd9SQu Wenruo 			int index = offset / sectorsize;
112500425dd9SQu Wenruo 			struct sector_ptr *sector = &rbio->bio_sectors[index];
112600425dd9SQu Wenruo 
112700425dd9SQu Wenruo 			sector->page = bvec.bv_page;
112800425dd9SQu Wenruo 			sector->pgoff = bvec.bv_offset + bvec_offset;
112900425dd9SQu Wenruo 			ASSERT(sector->pgoff < PAGE_SIZE);
113000425dd9SQu Wenruo 		}
113100425dd9SQu Wenruo 	}
113200425dd9SQu Wenruo }
113300425dd9SQu Wenruo 
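/*
 * Editor's note (an illustration, not part of the original source): the
 * index math above is relative to the full stripe start (raid_map[0]).
 * E.g. with raid_map[0] == 1MiB and sectorsize == 4KiB, a bio starting at
 * logical 1MiB + 12KiB begins filling bio_sectors[] at index
 * 12KiB / 4KiB == 3, one entry per sectorsize chunk of each bvec.
 */
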
113453b381b3SDavid Woodhouse /*
113553b381b3SDavid Woodhouse  * helper function to walk our bio list and populate the bio_sectors array
113653b381b3SDavid Woodhouse  * with the result.  This seems expensive, but it is faster than constantly
113753b381b3SDavid Woodhouse  * searching through the bio list as we set up the IO in finish_rmw or stripe
113853b381b3SDavid Woodhouse  * reconstruction.
113953b381b3SDavid Woodhouse  *
114053b381b3SDavid Woodhouse  * This must be called before you trust the answers from sector_in_rbio
114153b381b3SDavid Woodhouse  */
114253b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio)
114353b381b3SDavid Woodhouse {
114453b381b3SDavid Woodhouse 	struct bio *bio;
114553b381b3SDavid Woodhouse 
114653b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
114700425dd9SQu Wenruo 	bio_list_for_each(bio, &rbio->bio_list)
114800425dd9SQu Wenruo 		index_one_bio(rbio, bio);
114900425dd9SQu Wenruo 
115053b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
115153b381b3SDavid Woodhouse }
115253b381b3SDavid Woodhouse 
1153b8bea09aSQu Wenruo static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
1154b8bea09aSQu Wenruo 			       struct raid56_bio_trace_info *trace_info)
1155b8bea09aSQu Wenruo {
1156b8bea09aSQu Wenruo 	const struct btrfs_io_context *bioc = rbio->bioc;
1157b8bea09aSQu Wenruo 	int i;
1158b8bea09aSQu Wenruo 
1159b8bea09aSQu Wenruo 	ASSERT(bioc);
1160b8bea09aSQu Wenruo 
1161b8bea09aSQu Wenruo 	/* We rely on bio->bi_bdev to find the stripe number. */
1162b8bea09aSQu Wenruo 	if (!bio->bi_bdev)
1163b8bea09aSQu Wenruo 		goto not_found;
1164b8bea09aSQu Wenruo 
1165b8bea09aSQu Wenruo 	for (i = 0; i < bioc->num_stripes; i++) {
1166b8bea09aSQu Wenruo 		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
1167b8bea09aSQu Wenruo 			continue;
1168b8bea09aSQu Wenruo 		trace_info->stripe_nr = i;
1169b8bea09aSQu Wenruo 		trace_info->devid = bioc->stripes[i].dev->devid;
1170b8bea09aSQu Wenruo 		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1171b8bea09aSQu Wenruo 				     bioc->stripes[i].physical;
1172b8bea09aSQu Wenruo 		return;
1173b8bea09aSQu Wenruo 	}
1174b8bea09aSQu Wenruo 
1175b8bea09aSQu Wenruo not_found:
1176b8bea09aSQu Wenruo 	trace_info->devid = -1;
1177b8bea09aSQu Wenruo 	trace_info->offset = -1;
1178b8bea09aSQu Wenruo 	trace_info->stripe_nr = -1;
1179b8bea09aSQu Wenruo }
1180b8bea09aSQu Wenruo 
118130e3c897SQu Wenruo /* Generate PQ for one vertical stripe. */
118230e3c897SQu Wenruo static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
118330e3c897SQu Wenruo {
118430e3c897SQu Wenruo 	void **pointers = rbio->finish_pointers;
118530e3c897SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
118630e3c897SQu Wenruo 	struct sector_ptr *sector;
118730e3c897SQu Wenruo 	int stripe;
118830e3c897SQu Wenruo 	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;
118930e3c897SQu Wenruo 
119030e3c897SQu Wenruo 	/* First collect one sector from each data stripe */
119130e3c897SQu Wenruo 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
119230e3c897SQu Wenruo 		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
119330e3c897SQu Wenruo 		pointers[stripe] = kmap_local_page(sector->page) +
119430e3c897SQu Wenruo 				   sector->pgoff;
119530e3c897SQu Wenruo 	}
119630e3c897SQu Wenruo 
119730e3c897SQu Wenruo 	/* Then add the parity stripe */
119830e3c897SQu Wenruo 	sector = rbio_pstripe_sector(rbio, sectornr);
119930e3c897SQu Wenruo 	sector->uptodate = 1;
120030e3c897SQu Wenruo 	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;
120130e3c897SQu Wenruo 
120230e3c897SQu Wenruo 	if (has_qstripe) {
120330e3c897SQu Wenruo 		/*
120430e3c897SQu Wenruo 		 * RAID6, add the qstripe and call the library function
120530e3c897SQu Wenruo 		 * to fill in our p/q
120630e3c897SQu Wenruo 		 */
120730e3c897SQu Wenruo 		sector = rbio_qstripe_sector(rbio, sectornr);
120830e3c897SQu Wenruo 		sector->uptodate = 1;
120930e3c897SQu Wenruo 		pointers[stripe++] = kmap_local_page(sector->page) +
121030e3c897SQu Wenruo 				     sector->pgoff;
121130e3c897SQu Wenruo 
121230e3c897SQu Wenruo 		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
121330e3c897SQu Wenruo 					pointers);
121430e3c897SQu Wenruo 	} else {
121530e3c897SQu Wenruo 		/* raid5 */
121630e3c897SQu Wenruo 		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
121730e3c897SQu Wenruo 		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
121830e3c897SQu Wenruo 	}
121930e3c897SQu Wenruo 	for (stripe = stripe - 1; stripe >= 0; stripe--)
122030e3c897SQu Wenruo 		kunmap_local(pointers[stripe]);
122130e3c897SQu Wenruo }
122230e3c897SQu Wenruo 
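/*
 * Editor's note (an illustration, not part of the original source): the
 * per-vertical-stripe parity is the classic RAID5/6 math.  With data
 * sectors D0..Dn-1:
 *
 *	P = D0 ^ D1 ^ ... ^ Dn-1
 *	Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*Dn-1	(in GF(2^8), g == 2)
 *
 * raid6_call.gen_syndrome() produces both P and Q in one pass for RAID6,
 * while the RAID5 branch seeds P with a memcpy() of D0 and xors in the
 * remaining data sectors via run_xor().
 */
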
12236486d21cSQu Wenruo static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
12246486d21cSQu Wenruo 				   struct bio_list *bio_list)
12256486d21cSQu Wenruo {
12266486d21cSQu Wenruo 	struct bio *bio;
12276486d21cSQu Wenruo 	/* The total sector number inside the full stripe. */
12286486d21cSQu Wenruo 	int total_sector_nr;
12296486d21cSQu Wenruo 	int sectornr;
12306486d21cSQu Wenruo 	int stripe;
12316486d21cSQu Wenruo 	int ret;
12326486d21cSQu Wenruo 
12336486d21cSQu Wenruo 	ASSERT(bio_list_size(bio_list) == 0);
12346486d21cSQu Wenruo 
12356486d21cSQu Wenruo 	/* We should have at least one data sector. */
12366486d21cSQu Wenruo 	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));
12376486d21cSQu Wenruo 
12386486d21cSQu Wenruo 	/*
12395eb30ee2SQu Wenruo 	 * Reset errors, as we may have errors inherited from a degraded
12405eb30ee2SQu Wenruo 	 * write.
12415eb30ee2SQu Wenruo 	 */
12425eb30ee2SQu Wenruo 	atomic_set(&rbio->error, 0);
12435eb30ee2SQu Wenruo 	rbio->faila = -1;
12445eb30ee2SQu Wenruo 	rbio->failb = -1;
12455eb30ee2SQu Wenruo 
12465eb30ee2SQu Wenruo 	/*
12476486d21cSQu Wenruo 	 * Start assembly.  Make bios for everything from the higher layers (the
12486486d21cSQu Wenruo 	 * bio_list in our rbio) and our P/Q.  Ignore everything else.
12496486d21cSQu Wenruo 	 */
12506486d21cSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
12516486d21cSQu Wenruo 	     total_sector_nr++) {
12526486d21cSQu Wenruo 		struct sector_ptr *sector;
12536486d21cSQu Wenruo 
12546486d21cSQu Wenruo 		stripe = total_sector_nr / rbio->stripe_nsectors;
12556486d21cSQu Wenruo 		sectornr = total_sector_nr % rbio->stripe_nsectors;
12566486d21cSQu Wenruo 
12576486d21cSQu Wenruo 		/* This vertical stripe has no data, skip it. */
12586486d21cSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
12596486d21cSQu Wenruo 			continue;
12606486d21cSQu Wenruo 
12616486d21cSQu Wenruo 		if (stripe < rbio->nr_data) {
12626486d21cSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
12636486d21cSQu Wenruo 			if (!sector)
12646486d21cSQu Wenruo 				continue;
12656486d21cSQu Wenruo 		} else {
12666486d21cSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
12676486d21cSQu Wenruo 		}
12686486d21cSQu Wenruo 
12696486d21cSQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
12706486d21cSQu Wenruo 					 sectornr, REQ_OP_WRITE);
12716486d21cSQu Wenruo 		if (ret)
12726486d21cSQu Wenruo 			goto error;
12736486d21cSQu Wenruo 	}
12746486d21cSQu Wenruo 
12756486d21cSQu Wenruo 	if (likely(!rbio->bioc->num_tgtdevs))
12766486d21cSQu Wenruo 		return 0;
12776486d21cSQu Wenruo 
12786486d21cSQu Wenruo 	/* Make a copy for the replace target device. */
12796486d21cSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
12806486d21cSQu Wenruo 	     total_sector_nr++) {
12816486d21cSQu Wenruo 		struct sector_ptr *sector;
12826486d21cSQu Wenruo 
12836486d21cSQu Wenruo 		stripe = total_sector_nr / rbio->stripe_nsectors;
12846486d21cSQu Wenruo 		sectornr = total_sector_nr % rbio->stripe_nsectors;
12856486d21cSQu Wenruo 
12866486d21cSQu Wenruo 		if (!rbio->bioc->tgtdev_map[stripe]) {
12876486d21cSQu Wenruo 			/*
12886486d21cSQu Wenruo 			 * We can skip the whole stripe completely; note that
12896486d21cSQu Wenruo 			 * total_sector_nr will be increased by one by the loop anyway.
12906486d21cSQu Wenruo 			 */
12916486d21cSQu Wenruo 			ASSERT(sectornr == 0);
12926486d21cSQu Wenruo 			total_sector_nr += rbio->stripe_nsectors - 1;
12936486d21cSQu Wenruo 			continue;
12946486d21cSQu Wenruo 		}
12956486d21cSQu Wenruo 
12966486d21cSQu Wenruo 		/* This vertical stripe has no data, skip it. */
12976486d21cSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
12986486d21cSQu Wenruo 			continue;
12996486d21cSQu Wenruo 
13006486d21cSQu Wenruo 		if (stripe < rbio->nr_data) {
13016486d21cSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
13026486d21cSQu Wenruo 			if (!sector)
13036486d21cSQu Wenruo 				continue;
13046486d21cSQu Wenruo 		} else {
13056486d21cSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
13066486d21cSQu Wenruo 		}
13076486d21cSQu Wenruo 
13086486d21cSQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector,
13096486d21cSQu Wenruo 					 rbio->bioc->tgtdev_map[stripe],
13106486d21cSQu Wenruo 					 sectornr, REQ_OP_WRITE);
13116486d21cSQu Wenruo 		if (ret)
13126486d21cSQu Wenruo 			goto error;
13136486d21cSQu Wenruo 	}
13146486d21cSQu Wenruo 
13156486d21cSQu Wenruo 	return 0;
13166486d21cSQu Wenruo error:
13176486d21cSQu Wenruo 	while ((bio = bio_list_pop(bio_list)))
13186486d21cSQu Wenruo 		bio_put(bio);
13196486d21cSQu Wenruo 	return -EIO;
13206486d21cSQu Wenruo }
13216486d21cSQu Wenruo 
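/*
 * Editor's note (an illustration, not part of the original source): the
 * second loop above duplicates every queued write for the dev-replace
 * target, so each dirtied sector is submitted twice: once to
 * bioc->stripes[stripe] and once to bioc->tgtdev_map[stripe].  Stripes
 * without a replace target (tgtdev_map[stripe] == 0) are skipped a whole
 * stripe at a time.
 */
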
132253b381b3SDavid Woodhouse /*
132353b381b3SDavid Woodhouse  * This is called in one of two situations.  We either
132453b381b3SDavid Woodhouse  * have a full stripe from the higher layers, or we've read all
132553b381b3SDavid Woodhouse  * the missing bits off disk.
132653b381b3SDavid Woodhouse  *
132753b381b3SDavid Woodhouse  * This will calculate the parity and then send down any
132853b381b3SDavid Woodhouse  * changed blocks.
132953b381b3SDavid Woodhouse  */
133053b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
133153b381b3SDavid Woodhouse {
133336920044SQu Wenruo 	/* Sector number inside a stripe. */
13343e77605dSQu Wenruo 	int sectornr;
133553b381b3SDavid Woodhouse 	struct bio_list bio_list;
133653b381b3SDavid Woodhouse 	struct bio *bio;
133753b381b3SDavid Woodhouse 	int ret;
133853b381b3SDavid Woodhouse 
133953b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
134053b381b3SDavid Woodhouse 
1341bd8f7e62SQu Wenruo 	/* We should have at least one data sector. */
1342bd8f7e62SQu Wenruo 	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));
1343bd8f7e62SQu Wenruo 
134453b381b3SDavid Woodhouse 	/*
134553b381b3SDavid Woodhouse 	 * At this point we either have a full stripe, or we've read the full
134653b381b3SDavid Woodhouse 	 * stripe from the drive.  Recalculate the parity and write the new
134753b381b3SDavid Woodhouse 	 * results.
134853b381b3SDavid Woodhouse 	 *
134953b381b3SDavid Woodhouse 	 * We're not allowed to add any new bios to the bio list here, anyone
135053b381b3SDavid Woodhouse 	 * else that wants to change this stripe needs to do their own rmw.
135153b381b3SDavid Woodhouse 	 */
135253b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
135353b381b3SDavid Woodhouse 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
135453b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
135553b381b3SDavid Woodhouse 
1356b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
135753b381b3SDavid Woodhouse 
135853b381b3SDavid Woodhouse 	/*
135953b381b3SDavid Woodhouse 	 * now that we've set rmw_locked, run through the
136053b381b3SDavid Woodhouse 	 * bio list one last time and map the page pointers
13614ae10b3aSChris Mason 	 *
13624ae10b3aSChris Mason 	 * We don't cache full rbios because we're assuming
13634ae10b3aSChris Mason 	 * the higher layers are unlikely to use this area of
13644ae10b3aSChris Mason 	 * the disk again soon.  If they do use it again,
13654ae10b3aSChris Mason 	 * hopefully they will send another full bio.
136653b381b3SDavid Woodhouse 	 */
136753b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
13684ae10b3aSChris Mason 	if (!rbio_is_full(rbio))
13694ae10b3aSChris Mason 		cache_rbio_pages(rbio);
13704ae10b3aSChris Mason 	else
13714ae10b3aSChris Mason 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
137253b381b3SDavid Woodhouse 
137330e3c897SQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
137430e3c897SQu Wenruo 		generate_pq_vertical(rbio, sectornr);
137553b381b3SDavid Woodhouse 
13766486d21cSQu Wenruo 	ret = rmw_assemble_write_bios(rbio, &bio_list);
13776486d21cSQu Wenruo 	if (ret < 0)
137853b381b3SDavid Woodhouse 		goto cleanup;
137953b381b3SDavid Woodhouse 
1380b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1381b89e1b01SMiao Xie 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
138253b381b3SDavid Woodhouse 
1383bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
138453b381b3SDavid Woodhouse 		bio->bi_end_io = raid_write_end_io;
13854e49ea4aSMike Christie 
1386b8bea09aSQu Wenruo 		if (trace_raid56_write_stripe_enabled()) {
1387b8bea09aSQu Wenruo 			struct raid56_bio_trace_info trace_info = { 0 };
1388b8bea09aSQu Wenruo 
1389b8bea09aSQu Wenruo 			bio_get_trace_info(rbio, bio, &trace_info);
1390b8bea09aSQu Wenruo 			trace_raid56_write_stripe(rbio, bio, &trace_info);
1391b8bea09aSQu Wenruo 		}
13924e49ea4aSMike Christie 		submit_bio(bio);
139353b381b3SDavid Woodhouse 	}
139453b381b3SDavid Woodhouse 	return;
139553b381b3SDavid Woodhouse 
139653b381b3SDavid Woodhouse cleanup:
139758efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1398785884fcSLiu Bo 
1399785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
1400785884fcSLiu Bo 		bio_put(bio);
140153b381b3SDavid Woodhouse }
140253b381b3SDavid Woodhouse 
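/*
 * Editor's note (an illustration, not part of the original source): the
 * write path above runs as: take bio_list_lock to freeze merging, index
 * the bio pages, decide whether to cache (only partial stripes are kept
 * in the stripe cache), regenerate P/Q for every vertical stripe, then
 * assemble and submit the per-device write bios.
 */
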
140353b381b3SDavid Woodhouse /*
140453b381b3SDavid Woodhouse  * helper to find the stripe number for a given bio.  Used to figure out which
140553b381b3SDavid Woodhouse  * stripe has failed.  This expects the bio to correspond to a physical disk,
140653b381b3SDavid Woodhouse  * so it looks up based on physical sector numbers.
140753b381b3SDavid Woodhouse  */
140853b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio,
140953b381b3SDavid Woodhouse 			   struct bio *bio)
141053b381b3SDavid Woodhouse {
14114f024f37SKent Overstreet 	u64 physical = bio->bi_iter.bi_sector;
141253b381b3SDavid Woodhouse 	int i;
14134c664611SQu Wenruo 	struct btrfs_io_stripe *stripe;
141453b381b3SDavid Woodhouse 
141553b381b3SDavid Woodhouse 	physical <<= 9;
141653b381b3SDavid Woodhouse 
14174c664611SQu Wenruo 	for (i = 0; i < rbio->bioc->num_stripes; i++) {
14184c664611SQu Wenruo 		stripe = &rbio->bioc->stripes[i];
1419ff18a4afSChristoph Hellwig 		if (in_range(physical, stripe->physical, BTRFS_STRIPE_LEN) &&
1420309dca30SChristoph Hellwig 		    stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
142153b381b3SDavid Woodhouse 			return i;
142253b381b3SDavid Woodhouse 		}
142353b381b3SDavid Woodhouse 	}
142453b381b3SDavid Woodhouse 	return -1;
142553b381b3SDavid Woodhouse }
142653b381b3SDavid Woodhouse 
142753b381b3SDavid Woodhouse /*
142853b381b3SDavid Woodhouse  * helper to find the stripe number for a given
142953b381b3SDavid Woodhouse  * bio (before mapping).  Used to figure out which stripe has
143053b381b3SDavid Woodhouse  * failed.  This looks up based on logical block numbers.
143153b381b3SDavid Woodhouse  */
143253b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
143353b381b3SDavid Woodhouse 				   struct bio *bio)
143453b381b3SDavid Woodhouse {
14351201b58bSDavid Sterba 	u64 logical = bio->bi_iter.bi_sector << 9;
143653b381b3SDavid Woodhouse 	int i;
143753b381b3SDavid Woodhouse 
143853b381b3SDavid Woodhouse 	for (i = 0; i < rbio->nr_data; i++) {
14394c664611SQu Wenruo 		u64 stripe_start = rbio->bioc->raid_map[i];
144083025863SNikolay Borisov 
1441ff18a4afSChristoph Hellwig 		if (in_range(logical, stripe_start, BTRFS_STRIPE_LEN))
144253b381b3SDavid Woodhouse 			return i;
144353b381b3SDavid Woodhouse 	}
144453b381b3SDavid Woodhouse 	return -1;
144553b381b3SDavid Woodhouse }
144653b381b3SDavid Woodhouse 
144753b381b3SDavid Woodhouse /*
144853b381b3SDavid Woodhouse  * returns -EIO if we had too many failures
144953b381b3SDavid Woodhouse  */
145053b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
145153b381b3SDavid Woodhouse {
145253b381b3SDavid Woodhouse 	unsigned long flags;
145353b381b3SDavid Woodhouse 	int ret = 0;
145453b381b3SDavid Woodhouse 
145553b381b3SDavid Woodhouse 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
145653b381b3SDavid Woodhouse 
145753b381b3SDavid Woodhouse 	/* we already know this stripe is bad, move on */
145853b381b3SDavid Woodhouse 	if (rbio->faila == failed || rbio->failb == failed)
145953b381b3SDavid Woodhouse 		goto out;
146053b381b3SDavid Woodhouse 
146153b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
146253b381b3SDavid Woodhouse 		/* first failure on this rbio */
146353b381b3SDavid Woodhouse 		rbio->faila = failed;
1464b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
146553b381b3SDavid Woodhouse 	} else if (rbio->failb == -1) {
146653b381b3SDavid Woodhouse 		/* second failure on this rbio */
146753b381b3SDavid Woodhouse 		rbio->failb = failed;
1468b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
146953b381b3SDavid Woodhouse 	} else {
147053b381b3SDavid Woodhouse 		ret = -EIO;
147153b381b3SDavid Woodhouse 	}
147253b381b3SDavid Woodhouse out:
147353b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
147453b381b3SDavid Woodhouse 
147553b381b3SDavid Woodhouse 	return ret;
147653b381b3SDavid Woodhouse }
147753b381b3SDavid Woodhouse 
147853b381b3SDavid Woodhouse /*
147953b381b3SDavid Woodhouse  * helper to fail a stripe based on a physical disk
148053b381b3SDavid Woodhouse  * bio.
148153b381b3SDavid Woodhouse  */
148253b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
148353b381b3SDavid Woodhouse 			   struct bio *bio)
148453b381b3SDavid Woodhouse {
148553b381b3SDavid Woodhouse 	int failed = find_bio_stripe(rbio, bio);
148653b381b3SDavid Woodhouse 
148753b381b3SDavid Woodhouse 	if (failed < 0)
148853b381b3SDavid Woodhouse 		return -EIO;
148953b381b3SDavid Woodhouse 
149053b381b3SDavid Woodhouse 	return fail_rbio_index(rbio, failed);
149153b381b3SDavid Woodhouse }
149253b381b3SDavid Woodhouse 
149353b381b3SDavid Woodhouse /*
14945fdb7afcSQu Wenruo  * For the subpage case, we can no longer set page Uptodate directly for
14955fdb7afcSQu Wenruo  * stripe_pages[], thus we need to locate the sector.
14965fdb7afcSQu Wenruo  */
14975fdb7afcSQu Wenruo static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
14985fdb7afcSQu Wenruo 					     struct page *page,
14995fdb7afcSQu Wenruo 					     unsigned int pgoff)
15005fdb7afcSQu Wenruo {
15015fdb7afcSQu Wenruo 	int i;
15025fdb7afcSQu Wenruo 
15035fdb7afcSQu Wenruo 	for (i = 0; i < rbio->nr_sectors; i++) {
15045fdb7afcSQu Wenruo 		struct sector_ptr *sector = &rbio->stripe_sectors[i];
15055fdb7afcSQu Wenruo 
15065fdb7afcSQu Wenruo 		if (sector->page == page && sector->pgoff == pgoff)
15075fdb7afcSQu Wenruo 			return sector;
15085fdb7afcSQu Wenruo 	}
15095fdb7afcSQu Wenruo 	return NULL;
15105fdb7afcSQu Wenruo }
15115fdb7afcSQu Wenruo 
15125fdb7afcSQu Wenruo /*
151453b381b3SDavid Woodhouse  * This marks each sector covered by the bio as uptodate.  It should only be
151553b381b3SDavid Woodhouse  * used on private rbio pages, nothing that comes in from the higher layers.
151553b381b3SDavid Woodhouse  */
15165fdb7afcSQu Wenruo static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
151753b381b3SDavid Woodhouse {
15185fdb7afcSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
15190198e5b7SLiu Bo 	struct bio_vec *bvec;
15206dc4f100SMing Lei 	struct bvec_iter_all iter_all;
152153b381b3SDavid Woodhouse 
15220198e5b7SLiu Bo 	ASSERT(!bio_flagged(bio, BIO_CLONED));
15236592e58cSFilipe Manana 
15245fdb7afcSQu Wenruo 	bio_for_each_segment_all(bvec, bio, iter_all) {
15255fdb7afcSQu Wenruo 		struct sector_ptr *sector;
15265fdb7afcSQu Wenruo 		int pgoff;
15275fdb7afcSQu Wenruo 
15285fdb7afcSQu Wenruo 		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
15295fdb7afcSQu Wenruo 		     pgoff += sectorsize) {
15305fdb7afcSQu Wenruo 			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
15315fdb7afcSQu Wenruo 			ASSERT(sector);
15325fdb7afcSQu Wenruo 			if (sector)
15335fdb7afcSQu Wenruo 				sector->uptodate = 1;
15345fdb7afcSQu Wenruo 		}
15355fdb7afcSQu Wenruo 	}
153653b381b3SDavid Woodhouse }
153753b381b3SDavid Woodhouse 
1538d817ce35SQu Wenruo static void raid_wait_read_end_io(struct bio *bio)
1539d817ce35SQu Wenruo {
1540d817ce35SQu Wenruo 	struct btrfs_raid_bio *rbio = bio->bi_private;
1541d817ce35SQu Wenruo 
1542d817ce35SQu Wenruo 	if (bio->bi_status)
1543d817ce35SQu Wenruo 		fail_bio_stripe(rbio, bio);
1544d817ce35SQu Wenruo 	else
1545d817ce35SQu Wenruo 		set_bio_pages_uptodate(rbio, bio);
1546d817ce35SQu Wenruo 
1547d817ce35SQu Wenruo 	bio_put(bio);
1548d817ce35SQu Wenruo 	if (atomic_dec_and_test(&rbio->stripes_pending))
1549d817ce35SQu Wenruo 		wake_up(&rbio->io_wait);
1550d817ce35SQu Wenruo }
1551d817ce35SQu Wenruo 
1552d817ce35SQu Wenruo static void submit_read_bios(struct btrfs_raid_bio *rbio,
1553d817ce35SQu Wenruo 			     struct bio_list *bio_list)
1554d817ce35SQu Wenruo {
1555d817ce35SQu Wenruo 	struct bio *bio;
1556d817ce35SQu Wenruo 
1557d817ce35SQu Wenruo 	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
1558d817ce35SQu Wenruo 	while ((bio = bio_list_pop(bio_list))) {
1559d817ce35SQu Wenruo 		bio->bi_end_io = raid_wait_read_end_io;
1560d817ce35SQu Wenruo 
1561d817ce35SQu Wenruo 		if (trace_raid56_scrub_read_recover_enabled()) {
1562d817ce35SQu Wenruo 			struct raid56_bio_trace_info trace_info = { 0 };
1563d817ce35SQu Wenruo 
1564d817ce35SQu Wenruo 			bio_get_trace_info(rbio, bio, &trace_info);
1565d817ce35SQu Wenruo 			trace_raid56_scrub_read_recover(rbio, bio, &trace_info);
1566d817ce35SQu Wenruo 		}
1567d817ce35SQu Wenruo 		submit_bio(bio);
1568d817ce35SQu Wenruo 	}
1569d817ce35SQu Wenruo }
1570d817ce35SQu Wenruo 
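/*
 * Editor's note (an illustration, not part of the original source): reads
 * queued by this helper are waited on synchronously by the caller, e.g.
 *
 *	submit_read_bios(rbio, &bio_list);
 *	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
 *
 * with raid_wait_read_end_io() dropping stripes_pending and waking
 * io_wait as each bio completes.
 */
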
1571d34e123dSChristoph Hellwig static void raid56_bio_end_io(struct bio *bio)
157253b381b3SDavid Woodhouse {
157353b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio = bio->bi_private;
157453b381b3SDavid Woodhouse 
15754e4cbee9SChristoph Hellwig 	if (bio->bi_status)
157653b381b3SDavid Woodhouse 		fail_bio_stripe(rbio, bio);
157753b381b3SDavid Woodhouse 	else
15785fdb7afcSQu Wenruo 		set_bio_pages_uptodate(rbio, bio);
157953b381b3SDavid Woodhouse 
158053b381b3SDavid Woodhouse 	bio_put(bio);
158153b381b3SDavid Woodhouse 
1582d34e123dSChristoph Hellwig 	if (atomic_dec_and_test(&rbio->stripes_pending))
1583d34e123dSChristoph Hellwig 		queue_work(rbio->bioc->fs_info->endio_raid56_workers,
1584d34e123dSChristoph Hellwig 			   &rbio->end_io_work);
1585d34e123dSChristoph Hellwig }
158653b381b3SDavid Woodhouse 
1587509c27aaSQu Wenruo static int rmw_assemble_read_bios(struct btrfs_raid_bio *rbio,
1588509c27aaSQu Wenruo 				  struct bio_list *bio_list)
158953b381b3SDavid Woodhouse {
1590550cdeb3SQu Wenruo 	const int nr_data_sectors = rbio->stripe_nsectors * rbio->nr_data;
159153b381b3SDavid Woodhouse 	struct bio *bio;
1592509c27aaSQu Wenruo 	int total_sector_nr;
1593509c27aaSQu Wenruo 	int ret = 0;
159453b381b3SDavid Woodhouse 
1595509c27aaSQu Wenruo 	ASSERT(bio_list_size(bio_list) == 0);
159653b381b3SDavid Woodhouse 
1597550cdeb3SQu Wenruo 	/* Build a list of bios to read all the missing data sectors. */
1598550cdeb3SQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < nr_data_sectors;
1599550cdeb3SQu Wenruo 	     total_sector_nr++) {
16003e77605dSQu Wenruo 		struct sector_ptr *sector;
1601550cdeb3SQu Wenruo 		int stripe = total_sector_nr / rbio->stripe_nsectors;
1602550cdeb3SQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
16033e77605dSQu Wenruo 
160453b381b3SDavid Woodhouse 		/*
1605550cdeb3SQu Wenruo 		 * We want to find all the sectors missing from the rbio and
1606550cdeb3SQu Wenruo 		 * read them from the disk.  If sector_in_rbio() finds a sector
1607550cdeb3SQu Wenruo 		 * in the bio list we don't need to read it off the stripe.
160853b381b3SDavid Woodhouse 		 */
16093e77605dSQu Wenruo 		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
16103e77605dSQu Wenruo 		if (sector)
161153b381b3SDavid Woodhouse 			continue;
161253b381b3SDavid Woodhouse 
16133e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, stripe, sectornr);
16144ae10b3aSChris Mason 		/*
1615550cdeb3SQu Wenruo 		 * The bio cache may have handed us an uptodate sector.  If so,
1616550cdeb3SQu Wenruo 		 * use it.
16174ae10b3aSChris Mason 		 */
16183e77605dSQu Wenruo 		if (sector->uptodate)
16194ae10b3aSChris Mason 			continue;
16204ae10b3aSChris Mason 
1621509c27aaSQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector,
1622ff18a4afSChristoph Hellwig 			       stripe, sectornr, REQ_OP_READ);
162353b381b3SDavid Woodhouse 		if (ret)
162453b381b3SDavid Woodhouse 			goto cleanup;
162553b381b3SDavid Woodhouse 	}
1626509c27aaSQu Wenruo 	return 0;
1627509c27aaSQu Wenruo 
1628509c27aaSQu Wenruo cleanup:
1629509c27aaSQu Wenruo 	while ((bio = bio_list_pop(bio_list)))
1630509c27aaSQu Wenruo 		bio_put(bio);
1631509c27aaSQu Wenruo 	return ret;
1632509c27aaSQu Wenruo }
1633509c27aaSQu Wenruo 
16345eb30ee2SQu Wenruo static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
16355eb30ee2SQu Wenruo {
16365eb30ee2SQu Wenruo 	const int data_pages = rbio->nr_data * rbio->stripe_npages;
16375eb30ee2SQu Wenruo 	int ret;
16385eb30ee2SQu Wenruo 
16395eb30ee2SQu Wenruo 	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages);
16405eb30ee2SQu Wenruo 	if (ret < 0)
16415eb30ee2SQu Wenruo 		return ret;
16425eb30ee2SQu Wenruo 
16435eb30ee2SQu Wenruo 	index_stripe_sectors(rbio);
16445eb30ee2SQu Wenruo 	return 0;
16455eb30ee2SQu Wenruo }
16465eb30ee2SQu Wenruo 
1647509c27aaSQu Wenruo /*
16486ac0f488SChris Mason  * We use plugging callbacks to collect full stripes.
16496ac0f488SChris Mason  * Any time we get a partial stripe write while plugged
16506ac0f488SChris Mason  * we collect it into a list.  When the unplug comes down,
16516ac0f488SChris Mason  * we sort the list by logical block number and merge
16526ac0f488SChris Mason  * everything we can into the same rbios.
16536ac0f488SChris Mason  */
16546ac0f488SChris Mason struct btrfs_plug_cb {
16556ac0f488SChris Mason 	struct blk_plug_cb cb;
16566ac0f488SChris Mason 	struct btrfs_fs_info *info;
16576ac0f488SChris Mason 	struct list_head rbio_list;
1658385de0efSChristoph Hellwig 	struct work_struct work;
16596ac0f488SChris Mason };
16606ac0f488SChris Mason 
16616ac0f488SChris Mason /*
16626ac0f488SChris Mason  * rbios on the plug list are sorted for easier merging.
16636ac0f488SChris Mason  */
16644f0f586bSSami Tolvanen static int plug_cmp(void *priv, const struct list_head *a,
16654f0f586bSSami Tolvanen 		    const struct list_head *b)
16666ac0f488SChris Mason {
1667214cc184SDavid Sterba 	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
16686ac0f488SChris Mason 						       plug_list);
1669214cc184SDavid Sterba 	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
16706ac0f488SChris Mason 						       plug_list);
16714f024f37SKent Overstreet 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
16724f024f37SKent Overstreet 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
16736ac0f488SChris Mason 
16746ac0f488SChris Mason 	if (a_sector < b_sector)
16756ac0f488SChris Mason 		return -1;
16766ac0f488SChris Mason 	if (a_sector > b_sector)
16776ac0f488SChris Mason 		return 1;
16786ac0f488SChris Mason 	return 0;
16796ac0f488SChris Mason }
16806ac0f488SChris Mason 
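/*
 * Editor's note (an illustration, not part of the original source):
 * list_sort() is a stable merge sort, so after sorting, the rbios on the
 * plug list are in ascending order of their first bio's logical sector.
 * raid_unplug() below therefore only needs to compare each rbio against
 * its immediate predecessor to find merge candidates.
 */
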
168193723095SQu Wenruo static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
16826ac0f488SChris Mason {
168393723095SQu Wenruo 	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
16846ac0f488SChris Mason 	struct btrfs_raid_bio *cur;
16856ac0f488SChris Mason 	struct btrfs_raid_bio *last = NULL;
16866ac0f488SChris Mason 
16876ac0f488SChris Mason 	list_sort(NULL, &plug->rbio_list, plug_cmp);
168893723095SQu Wenruo 
16896ac0f488SChris Mason 	while (!list_empty(&plug->rbio_list)) {
16906ac0f488SChris Mason 		cur = list_entry(plug->rbio_list.next,
16916ac0f488SChris Mason 				 struct btrfs_raid_bio, plug_list);
16926ac0f488SChris Mason 		list_del_init(&cur->plug_list);
16936ac0f488SChris Mason 
16946ac0f488SChris Mason 		if (rbio_is_full(cur)) {
169593723095SQu Wenruo 			/* We have a full stripe, queue it down. */
169693723095SQu Wenruo 			start_async_work(cur, rmw_rbio_work);
16976ac0f488SChris Mason 			continue;
16986ac0f488SChris Mason 		}
16996ac0f488SChris Mason 		if (last) {
17006ac0f488SChris Mason 			if (rbio_can_merge(last, cur)) {
17016ac0f488SChris Mason 				merge_rbio(last, cur);
1702ff2b64a2SQu Wenruo 				free_raid_bio(cur);
17036ac0f488SChris Mason 				continue;
17046ac0f488SChris Mason 			}
170593723095SQu Wenruo 			start_async_work(last, rmw_rbio_work);
17066ac0f488SChris Mason 		}
17076ac0f488SChris Mason 		last = cur;
17086ac0f488SChris Mason 	}
170993723095SQu Wenruo 	if (last)
171093723095SQu Wenruo 		start_async_work(last, rmw_rbio_work);
17116ac0f488SChris Mason 	kfree(plug);
17126ac0f488SChris Mason }
17136ac0f488SChris Mason 
1714bd8f7e62SQu Wenruo /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1715bd8f7e62SQu Wenruo static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1716bd8f7e62SQu Wenruo {
1717bd8f7e62SQu Wenruo 	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1718bd8f7e62SQu Wenruo 	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1719bd8f7e62SQu Wenruo 	const u64 full_stripe_start = rbio->bioc->raid_map[0];
1720bd8f7e62SQu Wenruo 	const u32 orig_len = orig_bio->bi_iter.bi_size;
1721bd8f7e62SQu Wenruo 	const u32 sectorsize = fs_info->sectorsize;
1722bd8f7e62SQu Wenruo 	u64 cur_logical;
1723bd8f7e62SQu Wenruo 
1724bd8f7e62SQu Wenruo 	ASSERT(orig_logical >= full_stripe_start &&
1725bd8f7e62SQu Wenruo 	       orig_logical + orig_len <= full_stripe_start +
1726ff18a4afSChristoph Hellwig 	       rbio->nr_data * BTRFS_STRIPE_LEN);
1727bd8f7e62SQu Wenruo 
1728bd8f7e62SQu Wenruo 	bio_list_add(&rbio->bio_list, orig_bio);
1729bd8f7e62SQu Wenruo 	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1730bd8f7e62SQu Wenruo 
1731bd8f7e62SQu Wenruo 	/* Update the dbitmap. */
1732bd8f7e62SQu Wenruo 	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1733bd8f7e62SQu Wenruo 	     cur_logical += sectorsize) {
1734bd8f7e62SQu Wenruo 		int bit = ((u32)(cur_logical - full_stripe_start) >>
1735bd8f7e62SQu Wenruo 			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;
1736bd8f7e62SQu Wenruo 
1737bd8f7e62SQu Wenruo 		set_bit(bit, &rbio->dbitmap);
1738bd8f7e62SQu Wenruo 	}
1739bd8f7e62SQu Wenruo }
1740bd8f7e62SQu Wenruo 
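/*
 * Editor's note (an illustration, not part of the original source):
 * dbitmap has one bit per vertical stripe.  E.g. with a 4KiB sectorsize
 * and a full stripe starting at logical 1MiB, a 16KiB bio at logical
 * 1MiB + 8KiB that lands inside the first data stripe sets bits 2..5,
 * per the ((cur_logical - full_stripe_start) >> sectorsize_bits) %
 * stripe_nsectors computation above.
 */
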
17416ac0f488SChris Mason /*
174253b381b3SDavid Woodhouse  * our main entry point for writes from the rest of the FS.
174353b381b3SDavid Woodhouse  */
174431683f4aSChristoph Hellwig void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
174553b381b3SDavid Woodhouse {
17466a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
174753b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
17486ac0f488SChris Mason 	struct btrfs_plug_cb *plug = NULL;
17496ac0f488SChris Mason 	struct blk_plug_cb *cb;
175031683f4aSChristoph Hellwig 	int ret = 0;
175153b381b3SDavid Woodhouse 
1752ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
1753af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
175431683f4aSChristoph Hellwig 		ret = PTR_ERR(rbio);
1755f1c29379SChristoph Hellwig 		goto fail;
1756af8e2d1dSMiao Xie 	}
17571b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_WRITE;
1758bd8f7e62SQu Wenruo 	rbio_add_bio(rbio, bio);
17596ac0f488SChris Mason 
17606ac0f488SChris Mason 	/*
176193723095SQu Wenruo 	 * Don't plug on full rbios; just get them out the door
17626ac0f488SChris Mason 	 * as quickly as we can.
17636ac0f488SChris Mason 	 */
176493723095SQu Wenruo 	if (rbio_is_full(rbio))
176593723095SQu Wenruo 		goto queue_rbio;
17666ac0f488SChris Mason 
176793723095SQu Wenruo 	cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
17686ac0f488SChris Mason 	if (cb) {
17696ac0f488SChris Mason 		plug = container_of(cb, struct btrfs_plug_cb, cb);
17706ac0f488SChris Mason 		if (!plug->info) {
17710b246afaSJeff Mahoney 			plug->info = fs_info;
17726ac0f488SChris Mason 			INIT_LIST_HEAD(&plug->rbio_list);
17736ac0f488SChris Mason 		}
17746ac0f488SChris Mason 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
177593723095SQu Wenruo 		return;
177653b381b3SDavid Woodhouse 	}
177793723095SQu Wenruo queue_rbio:
177893723095SQu Wenruo 	/*
177993723095SQu Wenruo 	 * Either we don't have any existing plug, or we're doing a full stripe;
178093723095SQu Wenruo 	 * either way we can queue the rmw work now.
178193723095SQu Wenruo 	 */
178293723095SQu Wenruo 	start_async_work(rbio, rmw_rbio_work);
178331683f4aSChristoph Hellwig 
178431683f4aSChristoph Hellwig 	return;
178531683f4aSChristoph Hellwig 
1786f1c29379SChristoph Hellwig fail:
178731683f4aSChristoph Hellwig 	bio->bi_status = errno_to_blk_status(ret);
178831683f4aSChristoph Hellwig 	bio_endio(bio);
17896ac0f488SChris Mason }
179053b381b3SDavid Woodhouse 
179153b381b3SDavid Woodhouse /*
17929c5ff9b4SQu Wenruo  * Recover a vertical stripe specified by @sector_nr.
17939c5ff9b4SQu Wenruo  * @pointers and @unmap_array are pre-allocated by the caller, so we don't
17949c5ff9b4SQu Wenruo  * need to allocate/free them for every vertical stripe.
17959c5ff9b4SQu Wenruo  */
17969c5ff9b4SQu Wenruo static void recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
17979c5ff9b4SQu Wenruo 			     void **pointers, void **unmap_array)
17989c5ff9b4SQu Wenruo {
17999c5ff9b4SQu Wenruo 	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
18009c5ff9b4SQu Wenruo 	struct sector_ptr *sector;
18019c5ff9b4SQu Wenruo 	const u32 sectorsize = fs_info->sectorsize;
18029c5ff9b4SQu Wenruo 	const int faila = rbio->faila;
18039c5ff9b4SQu Wenruo 	const int failb = rbio->failb;
18049c5ff9b4SQu Wenruo 	int stripe_nr;
18059c5ff9b4SQu Wenruo 
18069c5ff9b4SQu Wenruo 	/*
18079c5ff9b4SQu Wenruo 	 * Now we just use bitmap to mark the horizontal stripes in
18089c5ff9b4SQu Wenruo 	 * which we have data when doing parity scrub.
18099c5ff9b4SQu Wenruo 	 */
18109c5ff9b4SQu Wenruo 	if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
18119c5ff9b4SQu Wenruo 	    !test_bit(sector_nr, &rbio->dbitmap))
18129c5ff9b4SQu Wenruo 		return;
18139c5ff9b4SQu Wenruo 
18149c5ff9b4SQu Wenruo 	/*
18159c5ff9b4SQu Wenruo 	 * Setup our array of pointers with sectors from each stripe
18169c5ff9b4SQu Wenruo 	 *
18179c5ff9b4SQu Wenruo 	 * NOTE: store a duplicate array of pointers to preserve the
18189c5ff9b4SQu Wenruo 	 * pointer order.
18199c5ff9b4SQu Wenruo 	 */
18209c5ff9b4SQu Wenruo 	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
18219c5ff9b4SQu Wenruo 		/*
18229c5ff9b4SQu Wenruo 		 * If we're rebuilding a read, we have to use
18239c5ff9b4SQu Wenruo 		 * pages from the bio list
18249c5ff9b4SQu Wenruo 		 */
18259c5ff9b4SQu Wenruo 		if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
18269c5ff9b4SQu Wenruo 		     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
18279c5ff9b4SQu Wenruo 		    (stripe_nr == faila || stripe_nr == failb)) {
18289c5ff9b4SQu Wenruo 			sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
18299c5ff9b4SQu Wenruo 		} else {
18309c5ff9b4SQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
18319c5ff9b4SQu Wenruo 		}
18329c5ff9b4SQu Wenruo 		ASSERT(sector->page);
18339c5ff9b4SQu Wenruo 		pointers[stripe_nr] = kmap_local_page(sector->page) +
18349c5ff9b4SQu Wenruo 				   sector->pgoff;
18359c5ff9b4SQu Wenruo 		unmap_array[stripe_nr] = pointers[stripe_nr];
18369c5ff9b4SQu Wenruo 	}
18379c5ff9b4SQu Wenruo 
18389c5ff9b4SQu Wenruo 	/* All raid6 handling here */
18399c5ff9b4SQu Wenruo 	if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
18409c5ff9b4SQu Wenruo 		/* Single failure, rebuild from parity raid5 style */
18419c5ff9b4SQu Wenruo 		if (failb < 0) {
18429c5ff9b4SQu Wenruo 			if (faila == rbio->nr_data)
18439c5ff9b4SQu Wenruo 				/*
18449c5ff9b4SQu Wenruo 				 * Just the P stripe has failed, without
18459c5ff9b4SQu Wenruo 				 * a bad data or Q stripe.
18469c5ff9b4SQu Wenruo 				 * We have nothing to do, just skip the
18479c5ff9b4SQu Wenruo 				 * recovery for this stripe.
18489c5ff9b4SQu Wenruo 				 */
18499c5ff9b4SQu Wenruo 				goto cleanup;
18509c5ff9b4SQu Wenruo 			/*
18519c5ff9b4SQu Wenruo 			 * a single failure in raid6 is rebuilt
18529c5ff9b4SQu Wenruo 			 * in the pstripe code below
18539c5ff9b4SQu Wenruo 			 */
18549c5ff9b4SQu Wenruo 			goto pstripe;
18559c5ff9b4SQu Wenruo 		}
18569c5ff9b4SQu Wenruo 
18579c5ff9b4SQu Wenruo 		/*
18589c5ff9b4SQu Wenruo 		 * If the Q stripe has failed, do a pstripe reconstruction from
18599c5ff9b4SQu Wenruo 		 * the xors.
18609c5ff9b4SQu Wenruo 		 * If both the Q stripe and the P stripe have failed, we're
18619c5ff9b4SQu Wenruo 		 * here due to a crc mismatch and we can't give them the
18629c5ff9b4SQu Wenruo 		 * data they want.
18639c5ff9b4SQu Wenruo 		 */
18649c5ff9b4SQu Wenruo 		if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
18659c5ff9b4SQu Wenruo 			if (rbio->bioc->raid_map[faila] ==
18669c5ff9b4SQu Wenruo 			    RAID5_P_STRIPE)
18679c5ff9b4SQu Wenruo 				/*
18689c5ff9b4SQu Wenruo 				 * Only P and Q are corrupted.
18699c5ff9b4SQu Wenruo 				 * We only care about data stripes recovery,
18709c5ff9b4SQu Wenruo 				 * can skip this vertical stripe.
18719c5ff9b4SQu Wenruo 				 */
18729c5ff9b4SQu Wenruo 				goto cleanup;
18739c5ff9b4SQu Wenruo 			/*
18749c5ff9b4SQu Wenruo 			 * Otherwise we have one bad data stripe and
18759c5ff9b4SQu Wenruo 			 * a good P stripe.  raid5!
18769c5ff9b4SQu Wenruo 			 */
18779c5ff9b4SQu Wenruo 			goto pstripe;
18789c5ff9b4SQu Wenruo 		}
18799c5ff9b4SQu Wenruo 
18809c5ff9b4SQu Wenruo 		if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
18819c5ff9b4SQu Wenruo 			raid6_datap_recov(rbio->real_stripes, sectorsize,
18829c5ff9b4SQu Wenruo 					  faila, pointers);
18839c5ff9b4SQu Wenruo 		} else {
18849c5ff9b4SQu Wenruo 			raid6_2data_recov(rbio->real_stripes, sectorsize,
18859c5ff9b4SQu Wenruo 					  faila, failb, pointers);
18869c5ff9b4SQu Wenruo 		}
18879c5ff9b4SQu Wenruo 	} else {
18889c5ff9b4SQu Wenruo 		void *p;
18899c5ff9b4SQu Wenruo 
18909c5ff9b4SQu Wenruo 		/* Rebuild from P stripe here (raid5 or raid6). */
18919c5ff9b4SQu Wenruo 		ASSERT(failb == -1);
18929c5ff9b4SQu Wenruo pstripe:
18939c5ff9b4SQu Wenruo 		/* Copy parity block into failed block to start with */
18949c5ff9b4SQu Wenruo 		memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
18959c5ff9b4SQu Wenruo 
18969c5ff9b4SQu Wenruo 		/* Rearrange the pointer array */
18979c5ff9b4SQu Wenruo 		p = pointers[faila];
18989c5ff9b4SQu Wenruo 		for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
18999c5ff9b4SQu Wenruo 		     stripe_nr++)
19009c5ff9b4SQu Wenruo 			pointers[stripe_nr] = pointers[stripe_nr + 1];
19019c5ff9b4SQu Wenruo 		pointers[rbio->nr_data - 1] = p;
19029c5ff9b4SQu Wenruo 
19039c5ff9b4SQu Wenruo 		/* Xor in the rest */
19049c5ff9b4SQu Wenruo 		run_xor(pointers, rbio->nr_data - 1, sectorsize);
19059c5ff9b4SQu Wenruo 
19069c5ff9b4SQu Wenruo 	}
19079c5ff9b4SQu Wenruo 
19089c5ff9b4SQu Wenruo 	/*
19099c5ff9b4SQu Wenruo 	 * Whether this is an RMW or a recovery, we should have all
19109c5ff9b4SQu Wenruo 	 * failed sectors repaired in the vertical stripe, thus they are now
19119c5ff9b4SQu Wenruo 	 * uptodate.
19129c5ff9b4SQu Wenruo 	 * Especially if we decide to cache the rbio, we need to
19139c5ff9b4SQu Wenruo 	 * have at least all data sectors uptodate.
19149c5ff9b4SQu Wenruo 	 */
19159c5ff9b4SQu Wenruo 	if (rbio->faila >= 0) {
19169c5ff9b4SQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->faila, sector_nr);
19179c5ff9b4SQu Wenruo 		sector->uptodate = 1;
19189c5ff9b4SQu Wenruo 	}
19199c5ff9b4SQu Wenruo 	if (rbio->failb >= 0) {
19209c5ff9b4SQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->failb, sector_nr);
19219c5ff9b4SQu Wenruo 		sector->uptodate = 1;
19229c5ff9b4SQu Wenruo 	}
19239c5ff9b4SQu Wenruo 
19249c5ff9b4SQu Wenruo cleanup:
19259c5ff9b4SQu Wenruo 	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
19269c5ff9b4SQu Wenruo 		kunmap_local(unmap_array[stripe_nr]);
19279c5ff9b4SQu Wenruo }
19289c5ff9b4SQu Wenruo 
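/*
 * Editor's note (an illustration, not part of the original source): the
 * cases handled above reduce to, per vertical stripe:
 *
 *	one data stripe lost		-> rebuild from P (xor)
 *	one data stripe and Q lost	-> rebuild data from P (xor)
 *	one data stripe and P lost	-> raid6_datap_recov()
 *	two data stripes lost		-> raid6_2data_recov()
 *	only P, or P and Q, lost	-> nothing to rebuild for the data
 */
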
1929ec936b03SQu Wenruo static int recover_sectors(struct btrfs_raid_bio *rbio)
193053b381b3SDavid Woodhouse {
19319c5ff9b4SQu Wenruo 	void **pointers = NULL;
19329c5ff9b4SQu Wenruo 	void **unmap_array = NULL;
1933ec936b03SQu Wenruo 	int sectornr;
1934ec936b03SQu Wenruo 	int ret = 0;
193553b381b3SDavid Woodhouse 
193607e4d380SQu Wenruo 	/*
1937ec936b03SQu Wenruo 	 * @pointers array stores the pointer for each sector.
1938ec936b03SQu Wenruo 	 *
1939ec936b03SQu Wenruo 	 * @unmap_array stores copy of pointers that does not get reordered
1940ec936b03SQu Wenruo 	 * during reconstruction so that kunmap_local works.
194107e4d380SQu Wenruo 	 */
194231e818feSDavid Sterba 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
194394a0b58dSIra Weiny 	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1944ec936b03SQu Wenruo 	if (!pointers || !unmap_array) {
1945ec936b03SQu Wenruo 		ret = -ENOMEM;
1946ec936b03SQu Wenruo 		goto out;
194794a0b58dSIra Weiny 	}
194894a0b58dSIra Weiny 
19499c5ff9b4SQu Wenruo 	/* Make sure faila and failb are in order. */
19509c5ff9b4SQu Wenruo 	if (rbio->faila >= 0 && rbio->failb >= 0 && rbio->faila > rbio->failb)
19519c5ff9b4SQu Wenruo 		swap(rbio->faila, rbio->failb);
195253b381b3SDavid Woodhouse 
1953b4ee1782SOmar Sandoval 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1954b4ee1782SOmar Sandoval 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
195553b381b3SDavid Woodhouse 		spin_lock_irq(&rbio->bio_list_lock);
195653b381b3SDavid Woodhouse 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
195753b381b3SDavid Woodhouse 		spin_unlock_irq(&rbio->bio_list_lock);
195853b381b3SDavid Woodhouse 	}
195953b381b3SDavid Woodhouse 
196053b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
196153b381b3SDavid Woodhouse 
19629c5ff9b4SQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
19639c5ff9b4SQu Wenruo 		recover_vertical(rbio, sectornr, pointers, unmap_array);
196453b381b3SDavid Woodhouse 
1965ec936b03SQu Wenruo out:
196653b381b3SDavid Woodhouse 	kfree(pointers);
1967ec936b03SQu Wenruo 	kfree(unmap_array);
1968ec936b03SQu Wenruo 	return ret;
1969ec936b03SQu Wenruo }
1970ec936b03SQu Wenruo 
1971ec936b03SQu Wenruo /*
1972ec936b03SQu Wenruo  * All parity reconstruction happens here.  We've read in everything
1973ec936b03SQu Wenruo  * we can find from the drives and this does the heavy lifting of
1974ec936b03SQu Wenruo  * sorting the good from the bad.
1975ec936b03SQu Wenruo  */
1976ec936b03SQu Wenruo static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1977ec936b03SQu Wenruo {
1978ec936b03SQu Wenruo 	int ret;
1979ec936b03SQu Wenruo 
1980ec936b03SQu Wenruo 	ret = recover_sectors(rbio);
198153b381b3SDavid Woodhouse 
1982580c6efaSLiu Bo 	/*
1983580c6efaSLiu Bo 	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1984580c6efaSLiu Bo 	 * valid rbio which is consistent with on-disk content, thus such a
1985580c6efaSLiu Bo 	 * valid rbio can be cached to avoid further disk reads.
1986580c6efaSLiu Bo 	 */
1987580c6efaSLiu Bo 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1988580c6efaSLiu Bo 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
198944ac474dSLiu Bo 		/*
199044ac474dSLiu Bo 		 * - In case of two failures, where rbio->failb != -1:
199144ac474dSLiu Bo 		 *
199244ac474dSLiu Bo 		 *   Do not cache this rbio since the above read reconstruction
199344ac474dSLiu Bo 		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
199444ac474dSLiu Bo 		 *   changed some content of stripes which are not identical to
199544ac474dSLiu Bo 		 *   on-disk content any more.  Otherwise, a later write/recover
199644ac474dSLiu Bo 		 *   may steal stripe_pages from this rbio and end up with
199744ac474dSLiu Bo 		 *   corruptions or rebuild failures.
199844ac474dSLiu Bo 		 *
199944ac474dSLiu Bo 		 * - In case of single failure, where rbio->failb == -1:
200044ac474dSLiu Bo 		 *
200144ac474dSLiu Bo 		 *   Cache this rbio iff the above read reconstruction is
200252042d8eSAndrea Gelmini 		 *   executed without problems.
200344ac474dSLiu Bo 		 */
2004ec936b03SQu Wenruo 		if (!ret && rbio->failb < 0)
20054ae10b3aSChris Mason 			cache_rbio_pages(rbio);
20064ae10b3aSChris Mason 		else
20074ae10b3aSChris Mason 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
20084ae10b3aSChris Mason 
2009ec936b03SQu Wenruo 		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2010ec936b03SQu Wenruo 	} else if (!ret) {
201153b381b3SDavid Woodhouse 		rbio->faila = -1;
201253b381b3SDavid Woodhouse 		rbio->failb = -1;
20135a6ac9eaSMiao Xie 
20145a6ac9eaSMiao Xie 		if (rbio->operation == BTRFS_RBIO_WRITE)
201553b381b3SDavid Woodhouse 			finish_rmw(rbio);
20165a6ac9eaSMiao Xie 		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
20175a6ac9eaSMiao Xie 			finish_parity_scrub(rbio, 0);
20185a6ac9eaSMiao Xie 		else
20195a6ac9eaSMiao Xie 			BUG();
202053b381b3SDavid Woodhouse 	} else {
2021ec936b03SQu Wenruo 		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
202253b381b3SDavid Woodhouse 	}
202353b381b3SDavid Woodhouse }
202453b381b3SDavid Woodhouse 
2025d31968d9SQu Wenruo static int recover_assemble_read_bios(struct btrfs_raid_bio *rbio,
2026d31968d9SQu Wenruo 				      struct bio_list *bio_list)
202753b381b3SDavid Woodhouse {
202853b381b3SDavid Woodhouse 	struct bio *bio;
2029d31968d9SQu Wenruo 	int total_sector_nr;
2030d31968d9SQu Wenruo 	int ret = 0;
203153b381b3SDavid Woodhouse 
2032d31968d9SQu Wenruo 	ASSERT(bio_list_size(bio_list) == 0);
203353b381b3SDavid Woodhouse 	/*
2034f6065f8eSQu Wenruo 	 * Read everything that hasn't failed.  However, this time we will
2035f6065f8eSQu Wenruo 	 * not trust any cached sector,
2036f6065f8eSQu Wenruo 	 * as the cache may hold stale data in parts that the higher layers
2037f6065f8eSQu Wenruo 	 * never read.
2038f6065f8eSQu Wenruo 	 *
2039f6065f8eSQu Wenruo 	 * So here we always re-read everything in the recovery path.
204053b381b3SDavid Woodhouse 	 */
2041ef340fccSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2042ef340fccSQu Wenruo 	     total_sector_nr++) {
2043ef340fccSQu Wenruo 		int stripe = total_sector_nr / rbio->stripe_nsectors;
2044ef340fccSQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
20453e77605dSQu Wenruo 		struct sector_ptr *sector;
204653b381b3SDavid Woodhouse 
2047ef340fccSQu Wenruo 		if (rbio->faila == stripe || rbio->failb == stripe) {
2048ef340fccSQu Wenruo 			atomic_inc(&rbio->error);
2049ef340fccSQu Wenruo 			/* Skip the current stripe. */
2050ef340fccSQu Wenruo 			ASSERT(sectornr == 0);
2051ef340fccSQu Wenruo 			total_sector_nr += rbio->stripe_nsectors - 1;
205253b381b3SDavid Woodhouse 			continue;
2053ef340fccSQu Wenruo 		}
205453b381b3SDavid Woodhouse 		sector = rbio_stripe_sector(rbio, stripe, sectornr);
2055d31968d9SQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
2056ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_READ);
205753b381b3SDavid Woodhouse 		if (ret < 0)
2058d31968d9SQu Wenruo 			goto error;
205953b381b3SDavid Woodhouse 	}
2060d31968d9SQu Wenruo 	return 0;
2061d31968d9SQu Wenruo error:
2062d31968d9SQu Wenruo 	while ((bio = bio_list_pop(bio_list)))
2063d31968d9SQu Wenruo 		bio_put(bio);
2064d31968d9SQu Wenruo 
2065d31968d9SQu Wenruo 	return -EIO;
2066d31968d9SQu Wenruo }
2067d31968d9SQu Wenruo 
2068d817ce35SQu Wenruo static int recover_rbio(struct btrfs_raid_bio *rbio)
2069d817ce35SQu Wenruo {
2070d817ce35SQu Wenruo 	struct bio_list bio_list;
2071d817ce35SQu Wenruo 	struct bio *bio;
2072d817ce35SQu Wenruo 	int ret;
2073d817ce35SQu Wenruo 
2074d817ce35SQu Wenruo 	/*
2075d817ce35SQu Wenruo 	 * Either we're recovering from a read failure or a degraded write;
2076d817ce35SQu Wenruo 	 * the caller should have set faila/failb correctly.
2077d817ce35SQu Wenruo 	 */
2078d817ce35SQu Wenruo 	ASSERT(rbio->faila >= 0 || rbio->failb >= 0);
2079d817ce35SQu Wenruo 	bio_list_init(&bio_list);
2080d817ce35SQu Wenruo 
2081d817ce35SQu Wenruo 	/*
2082d817ce35SQu Wenruo 	 * Reset error to 0, as we will later increase error for missing
2083d817ce35SQu Wenruo 	 * devices.
2084d817ce35SQu Wenruo 	 */
2085d817ce35SQu Wenruo 	atomic_set(&rbio->error, 0);
2086d817ce35SQu Wenruo 
2087d817ce35SQu Wenruo 	/* For recovery, we need to read all sectors including P/Q. */
2088d817ce35SQu Wenruo 	ret = alloc_rbio_pages(rbio);
2089d817ce35SQu Wenruo 	if (ret < 0)
2090d817ce35SQu Wenruo 		goto out;
2091d817ce35SQu Wenruo 
2092d817ce35SQu Wenruo 	index_rbio_pages(rbio);
2093d817ce35SQu Wenruo 
2094d817ce35SQu Wenruo 	ret = recover_assemble_read_bios(rbio, &bio_list);
2095d817ce35SQu Wenruo 	if (ret < 0)
2096d817ce35SQu Wenruo 		goto out;
2097d817ce35SQu Wenruo 
2098d817ce35SQu Wenruo 	submit_read_bios(rbio, &bio_list);
2099d817ce35SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2100d817ce35SQu Wenruo 
2101d817ce35SQu Wenruo 	/* We got more errors during the read than our tolerance allows. */
2102d817ce35SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors) {
2103d817ce35SQu Wenruo 		ret = -EIO;
2104d817ce35SQu Wenruo 		goto out;
2105d817ce35SQu Wenruo 	}
2106d817ce35SQu Wenruo 
2107d817ce35SQu Wenruo 	ret = recover_sectors(rbio);
2108d817ce35SQu Wenruo 
2109d817ce35SQu Wenruo out:
2110d817ce35SQu Wenruo 	while ((bio = bio_list_pop(&bio_list)))
2111d817ce35SQu Wenruo 		bio_put(bio);
2112d817ce35SQu Wenruo 
2113d817ce35SQu Wenruo 	return ret;
2114d817ce35SQu Wenruo }
2115d817ce35SQu Wenruo 
2116d817ce35SQu Wenruo static void recover_rbio_work(struct work_struct *work)
2117d817ce35SQu Wenruo {
2118d817ce35SQu Wenruo 	struct btrfs_raid_bio *rbio;
2119d817ce35SQu Wenruo 	int ret;
2120d817ce35SQu Wenruo 
2121d817ce35SQu Wenruo 	rbio = container_of(work, struct btrfs_raid_bio, work);
2122d817ce35SQu Wenruo 
2123d817ce35SQu Wenruo 	ret = lock_stripe_add(rbio);
2124d817ce35SQu Wenruo 	if (ret == 0) {
2125d817ce35SQu Wenruo 		ret = recover_rbio(rbio);
2126d817ce35SQu Wenruo 		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2127d817ce35SQu Wenruo 	}
2128d817ce35SQu Wenruo }
2129d817ce35SQu Wenruo 
2130d817ce35SQu Wenruo static void recover_rbio_work_locked(struct work_struct *work)
2131d817ce35SQu Wenruo {
2132d817ce35SQu Wenruo 	struct btrfs_raid_bio *rbio;
2133d817ce35SQu Wenruo 	int ret;
2134d817ce35SQu Wenruo 
2135d817ce35SQu Wenruo 	rbio = container_of(work, struct btrfs_raid_bio, work);
2136d817ce35SQu Wenruo 
2137d817ce35SQu Wenruo 	ret = recover_rbio(rbio);
2138d817ce35SQu Wenruo 	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2139d817ce35SQu Wenruo }
2140d817ce35SQu Wenruo 
2141d31968d9SQu Wenruo /*
214253b381b3SDavid Woodhouse  * the main entry point for reads from the higher layers.  This
214353b381b3SDavid Woodhouse  * is really only called when the normal read path had a failure,
214453b381b3SDavid Woodhouse  * so we assume the bio they send down corresponds to a failed part
214553b381b3SDavid Woodhouse  * of the drive.
214653b381b3SDavid Woodhouse  */
21476065fd95SChristoph Hellwig void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
2148f1c29379SChristoph Hellwig 			   int mirror_num)
214953b381b3SDavid Woodhouse {
21506a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
215153b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
215253b381b3SDavid Woodhouse 
2153ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
2154af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
21556065fd95SChristoph Hellwig 		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
2156d817ce35SQu Wenruo 		bio_endio(bio);
2157d817ce35SQu Wenruo 		return;
2158af8e2d1dSMiao Xie 	}
215953b381b3SDavid Woodhouse 
21601b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2161bd8f7e62SQu Wenruo 	rbio_add_bio(rbio, bio);
216253b381b3SDavid Woodhouse 
216353b381b3SDavid Woodhouse 	rbio->faila = find_logical_bio_stripe(rbio, bio);
216453b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
21650b246afaSJeff Mahoney 		btrfs_warn(fs_info,
21664c664611SQu Wenruo "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bioc has map_type %llu)",
21671201b58bSDavid Sterba 			   __func__, bio->bi_iter.bi_sector << 9,
21684c664611SQu Wenruo 			   (u64)bio->bi_iter.bi_size, bioc->map_type);
2169ff2b64a2SQu Wenruo 		free_raid_bio(rbio);
21706065fd95SChristoph Hellwig 		bio->bi_status = BLK_STS_IOERR;
2171d817ce35SQu Wenruo 		bio_endio(bio);
2172d817ce35SQu Wenruo 		return;
217353b381b3SDavid Woodhouse 	}
217453b381b3SDavid Woodhouse 
217553b381b3SDavid Woodhouse 	/*
21768810f751SLiu Bo 	 * Loop retry:
21778810f751SLiu Bo 	 * for 'mirror_num == 2', reconstruct from all other stripes.
21788810f751SLiu Bo 	 * for 'mirror_num > 2', select a stripe to fail on every retry.
217953b381b3SDavid Woodhouse 	 */
21808810f751SLiu Bo 	if (mirror_num > 2) {
21818810f751SLiu Bo 		/*
21828810f751SLiu Bo 		 * 'mirror_num == 3' is to fail the P stripe and
21838810f751SLiu Bo 		 * reconstruct from the Q stripe.  'mirror_num > 3' is to
21848810f751SLiu Bo 		 * fail a data stripe and reconstruct from the P+Q stripes.
21858810f751SLiu Bo 		 */
21868810f751SLiu Bo 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
21878810f751SLiu Bo 		ASSERT(rbio->failb > 0);
21888810f751SLiu Bo 		if (rbio->failb <= rbio->faila)
21898810f751SLiu Bo 			rbio->failb--;
21908810f751SLiu Bo 	}
219153b381b3SDavid Woodhouse 
2192d817ce35SQu Wenruo 	start_async_work(rbio, recover_rbio_work);
219353b381b3SDavid Woodhouse }
219453b381b3SDavid Woodhouse 
21955eb30ee2SQu Wenruo static int rmw_read_and_wait(struct btrfs_raid_bio *rbio)
21965eb30ee2SQu Wenruo {
21975eb30ee2SQu Wenruo 	struct bio_list bio_list;
21985eb30ee2SQu Wenruo 	struct bio *bio;
21995eb30ee2SQu Wenruo 	int ret;
22005eb30ee2SQu Wenruo 
22015eb30ee2SQu Wenruo 	bio_list_init(&bio_list);
22025eb30ee2SQu Wenruo 	atomic_set(&rbio->error, 0);
22035eb30ee2SQu Wenruo 
22045eb30ee2SQu Wenruo 	ret = rmw_assemble_read_bios(rbio, &bio_list);
22055eb30ee2SQu Wenruo 	if (ret < 0)
22065eb30ee2SQu Wenruo 		goto out;
22075eb30ee2SQu Wenruo 
22085eb30ee2SQu Wenruo 	submit_read_bios(rbio, &bio_list);
22095eb30ee2SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
22105eb30ee2SQu Wenruo 	return ret;
22115eb30ee2SQu Wenruo out:
22125eb30ee2SQu Wenruo 	while ((bio = bio_list_pop(&bio_list)))
22135eb30ee2SQu Wenruo 		bio_put(bio);
22145eb30ee2SQu Wenruo 
22155eb30ee2SQu Wenruo 	return ret;
22165eb30ee2SQu Wenruo }
22175eb30ee2SQu Wenruo 
22185eb30ee2SQu Wenruo static void raid_wait_write_end_io(struct bio *bio)
22195eb30ee2SQu Wenruo {
22205eb30ee2SQu Wenruo 	struct btrfs_raid_bio *rbio = bio->bi_private;
22215eb30ee2SQu Wenruo 	blk_status_t err = bio->bi_status;
22225eb30ee2SQu Wenruo 
22235eb30ee2SQu Wenruo 	if (err)
22245eb30ee2SQu Wenruo 		fail_bio_stripe(rbio, bio);
22255eb30ee2SQu Wenruo 	bio_put(bio);
22265eb30ee2SQu Wenruo 	if (atomic_dec_and_test(&rbio->stripes_pending))
22275eb30ee2SQu Wenruo 		wake_up(&rbio->io_wait);
22285eb30ee2SQu Wenruo }
22295eb30ee2SQu Wenruo 
22305eb30ee2SQu Wenruo static void submit_write_bios(struct btrfs_raid_bio *rbio,
22315eb30ee2SQu Wenruo 			      struct bio_list *bio_list)
22325eb30ee2SQu Wenruo {
22335eb30ee2SQu Wenruo 	struct bio *bio;
22345eb30ee2SQu Wenruo 
22355eb30ee2SQu Wenruo 	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
22365eb30ee2SQu Wenruo 	while ((bio = bio_list_pop(bio_list))) {
22375eb30ee2SQu Wenruo 		bio->bi_end_io = raid_wait_write_end_io;
22385eb30ee2SQu Wenruo 
22395eb30ee2SQu Wenruo 		if (trace_raid56_write_stripe_enabled()) {
22405eb30ee2SQu Wenruo 			struct raid56_bio_trace_info trace_info = { 0 };
22415eb30ee2SQu Wenruo 
22425eb30ee2SQu Wenruo 			bio_get_trace_info(rbio, bio, &trace_info);
22435eb30ee2SQu Wenruo 			trace_raid56_write_stripe(rbio, bio, &trace_info);
22445eb30ee2SQu Wenruo 		}
22455eb30ee2SQu Wenruo 		submit_bio(bio);
22465eb30ee2SQu Wenruo 	}
22475eb30ee2SQu Wenruo }
22485eb30ee2SQu Wenruo 
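/*
 * Editorial sketch, not part of this file: submit_write_bios() and
 * raid_wait_write_end_io() above implement a submit-and-wait pattern,
 * where stripes_pending counts in-flight bios and the last completion
 * wakes the waiter (wait_event()/wake_up() in the kernel).  Below is a
 * hypothetical userspace model of the same pattern using pthreads; all
 * names are made up for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>

static atomic_int model_stripes_pending;
static pthread_mutex_t model_io_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t model_io_wait = PTHREAD_COND_INITIALIZER;

static void model_end_io(void)
{
	/* Mirrors atomic_dec_and_test() + wake_up() in the end_io handler. */
	if (atomic_fetch_sub(&model_stripes_pending, 1) == 1) {
		pthread_mutex_lock(&model_io_lock);
		pthread_cond_broadcast(&model_io_wait);
		pthread_mutex_unlock(&model_io_lock);
	}
}

static void model_submit_and_wait(int nr_bios)
{
	atomic_store(&model_stripes_pending, nr_bios);
	/* ... submit nr_bios async writes, each ending in model_end_io() ... */
	pthread_mutex_lock(&model_io_lock);
	while (atomic_load(&model_stripes_pending) != 0)
		pthread_cond_wait(&model_io_wait, &model_io_lock);
	pthread_mutex_unlock(&model_io_lock);
}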
224993723095SQu Wenruo static int rmw_rbio(struct btrfs_raid_bio *rbio)
22505eb30ee2SQu Wenruo {
22515eb30ee2SQu Wenruo 	struct bio_list bio_list;
22525eb30ee2SQu Wenruo 	int sectornr;
22535eb30ee2SQu Wenruo 	int ret = 0;
22545eb30ee2SQu Wenruo 
22555eb30ee2SQu Wenruo 	/*
22565eb30ee2SQu Wenruo 	 * Allocate the pages for parity first, as P/Q pages will always be
22575eb30ee2SQu Wenruo 	 * needed for both full-stripe and sub-stripe writes.
22585eb30ee2SQu Wenruo 	 */
22595eb30ee2SQu Wenruo 	ret = alloc_rbio_parity_pages(rbio);
22605eb30ee2SQu Wenruo 	if (ret < 0)
22615eb30ee2SQu Wenruo 		return ret;
22625eb30ee2SQu Wenruo 
22635eb30ee2SQu Wenruo 	/* Full stripe write, can write the full stripe right now. */
22645eb30ee2SQu Wenruo 	if (rbio_is_full(rbio))
22655eb30ee2SQu Wenruo 		goto write;
22665eb30ee2SQu Wenruo 	/*
22675eb30ee2SQu Wenruo 	 * Now we're doing a sub-stripe write; we also need all the data
22685eb30ee2SQu Wenruo 	 * stripes to do the full RMW.
22695eb30ee2SQu Wenruo 	 */
22705eb30ee2SQu Wenruo 	ret = alloc_rbio_data_pages(rbio);
22715eb30ee2SQu Wenruo 	if (ret < 0)
22725eb30ee2SQu Wenruo 		return ret;
22735eb30ee2SQu Wenruo 
22745eb30ee2SQu Wenruo 	atomic_set(&rbio->error, 0);
22755eb30ee2SQu Wenruo 	index_rbio_pages(rbio);
22765eb30ee2SQu Wenruo 
22775eb30ee2SQu Wenruo 	ret = rmw_read_and_wait(rbio);
22785eb30ee2SQu Wenruo 	if (ret < 0)
22795eb30ee2SQu Wenruo 		return ret;
22805eb30ee2SQu Wenruo 
22815eb30ee2SQu Wenruo 	/* Too many read errors, beyond our tolerance. */
22825eb30ee2SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
22835eb30ee2SQu Wenruo 		return ret;
22845eb30ee2SQu Wenruo 
22855eb30ee2SQu Wenruo 	/* We have read failures but they are under our tolerance, so recover. */
22865eb30ee2SQu Wenruo 	if (rbio->faila >= 0 || rbio->failb >= 0) {
22875eb30ee2SQu Wenruo 		ret = recover_rbio(rbio);
22885eb30ee2SQu Wenruo 		if (ret < 0)
22895eb30ee2SQu Wenruo 			return ret;
22905eb30ee2SQu Wenruo 	}
22915eb30ee2SQu Wenruo write:
22925eb30ee2SQu Wenruo 	/*
22935eb30ee2SQu Wenruo 	 * At this stage we're not allowed to add any new bios to the
22945eb30ee2SQu Wenruo 	 * bio list any more; anyone else who wants to change this stripe
22955eb30ee2SQu Wenruo 	 * needs to do their own RMW.
22965eb30ee2SQu Wenruo 	 */
22975eb30ee2SQu Wenruo 	spin_lock_irq(&rbio->bio_list_lock);
22985eb30ee2SQu Wenruo 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
22995eb30ee2SQu Wenruo 	spin_unlock_irq(&rbio->bio_list_lock);
23005eb30ee2SQu Wenruo 
23015eb30ee2SQu Wenruo 	atomic_set(&rbio->error, 0);
23025eb30ee2SQu Wenruo 
23035eb30ee2SQu Wenruo 	index_rbio_pages(rbio);
23045eb30ee2SQu Wenruo 
23055eb30ee2SQu Wenruo 	/*
23065eb30ee2SQu Wenruo 	 * We don't cache full rbios because we're assuming
23075eb30ee2SQu Wenruo 	 * the higher layers are unlikely to use this area of
23085eb30ee2SQu Wenruo 	 * the disk again soon.  If they do use it again,
23095eb30ee2SQu Wenruo 	 * hopefully they will send another full bio.
23105eb30ee2SQu Wenruo 	 */
23115eb30ee2SQu Wenruo 	if (!rbio_is_full(rbio))
23125eb30ee2SQu Wenruo 		cache_rbio_pages(rbio);
23135eb30ee2SQu Wenruo 	else
23145eb30ee2SQu Wenruo 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
23155eb30ee2SQu Wenruo 
23165eb30ee2SQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
23175eb30ee2SQu Wenruo 		generate_pq_vertical(rbio, sectornr);
23185eb30ee2SQu Wenruo 
23195eb30ee2SQu Wenruo 	bio_list_init(&bio_list);
23205eb30ee2SQu Wenruo 	ret = rmw_assemble_write_bios(rbio, &bio_list);
23215eb30ee2SQu Wenruo 	if (ret < 0)
23225eb30ee2SQu Wenruo 		return ret;
23235eb30ee2SQu Wenruo 
23245eb30ee2SQu Wenruo 	/* We should have at least one bio assembled. */
23255eb30ee2SQu Wenruo 	ASSERT(bio_list_size(&bio_list));
23265eb30ee2SQu Wenruo 	submit_write_bios(rbio, &bio_list);
23275eb30ee2SQu Wenruo 	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
23285eb30ee2SQu Wenruo 
23295eb30ee2SQu Wenruo 	/* We have more errors than our tolerance during the write. */
23305eb30ee2SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
23315eb30ee2SQu Wenruo 		ret = -EIO;
23325eb30ee2SQu Wenruo 	return ret;
23335eb30ee2SQu Wenruo }
23345eb30ee2SQu Wenruo 
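/*
 * Editorial sketch, not part of this file: for RAID5, the
 * generate_pq_vertical() step used by rmw_rbio() above boils down to a
 * byte-wise XOR of all the data stripes for one vertical sector.  A
 * hypothetical standalone version:
 */
#include <stddef.h>
#include <stdint.h>

static void model_gen_raid5_parity(uint8_t *parity, uint8_t * const *data,
				   int nr_data, size_t sectorsize)
{
	size_t i;
	int stripe;

	for (i = 0; i < sectorsize; i++) {
		uint8_t x = 0;

		for (stripe = 0; stripe < nr_data; stripe++)
			x ^= data[stripe][i];
		parity[i] = x;
	}
}

/*
 * Because XOR is its own inverse, any single lost data stripe can later be
 * rebuilt by XOR-ing the parity with the surviving data stripes, which is
 * what the recovery path relies on.
 */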
233593723095SQu Wenruo static void rmw_rbio_work(struct work_struct *work)
233653b381b3SDavid Woodhouse {
233753b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
233893723095SQu Wenruo 	int ret;
233953b381b3SDavid Woodhouse 
234053b381b3SDavid Woodhouse 	rbio = container_of(work, struct btrfs_raid_bio, work);
234193723095SQu Wenruo 
234293723095SQu Wenruo 	ret = lock_stripe_add(rbio);
234393723095SQu Wenruo 	if (ret == 0) {
234493723095SQu Wenruo 		ret = rmw_rbio(rbio);
234593723095SQu Wenruo 		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
234693723095SQu Wenruo 	}
234793723095SQu Wenruo }
234893723095SQu Wenruo 
234993723095SQu Wenruo static void rmw_rbio_work_locked(struct work_struct *work)
235093723095SQu Wenruo {
235193723095SQu Wenruo 	struct btrfs_raid_bio *rbio;
235293723095SQu Wenruo 	int ret;
235393723095SQu Wenruo 
235493723095SQu Wenruo 	rbio = container_of(work, struct btrfs_raid_bio, work);
235593723095SQu Wenruo 
235693723095SQu Wenruo 	ret = rmw_rbio(rbio);
235793723095SQu Wenruo 	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
235853b381b3SDavid Woodhouse }
235953b381b3SDavid Woodhouse 
23605a6ac9eaSMiao Xie /*
23615a6ac9eaSMiao Xie  * The following code is used to scrub/replace the parity stripe
23625a6ac9eaSMiao Xie  *
23634c664611SQu Wenruo  * Caller must have already increased bio_counter for getting @bioc.
2364ae6529c3SQu Wenruo  *
23655a6ac9eaSMiao Xie  * Note: We must make sure that all the pages added to the scrub/replace
23665a6ac9eaSMiao Xie  * raid bio are correct and will not be changed during the scrub/replace.
23675a6ac9eaSMiao Xie  * That is, those pages hold only metadata or file data with checksums.
23685a6ac9eaSMiao Xie  */
23695a6ac9eaSMiao Xie 
23706a258d72SQu Wenruo struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
23716a258d72SQu Wenruo 				struct btrfs_io_context *bioc,
2372ff18a4afSChristoph Hellwig 				struct btrfs_device *scrub_dev,
23735a6ac9eaSMiao Xie 				unsigned long *dbitmap, int stripe_nsectors)
23745a6ac9eaSMiao Xie {
23756a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
23765a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
23775a6ac9eaSMiao Xie 	int i;
23785a6ac9eaSMiao Xie 
2379ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
23805a6ac9eaSMiao Xie 	if (IS_ERR(rbio))
23815a6ac9eaSMiao Xie 		return NULL;
23825a6ac9eaSMiao Xie 	bio_list_add(&rbio->bio_list, bio);
23835a6ac9eaSMiao Xie 	/*
23845a6ac9eaSMiao Xie 	 * This is a special bio which is used to hold the completion handler
23855a6ac9eaSMiao Xie 	 * and make the scrub rbio similar to the other types.
23865a6ac9eaSMiao Xie 	 */
23875a6ac9eaSMiao Xie 	ASSERT(!bio->bi_iter.bi_size);
23885a6ac9eaSMiao Xie 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
23895a6ac9eaSMiao Xie 
23909cd3a7ebSLiu Bo 	/*
23914c664611SQu Wenruo 	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
23929cd3a7ebSLiu Bo 	 * to the end position, so this search can start from the first parity
23939cd3a7ebSLiu Bo 	 * stripe.
23949cd3a7ebSLiu Bo 	 */
23959cd3a7ebSLiu Bo 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
23964c664611SQu Wenruo 		if (bioc->stripes[i].dev == scrub_dev) {
23975a6ac9eaSMiao Xie 			rbio->scrubp = i;
23985a6ac9eaSMiao Xie 			break;
23995a6ac9eaSMiao Xie 		}
24005a6ac9eaSMiao Xie 	}
24019cd3a7ebSLiu Bo 	ASSERT(i < rbio->real_stripes);
24025a6ac9eaSMiao Xie 
2403c67c68ebSQu Wenruo 	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
24045a6ac9eaSMiao Xie 	return rbio;
24055a6ac9eaSMiao Xie }
24065a6ac9eaSMiao Xie 
2407b4ee1782SOmar Sandoval /* Used for both parity scrub and missing-device rebuild. */
2408b4ee1782SOmar Sandoval void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
24096346f6bfSQu Wenruo 			    unsigned int pgoff, u64 logical)
24105a6ac9eaSMiao Xie {
24116346f6bfSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
24125a6ac9eaSMiao Xie 	int stripe_offset;
24135a6ac9eaSMiao Xie 	int index;
24145a6ac9eaSMiao Xie 
24154c664611SQu Wenruo 	ASSERT(logical >= rbio->bioc->raid_map[0]);
24166346f6bfSQu Wenruo 	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
2417ff18a4afSChristoph Hellwig 				       BTRFS_STRIPE_LEN * rbio->nr_data);
24184c664611SQu Wenruo 	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
24196346f6bfSQu Wenruo 	index = stripe_offset / sectorsize;
24206346f6bfSQu Wenruo 	rbio->bio_sectors[index].page = page;
24216346f6bfSQu Wenruo 	rbio->bio_sectors[index].pgoff = pgoff;
24225a6ac9eaSMiao Xie }
24235a6ac9eaSMiao Xie 
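/*
 * Editorial sketch, not part of this file: the index computed by
 * raid56_add_scrub_pages() above is just the byte offset of @logical from
 * the start of the full stripe divided by the sector size.  Hypothetical
 * standalone form:
 */
#include <assert.h>
#include <stdint.h>

static int model_scrub_sector_index(uint64_t logical, uint64_t raid_map0,
				    uint32_t sectorsize)
{
	assert(logical >= raid_map0);
	return (int)((logical - raid_map0) / sectorsize);
}

/* E.g. raid_map0 = 1 MiB, logical = 1 MiB + 8 KiB, 4K sectors -> index 2. */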
24245a6ac9eaSMiao Xie /*
24255a6ac9eaSMiao Xie  * We only scrub the parity for the horizontal stripes where we have correct
24265a6ac9eaSMiao Xie  * data, so we don't need to allocate pages for all the stripes.
24275a6ac9eaSMiao Xie  */
24285a6ac9eaSMiao Xie static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
24295a6ac9eaSMiao Xie {
24303907ce29SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2431aee35e4bSQu Wenruo 	int total_sector_nr;
24325a6ac9eaSMiao Xie 
2433aee35e4bSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2434aee35e4bSQu Wenruo 	     total_sector_nr++) {
24353907ce29SQu Wenruo 		struct page *page;
2436aee35e4bSQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
2437aee35e4bSQu Wenruo 		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
24383907ce29SQu Wenruo 
2439aee35e4bSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
2440aee35e4bSQu Wenruo 			continue;
24415a6ac9eaSMiao Xie 		if (rbio->stripe_pages[index])
24425a6ac9eaSMiao Xie 			continue;
2443b0ee5e1eSDavid Sterba 		page = alloc_page(GFP_NOFS);
24445a6ac9eaSMiao Xie 		if (!page)
24455a6ac9eaSMiao Xie 			return -ENOMEM;
24465a6ac9eaSMiao Xie 		rbio->stripe_pages[index] = page;
24475a6ac9eaSMiao Xie 	}
2448eb357060SQu Wenruo 	index_stripe_sectors(rbio);
24495a6ac9eaSMiao Xie 	return 0;
24505a6ac9eaSMiao Xie }
24515a6ac9eaSMiao Xie 
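/*
 * Editorial sketch, not part of this file: the stripe_pages index used in
 * alloc_rbio_essential_pages() above maps a global sector number to the
 * page that contains it.  A hypothetical standalone form, assuming
 * sectorsize <= page size:
 */
#include <stdint.h>

#define MODEL_PAGE_SHIFT 12	/* assume 4 KiB pages */

static int model_sector_to_page_index(int total_sector_nr, uint32_t sectorsize)
{
	/* Byte offset within the rbio, then shift down to a page number. */
	return (int)(((uint64_t)total_sector_nr * sectorsize) >> MODEL_PAGE_SHIFT);
}

/*
 * With 4 KiB sectors on 4 KiB pages this is the identity; in the subpage
 * case (e.g. 4 KiB sectors on 64 KiB pages, shift 16) sixteen consecutive
 * sectors share each page.
 */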
24525a6ac9eaSMiao Xie static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
24535a6ac9eaSMiao Xie 					 int need_check)
24545a6ac9eaSMiao Xie {
24554c664611SQu Wenruo 	struct btrfs_io_context *bioc = rbio->bioc;
245646900662SQu Wenruo 	const u32 sectorsize = bioc->fs_info->sectorsize;
24571389053eSKees Cook 	void **pointers = rbio->finish_pointers;
2458c67c68ebSQu Wenruo 	unsigned long *pbitmap = &rbio->finish_pbitmap;
24595a6ac9eaSMiao Xie 	int nr_data = rbio->nr_data;
24605a6ac9eaSMiao Xie 	int stripe;
24613e77605dSQu Wenruo 	int sectornr;
2462c17af965SDavid Sterba 	bool has_qstripe;
246346900662SQu Wenruo 	struct sector_ptr p_sector = { 0 };
246446900662SQu Wenruo 	struct sector_ptr q_sector = { 0 };
24655a6ac9eaSMiao Xie 	struct bio_list bio_list;
24665a6ac9eaSMiao Xie 	struct bio *bio;
246776035976SMiao Xie 	int is_replace = 0;
24685a6ac9eaSMiao Xie 	int ret;
24695a6ac9eaSMiao Xie 
24705a6ac9eaSMiao Xie 	bio_list_init(&bio_list);
24715a6ac9eaSMiao Xie 
2472c17af965SDavid Sterba 	if (rbio->real_stripes - rbio->nr_data == 1)
2473c17af965SDavid Sterba 		has_qstripe = false;
2474c17af965SDavid Sterba 	else if (rbio->real_stripes - rbio->nr_data == 2)
2475c17af965SDavid Sterba 		has_qstripe = true;
2476c17af965SDavid Sterba 	else
24775a6ac9eaSMiao Xie 		BUG();
24785a6ac9eaSMiao Xie 
24794c664611SQu Wenruo 	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
248076035976SMiao Xie 		is_replace = 1;
2481c67c68ebSQu Wenruo 		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
248276035976SMiao Xie 	}
248376035976SMiao Xie 
24845a6ac9eaSMiao Xie 	/*
24855a6ac9eaSMiao Xie 	 * The higher layers (scrubber) are unlikely to use this area of
24865a6ac9eaSMiao Xie 	 * the disk again soon, so don't cache it.
24885a6ac9eaSMiao Xie 	 */
24895a6ac9eaSMiao Xie 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
24905a6ac9eaSMiao Xie 
24915a6ac9eaSMiao Xie 	if (!need_check)
24925a6ac9eaSMiao Xie 		goto writeback;
24935a6ac9eaSMiao Xie 
249446900662SQu Wenruo 	p_sector.page = alloc_page(GFP_NOFS);
249546900662SQu Wenruo 	if (!p_sector.page)
24965a6ac9eaSMiao Xie 		goto cleanup;
249746900662SQu Wenruo 	p_sector.pgoff = 0;
249846900662SQu Wenruo 	p_sector.uptodate = 1;
24995a6ac9eaSMiao Xie 
2500c17af965SDavid Sterba 	if (has_qstripe) {
2501d70cef0dSIra Weiny 		/* RAID6, allocate and map temp space for the Q stripe */
250246900662SQu Wenruo 		q_sector.page = alloc_page(GFP_NOFS);
250346900662SQu Wenruo 		if (!q_sector.page) {
250446900662SQu Wenruo 			__free_page(p_sector.page);
250546900662SQu Wenruo 			p_sector.page = NULL;
25065a6ac9eaSMiao Xie 			goto cleanup;
25075a6ac9eaSMiao Xie 		}
250846900662SQu Wenruo 		q_sector.pgoff = 0;
250946900662SQu Wenruo 		q_sector.uptodate = 1;
251046900662SQu Wenruo 		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
25115a6ac9eaSMiao Xie 	}
25125a6ac9eaSMiao Xie 
25135a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
25145a6ac9eaSMiao Xie 
2515d70cef0dSIra Weiny 	/* Map the parity stripe just once */
251646900662SQu Wenruo 	pointers[nr_data] = kmap_local_page(p_sector.page);
2517d70cef0dSIra Weiny 
2518c67c68ebSQu Wenruo 	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
251946900662SQu Wenruo 		struct sector_ptr *sector;
25205a6ac9eaSMiao Xie 		void *parity;
252146900662SQu Wenruo 
25225a6ac9eaSMiao Xie 		/* first collect one page from each data stripe */
25235a6ac9eaSMiao Xie 		for (stripe = 0; stripe < nr_data; stripe++) {
252446900662SQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
252546900662SQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
252646900662SQu Wenruo 					   sector->pgoff;
25275a6ac9eaSMiao Xie 		}
25285a6ac9eaSMiao Xie 
2529c17af965SDavid Sterba 		if (has_qstripe) {
2530d70cef0dSIra Weiny 			/* RAID6, call the library function to fill in our P/Q */
253146900662SQu Wenruo 			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
25325a6ac9eaSMiao Xie 						pointers);
25335a6ac9eaSMiao Xie 		} else {
25345a6ac9eaSMiao Xie 			/* raid5 */
253546900662SQu Wenruo 			memcpy(pointers[nr_data], pointers[0], sectorsize);
253646900662SQu Wenruo 			run_xor(pointers + 1, nr_data - 1, sectorsize);
25375a6ac9eaSMiao Xie 		}
25385a6ac9eaSMiao Xie 
253901327610SNicholas D Steeves 		/* Check the parity being scrubbed and repair it */
254046900662SQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
254146900662SQu Wenruo 		parity = kmap_local_page(sector->page) + sector->pgoff;
254246900662SQu Wenruo 		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
254346900662SQu Wenruo 			memcpy(parity, pointers[rbio->scrubp], sectorsize);
25445a6ac9eaSMiao Xie 		else
25455a6ac9eaSMiao Xie 			/* Parity is right, needn't writeback */
2546c67c68ebSQu Wenruo 			bitmap_clear(&rbio->dbitmap, sectornr, 1);
254758c1a35cSIra Weiny 		kunmap_local(parity);
25485a6ac9eaSMiao Xie 
254994a0b58dSIra Weiny 		for (stripe = nr_data - 1; stripe >= 0; stripe--)
255094a0b58dSIra Weiny 			kunmap_local(pointers[stripe]);
25515a6ac9eaSMiao Xie 	}
25525a6ac9eaSMiao Xie 
255394a0b58dSIra Weiny 	kunmap_local(pointers[nr_data]);
255446900662SQu Wenruo 	__free_page(p_sector.page);
255546900662SQu Wenruo 	p_sector.page = NULL;
255646900662SQu Wenruo 	if (q_sector.page) {
255794a0b58dSIra Weiny 		kunmap_local(pointers[rbio->real_stripes - 1]);
255846900662SQu Wenruo 		__free_page(q_sector.page);
255946900662SQu Wenruo 		q_sector.page = NULL;
2560d70cef0dSIra Weiny 	}
25615a6ac9eaSMiao Xie 
25625a6ac9eaSMiao Xie writeback:
25635a6ac9eaSMiao Xie 	/*
25645a6ac9eaSMiao Xie 	 * time to start writing.  Make bios for everything from the
25655a6ac9eaSMiao Xie 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
25665a6ac9eaSMiao Xie 	 * everything else.
25675a6ac9eaSMiao Xie 	 */
2568c67c68ebSQu Wenruo 	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
25693e77605dSQu Wenruo 		struct sector_ptr *sector;
25705a6ac9eaSMiao Xie 
25713e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25723e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
2573ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_WRITE);
25745a6ac9eaSMiao Xie 		if (ret)
25755a6ac9eaSMiao Xie 			goto cleanup;
25765a6ac9eaSMiao Xie 	}
25775a6ac9eaSMiao Xie 
257876035976SMiao Xie 	if (!is_replace)
257976035976SMiao Xie 		goto submit_write;
258076035976SMiao Xie 
25813e77605dSQu Wenruo 	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
25823e77605dSQu Wenruo 		struct sector_ptr *sector;
258376035976SMiao Xie 
25843e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25853e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector,
25864c664611SQu Wenruo 				       bioc->tgtdev_map[rbio->scrubp],
2587ff18a4afSChristoph Hellwig 				       sectornr, REQ_OP_WRITE);
258876035976SMiao Xie 		if (ret)
258976035976SMiao Xie 			goto cleanup;
259076035976SMiao Xie 	}
259176035976SMiao Xie 
259276035976SMiao Xie submit_write:
25935a6ac9eaSMiao Xie 	nr_data = bio_list_size(&bio_list);
25945a6ac9eaSMiao Xie 	if (!nr_data) {
25955a6ac9eaSMiao Xie 		/* Every parity is right */
259658efbc9fSOmar Sandoval 		rbio_orig_end_io(rbio, BLK_STS_OK);
25975a6ac9eaSMiao Xie 		return;
25985a6ac9eaSMiao Xie 	}
25995a6ac9eaSMiao Xie 
26005a6ac9eaSMiao Xie 	atomic_set(&rbio->stripes_pending, nr_data);
26015a6ac9eaSMiao Xie 
2602bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
2603a6111d11SZhao Lei 		bio->bi_end_io = raid_write_end_io;
26044e49ea4aSMike Christie 
2605b8bea09aSQu Wenruo 		if (trace_raid56_scrub_write_stripe_enabled()) {
2606b8bea09aSQu Wenruo 			struct raid56_bio_trace_info trace_info = { 0 };
2607b8bea09aSQu Wenruo 
2608b8bea09aSQu Wenruo 			bio_get_trace_info(rbio, bio, &trace_info);
2609b8bea09aSQu Wenruo 			trace_raid56_scrub_write_stripe(rbio, bio, &trace_info);
2610b8bea09aSQu Wenruo 		}
26114e49ea4aSMike Christie 		submit_bio(bio);
26125a6ac9eaSMiao Xie 	}
26135a6ac9eaSMiao Xie 	return;
26145a6ac9eaSMiao Xie 
26155a6ac9eaSMiao Xie cleanup:
261658efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2617785884fcSLiu Bo 
2618785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2619785884fcSLiu Bo 		bio_put(bio);
26205a6ac9eaSMiao Xie }
26215a6ac9eaSMiao Xie 
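/*
 * Editorial sketch, not part of this file: the heart of
 * finish_parity_scrub() above is a per-sector compare-and-repair, where a
 * sector stays queued for writeback only when the on-disk parity differs
 * from the freshly computed one.  A hypothetical distillation:
 */
#include <stdbool.h>
#include <string.h>

/* Returns true if the sector must be written back (parity was bad). */
static bool model_scrub_one_sector(void *on_disk_parity,
				   const void *computed_parity,
				   size_t sectorsize)
{
	if (memcmp(on_disk_parity, computed_parity, sectorsize) == 0)
		return false;	/* Parity is right, skip the writeback. */
	memcpy(on_disk_parity, computed_parity, sectorsize);
	return true;
}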
26225a6ac9eaSMiao Xie static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
26235a6ac9eaSMiao Xie {
26245a6ac9eaSMiao Xie 	if (stripe >= 0 && stripe < rbio->nr_data)
26255a6ac9eaSMiao Xie 		return 1;
26265a6ac9eaSMiao Xie 	return 0;
26275a6ac9eaSMiao Xie }
26285a6ac9eaSMiao Xie 
26295a6ac9eaSMiao Xie /*
26305a6ac9eaSMiao Xie  * While we're doing the parity check and repair, we could have errors
26315a6ac9eaSMiao Xie  * in reading pages off the disk.  This checks for errors, and if we're
26325a6ac9eaSMiao Xie  * not able to read a page it'll trigger parity reconstruction.  The
26335a6ac9eaSMiao Xie  * parity scrub will be finished after we've reconstructed the failed
26345a6ac9eaSMiao Xie  * stripes.
26355a6ac9eaSMiao Xie  */
26365a6ac9eaSMiao Xie static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
26375a6ac9eaSMiao Xie {
26384c664611SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
26395a6ac9eaSMiao Xie 		goto cleanup;
26405a6ac9eaSMiao Xie 
26415a6ac9eaSMiao Xie 	if (rbio->faila >= 0 || rbio->failb >= 0) {
26425a6ac9eaSMiao Xie 		int dfail = 0, failp = -1;
26435a6ac9eaSMiao Xie 
26445a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->faila))
26455a6ac9eaSMiao Xie 			dfail++;
26465a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->faila))
26475a6ac9eaSMiao Xie 			failp = rbio->faila;
26485a6ac9eaSMiao Xie 
26495a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->failb))
26505a6ac9eaSMiao Xie 			dfail++;
26515a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->failb))
26525a6ac9eaSMiao Xie 			failp = rbio->failb;
26535a6ac9eaSMiao Xie 
26545a6ac9eaSMiao Xie 		/*
26555a6ac9eaSMiao Xie 		 * We cannot use the parity being scrubbed to repair the
26565a6ac9eaSMiao Xie 		 * data, so our repair capability is reduced by one.
26575a6ac9eaSMiao Xie 		 * (In the case of RAID5, we cannot repair anything.)
26585a6ac9eaSMiao Xie 		 */
26594c664611SQu Wenruo 		if (dfail > rbio->bioc->max_errors - 1)
26605a6ac9eaSMiao Xie 			goto cleanup;
26615a6ac9eaSMiao Xie 
26625a6ac9eaSMiao Xie 		/*
26635a6ac9eaSMiao Xie 		 * If all the data is good and only the parity is bad,
26645a6ac9eaSMiao Xie 		 * just repair the parity.
26655a6ac9eaSMiao Xie 		 */
26665a6ac9eaSMiao Xie 		if (dfail == 0) {
26675a6ac9eaSMiao Xie 			finish_parity_scrub(rbio, 0);
26685a6ac9eaSMiao Xie 			return;
26695a6ac9eaSMiao Xie 		}
26705a6ac9eaSMiao Xie 
26715a6ac9eaSMiao Xie 		/*
26725a6ac9eaSMiao Xie 		 * At this point we have one corrupted data stripe and one
26735a6ac9eaSMiao Xie 		 * corrupted parity on RAID6.  If the corrupted parity is the
267401327610SNicholas D Steeves 		 * one being scrubbed, we can luckily use the other parity to
26755a6ac9eaSMiao Xie 		 * repair the data; otherwise the data stripe is unrecoverable.
26765a6ac9eaSMiao Xie 		 */
26775a6ac9eaSMiao Xie 		if (failp != rbio->scrubp)
26785a6ac9eaSMiao Xie 			goto cleanup;
26795a6ac9eaSMiao Xie 
26805a6ac9eaSMiao Xie 		__raid_recover_end_io(rbio);
26815a6ac9eaSMiao Xie 	} else {
26825a6ac9eaSMiao Xie 		finish_parity_scrub(rbio, 1);
26835a6ac9eaSMiao Xie 	}
26845a6ac9eaSMiao Xie 	return;
26855a6ac9eaSMiao Xie 
26865a6ac9eaSMiao Xie cleanup:
268758efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
26885a6ac9eaSMiao Xie }
26895a6ac9eaSMiao Xie 
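/*
 * Editorial sketch, not part of this file: the decision logic of
 * validate_rbio_for_parity_scrub() above, reduced to a hypothetical pure
 * function.  max_errors is 1 for RAID5 and 2 for RAID6; dfail counts
 * failed data stripes, failp is a failed parity stripe (or -1) and scrubp
 * is the parity stripe being scrubbed.
 */
enum model_scrub_action {
	MODEL_SCRUB_FAIL,		/* too many data failures, give up */
	MODEL_SCRUB_REPAIR_PARITY,	/* data is fine, rewrite parity only */
	MODEL_SCRUB_RECOVER,		/* rebuild data from the other parity */
};

static enum model_scrub_action
model_validate_scrub(int dfail, int failp, int scrubp, int max_errors)
{
	/* One repair slot is consumed by the parity being scrubbed. */
	if (dfail > max_errors - 1)
		return MODEL_SCRUB_FAIL;
	if (dfail == 0)
		return MODEL_SCRUB_REPAIR_PARITY;
	/*
	 * A failed data stripe is only recoverable if the failed parity is
	 * the one we are scrubbing (and rewriting) anyway.
	 */
	if (failp != scrubp)
		return MODEL_SCRUB_FAIL;
	return MODEL_SCRUB_RECOVER;
}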
26905a6ac9eaSMiao Xie /*
26915a6ac9eaSMiao Xie  * End io for the read phase of the parity scrub.  All the bios here are
26925a6ac9eaSMiao Xie  * physical stripe bios we've read from the disk so we can recalculate the
26935a6ac9eaSMiao Xie  * parity of the stripe.
26945a6ac9eaSMiao Xie  *
26955a6ac9eaSMiao Xie  * This will usually kick off finish_parity_scrub once all the bios are read
26965a6ac9eaSMiao Xie  * in, but it may trigger parity reconstruction if we had any errors.
26975a6ac9eaSMiao Xie  */
2698d34e123dSChristoph Hellwig static void raid56_parity_scrub_end_io_work(struct work_struct *work)
26995a6ac9eaSMiao Xie {
2700d34e123dSChristoph Hellwig 	struct btrfs_raid_bio *rbio =
2701d34e123dSChristoph Hellwig 		container_of(work, struct btrfs_raid_bio, end_io_work);
27025a6ac9eaSMiao Xie 
27035a6ac9eaSMiao Xie 	/*
2704d34e123dSChristoph Hellwig 	 * This will normally call finish_parity_scrub to start our write, but
2705d34e123dSChristoph Hellwig 	 * if there are any failed stripes we'll reconstruct from parity first.
27065a6ac9eaSMiao Xie 	 */
27075a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
27085a6ac9eaSMiao Xie }
27095a6ac9eaSMiao Xie 
2710*cb3450b7SQu Wenruo static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio,
2711*cb3450b7SQu Wenruo 				    struct bio_list *bio_list)
27125a6ac9eaSMiao Xie {
27135a6ac9eaSMiao Xie 	struct bio *bio;
2714*cb3450b7SQu Wenruo 	int total_sector_nr;
2715*cb3450b7SQu Wenruo 	int ret = 0;
27165a6ac9eaSMiao Xie 
2717*cb3450b7SQu Wenruo 	ASSERT(bio_list_size(bio_list) == 0);
2718785884fcSLiu Bo 
27191c10702eSQu Wenruo 	/* Build a list of bios to read all the missing parts. */
27201c10702eSQu Wenruo 	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
27211c10702eSQu Wenruo 	     total_sector_nr++) {
27221c10702eSQu Wenruo 		int sectornr = total_sector_nr % rbio->stripe_nsectors;
27231c10702eSQu Wenruo 		int stripe = total_sector_nr / rbio->stripe_nsectors;
27243e77605dSQu Wenruo 		struct sector_ptr *sector;
27251c10702eSQu Wenruo 
27261c10702eSQu Wenruo 		/* No data in the vertical stripe, no need to read. */
27271c10702eSQu Wenruo 		if (!test_bit(sectornr, &rbio->dbitmap))
27281c10702eSQu Wenruo 			continue;
27291c10702eSQu Wenruo 
27305a6ac9eaSMiao Xie 		/*
27311c10702eSQu Wenruo 		 * We want to find all the sectors missing from the rbio and
27321c10702eSQu Wenruo 		 * read them from the disk. If sector_in_rbio() finds a sector
27331c10702eSQu Wenruo 		 * in the bio list we don't need to read it off the stripe.
27345a6ac9eaSMiao Xie 		 */
27353e77605dSQu Wenruo 		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
27363e77605dSQu Wenruo 		if (sector)
27375a6ac9eaSMiao Xie 			continue;
27385a6ac9eaSMiao Xie 
27393e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, stripe, sectornr);
27405a6ac9eaSMiao Xie 		/*
27411c10702eSQu Wenruo 		 * The bio cache may have handed us an uptodate sector.  If so,
27421c10702eSQu Wenruo 		 * use it.
27435a6ac9eaSMiao Xie 		 */
27443e77605dSQu Wenruo 		if (sector->uptodate)
27455a6ac9eaSMiao Xie 			continue;
27465a6ac9eaSMiao Xie 
2747*cb3450b7SQu Wenruo 		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
2748ff18a4afSChristoph Hellwig 					 sectornr, REQ_OP_READ);
27495a6ac9eaSMiao Xie 		if (ret)
2750*cb3450b7SQu Wenruo 			goto error;
27515a6ac9eaSMiao Xie 	}
2752*cb3450b7SQu Wenruo 	return 0;
2753*cb3450b7SQu Wenruo error:
2754*cb3450b7SQu Wenruo 	while ((bio = bio_list_pop(bio_list)))
2755*cb3450b7SQu Wenruo 		bio_put(bio);
2756*cb3450b7SQu Wenruo 	return ret;
2757*cb3450b7SQu Wenruo }
2758*cb3450b7SQu Wenruo 
2759*cb3450b7SQu Wenruo static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2760*cb3450b7SQu Wenruo {
2761*cb3450b7SQu Wenruo 	int bios_to_read = 0;
2762*cb3450b7SQu Wenruo 	struct bio_list bio_list;
2763*cb3450b7SQu Wenruo 	int ret;
2764*cb3450b7SQu Wenruo 	struct bio *bio;
2765*cb3450b7SQu Wenruo 
2766*cb3450b7SQu Wenruo 	bio_list_init(&bio_list);
2767*cb3450b7SQu Wenruo 
2768*cb3450b7SQu Wenruo 	ret = alloc_rbio_essential_pages(rbio);
2769*cb3450b7SQu Wenruo 	if (ret)
2770*cb3450b7SQu Wenruo 		goto cleanup;
2771*cb3450b7SQu Wenruo 
2772*cb3450b7SQu Wenruo 	atomic_set(&rbio->error, 0);
2773*cb3450b7SQu Wenruo 	ret = scrub_assemble_read_bios(rbio, &bio_list);
2774*cb3450b7SQu Wenruo 	if (ret < 0)
2775*cb3450b7SQu Wenruo 		goto cleanup;
27765a6ac9eaSMiao Xie 
27775a6ac9eaSMiao Xie 	bios_to_read = bio_list_size(&bio_list);
27785a6ac9eaSMiao Xie 	if (!bios_to_read) {
27795a6ac9eaSMiao Xie 		/*
27805a6ac9eaSMiao Xie 		 * This can happen if others have merged with us; it
27815a6ac9eaSMiao Xie 		 * means there is nothing left to read.  But if there
27825a6ac9eaSMiao Xie 		 * are missing devices it may not be safe to do the
27835a6ac9eaSMiao Xie 		 * full stripe write yet.
27845a6ac9eaSMiao Xie 		 */
27855a6ac9eaSMiao Xie 		goto finish;
27865a6ac9eaSMiao Xie 	}
27875a6ac9eaSMiao Xie 
27885a6ac9eaSMiao Xie 	/*
27894c664611SQu Wenruo 	 * The bioc may be freed once we submit the last bio. Make sure not to
27904c664611SQu Wenruo 	 * touch it after that.
27915a6ac9eaSMiao Xie 	 */
27925a6ac9eaSMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
2793d34e123dSChristoph Hellwig 	INIT_WORK(&rbio->end_io_work, raid56_parity_scrub_end_io_work);
2794bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
2795d34e123dSChristoph Hellwig 		bio->bi_end_io = raid56_bio_end_io;
27965a6ac9eaSMiao Xie 
2797b8bea09aSQu Wenruo 		if (trace_raid56_scrub_read_enabled()) {
2798b8bea09aSQu Wenruo 			struct raid56_bio_trace_info trace_info = { 0 };
27995a6ac9eaSMiao Xie 
2800b8bea09aSQu Wenruo 			bio_get_trace_info(rbio, bio, &trace_info);
2801b8bea09aSQu Wenruo 			trace_raid56_scrub_read(rbio, bio, &trace_info);
2802b8bea09aSQu Wenruo 		}
28034e49ea4aSMike Christie 		submit_bio(bio);
28045a6ac9eaSMiao Xie 	}
28055a6ac9eaSMiao Xie 	/* the actual write will happen once the reads are done */
28065a6ac9eaSMiao Xie 	return;
28075a6ac9eaSMiao Xie 
28085a6ac9eaSMiao Xie cleanup:
280958efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2810785884fcSLiu Bo 
2811785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2812785884fcSLiu Bo 		bio_put(bio);
2813785884fcSLiu Bo 
28145a6ac9eaSMiao Xie 	return;
28155a6ac9eaSMiao Xie 
28165a6ac9eaSMiao Xie finish:
28175a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
28185a6ac9eaSMiao Xie }
28195a6ac9eaSMiao Xie 
2820385de0efSChristoph Hellwig static void scrub_parity_work(struct work_struct *work)
28215a6ac9eaSMiao Xie {
28225a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
28235a6ac9eaSMiao Xie 
28245a6ac9eaSMiao Xie 	rbio = container_of(work, struct btrfs_raid_bio, work);
28255a6ac9eaSMiao Xie 	raid56_parity_scrub_stripe(rbio);
28265a6ac9eaSMiao Xie }
28275a6ac9eaSMiao Xie 
28285a6ac9eaSMiao Xie void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
28295a6ac9eaSMiao Xie {
28305a6ac9eaSMiao Xie 	if (!lock_stripe_add(rbio))
2831a81b747dSDavid Sterba 		start_async_work(rbio, scrub_parity_work);
28325a6ac9eaSMiao Xie }
2833b4ee1782SOmar Sandoval 
2834b4ee1782SOmar Sandoval /* The following code is used for dev replace of a missing RAID 5/6 device. */
2835b4ee1782SOmar Sandoval 
2836b4ee1782SOmar Sandoval struct btrfs_raid_bio *
2837ff18a4afSChristoph Hellwig raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc)
2838b4ee1782SOmar Sandoval {
28396a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
2840b4ee1782SOmar Sandoval 	struct btrfs_raid_bio *rbio;
2841b4ee1782SOmar Sandoval 
2842ff18a4afSChristoph Hellwig 	rbio = alloc_rbio(fs_info, bioc);
2843b4ee1782SOmar Sandoval 	if (IS_ERR(rbio))
2844b4ee1782SOmar Sandoval 		return NULL;
2845b4ee1782SOmar Sandoval 
2846b4ee1782SOmar Sandoval 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2847b4ee1782SOmar Sandoval 	bio_list_add(&rbio->bio_list, bio);
2848b4ee1782SOmar Sandoval 	/*
2849b4ee1782SOmar Sandoval 	 * This is a special bio which is used to hold the completion handler
2850b4ee1782SOmar Sandoval 	 * and make the scrub rbio similar to the other types.
2851b4ee1782SOmar Sandoval 	 */
2852b4ee1782SOmar Sandoval 	ASSERT(!bio->bi_iter.bi_size);
2853b4ee1782SOmar Sandoval 
2854b4ee1782SOmar Sandoval 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2855b4ee1782SOmar Sandoval 	if (rbio->faila == -1) {
2856f15fb2cdSQu Wenruo 		btrfs_warn_rl(fs_info,
2857f15fb2cdSQu Wenruo 	"can not determine the failed stripe number for full stripe %llu",
2858f15fb2cdSQu Wenruo 			      bioc->raid_map[0]);
2859ff2b64a2SQu Wenruo 		free_raid_bio(rbio);
2860b4ee1782SOmar Sandoval 		return NULL;
2861b4ee1782SOmar Sandoval 	}
2862b4ee1782SOmar Sandoval 
2863b4ee1782SOmar Sandoval 	return rbio;
2864b4ee1782SOmar Sandoval }
2865b4ee1782SOmar Sandoval 
2866b4ee1782SOmar Sandoval void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2867b4ee1782SOmar Sandoval {
2868d817ce35SQu Wenruo 	start_async_work(rbio, recover_rbio_work);
2869b4ee1782SOmar Sandoval }
2870