xref: /linux/fs/btrfs/raid56.c (revision ac26df8b3b02101056ca868109d2f24ed396bba8)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS				11

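/*
 * Editor's note (illustrative, not in the original): with 11 table
 * bits the stripe hash table below has 1 << 11 = 2048 buckets, each an
 * independently locked btrfs_stripe_hash list.
 */
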
/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

/*
 * A bvec-like structure to represent a sector inside a page.
 *
 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 */
struct sector_ptr {
	struct page *page;
	unsigned int pgoff:24;
	unsigned int uptodate:8;
};
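/*
 * Editor's note (illustrative, not in the original): pgoff and uptodate
 * share one 32-bit word.  24 bits of pgoff can address offsets up to
 * 16MiB within a page, more than any supported page size needs, and
 * uptodate is really a boolean padded to 8 bits to fill out the word.
 */
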

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_io_context *bioc;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* Size of each individual stripe on disk */
	u32 stripe_len;

	/* How many pages there are for the full stripe including P/Q */
	u16 nr_pages;

	/* How many sectors there are for the full stripe including P/Q */
	u16 nr_sectors;

	/* Number of data stripes (no p/q) */
	u8 nr_data;

	/* Number of all stripes (including P/Q) */
	u8 real_stripes;

	/* How many pages there are for each stripe */
	u8 stripe_npages;

	/* How many sectors there are for each stripe */
	u8 stripe_nsectors;

	/* First bad stripe, -1 means no corruption */
	s8 faila;

	/* Second bad stripe (for RAID6 use) */
	s8 failb;

	/* Stripe number that we're scrubbing */
	u8 scrubp;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;
	/*
	 * These are arrays of pointers.  We allocate the rbio big
	 * enough to hold them all and set up their locations when
	 * the rbio is allocated.
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/* Pointers to the sectors in the bio_list, for faster lookup */
	struct sector_ptr *bio_sectors;

	/*
	 * For subpage support, we need to map each sector to the above
	 * stripe_pages.
	 */
	struct sector_ptr *stripe_sectors;

	/* Bitmap to record which horizontal stripe has data */
	unsigned long *dbitmap;

	/* allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;

	/* Allocated with stripe_nsectors-many bits for finish_*() calls */
	unsigned long *finish_pbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct btrfs_work *work);

static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
	btrfs_init_work(&rbio->work, work_func, NULL, NULL);
	btrfs_queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_sectors array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_sectors; i++) {
		/* Some range not covered by bio (partial write), skip it */
		if (!rbio->bio_sectors[i].page)
			continue;

		ASSERT(rbio->stripe_sectors[i].page);
		memcpy_page(rbio->stripe_sectors[i].page,
			    rbio->stripe_sectors[i].pgoff,
			    rbio->bio_sectors[i].page,
			    rbio->bio_sectors[i].pgoff,
			    rbio->bioc->fs_info->sectorsize);
		rbio->stripe_sectors[i].uptodate = 1;
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bioc->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

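/*
 * Editor's note (illustrative, not in the original): for a full stripe
 * starting at logical offset 1GiB the 16 bits shifted off are all zero
 * anyway, so e.g.:
 *
 *	bucket = hash_64(SZ_1G >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 *
 * yields a well distributed value in [0, 2047].
 */
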
/*
 * Update the stripe_sectors[] array to use correct page and pgoff
 *
 * Should be called every time any page pointer in stripe_pages[] is modified.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	u32 offset;
	int i;

	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
	}
}

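/*
 * Editor's note (illustrative, not in the original): with 4KiB
 * sectorsize and 64KiB pages (subpage), sectors 0-15 all map to
 * stripe_pages[0] with pgoff 0x0000, 0x1000, ..., 0xf000, while with
 * 4KiB pages each sector gets its own page at pgoff 0.
 */
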
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
	index_stripe_sectors(dest);
	index_stripe_sectors(src);
}

3624ae10b3aSChris Mason /*
36353b381b3SDavid Woodhouse  * merging means we take the bio_list from the victim and
36453b381b3SDavid Woodhouse  * splice it into the destination.  The victim should
36553b381b3SDavid Woodhouse  * be discarded afterwards.
36653b381b3SDavid Woodhouse  *
36753b381b3SDavid Woodhouse  * must be called with dest->rbio_list_lock held
36853b381b3SDavid Woodhouse  */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				  struct btrfs_raid_bio,
				  stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

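/*
 * Editor's note (illustrative, not in the original): xor_blocks() takes
 * at most MAX_XOR_BLOCKS sources per call, so e.g. with
 * MAX_XOR_BLOCKS == 4 and src_cnt == 6 the loop above xors sources 0-3
 * into dest, then sources 4-5 into dest, accumulating the same result
 * as a single 6-way xor.
 */
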
/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

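/*
 * Editor's note (illustrative, not in the original): a full stripe
 * spans nr_data * stripe_len bytes of data.  On a 3-device RAID5 with
 * 64KiB stripe_len that is 2 * 64KiB = 128KiB; only writes covering all
 * of it can skip the read/modify/write cycle.
 */
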
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * During a parity scrub we read the full stripe from the drive,
	 * check and repair the parity, and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

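	/*
	 * Editor's note: the swaps below order each (faila, failb) pair
	 * ascending so the comparison does not depend on the order the
	 * failures were recorded; e.g. (2, 1) and (1, 2) describe the
	 * same pair of bad stripes and are allowed to merge.
	 */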
	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}

static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	ASSERT(stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr < rbio->stripe_nsectors);

	return stripe_nr * rbio->stripe_nsectors + sector_nr;
}

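/*
 * Editor's note (illustrative, not in the original): sectors are
 * indexed stripe-major, so with stripe_nsectors == 16 sector 3 of
 * stripe 2 lives at index 2 * 16 + 3 = 35 in stripe_sectors[].
 */
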
/* Return a sector from rbio->stripe_sectors, not from the bio list */
static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
							      sector_nr)];
}

/* Grab a sector inside P stripe */
static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
}

/* Grab a sector inside Q stripe, return NULL if not RAID6 */
static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
}

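/*
 * Editor's note (illustrative, not in the original): within a full
 * stripe the layout is data stripes 0 .. nr_data - 1, then P at index
 * nr_data, then (RAID6 only) Q at index nr_data + 1.  A 4-device RAID6
 * thus has nr_data == 2, P at stripe 2 and Q at stripe 3.
 */
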
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones.  We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bioc(rbio->bioc);
	kfree(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->bioc->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bioc->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/**
 * sector_in_rbio - Get a sector pointer specified by its @stripe_nr and
 * @sector_nr
 *
 * @rbio:               The raid bio
 * @stripe_nr:          Stripe number, valid range [0, real_stripes)
 * @sector_nr:		Sector number inside the stripe,
 *			valid range [0, stripe_nsectors)
 * @bio_list_only:      Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only use stripe_sectors as a fallback.
 */
static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
					 int stripe_nr, int sector_nr,
					 bool bio_list_only)
{
	struct sector_ptr *sector;
	int index;

	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);

	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
	ASSERT(index >= 0 && index < rbio->nr_sectors);

	spin_lock_irq(&rbio->bio_list_lock);
	sector = &rbio->bio_sectors[index];
	if (sector->page || bio_list_only) {
		/* Don't return sector without a valid page pointer */
		if (!sector->page)
			sector = NULL;
		spin_unlock_irq(&rbio->bio_list_lock);
		return sector;
	}
	spin_unlock_irq(&rbio->bio_list_lock);

	return &rbio->stripe_sectors[index];
}

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_io_context *bioc,
					 u32 stripe_len)
{
	const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
	const unsigned int stripe_npages = stripe_len >> PAGE_SHIFT;
	const unsigned int num_pages = stripe_npages * real_stripes;
	const unsigned int stripe_nsectors = stripe_len >> fs_info->sectorsize_bits;
	const unsigned int num_sectors = stripe_nsectors * real_stripes;
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	void *p;

	ASSERT(IS_ALIGNED(stripe_len, PAGE_SIZE));
	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));

	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_sectors) * num_sectors +
		       sizeof(*rbio->stripe_sectors) * num_sectors +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_nsectors) +
		       sizeof(*rbio->finish_pbitmap) * BITS_TO_LONGS(stripe_nsectors),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bioc = bioc;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->nr_sectors = num_sectors;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->stripe_nsectors = stripe_nsectors;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * The stripe_pages, bio_sectors, etc arrays point to the extra memory
	 * we allocated past the end of the rbio.
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_sectors, num_sectors);
	CONSUME_ALLOC(rbio->stripe_sectors, num_sectors);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_nsectors));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_nsectors));
#undef  CONSUME_ALLOC

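	/*
	 * Editor's note (illustrative, not in the original): after
	 * CONSUME_ALLOC the trailing memory is carved up in the order the
	 * macro was invoked, e.g.:
	 *
	 *	| struct btrfs_raid_bio | stripe_pages | bio_sectors |
	 *	| stripe_sectors | finish_pointers | dbitmap | finish_pbitmap |
	 *
	 * which is why the single kzalloc() above sizes all of them at once.
	 */
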
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
	if (ret < 0)
		return ret;
	/* Mapping all sectors */
	index_stripe_sectors(rbio);
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
				     rbio->stripe_pages + data_pages);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

110753b381b3SDavid Woodhouse /*
11083e77605dSQu Wenruo  * Add a single sector @sector into our list of bios for IO.
11093e77605dSQu Wenruo  *
11103e77605dSQu Wenruo  * Return 0 if everything went well.
11113e77605dSQu Wenruo  * Return <0 for error.
111253b381b3SDavid Woodhouse  */
11133e77605dSQu Wenruo static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
111453b381b3SDavid Woodhouse 			      struct bio_list *bio_list,
11153e77605dSQu Wenruo 			      struct sector_ptr *sector,
11163e77605dSQu Wenruo 			      unsigned int stripe_nr,
11173e77605dSQu Wenruo 			      unsigned int sector_nr,
1118e01bf588SChristoph Hellwig 			      unsigned long bio_max_len,
1119e01bf588SChristoph Hellwig 			      unsigned int opf)
112053b381b3SDavid Woodhouse {
11213e77605dSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
112253b381b3SDavid Woodhouse 	struct bio *last = bio_list->tail;
112353b381b3SDavid Woodhouse 	int ret;
112453b381b3SDavid Woodhouse 	struct bio *bio;
11254c664611SQu Wenruo 	struct btrfs_io_stripe *stripe;
112653b381b3SDavid Woodhouse 	u64 disk_start;
112753b381b3SDavid Woodhouse 
11283e77605dSQu Wenruo 	/*
11293e77605dSQu Wenruo 	 * Note: here stripe_nr has taken device replace into consideration,
11303e77605dSQu Wenruo 	 * thus it can be larger than rbio->real_stripes.
11313e77605dSQu Wenruo 	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
11323e77605dSQu Wenruo 	 */
11333e77605dSQu Wenruo 	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
11343e77605dSQu Wenruo 	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
11353e77605dSQu Wenruo 	ASSERT(sector->page);
11363e77605dSQu Wenruo 
11373e77605dSQu Wenruo 	/* We don't yet support subpage, thus pgoff should always be 0 */
11383e77605dSQu Wenruo 	ASSERT(sector->pgoff == 0);
11393e77605dSQu Wenruo 
11404c664611SQu Wenruo 	stripe = &rbio->bioc->stripes[stripe_nr];
11413e77605dSQu Wenruo 	disk_start = stripe->physical + sector_nr * sectorsize;
114253b381b3SDavid Woodhouse 
114353b381b3SDavid Woodhouse 	/* if the device is missing, just fail this stripe */
114453b381b3SDavid Woodhouse 	if (!stripe->dev->bdev)
114553b381b3SDavid Woodhouse 		return fail_rbio_index(rbio, stripe_nr);
114653b381b3SDavid Woodhouse 
114753b381b3SDavid Woodhouse 	/* see if we can add this page onto our existing bio */
114853b381b3SDavid Woodhouse 	if (last) {
11491201b58bSDavid Sterba 		u64 last_end = last->bi_iter.bi_sector << 9;
11504f024f37SKent Overstreet 		last_end += last->bi_iter.bi_size;
115153b381b3SDavid Woodhouse 
115253b381b3SDavid Woodhouse 		/*
115353b381b3SDavid Woodhouse 		 * we can't merge these if they are from different
115453b381b3SDavid Woodhouse 		 * devices or if they are not contiguous
115553b381b3SDavid Woodhouse 		 */
1156f90ae76aSNikolay Borisov 		if (last_end == disk_start && !last->bi_status &&
1157309dca30SChristoph Hellwig 		    last->bi_bdev == stripe->dev->bdev) {
11583e77605dSQu Wenruo 			ret = bio_add_page(last, sector->page, sectorsize,
11593e77605dSQu Wenruo 					   sector->pgoff);
11603e77605dSQu Wenruo 			if (ret == sectorsize)
116153b381b3SDavid Woodhouse 				return 0;
116253b381b3SDavid Woodhouse 		}
116353b381b3SDavid Woodhouse 	}
116453b381b3SDavid Woodhouse 
116553b381b3SDavid Woodhouse 	/* put a new bio on the list */
1166e1b4b44eSChristoph Hellwig 	bio = bio_alloc(stripe->dev->bdev, max(bio_max_len >> PAGE_SHIFT, 1UL),
1167e1b4b44eSChristoph Hellwig 			opf, GFP_NOFS);
11684f024f37SKent Overstreet 	bio->bi_iter.bi_sector = disk_start >> 9;
1169e01bf588SChristoph Hellwig 	bio->bi_private = rbio;
117053b381b3SDavid Woodhouse 
11713e77605dSQu Wenruo 	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
117253b381b3SDavid Woodhouse 	bio_list_add(bio_list, bio);
117353b381b3SDavid Woodhouse 	return 0;
117453b381b3SDavid Woodhouse }
117553b381b3SDavid Woodhouse 
117653b381b3SDavid Woodhouse /*
117753b381b3SDavid Woodhouse  * while we're doing the read/modify/write cycle, we could
117853b381b3SDavid Woodhouse  * have errors reading pages off the disk.  This checks for
117953b381b3SDavid Woodhouse  * errors, and if we're not able to read a page it will
118053b381b3SDavid Woodhouse  * trigger parity reconstruction.  The rmw will be finished
118153b381b3SDavid Woodhouse  * after we've reconstructed the failed stripes.
118253b381b3SDavid Woodhouse  */
118353b381b3SDavid Woodhouse static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
118453b381b3SDavid Woodhouse {
118553b381b3SDavid Woodhouse 	if (rbio->faila >= 0 || rbio->failb >= 0) {
11862c8cdd6eSMiao Xie 		BUG_ON(rbio->faila == rbio->real_stripes - 1);
118753b381b3SDavid Woodhouse 		__raid56_parity_recover(rbio);
118853b381b3SDavid Woodhouse 	} else {
118953b381b3SDavid Woodhouse 		finish_rmw(rbio);
119053b381b3SDavid Woodhouse 	}
119153b381b3SDavid Woodhouse }
119253b381b3SDavid Woodhouse 
119300425dd9SQu Wenruo static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
119400425dd9SQu Wenruo {
119500425dd9SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
119600425dd9SQu Wenruo 	struct bio_vec bvec;
119700425dd9SQu Wenruo 	struct bvec_iter iter;
119800425dd9SQu Wenruo 	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
119900425dd9SQu Wenruo 		     rbio->bioc->raid_map[0];
120000425dd9SQu Wenruo 
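	/*
	 * A cloned bio's bi_iter may already be advanced; restore the
	 * iterator saved at submit time so we walk the full payload.
	 */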
120100425dd9SQu Wenruo 	if (bio_flagged(bio, BIO_CLONED))
120200425dd9SQu Wenruo 		bio->bi_iter = btrfs_bio(bio)->iter;
120300425dd9SQu Wenruo 
120400425dd9SQu Wenruo 	bio_for_each_segment(bvec, bio, iter) {
120500425dd9SQu Wenruo 		u32 bvec_offset;
120600425dd9SQu Wenruo 
120700425dd9SQu Wenruo 		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
120800425dd9SQu Wenruo 		     bvec_offset += sectorsize, offset += sectorsize) {
120900425dd9SQu Wenruo 			int index = offset / sectorsize;
121000425dd9SQu Wenruo 			struct sector_ptr *sector = &rbio->bio_sectors[index];
121100425dd9SQu Wenruo 
121200425dd9SQu Wenruo 			sector->page = bvec.bv_page;
121300425dd9SQu Wenruo 			sector->pgoff = bvec.bv_offset + bvec_offset;
121400425dd9SQu Wenruo 			ASSERT(sector->pgoff < PAGE_SIZE);
121500425dd9SQu Wenruo 		}
121600425dd9SQu Wenruo 	}
121700425dd9SQu Wenruo }
121800425dd9SQu Wenruo 
121953b381b3SDavid Woodhouse /*
122053b381b3SDavid Woodhouse  * helper function to walk our bio list and populate the bio_sectors array
122153b381b3SDavid Woodhouse  * with the result.  This seems expensive, but it is faster than constantly
122253b381b3SDavid Woodhouse  * searching through the bio list as we set up the IO in finish_rmw or stripe
122353b381b3SDavid Woodhouse  * reconstruction.
122453b381b3SDavid Woodhouse  *
122553b381b3SDavid Woodhouse  * This must be called before you trust the answers from sector_in_rbio()
122653b381b3SDavid Woodhouse  */
122753b381b3SDavid Woodhouse static void index_rbio_pages(struct btrfs_raid_bio *rbio)
122853b381b3SDavid Woodhouse {
122953b381b3SDavid Woodhouse 	struct bio *bio;
123053b381b3SDavid Woodhouse 
123153b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
123200425dd9SQu Wenruo 	bio_list_for_each(bio, &rbio->bio_list)
123300425dd9SQu Wenruo 		index_one_bio(rbio, bio);
123400425dd9SQu Wenruo 
123553b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
123653b381b3SDavid Woodhouse }
123753b381b3SDavid Woodhouse 
123853b381b3SDavid Woodhouse /*
123953b381b3SDavid Woodhouse  * this is called from one of two situations.  We either
124053b381b3SDavid Woodhouse  * have a full stripe from the higher layers, or we've read all
124153b381b3SDavid Woodhouse  * the missing bits off disk.
124253b381b3SDavid Woodhouse  *
124353b381b3SDavid Woodhouse  * This will calculate the parity and then send down any
124453b381b3SDavid Woodhouse  * changed blocks.
124553b381b3SDavid Woodhouse  */
124653b381b3SDavid Woodhouse static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
124753b381b3SDavid Woodhouse {
12484c664611SQu Wenruo 	struct btrfs_io_context *bioc = rbio->bioc;
12491145059aSQu Wenruo 	const u32 sectorsize = bioc->fs_info->sectorsize;
12501389053eSKees Cook 	void **pointers = rbio->finish_pointers;
125153b381b3SDavid Woodhouse 	int nr_data = rbio->nr_data;
125253b381b3SDavid Woodhouse 	int stripe;
12533e77605dSQu Wenruo 	int sectornr;
1254c17af965SDavid Sterba 	bool has_qstripe;
125553b381b3SDavid Woodhouse 	struct bio_list bio_list;
125653b381b3SDavid Woodhouse 	struct bio *bio;
125753b381b3SDavid Woodhouse 	int ret;
125853b381b3SDavid Woodhouse 
125953b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
126053b381b3SDavid Woodhouse 
1261c17af965SDavid Sterba 	if (rbio->real_stripes - rbio->nr_data == 1)
1262c17af965SDavid Sterba 		has_qstripe = false;
1263c17af965SDavid Sterba 	else if (rbio->real_stripes - rbio->nr_data == 2)
1264c17af965SDavid Sterba 		has_qstripe = true;
1265c17af965SDavid Sterba 	else
126653b381b3SDavid Woodhouse 		BUG();
126753b381b3SDavid Woodhouse 
126853b381b3SDavid Woodhouse 	/* at this point we either have a full stripe,
126953b381b3SDavid Woodhouse 	 * or we've read the full stripe from the drive.
127053b381b3SDavid Woodhouse 	 * recalculate the parity and write the new results.
127153b381b3SDavid Woodhouse 	 *
127253b381b3SDavid Woodhouse 	 * We're not allowed to add any new bios to the
127353b381b3SDavid Woodhouse 	 * bio list here, anyone else that wants to
127453b381b3SDavid Woodhouse 	 * change this stripe needs to do their own rmw.
127553b381b3SDavid Woodhouse 	 */
127653b381b3SDavid Woodhouse 	spin_lock_irq(&rbio->bio_list_lock);
127753b381b3SDavid Woodhouse 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
127853b381b3SDavid Woodhouse 	spin_unlock_irq(&rbio->bio_list_lock);
127953b381b3SDavid Woodhouse 
1280b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
128153b381b3SDavid Woodhouse 
128253b381b3SDavid Woodhouse 	/*
128353b381b3SDavid Woodhouse 	 * now that we've set rmw_locked, run through the
128453b381b3SDavid Woodhouse 	 * bio list one last time and map the page pointers
12854ae10b3aSChris Mason 	 *
12864ae10b3aSChris Mason 	 * We don't cache full rbios because we're assuming
12874ae10b3aSChris Mason 	 * the higher layers are unlikely to use this area of
12884ae10b3aSChris Mason 	 * the disk again soon.  If they do use it again,
12894ae10b3aSChris Mason 	 * hopefully they will send another full bio.
129053b381b3SDavid Woodhouse 	 */
129153b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
12924ae10b3aSChris Mason 	if (!rbio_is_full(rbio))
12934ae10b3aSChris Mason 		cache_rbio_pages(rbio);
12944ae10b3aSChris Mason 	else
12954ae10b3aSChris Mason 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
129653b381b3SDavid Woodhouse 
12973e77605dSQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
12981145059aSQu Wenruo 		struct sector_ptr *sector;
12991145059aSQu Wenruo 
13001145059aSQu Wenruo 		/* First collect one sector from each data stripe */
130153b381b3SDavid Woodhouse 		for (stripe = 0; stripe < nr_data; stripe++) {
13021145059aSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
13031145059aSQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
13041145059aSQu Wenruo 					   sector->pgoff;
130553b381b3SDavid Woodhouse 		}
130653b381b3SDavid Woodhouse 
13071145059aSQu Wenruo 		/* Then add the parity stripe */
13081145059aSQu Wenruo 		sector = rbio_pstripe_sector(rbio, sectornr);
13091145059aSQu Wenruo 		sector->uptodate = 1;
13101145059aSQu Wenruo 		pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;
131153b381b3SDavid Woodhouse 
1312c17af965SDavid Sterba 		if (has_qstripe) {
131353b381b3SDavid Woodhouse 			/*
13141145059aSQu Wenruo 			 * RAID6, add the qstripe and call the library function
13151145059aSQu Wenruo 			 * to fill in our p/q
131653b381b3SDavid Woodhouse 			 */
13171145059aSQu Wenruo 			sector = rbio_qstripe_sector(rbio, sectornr);
13181145059aSQu Wenruo 			sector->uptodate = 1;
13191145059aSQu Wenruo 			pointers[stripe++] = kmap_local_page(sector->page) +
13201145059aSQu Wenruo 					     sector->pgoff;
132153b381b3SDavid Woodhouse 
13221145059aSQu Wenruo 			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
132353b381b3SDavid Woodhouse 						pointers);
132453b381b3SDavid Woodhouse 		} else {
132553b381b3SDavid Woodhouse 			/* raid5 */
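			/*
			 * P[i] = D0[i] ^ D1[i] ^ ... ^ Dn-1[i]: seed the
			 * parity buffer with a copy of the first data
			 * stripe, then xor in the remaining nr_data - 1.
			 */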
13261145059aSQu Wenruo 			memcpy(pointers[nr_data], pointers[0], sectorsize);
13271145059aSQu Wenruo 			run_xor(pointers + 1, nr_data - 1, sectorsize);
132853b381b3SDavid Woodhouse 		}
132994a0b58dSIra Weiny 		for (stripe = stripe - 1; stripe >= 0; stripe--)
133094a0b58dSIra Weiny 			kunmap_local(pointers[stripe]);
133153b381b3SDavid Woodhouse 	}
133253b381b3SDavid Woodhouse 
133353b381b3SDavid Woodhouse 	/*
133453b381b3SDavid Woodhouse 	 * time to start writing.  Make bios for everything from the
133553b381b3SDavid Woodhouse 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
133653b381b3SDavid Woodhouse 	 * everything else.
133753b381b3SDavid Woodhouse 	 */
13382c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
13393e77605dSQu Wenruo 		for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
13403e77605dSQu Wenruo 			struct sector_ptr *sector;
13413e77605dSQu Wenruo 
134253b381b3SDavid Woodhouse 			if (stripe < rbio->nr_data) {
13433e77605dSQu Wenruo 				sector = sector_in_rbio(rbio, stripe, sectornr, 1);
13443e77605dSQu Wenruo 				if (!sector)
134553b381b3SDavid Woodhouse 					continue;
134653b381b3SDavid Woodhouse 			} else {
13473e77605dSQu Wenruo 				sector = rbio_stripe_sector(rbio, stripe, sectornr);
134853b381b3SDavid Woodhouse 			}
134953b381b3SDavid Woodhouse 
13503e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
13513e77605dSQu Wenruo 						 sectornr, rbio->stripe_len,
1352e01bf588SChristoph Hellwig 						 REQ_OP_WRITE);
135353b381b3SDavid Woodhouse 			if (ret)
135453b381b3SDavid Woodhouse 				goto cleanup;
135553b381b3SDavid Woodhouse 		}
135653b381b3SDavid Woodhouse 	}
135753b381b3SDavid Woodhouse 
13584c664611SQu Wenruo 	if (likely(!bioc->num_tgtdevs))
13592c8cdd6eSMiao Xie 		goto write_data;
13602c8cdd6eSMiao Xie 
13612c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
13624c664611SQu Wenruo 		if (!bioc->tgtdev_map[stripe])
13632c8cdd6eSMiao Xie 			continue;
13642c8cdd6eSMiao Xie 
13653e77605dSQu Wenruo 		for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
13663e77605dSQu Wenruo 			struct sector_ptr *sector;
13673e77605dSQu Wenruo 
13682c8cdd6eSMiao Xie 			if (stripe < rbio->nr_data) {
13693e77605dSQu Wenruo 				sector = sector_in_rbio(rbio, stripe, sectornr, 1);
13703e77605dSQu Wenruo 				if (!sector)
13712c8cdd6eSMiao Xie 					continue;
13722c8cdd6eSMiao Xie 			} else {
13733e77605dSQu Wenruo 				sector = rbio_stripe_sector(rbio, stripe, sectornr);
13742c8cdd6eSMiao Xie 			}
13752c8cdd6eSMiao Xie 
13763e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector,
13774c664611SQu Wenruo 					       rbio->bioc->tgtdev_map[stripe],
13783e77605dSQu Wenruo 					       sectornr, rbio->stripe_len,
1379e01bf588SChristoph Hellwig 					       REQ_OP_WRITE);
13802c8cdd6eSMiao Xie 			if (ret)
13812c8cdd6eSMiao Xie 				goto cleanup;
13822c8cdd6eSMiao Xie 		}
13832c8cdd6eSMiao Xie 	}
13842c8cdd6eSMiao Xie 
13852c8cdd6eSMiao Xie write_data:
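	/*
	 * Every bio popped below drops stripes_pending when it completes;
	 * the last completion ends the original bio in raid_write_end_io().
	 */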
1386b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1387b89e1b01SMiao Xie 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
138853b381b3SDavid Woodhouse 
1389bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
139053b381b3SDavid Woodhouse 		bio->bi_end_io = raid_write_end_io;
13914e49ea4aSMike Christie 
13924e49ea4aSMike Christie 		submit_bio(bio);
139353b381b3SDavid Woodhouse 	}
139453b381b3SDavid Woodhouse 	return;
139553b381b3SDavid Woodhouse 
139653b381b3SDavid Woodhouse cleanup:
139758efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1398785884fcSLiu Bo 
1399785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
1400785884fcSLiu Bo 		bio_put(bio);
140153b381b3SDavid Woodhouse }
140253b381b3SDavid Woodhouse 
140353b381b3SDavid Woodhouse /*
140453b381b3SDavid Woodhouse  * helper to find the stripe number for a given bio.  Used to figure out which
140553b381b3SDavid Woodhouse  * stripe has failed.  This expects the bio to correspond to a physical disk,
140653b381b3SDavid Woodhouse  * so it looks up based on physical sector numbers.
140753b381b3SDavid Woodhouse  */
140853b381b3SDavid Woodhouse static int find_bio_stripe(struct btrfs_raid_bio *rbio,
140953b381b3SDavid Woodhouse 			   struct bio *bio)
141053b381b3SDavid Woodhouse {
14114f024f37SKent Overstreet 	u64 physical = bio->bi_iter.bi_sector;
141253b381b3SDavid Woodhouse 	int i;
14134c664611SQu Wenruo 	struct btrfs_io_stripe *stripe;
141453b381b3SDavid Woodhouse 
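	/* bi_sector counts 512-byte sectors; convert to a byte offset */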
141553b381b3SDavid Woodhouse 	physical <<= 9;
141653b381b3SDavid Woodhouse 
14174c664611SQu Wenruo 	for (i = 0; i < rbio->bioc->num_stripes; i++) {
14184c664611SQu Wenruo 		stripe = &rbio->bioc->stripes[i];
141983025863SNikolay Borisov 		if (in_range(physical, stripe->physical, rbio->stripe_len) &&
1420309dca30SChristoph Hellwig 		    stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
142153b381b3SDavid Woodhouse 			return i;
142253b381b3SDavid Woodhouse 		}
142353b381b3SDavid Woodhouse 	}
142453b381b3SDavid Woodhouse 	return -1;
142553b381b3SDavid Woodhouse }
142653b381b3SDavid Woodhouse 
142753b381b3SDavid Woodhouse /*
142853b381b3SDavid Woodhouse  * helper to find the stripe number for a given
142953b381b3SDavid Woodhouse  * bio (before mapping).  Used to figure out which stripe has
143053b381b3SDavid Woodhouse  * failed.  This looks up based on logical block numbers.
143153b381b3SDavid Woodhouse  */
143253b381b3SDavid Woodhouse static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
143353b381b3SDavid Woodhouse 				   struct bio *bio)
143453b381b3SDavid Woodhouse {
14351201b58bSDavid Sterba 	u64 logical = bio->bi_iter.bi_sector << 9;
143653b381b3SDavid Woodhouse 	int i;
143753b381b3SDavid Woodhouse 
143853b381b3SDavid Woodhouse 	for (i = 0; i < rbio->nr_data; i++) {
14394c664611SQu Wenruo 		u64 stripe_start = rbio->bioc->raid_map[i];
144083025863SNikolay Borisov 
144183025863SNikolay Borisov 		if (in_range(logical, stripe_start, rbio->stripe_len))
144253b381b3SDavid Woodhouse 			return i;
144353b381b3SDavid Woodhouse 	}
144453b381b3SDavid Woodhouse 	return -1;
144553b381b3SDavid Woodhouse }
144653b381b3SDavid Woodhouse 
144753b381b3SDavid Woodhouse /*
144853b381b3SDavid Woodhouse  * returns -EIO if we had too many failures
144953b381b3SDavid Woodhouse  */
145053b381b3SDavid Woodhouse static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
145153b381b3SDavid Woodhouse {
145253b381b3SDavid Woodhouse 	unsigned long flags;
145353b381b3SDavid Woodhouse 	int ret = 0;
145453b381b3SDavid Woodhouse 
145553b381b3SDavid Woodhouse 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
145653b381b3SDavid Woodhouse 
145753b381b3SDavid Woodhouse 	/* we already know this stripe is bad, move on */
145853b381b3SDavid Woodhouse 	if (rbio->faila == failed || rbio->failb == failed)
145953b381b3SDavid Woodhouse 		goto out;
146053b381b3SDavid Woodhouse 
146153b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
146253b381b3SDavid Woodhouse 		/* first failure on this rbio */
146353b381b3SDavid Woodhouse 		rbio->faila = failed;
1464b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
146553b381b3SDavid Woodhouse 	} else if (rbio->failb == -1) {
146653b381b3SDavid Woodhouse 		/* second failure on this rbio */
146753b381b3SDavid Woodhouse 		rbio->failb = failed;
1468b89e1b01SMiao Xie 		atomic_inc(&rbio->error);
146953b381b3SDavid Woodhouse 	} else {
147053b381b3SDavid Woodhouse 		ret = -EIO;
147153b381b3SDavid Woodhouse 	}
147253b381b3SDavid Woodhouse out:
147353b381b3SDavid Woodhouse 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
147453b381b3SDavid Woodhouse 
147553b381b3SDavid Woodhouse 	return ret;
147653b381b3SDavid Woodhouse }
147753b381b3SDavid Woodhouse 
147853b381b3SDavid Woodhouse /*
147953b381b3SDavid Woodhouse  * helper to fail a stripe based on a physical disk
148053b381b3SDavid Woodhouse  * bio.
148153b381b3SDavid Woodhouse  */
148253b381b3SDavid Woodhouse static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
148353b381b3SDavid Woodhouse 			   struct bio *bio)
148453b381b3SDavid Woodhouse {
148553b381b3SDavid Woodhouse 	int failed = find_bio_stripe(rbio, bio);
148653b381b3SDavid Woodhouse 
148753b381b3SDavid Woodhouse 	if (failed < 0)
148853b381b3SDavid Woodhouse 		return -EIO;
148953b381b3SDavid Woodhouse 
149053b381b3SDavid Woodhouse 	return fail_rbio_index(rbio, failed);
149153b381b3SDavid Woodhouse }
149253b381b3SDavid Woodhouse 
149353b381b3SDavid Woodhouse /*
149453b381b3SDavid Woodhouse  * this sets each page in the bio uptodate.  It should only be used on private
149553b381b3SDavid Woodhouse  * rbio pages, nothing that comes in from the higher layers
149653b381b3SDavid Woodhouse  */
149753b381b3SDavid Woodhouse static void set_bio_pages_uptodate(struct bio *bio)
149853b381b3SDavid Woodhouse {
14990198e5b7SLiu Bo 	struct bio_vec *bvec;
15006dc4f100SMing Lei 	struct bvec_iter_all iter_all;
150153b381b3SDavid Woodhouse 
15020198e5b7SLiu Bo 	ASSERT(!bio_flagged(bio, BIO_CLONED));
15036592e58cSFilipe Manana 
15042b070cfeSChristoph Hellwig 	bio_for_each_segment_all(bvec, bio, iter_all)
15050198e5b7SLiu Bo 		SetPageUptodate(bvec->bv_page);
150653b381b3SDavid Woodhouse }
150753b381b3SDavid Woodhouse 
150853b381b3SDavid Woodhouse /*
150953b381b3SDavid Woodhouse  * end io for the read phase of the rmw cycle.  All the bios here are physical
151053b381b3SDavid Woodhouse  * stripe bios we've read from the disk so we can recalculate the parity of the
151153b381b3SDavid Woodhouse  * stripe.
151253b381b3SDavid Woodhouse  *
151353b381b3SDavid Woodhouse  * This will usually kick off finish_rmw once all the bios are read in, but it
151453b381b3SDavid Woodhouse  * may trigger parity reconstruction if we had any errors along the way
151553b381b3SDavid Woodhouse  */
15164246a0b6SChristoph Hellwig static void raid_rmw_end_io(struct bio *bio)
151753b381b3SDavid Woodhouse {
151853b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio = bio->bi_private;
151953b381b3SDavid Woodhouse 
15204e4cbee9SChristoph Hellwig 	if (bio->bi_status)
152153b381b3SDavid Woodhouse 		fail_bio_stripe(rbio, bio);
152253b381b3SDavid Woodhouse 	else
152353b381b3SDavid Woodhouse 		set_bio_pages_uptodate(bio);
152453b381b3SDavid Woodhouse 
152553b381b3SDavid Woodhouse 	bio_put(bio);
152653b381b3SDavid Woodhouse 
1527b89e1b01SMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
152853b381b3SDavid Woodhouse 		return;
152953b381b3SDavid Woodhouse 
15304c664611SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
153153b381b3SDavid Woodhouse 		goto cleanup;
153253b381b3SDavid Woodhouse 
153353b381b3SDavid Woodhouse 	/*
153453b381b3SDavid Woodhouse 	 * this will normally call finish_rmw to start our write
153553b381b3SDavid Woodhouse 	 * but if there are any failed stripes we'll reconstruct
153653b381b3SDavid Woodhouse 	 * from parity first
153753b381b3SDavid Woodhouse 	 */
153853b381b3SDavid Woodhouse 	validate_rbio_for_rmw(rbio);
153953b381b3SDavid Woodhouse 	return;
154053b381b3SDavid Woodhouse 
154153b381b3SDavid Woodhouse cleanup:
154253b381b3SDavid Woodhouse 
154358efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
154453b381b3SDavid Woodhouse }
154553b381b3SDavid Woodhouse 
154653b381b3SDavid Woodhouse /*
154753b381b3SDavid Woodhouse  * the stripe must be locked by the caller.  It will
154853b381b3SDavid Woodhouse  * unlock after all the writes are done
154953b381b3SDavid Woodhouse  */
155053b381b3SDavid Woodhouse static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
155153b381b3SDavid Woodhouse {
155253b381b3SDavid Woodhouse 	int bios_to_read = 0;
155353b381b3SDavid Woodhouse 	struct bio_list bio_list;
155453b381b3SDavid Woodhouse 	int ret;
15553e77605dSQu Wenruo 	int sectornr;
155653b381b3SDavid Woodhouse 	int stripe;
155753b381b3SDavid Woodhouse 	struct bio *bio;
155853b381b3SDavid Woodhouse 
155953b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
156053b381b3SDavid Woodhouse 
156153b381b3SDavid Woodhouse 	ret = alloc_rbio_pages(rbio);
156253b381b3SDavid Woodhouse 	if (ret)
156353b381b3SDavid Woodhouse 		goto cleanup;
156453b381b3SDavid Woodhouse 
156553b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
156653b381b3SDavid Woodhouse 
1567b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
156853b381b3SDavid Woodhouse 	/*
156953b381b3SDavid Woodhouse 	 * build a list of bios to read all the missing parts of this
157053b381b3SDavid Woodhouse 	 * stripe
157153b381b3SDavid Woodhouse 	 */
157253b381b3SDavid Woodhouse 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
15733e77605dSQu Wenruo 		for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
15743e77605dSQu Wenruo 			struct sector_ptr *sector;
15753e77605dSQu Wenruo 
157653b381b3SDavid Woodhouse 			/*
15773e77605dSQu Wenruo 			 * We want to find all the sectors missing from the
15783e77605dSQu Wenruo 			 * rbio and read them from the disk.  If sector_in_rbio()
15793e77605dSQu Wenruo 			 * finds a sector in the bio list we don't need to read
15803e77605dSQu Wenruo 			 * it off the stripe.
158153b381b3SDavid Woodhouse 			 */
15823e77605dSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
15833e77605dSQu Wenruo 			if (sector)
158453b381b3SDavid Woodhouse 				continue;
158553b381b3SDavid Woodhouse 
15863e77605dSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
15874ae10b3aSChris Mason 			/*
15883e77605dSQu Wenruo 			 * The bio cache may have handed us an uptodate page.
15893e77605dSQu Wenruo 			 * If so, be happy and use it.
15904ae10b3aSChris Mason 			 */
15913e77605dSQu Wenruo 			if (sector->uptodate)
15924ae10b3aSChris Mason 				continue;
15934ae10b3aSChris Mason 
15943e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector,
15953e77605dSQu Wenruo 				       stripe, sectornr, rbio->stripe_len,
1596e01bf588SChristoph Hellwig 				       REQ_OP_READ);
159753b381b3SDavid Woodhouse 			if (ret)
159853b381b3SDavid Woodhouse 				goto cleanup;
159953b381b3SDavid Woodhouse 		}
160053b381b3SDavid Woodhouse 	}
160153b381b3SDavid Woodhouse 
160253b381b3SDavid Woodhouse 	bios_to_read = bio_list_size(&bio_list);
160353b381b3SDavid Woodhouse 	if (!bios_to_read) {
160453b381b3SDavid Woodhouse 		/*
160553b381b3SDavid Woodhouse 		 * this can happen if others have merged with
160653b381b3SDavid Woodhouse 		 * us; it means there is nothing left to read.
160753b381b3SDavid Woodhouse 		 * But if there are missing devices it may not be
160853b381b3SDavid Woodhouse 		 * safe to do the full stripe write yet.
160953b381b3SDavid Woodhouse 		 */
161053b381b3SDavid Woodhouse 		goto finish;
161153b381b3SDavid Woodhouse 	}
161253b381b3SDavid Woodhouse 
161353b381b3SDavid Woodhouse 	/*
16144c664611SQu Wenruo 	 * The bioc may be freed once we submit the last bio. Make sure not to
16154c664611SQu Wenruo 	 * touch it after that.
161653b381b3SDavid Woodhouse 	 */
1617b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
1618bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
161953b381b3SDavid Woodhouse 		bio->bi_end_io = raid_rmw_end_io;
162053b381b3SDavid Woodhouse 
16216a258d72SQu Wenruo 		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
162253b381b3SDavid Woodhouse 
16234e49ea4aSMike Christie 		submit_bio(bio);
162453b381b3SDavid Woodhouse 	}
162553b381b3SDavid Woodhouse 	/* the actual write will happen once the reads are done */
162653b381b3SDavid Woodhouse 	return 0;
162753b381b3SDavid Woodhouse 
162853b381b3SDavid Woodhouse cleanup:
162958efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1630785884fcSLiu Bo 
1631785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
1632785884fcSLiu Bo 		bio_put(bio);
1633785884fcSLiu Bo 
163453b381b3SDavid Woodhouse 	return -EIO;
163553b381b3SDavid Woodhouse 
163653b381b3SDavid Woodhouse finish:
163753b381b3SDavid Woodhouse 	validate_rbio_for_rmw(rbio);
163853b381b3SDavid Woodhouse 	return 0;
163953b381b3SDavid Woodhouse }
164053b381b3SDavid Woodhouse 
164153b381b3SDavid Woodhouse /*
164253b381b3SDavid Woodhouse  * if the upper layers pass in a full stripe, we thank them by only allocating
164353b381b3SDavid Woodhouse  * enough pages to hold the parity, and sending it all down quickly.
164453b381b3SDavid Woodhouse  */
164553b381b3SDavid Woodhouse static int full_stripe_write(struct btrfs_raid_bio *rbio)
164653b381b3SDavid Woodhouse {
164753b381b3SDavid Woodhouse 	int ret;
164853b381b3SDavid Woodhouse 
164953b381b3SDavid Woodhouse 	ret = alloc_rbio_parity_pages(rbio);
16503cd846d1SMiao Xie 	if (ret) {
16513cd846d1SMiao Xie 		__free_raid_bio(rbio);
165253b381b3SDavid Woodhouse 		return ret;
16533cd846d1SMiao Xie 	}
165453b381b3SDavid Woodhouse 
165553b381b3SDavid Woodhouse 	ret = lock_stripe_add(rbio);
165653b381b3SDavid Woodhouse 	if (ret == 0)
165753b381b3SDavid Woodhouse 		finish_rmw(rbio);
165853b381b3SDavid Woodhouse 	return 0;
165953b381b3SDavid Woodhouse }
166053b381b3SDavid Woodhouse 
166153b381b3SDavid Woodhouse /*
166253b381b3SDavid Woodhouse  * partial stripe writes get handed over to async helpers.
166353b381b3SDavid Woodhouse  * We're really hoping to merge a few more writes into this
166453b381b3SDavid Woodhouse  * rbio before calculating new parity
166553b381b3SDavid Woodhouse  */
166653b381b3SDavid Woodhouse static int partial_stripe_write(struct btrfs_raid_bio *rbio)
166753b381b3SDavid Woodhouse {
166853b381b3SDavid Woodhouse 	int ret;
166953b381b3SDavid Woodhouse 
167053b381b3SDavid Woodhouse 	ret = lock_stripe_add(rbio);
167153b381b3SDavid Woodhouse 	if (ret == 0)
1672cf6a4a75SDavid Sterba 		start_async_work(rbio, rmw_work);
167353b381b3SDavid Woodhouse 	return 0;
167453b381b3SDavid Woodhouse }
167553b381b3SDavid Woodhouse 
167653b381b3SDavid Woodhouse /*
167753b381b3SDavid Woodhouse  * sometimes while we're reading from the drive to
167853b381b3SDavid Woodhouse  * recalculate parity, enough new bios come in to create
167953b381b3SDavid Woodhouse  * a full stripe.  So we do a check here to see if we can
168053b381b3SDavid Woodhouse  * go directly to finish_rmw
168153b381b3SDavid Woodhouse  */
168253b381b3SDavid Woodhouse static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
168353b381b3SDavid Woodhouse {
168453b381b3SDavid Woodhouse 	/* head off into rmw land if we don't have a full stripe */
168553b381b3SDavid Woodhouse 	if (!rbio_is_full(rbio))
168653b381b3SDavid Woodhouse 		return partial_stripe_write(rbio);
168753b381b3SDavid Woodhouse 	return full_stripe_write(rbio);
168853b381b3SDavid Woodhouse }
168953b381b3SDavid Woodhouse 
169053b381b3SDavid Woodhouse /*
16916ac0f488SChris Mason  * We use plugging callbacks to collect full stripes.
16926ac0f488SChris Mason  * Any time we get a partial stripe write while plugged
16936ac0f488SChris Mason  * we collect it into a list.  When the unplug comes down,
16946ac0f488SChris Mason  * we sort the list by logical block number and merge
16956ac0f488SChris Mason  * everything we can into the same rbios
16966ac0f488SChris Mason  */
16976ac0f488SChris Mason struct btrfs_plug_cb {
16986ac0f488SChris Mason 	struct blk_plug_cb cb;
16996ac0f488SChris Mason 	struct btrfs_fs_info *info;
17006ac0f488SChris Mason 	struct list_head rbio_list;
17016ac0f488SChris Mason 	struct btrfs_work work;
17026ac0f488SChris Mason };
17036ac0f488SChris Mason 
17046ac0f488SChris Mason /*
17056ac0f488SChris Mason  * rbios on the plug list are sorted for easier merging.
17066ac0f488SChris Mason  */
17074f0f586bSSami Tolvanen static int plug_cmp(void *priv, const struct list_head *a,
17084f0f586bSSami Tolvanen 		    const struct list_head *b)
17096ac0f488SChris Mason {
1710214cc184SDavid Sterba 	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
17116ac0f488SChris Mason 						       plug_list);
1712214cc184SDavid Sterba 	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
17136ac0f488SChris Mason 						       plug_list);
17144f024f37SKent Overstreet 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
17154f024f37SKent Overstreet 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
17166ac0f488SChris Mason 
17176ac0f488SChris Mason 	if (a_sector < b_sector)
17186ac0f488SChris Mason 		return -1;
17196ac0f488SChris Mason 	if (a_sector > b_sector)
17206ac0f488SChris Mason 		return 1;
17216ac0f488SChris Mason 	return 0;
17226ac0f488SChris Mason }
17236ac0f488SChris Mason 
17246ac0f488SChris Mason static void run_plug(struct btrfs_plug_cb *plug)
17256ac0f488SChris Mason {
17266ac0f488SChris Mason 	struct btrfs_raid_bio *cur;
17276ac0f488SChris Mason 	struct btrfs_raid_bio *last = NULL;
17286ac0f488SChris Mason 
17296ac0f488SChris Mason 	/*
17306ac0f488SChris Mason 	 * sort our plug list then try to merge
17316ac0f488SChris Mason 	 * everything we can in hopes of creating full
17326ac0f488SChris Mason 	 * stripes.
17336ac0f488SChris Mason 	 */
17346ac0f488SChris Mason 	list_sort(NULL, &plug->rbio_list, plug_cmp);
17356ac0f488SChris Mason 	while (!list_empty(&plug->rbio_list)) {
17366ac0f488SChris Mason 		cur = list_entry(plug->rbio_list.next,
17376ac0f488SChris Mason 				 struct btrfs_raid_bio, plug_list);
17386ac0f488SChris Mason 		list_del_init(&cur->plug_list);
17396ac0f488SChris Mason 
17406ac0f488SChris Mason 		if (rbio_is_full(cur)) {
1741c7b562c5SDavid Sterba 			int ret;
1742c7b562c5SDavid Sterba 
17436ac0f488SChris Mason 			/* we have a full stripe, send it down */
1744c7b562c5SDavid Sterba 			ret = full_stripe_write(cur);
1745c7b562c5SDavid Sterba 			BUG_ON(ret);
17466ac0f488SChris Mason 			continue;
17476ac0f488SChris Mason 		}
17486ac0f488SChris Mason 		if (last) {
17496ac0f488SChris Mason 			if (rbio_can_merge(last, cur)) {
17506ac0f488SChris Mason 				merge_rbio(last, cur);
17516ac0f488SChris Mason 				__free_raid_bio(cur);
17526ac0f488SChris Mason 				continue;
17546ac0f488SChris Mason 			}
17556ac0f488SChris Mason 			__raid56_parity_write(last);
17566ac0f488SChris Mason 		}
17576ac0f488SChris Mason 		last = cur;
17586ac0f488SChris Mason 	}
17596ac0f488SChris Mason 	if (last)
17606ac0f488SChris Mason 		__raid56_parity_write(last);
17626ac0f488SChris Mason 	kfree(plug);
17636ac0f488SChris Mason }
17646ac0f488SChris Mason 
17656ac0f488SChris Mason /*
17666ac0f488SChris Mason  * if the unplug comes from schedule, we have to push the
17676ac0f488SChris Mason  * work off to a helper thread
17686ac0f488SChris Mason  */
17696ac0f488SChris Mason static void unplug_work(struct btrfs_work *work)
17706ac0f488SChris Mason {
17716ac0f488SChris Mason 	struct btrfs_plug_cb *plug;
17726ac0f488SChris Mason 	plug = container_of(work, struct btrfs_plug_cb, work);
17736ac0f488SChris Mason 	run_plug(plug);
17746ac0f488SChris Mason }
17756ac0f488SChris Mason 
17766ac0f488SChris Mason static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
17776ac0f488SChris Mason {
17786ac0f488SChris Mason 	struct btrfs_plug_cb *plug;
17796ac0f488SChris Mason 	plug = container_of(cb, struct btrfs_plug_cb, cb);
17806ac0f488SChris Mason 
17816ac0f488SChris Mason 	if (from_schedule) {
1782a0cac0ecSOmar Sandoval 		btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
1783d05a33acSQu Wenruo 		btrfs_queue_work(plug->info->rmw_workers,
17846ac0f488SChris Mason 				 &plug->work);
17856ac0f488SChris Mason 		return;
17866ac0f488SChris Mason 	}
17876ac0f488SChris Mason 	run_plug(plug);
17886ac0f488SChris Mason }
17896ac0f488SChris Mason 
17906ac0f488SChris Mason /*
179153b381b3SDavid Woodhouse  * our main entry point for writes from the rest of the FS.
179253b381b3SDavid Woodhouse  */
1793cc353a8bSQu Wenruo int raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc, u32 stripe_len)
179453b381b3SDavid Woodhouse {
17956a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
179653b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
17976ac0f488SChris Mason 	struct btrfs_plug_cb *plug = NULL;
17986ac0f488SChris Mason 	struct blk_plug_cb *cb;
17994245215dSMiao Xie 	int ret;
180053b381b3SDavid Woodhouse 
18014c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
1802af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
18034c664611SQu Wenruo 		btrfs_put_bioc(bioc);
180453b381b3SDavid Woodhouse 		return PTR_ERR(rbio);
1805af8e2d1dSMiao Xie 	}
180653b381b3SDavid Woodhouse 	bio_list_add(&rbio->bio_list, bio);
18074f024f37SKent Overstreet 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
18081b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_WRITE;
18096ac0f488SChris Mason 
18100b246afaSJeff Mahoney 	btrfs_bio_counter_inc_noblocked(fs_info);
18114245215dSMiao Xie 	rbio->generic_bio_cnt = 1;
18124245215dSMiao Xie 
18136ac0f488SChris Mason 	/*
18146ac0f488SChris Mason 	 * don't plug on full rbios, just get them out the door
18156ac0f488SChris Mason 	 * as quickly as we can
18166ac0f488SChris Mason 	 */
18174245215dSMiao Xie 	if (rbio_is_full(rbio)) {
18184245215dSMiao Xie 		ret = full_stripe_write(rbio);
18194245215dSMiao Xie 		if (ret)
18200b246afaSJeff Mahoney 			btrfs_bio_counter_dec(fs_info);
18214245215dSMiao Xie 		return ret;
18224245215dSMiao Xie 	}
18236ac0f488SChris Mason 
18240b246afaSJeff Mahoney 	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
18256ac0f488SChris Mason 	if (cb) {
18266ac0f488SChris Mason 		plug = container_of(cb, struct btrfs_plug_cb, cb);
18276ac0f488SChris Mason 		if (!plug->info) {
18280b246afaSJeff Mahoney 			plug->info = fs_info;
18296ac0f488SChris Mason 			INIT_LIST_HEAD(&plug->rbio_list);
18306ac0f488SChris Mason 		}
18316ac0f488SChris Mason 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
18324245215dSMiao Xie 		ret = 0;
18336ac0f488SChris Mason 	} else {
18344245215dSMiao Xie 		ret = __raid56_parity_write(rbio);
18354245215dSMiao Xie 		if (ret)
18360b246afaSJeff Mahoney 			btrfs_bio_counter_dec(fs_info);
183753b381b3SDavid Woodhouse 	}
18384245215dSMiao Xie 	return ret;
18396ac0f488SChris Mason }
184053b381b3SDavid Woodhouse 
184153b381b3SDavid Woodhouse /*
184253b381b3SDavid Woodhouse  * all parity reconstruction happens here.  We've read in everything
184353b381b3SDavid Woodhouse  * we can find from the drives and this does the heavy lifting of
184453b381b3SDavid Woodhouse  * sorting the good from the bad.
184553b381b3SDavid Woodhouse  */
184653b381b3SDavid Woodhouse static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
184753b381b3SDavid Woodhouse {
184807e4d380SQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
184907e4d380SQu Wenruo 	int sectornr, stripe;
185053b381b3SDavid Woodhouse 	void **pointers;
185194a0b58dSIra Weiny 	void **unmap_array;
185253b381b3SDavid Woodhouse 	int faila = -1, failb = -1;
185358efbc9fSOmar Sandoval 	blk_status_t err;
185453b381b3SDavid Woodhouse 	int i;
185553b381b3SDavid Woodhouse 
185607e4d380SQu Wenruo 	/*
185707e4d380SQu Wenruo 	 * This array stores the pointer for each sector; each pointer
185807e4d380SQu Wenruo 	 * already has the sector's pgoff added in
185907e4d380SQu Wenruo 	 */
186031e818feSDavid Sterba 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
186153b381b3SDavid Woodhouse 	if (!pointers) {
186258efbc9fSOmar Sandoval 		err = BLK_STS_RESOURCE;
186353b381b3SDavid Woodhouse 		goto cleanup_io;
186453b381b3SDavid Woodhouse 	}
186553b381b3SDavid Woodhouse 
186694a0b58dSIra Weiny 	/*
186794a0b58dSIra Weiny 	 * Store copy of pointers that does not get reordered during
186894a0b58dSIra Weiny 	 * reconstruction so that kunmap_local works.
186994a0b58dSIra Weiny 	 */
187094a0b58dSIra Weiny 	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
187194a0b58dSIra Weiny 	if (!unmap_array) {
187294a0b58dSIra Weiny 		err = BLK_STS_RESOURCE;
187394a0b58dSIra Weiny 		goto cleanup_pointers;
187494a0b58dSIra Weiny 	}
187594a0b58dSIra Weiny 
187653b381b3SDavid Woodhouse 	faila = rbio->faila;
187753b381b3SDavid Woodhouse 	failb = rbio->failb;
187853b381b3SDavid Woodhouse 
1879b4ee1782SOmar Sandoval 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1880b4ee1782SOmar Sandoval 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
188153b381b3SDavid Woodhouse 		spin_lock_irq(&rbio->bio_list_lock);
188253b381b3SDavid Woodhouse 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
188353b381b3SDavid Woodhouse 		spin_unlock_irq(&rbio->bio_list_lock);
188453b381b3SDavid Woodhouse 	}
188553b381b3SDavid Woodhouse 
188653b381b3SDavid Woodhouse 	index_rbio_pages(rbio);
188753b381b3SDavid Woodhouse 
188807e4d380SQu Wenruo 	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
188907e4d380SQu Wenruo 		struct sector_ptr *sector;
189007e4d380SQu Wenruo 
18915a6ac9eaSMiao Xie 		/*
18925a6ac9eaSMiao Xie 		 * Now we just use the bitmap to mark the horizontal stripes in
18935a6ac9eaSMiao Xie 		 * which we have data when doing parity scrub.
18945a6ac9eaSMiao Xie 		 */
18955a6ac9eaSMiao Xie 		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
189607e4d380SQu Wenruo 		    !test_bit(sectornr, rbio->dbitmap))
18975a6ac9eaSMiao Xie 			continue;
18985a6ac9eaSMiao Xie 
189994a0b58dSIra Weiny 		/*
190007e4d380SQu Wenruo 		 * Setup our array of pointers with sectors from each stripe
190194a0b58dSIra Weiny 		 *
190294a0b58dSIra Weiny 		 * NOTE: store a duplicate array of pointers to preserve the
190394a0b58dSIra Weiny 		 * pointer order
190453b381b3SDavid Woodhouse 		 */
19052c8cdd6eSMiao Xie 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
190653b381b3SDavid Woodhouse 			/*
190707e4d380SQu Wenruo 			 * If we're rebuilding a read, we have to use
190853b381b3SDavid Woodhouse 			 * pages from the bio list
190953b381b3SDavid Woodhouse 			 * sectors from the bio list
1910b4ee1782SOmar Sandoval 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1911b4ee1782SOmar Sandoval 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
191253b381b3SDavid Woodhouse 			    (stripe == faila || stripe == failb)) {
191307e4d380SQu Wenruo 				sector = sector_in_rbio(rbio, stripe, sectornr, 0);
191453b381b3SDavid Woodhouse 			} else {
191507e4d380SQu Wenruo 				sector = rbio_stripe_sector(rbio, stripe, sectornr);
191653b381b3SDavid Woodhouse 			}
191707e4d380SQu Wenruo 			ASSERT(sector->page);
191807e4d380SQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
191907e4d380SQu Wenruo 					   sector->pgoff;
192094a0b58dSIra Weiny 			unmap_array[stripe] = pointers[stripe];
192153b381b3SDavid Woodhouse 		}
192253b381b3SDavid Woodhouse 
192307e4d380SQu Wenruo 		/* All raid6 handling here */
19244c664611SQu Wenruo 		if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
192507e4d380SQu Wenruo 			/* Single failure, rebuild from parity raid5 style */
192653b381b3SDavid Woodhouse 			if (failb < 0) {
192753b381b3SDavid Woodhouse 				if (faila == rbio->nr_data) {
192853b381b3SDavid Woodhouse 					/*
192953b381b3SDavid Woodhouse 					 * Just the P stripe has failed, without
193053b381b3SDavid Woodhouse 					 * a bad data or Q stripe.
193153b381b3SDavid Woodhouse 					 * TODO, we should redo the xor here.
193253b381b3SDavid Woodhouse 					 */
193358efbc9fSOmar Sandoval 					err = BLK_STS_IOERR;
193453b381b3SDavid Woodhouse 					goto cleanup;
193553b381b3SDavid Woodhouse 				}
193653b381b3SDavid Woodhouse 				/*
193753b381b3SDavid Woodhouse 				 * a single failure in raid6 is rebuilt
193853b381b3SDavid Woodhouse 				 * in the pstripe code below
193953b381b3SDavid Woodhouse 				 */
194053b381b3SDavid Woodhouse 				goto pstripe;
194153b381b3SDavid Woodhouse 			}
194253b381b3SDavid Woodhouse 
194353b381b3SDavid Woodhouse 			/* make sure our ps and qs are in order */
1944b7d2083aSNikolay Borisov 			if (faila > failb)
1945b7d2083aSNikolay Borisov 				swap(faila, failb);
194653b381b3SDavid Woodhouse 
194753b381b3SDavid Woodhouse 			/* if the q stripe has failed, do a pstripe reconstruction
194853b381b3SDavid Woodhouse 			 * from the xors.
194953b381b3SDavid Woodhouse 			 * If both the q stripe and the P stripe have failed, we're
195053b381b3SDavid Woodhouse 			 * here due to a crc mismatch and we can't give them the
195153b381b3SDavid Woodhouse 			 * data they want
195253b381b3SDavid Woodhouse 			 */
19534c664611SQu Wenruo 			if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
19544c664611SQu Wenruo 				if (rbio->bioc->raid_map[faila] ==
19558e5cfb55SZhao Lei 				    RAID5_P_STRIPE) {
195658efbc9fSOmar Sandoval 					err = BLK_STS_IOERR;
195753b381b3SDavid Woodhouse 					goto cleanup;
195853b381b3SDavid Woodhouse 				}
195953b381b3SDavid Woodhouse 				/*
196053b381b3SDavid Woodhouse 				 * otherwise we have one bad data stripe and
196153b381b3SDavid Woodhouse 				 * a good P stripe.  raid5!
196253b381b3SDavid Woodhouse 				 */
196353b381b3SDavid Woodhouse 				goto pstripe;
196453b381b3SDavid Woodhouse 			}
196553b381b3SDavid Woodhouse 
19664c664611SQu Wenruo 			if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
19672c8cdd6eSMiao Xie 				raid6_datap_recov(rbio->real_stripes,
196807e4d380SQu Wenruo 						  sectorsize, faila, pointers);
196953b381b3SDavid Woodhouse 			} else {
19702c8cdd6eSMiao Xie 				raid6_2data_recov(rbio->real_stripes,
197107e4d380SQu Wenruo 						  sectorsize, faila, failb,
197253b381b3SDavid Woodhouse 						  pointers);
197353b381b3SDavid Woodhouse 			}
197453b381b3SDavid Woodhouse 		} else {
197553b381b3SDavid Woodhouse 			void *p;
197653b381b3SDavid Woodhouse 
197753b381b3SDavid Woodhouse 			/* rebuild from P stripe here (raid5 or raid6) */
197853b381b3SDavid Woodhouse 			BUG_ON(failb != -1);
197953b381b3SDavid Woodhouse pstripe:
198053b381b3SDavid Woodhouse 			/* Copy parity block into failed block to start with */
198107e4d380SQu Wenruo 			memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
198253b381b3SDavid Woodhouse 
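			/*
			 * The missing block is the XOR of the parity block
			 * and every surviving data block:
			 *   D_faila = P ^ (D0 ^ ... ^ Dn-1, without D_faila)
			 * The copy above seeded the target with P; the swap
			 * below moves it to the end so run_xor() can fold
			 * the survivors into it.
			 */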
198353b381b3SDavid Woodhouse 			/* rearrange the pointer array */
198453b381b3SDavid Woodhouse 			p = pointers[faila];
198553b381b3SDavid Woodhouse 			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
198653b381b3SDavid Woodhouse 				pointers[stripe] = pointers[stripe + 1];
198753b381b3SDavid Woodhouse 			pointers[rbio->nr_data - 1] = p;
198853b381b3SDavid Woodhouse 
198953b381b3SDavid Woodhouse 			/* xor in the rest */
199007e4d380SQu Wenruo 			run_xor(pointers, rbio->nr_data - 1, sectorsize);
199153b381b3SDavid Woodhouse 		}
199253b381b3SDavid Woodhouse 		/* if we're doing this rebuild as part of an rmw, go through
199353b381b3SDavid Woodhouse 		 * and set all of our private rbio pages in the
199453b381b3SDavid Woodhouse 		 * failed stripes as uptodate.  This way finish_rmw will
199553b381b3SDavid Woodhouse 		 * know they can be trusted.  If this was a read reconstruction,
199653b381b3SDavid Woodhouse 		 * other endio functions will fiddle the uptodate bits
199753b381b3SDavid Woodhouse 		 */
19981b94b556SMiao Xie 		if (rbio->operation == BTRFS_RBIO_WRITE) {
199907e4d380SQu Wenruo 			for (i = 0;  i < rbio->stripe_nsectors; i++) {
200053b381b3SDavid Woodhouse 				if (faila != -1) {
200107e4d380SQu Wenruo 					sector = rbio_stripe_sector(rbio, faila, i);
200207e4d380SQu Wenruo 					sector->uptodate = 1;
200353b381b3SDavid Woodhouse 				}
200453b381b3SDavid Woodhouse 				if (failb != -1) {
200507e4d380SQu Wenruo 					sector = rbio_stripe_sector(rbio, failb, i);
200607e4d380SQu Wenruo 					sector->uptodate = 1;
200753b381b3SDavid Woodhouse 				}
200853b381b3SDavid Woodhouse 			}
200953b381b3SDavid Woodhouse 		}
201094a0b58dSIra Weiny 		for (stripe = rbio->real_stripes - 1; stripe >= 0; stripe--)
201194a0b58dSIra Weiny 			kunmap_local(unmap_array[stripe]);
201253b381b3SDavid Woodhouse 	}
201353b381b3SDavid Woodhouse 
201458efbc9fSOmar Sandoval 	err = BLK_STS_OK;
201553b381b3SDavid Woodhouse cleanup:
201694a0b58dSIra Weiny 	kfree(unmap_array);
201794a0b58dSIra Weiny cleanup_pointers:
201853b381b3SDavid Woodhouse 	kfree(pointers);
201953b381b3SDavid Woodhouse 
202053b381b3SDavid Woodhouse cleanup_io:
2021580c6efaSLiu Bo 	/*
2022580c6efaSLiu Bo 	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
2023580c6efaSLiu Bo 	 * valid rbio which is consistent with ondisk content, thus such a
2024580c6efaSLiu Bo 	 * valid rbio can be cached to avoid further disk reads.
2025580c6efaSLiu Bo 	 */
2026580c6efaSLiu Bo 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2027580c6efaSLiu Bo 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
202844ac474dSLiu Bo 		/*
202944ac474dSLiu Bo 		 * - In case of two failures, where rbio->failb != -1:
203044ac474dSLiu Bo 		 *
203144ac474dSLiu Bo 		 *   Do not cache this rbio since the above read reconstruction
203244ac474dSLiu Bo 		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
203344ac474dSLiu Bo 		 *   changed some content of stripes which are not identical to
203444ac474dSLiu Bo 		 *   on-disk content any more, otherwise, a later write/recover
203544ac474dSLiu Bo 		 *   may steal stripe_pages from this rbio and end up with
203644ac474dSLiu Bo 		 *   corruptions or rebuild failures.
203744ac474dSLiu Bo 		 *
203844ac474dSLiu Bo 		 * - In case of single failure, where rbio->failb == -1:
203944ac474dSLiu Bo 		 *
204044ac474dSLiu Bo 		 *   Cache this rbio iff the above read reconstruction is
204152042d8eSAndrea Gelmini 		 *   executed without problems.
204244ac474dSLiu Bo 		 */
204344ac474dSLiu Bo 		if (err == BLK_STS_OK && rbio->failb < 0)
20444ae10b3aSChris Mason 			cache_rbio_pages(rbio);
20454ae10b3aSChris Mason 		else
20464ae10b3aSChris Mason 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
20474ae10b3aSChris Mason 
20484246a0b6SChristoph Hellwig 		rbio_orig_end_io(rbio, err);
204958efbc9fSOmar Sandoval 	} else if (err == BLK_STS_OK) {
205053b381b3SDavid Woodhouse 		rbio->faila = -1;
205153b381b3SDavid Woodhouse 		rbio->failb = -1;
20525a6ac9eaSMiao Xie 
20535a6ac9eaSMiao Xie 		if (rbio->operation == BTRFS_RBIO_WRITE)
205453b381b3SDavid Woodhouse 			finish_rmw(rbio);
20555a6ac9eaSMiao Xie 		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
20565a6ac9eaSMiao Xie 			finish_parity_scrub(rbio, 0);
20575a6ac9eaSMiao Xie 		else
20585a6ac9eaSMiao Xie 			BUG();
205953b381b3SDavid Woodhouse 	} else {
20604246a0b6SChristoph Hellwig 		rbio_orig_end_io(rbio, err);
206153b381b3SDavid Woodhouse 	}
206253b381b3SDavid Woodhouse }
206353b381b3SDavid Woodhouse 
206453b381b3SDavid Woodhouse /*
206553b381b3SDavid Woodhouse  * This is called only for stripes we've read from disk to
206653b381b3SDavid Woodhouse  * reconstruct the parity.
206753b381b3SDavid Woodhouse  */
20684246a0b6SChristoph Hellwig static void raid_recover_end_io(struct bio *bio)
206953b381b3SDavid Woodhouse {
207053b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio = bio->bi_private;
207153b381b3SDavid Woodhouse 
207253b381b3SDavid Woodhouse 	/*
207353b381b3SDavid Woodhouse 	 * we only read stripe pages off the disk, set them
207453b381b3SDavid Woodhouse 	 * up to date if there were no errors
207553b381b3SDavid Woodhouse 	 */
20764e4cbee9SChristoph Hellwig 	if (bio->bi_status)
207753b381b3SDavid Woodhouse 		fail_bio_stripe(rbio, bio);
207853b381b3SDavid Woodhouse 	else
207953b381b3SDavid Woodhouse 		set_bio_pages_uptodate(bio);
208053b381b3SDavid Woodhouse 	bio_put(bio);
208153b381b3SDavid Woodhouse 
2082b89e1b01SMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
208353b381b3SDavid Woodhouse 		return;
208453b381b3SDavid Woodhouse 
20854c664611SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
208658efbc9fSOmar Sandoval 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
208753b381b3SDavid Woodhouse 	else
208853b381b3SDavid Woodhouse 		__raid_recover_end_io(rbio);
208953b381b3SDavid Woodhouse }
209053b381b3SDavid Woodhouse 
209153b381b3SDavid Woodhouse /*
209253b381b3SDavid Woodhouse  * reads everything we need off the disk to reconstruct
209353b381b3SDavid Woodhouse  * the parity. endio handlers trigger final reconstruction
209453b381b3SDavid Woodhouse  * when the IO is done.
209553b381b3SDavid Woodhouse  *
209653b381b3SDavid Woodhouse  * This is used both for reads from the higher layers and for
209753b381b3SDavid Woodhouse  * parity construction required to finish a rmw cycle.
209853b381b3SDavid Woodhouse  */
209953b381b3SDavid Woodhouse static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
210053b381b3SDavid Woodhouse {
210153b381b3SDavid Woodhouse 	int bios_to_read = 0;
210253b381b3SDavid Woodhouse 	struct bio_list bio_list;
210353b381b3SDavid Woodhouse 	int ret;
21043e77605dSQu Wenruo 	int sectornr;
210553b381b3SDavid Woodhouse 	int stripe;
210653b381b3SDavid Woodhouse 	struct bio *bio;
210753b381b3SDavid Woodhouse 
210853b381b3SDavid Woodhouse 	bio_list_init(&bio_list);
210953b381b3SDavid Woodhouse 
211053b381b3SDavid Woodhouse 	ret = alloc_rbio_pages(rbio);
211153b381b3SDavid Woodhouse 	if (ret)
211253b381b3SDavid Woodhouse 		goto cleanup;
211353b381b3SDavid Woodhouse 
2114b89e1b01SMiao Xie 	atomic_set(&rbio->error, 0);
211553b381b3SDavid Woodhouse 
211653b381b3SDavid Woodhouse 	/*
21174ae10b3aSChris Mason 	 * read everything that hasn't failed.  Thanks to the
21184ae10b3aSChris Mason 	 * stripe cache, it is possible that some or all of these
21194ae10b3aSChris Mason 	 * pages are going to be uptodate.
212053b381b3SDavid Woodhouse 	 */
21212c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
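		/*
		 * Count the stripes we already know are bad up front, so
		 * the max_errors checks below account for them.
		 */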
21225588383eSLiu Bo 		if (rbio->faila == stripe || rbio->failb == stripe) {
2123b89e1b01SMiao Xie 			atomic_inc(&rbio->error);
212453b381b3SDavid Woodhouse 			continue;
21255588383eSLiu Bo 		}
212653b381b3SDavid Woodhouse 
21273e77605dSQu Wenruo 		for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
21283e77605dSQu Wenruo 			struct sector_ptr *sector;
212953b381b3SDavid Woodhouse 
213053b381b3SDavid Woodhouse 			/*
213153b381b3SDavid Woodhouse 			 * the rmw code may have already read this
213253b381b3SDavid Woodhouse 			 * sector in
213353b381b3SDavid Woodhouse 			 */
21343e77605dSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
21353e77605dSQu Wenruo 			if (sector->uptodate)
213653b381b3SDavid Woodhouse 				continue;
213753b381b3SDavid Woodhouse 
21383e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector,
21393e77605dSQu Wenruo 						 stripe, sectornr, rbio->stripe_len,
2140e01bf588SChristoph Hellwig 						 REQ_OP_READ);
214153b381b3SDavid Woodhouse 			if (ret < 0)
214253b381b3SDavid Woodhouse 				goto cleanup;
214353b381b3SDavid Woodhouse 		}
214453b381b3SDavid Woodhouse 	}
214553b381b3SDavid Woodhouse 
214653b381b3SDavid Woodhouse 	bios_to_read = bio_list_size(&bio_list);
214753b381b3SDavid Woodhouse 	if (!bios_to_read) {
214853b381b3SDavid Woodhouse 		/*
214953b381b3SDavid Woodhouse 		 * we might have no bios to read just because the pages
215053b381b3SDavid Woodhouse 		 * were up to date, or we might have no bios to read because
215153b381b3SDavid Woodhouse 		 * the devices were gone.
215253b381b3SDavid Woodhouse 		 */
21534c664611SQu Wenruo 		if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) {
215453b381b3SDavid Woodhouse 			__raid_recover_end_io(rbio);
2155813f8a0eSNikolay Borisov 			return 0;
215653b381b3SDavid Woodhouse 		} else {
215753b381b3SDavid Woodhouse 			goto cleanup;
215853b381b3SDavid Woodhouse 		}
215953b381b3SDavid Woodhouse 	}
216053b381b3SDavid Woodhouse 
216153b381b3SDavid Woodhouse 	/*
21624c664611SQu Wenruo 	 * The bioc may be freed once we submit the last bio. Make sure not to
21634c664611SQu Wenruo 	 * touch it after that.
216453b381b3SDavid Woodhouse 	 */
2165b89e1b01SMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
2166bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
216753b381b3SDavid Woodhouse 		bio->bi_end_io = raid_recover_end_io;
216853b381b3SDavid Woodhouse 
21696a258d72SQu Wenruo 		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
217053b381b3SDavid Woodhouse 
21714e49ea4aSMike Christie 		submit_bio(bio);
217253b381b3SDavid Woodhouse 	}
2173813f8a0eSNikolay Borisov 
217453b381b3SDavid Woodhouse 	return 0;
217553b381b3SDavid Woodhouse 
217653b381b3SDavid Woodhouse cleanup:
2177b4ee1782SOmar Sandoval 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2178b4ee1782SOmar Sandoval 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
217958efbc9fSOmar Sandoval 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2180785884fcSLiu Bo 
2181785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2182785884fcSLiu Bo 		bio_put(bio);
2183785884fcSLiu Bo 
218453b381b3SDavid Woodhouse 	return -EIO;
218553b381b3SDavid Woodhouse }
218653b381b3SDavid Woodhouse 
218753b381b3SDavid Woodhouse /*
218853b381b3SDavid Woodhouse  * the main entry point for reads from the higher layers.  This
218953b381b3SDavid Woodhouse  * is really only called when the normal read path had a failure,
219053b381b3SDavid Woodhouse  * so we assume the bio they send down corresponds to a failed part
219153b381b3SDavid Woodhouse  * of the drive.
219253b381b3SDavid Woodhouse  */
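/*
 * Illustrative call pattern (a sketch, not copied verbatim from any
 * caller):
 *
 *	ret = raid56_parity_recover(bio, bioc, map_length, mirror_num, 1);
 *
 * where @bioc was obtained from the block mapping code and @generic_io
 * is 0 only when the caller (e.g. scrub) manages the bio counter and
 * the bioc reference itself.
 */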
21936a258d72SQu Wenruo int raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
2194cc353a8bSQu Wenruo 			  u32 stripe_len, int mirror_num, int generic_io)
219553b381b3SDavid Woodhouse {
21966a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
219753b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
219853b381b3SDavid Woodhouse 	int ret;
219953b381b3SDavid Woodhouse 
2200abad60c6SLiu Bo 	if (generic_io) {
22014c664611SQu Wenruo 		ASSERT(bioc->mirror_num == mirror_num);
2202c3a3b19bSQu Wenruo 		btrfs_bio(bio)->mirror_num = mirror_num;
2203abad60c6SLiu Bo 	}
2204abad60c6SLiu Bo 
22054c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
2206af8e2d1dSMiao Xie 	if (IS_ERR(rbio)) {
22076e9606d2SZhao Lei 		if (generic_io)
22084c664611SQu Wenruo 			btrfs_put_bioc(bioc);
220953b381b3SDavid Woodhouse 		return PTR_ERR(rbio);
2210af8e2d1dSMiao Xie 	}
221153b381b3SDavid Woodhouse 
22121b94b556SMiao Xie 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
221353b381b3SDavid Woodhouse 	bio_list_add(&rbio->bio_list, bio);
22144f024f37SKent Overstreet 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
221553b381b3SDavid Woodhouse 
221653b381b3SDavid Woodhouse 	rbio->faila = find_logical_bio_stripe(rbio, bio);
221753b381b3SDavid Woodhouse 	if (rbio->faila == -1) {
22180b246afaSJeff Mahoney 		btrfs_warn(fs_info,
22194c664611SQu Wenruo "%s could not find the bad stripe in raid56, so we cannot recover it (bio has logical %llu len %llu, bioc has map_type %llu)",
22201201b58bSDavid Sterba 			   __func__, bio->bi_iter.bi_sector << 9,
22214c664611SQu Wenruo 			   (u64)bio->bi_iter.bi_size, bioc->map_type);
22226e9606d2SZhao Lei 		if (generic_io)
22234c664611SQu Wenruo 			btrfs_put_bioc(bioc);
222453b381b3SDavid Woodhouse 		kfree(rbio);
222553b381b3SDavid Woodhouse 		return -EIO;
222653b381b3SDavid Woodhouse 	}
222753b381b3SDavid Woodhouse 
22284245215dSMiao Xie 	if (generic_io) {
22290b246afaSJeff Mahoney 		btrfs_bio_counter_inc_noblocked(fs_info);
22304245215dSMiao Xie 		rbio->generic_bio_cnt = 1;
22314245215dSMiao Xie 	} else {
22324c664611SQu Wenruo 		btrfs_get_bioc(bioc);
22334245215dSMiao Xie 	}
22344245215dSMiao Xie 
223553b381b3SDavid Woodhouse 	/*
22368810f751SLiu Bo 	 * Loop retry:
22378810f751SLiu Bo 	 * for 'mirror_num == 2', reconstruct from all other stripes.
22388810f751SLiu Bo 	 * for 'mirror_num > 2', select a stripe to fail on every retry.
223953b381b3SDavid Woodhouse 	 */
22408810f751SLiu Bo 	if (mirror_num > 2) {
22418810f751SLiu Bo 		/*
22428810f751SLiu Bo 		 * 'mirror_num == 3' is to fail the P stripe and
22438810f751SLiu Bo 		 * reconstruct from the Q stripe.  'mirror_num > 3' is to
22448810f751SLiu Bo 		 * fail a data stripe and reconstruct from P+Q.
22458810f751SLiu Bo 		 */
22468810f751SLiu Bo 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
22478810f751SLiu Bo 		ASSERT(rbio->failb > 0);
22488810f751SLiu Bo 		if (rbio->failb <= rbio->faila)
22498810f751SLiu Bo 			rbio->failb--;
22508810f751SLiu Bo 	}
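
	/*
	 * Worked example (illustrative numbers, not from any particular
	 * caller): on a 4-device RAID6 stripe (real_stripes == 4, data in
	 * slots 0-1, P in slot 2, Q in slot 3), mirror_num == 3 gives
	 * failb = 4 - (3 - 1) = 2, failing P so we rebuild from Q;
	 * mirror_num == 4 gives failb = 1, failing a data stripe so both
	 * P and Q are used.  The failb-- above keeps faila and failb
	 * distinct.
	 */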
225153b381b3SDavid Woodhouse 
225253b381b3SDavid Woodhouse 	ret = lock_stripe_add(rbio);
225353b381b3SDavid Woodhouse 
225453b381b3SDavid Woodhouse 	/*
225553b381b3SDavid Woodhouse 	 * __raid56_parity_recover() will end the bio with any errors
225653b381b3SDavid Woodhouse 	 * it hits.  We don't want to return its error value up the
225753b381b3SDavid Woodhouse 	 * stack because our caller would end up calling bio_endio()
225853b381b3SDavid Woodhouse 	 * with any nonzero return.
225953b381b3SDavid Woodhouse 	 */
226153b381b3SDavid Woodhouse 	if (ret == 0)
226253b381b3SDavid Woodhouse 		__raid56_parity_recover(rbio);
226353b381b3SDavid Woodhouse 	/*
226453b381b3SDavid Woodhouse 	 * Otherwise our rbio has been added to the list of rbios that
226553b381b3SDavid Woodhouse 	 * will be handled after the current lock owner is done.
226653b381b3SDavid Woodhouse 	 */
226853b381b3SDavid Woodhouse 	return 0;
227053b381b3SDavid Woodhouse }
227153b381b3SDavid Woodhouse 
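/*
 * Work queue handler: kick off the read phase of the read/modify/write
 * cycle for the rbio we were queued with.
 */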
227253b381b3SDavid Woodhouse static void rmw_work(struct btrfs_work *work)
227353b381b3SDavid Woodhouse {
227453b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
227553b381b3SDavid Woodhouse 
227653b381b3SDavid Woodhouse 	rbio = container_of(work, struct btrfs_raid_bio, work);
227753b381b3SDavid Woodhouse 	raid56_rmw_stripe(rbio);
227853b381b3SDavid Woodhouse }
227953b381b3SDavid Woodhouse 
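/*
 * Work queue handler: start recovery, rebuilding the failed stripes from
 * the remaining ones, for the rbio we were queued with.
 */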
228053b381b3SDavid Woodhouse static void read_rebuild_work(struct btrfs_work *work)
228153b381b3SDavid Woodhouse {
228253b381b3SDavid Woodhouse 	struct btrfs_raid_bio *rbio;
228353b381b3SDavid Woodhouse 
228453b381b3SDavid Woodhouse 	rbio = container_of(work, struct btrfs_raid_bio, work);
228553b381b3SDavid Woodhouse 	__raid56_parity_recover(rbio);
228653b381b3SDavid Woodhouse }
22875a6ac9eaSMiao Xie 
22885a6ac9eaSMiao Xie /*
22895a6ac9eaSMiao Xie  * The following code is used to scrub/replace the parity stripe.
22905a6ac9eaSMiao Xie  *
22914c664611SQu Wenruo  * Caller must have already increased bio_counter for getting @bioc.
2292ae6529c3SQu Wenruo  *
22935a6ac9eaSMiao Xie  * Note: We need to make sure that all the pages added to the scrub/replace
22945a6ac9eaSMiao Xie  * raid bio are correct and will not be changed during the scrub/replace.
22955a6ac9eaSMiao Xie  * That is, those pages only hold metadata or file data with checksums.
22965a6ac9eaSMiao Xie  */
22975a6ac9eaSMiao Xie 
22986a258d72SQu Wenruo struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
22996a258d72SQu Wenruo 				struct btrfs_io_context *bioc,
2300cc353a8bSQu Wenruo 				u32 stripe_len, struct btrfs_device *scrub_dev,
23015a6ac9eaSMiao Xie 				unsigned long *dbitmap, int stripe_nsectors)
23025a6ac9eaSMiao Xie {
23036a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
23045a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
23055a6ac9eaSMiao Xie 	int i;
23065a6ac9eaSMiao Xie 
23074c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, stripe_len);
23085a6ac9eaSMiao Xie 	if (IS_ERR(rbio))
23095a6ac9eaSMiao Xie 		return NULL;
23105a6ac9eaSMiao Xie 	bio_list_add(&rbio->bio_list, bio);
23115a6ac9eaSMiao Xie 	/*
23125a6ac9eaSMiao Xie 	 * This is a special bio which is used to hold the completion handler
23135a6ac9eaSMiao Xie 	 * and make the scrub rbio similar to the other types.
23145a6ac9eaSMiao Xie 	 */
23155a6ac9eaSMiao Xie 	ASSERT(!bio->bi_iter.bi_size);
23165a6ac9eaSMiao Xie 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
23175a6ac9eaSMiao Xie 
23189cd3a7ebSLiu Bo 	/*
23194c664611SQu Wenruo 	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
23209cd3a7ebSLiu Bo 	 * to the end position, so this search can start from the first parity
23219cd3a7ebSLiu Bo 	 * stripe.
23229cd3a7ebSLiu Bo 	 */
23239cd3a7ebSLiu Bo 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
23244c664611SQu Wenruo 		if (bioc->stripes[i].dev == scrub_dev) {
23255a6ac9eaSMiao Xie 			rbio->scrubp = i;
23265a6ac9eaSMiao Xie 			break;
23275a6ac9eaSMiao Xie 		}
23285a6ac9eaSMiao Xie 	}
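	/*
	 * Illustrative layout: on a 6-device RAID6 full stripe, data
	 * occupies slots 0-3 with P in slot 4 and Q in slot 5, so the loop
	 * above only has to scan the last two slots for @scrub_dev.
	 */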
23299cd3a7ebSLiu Bo 	ASSERT(i < rbio->real_stripes);
23305a6ac9eaSMiao Xie 
23315a6ac9eaSMiao Xie 	/* For now we only support the case where sectorsize equals page size */
23320b246afaSJeff Mahoney 	ASSERT(fs_info->sectorsize == PAGE_SIZE);
23335a6ac9eaSMiao Xie 	ASSERT(rbio->stripe_npages == stripe_nsectors);
23345a6ac9eaSMiao Xie 	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
23355a6ac9eaSMiao Xie 
2336ae6529c3SQu Wenruo 	/*
23374c664611SQu Wenruo 	 * We have already increased bio_counter when getting the bioc;
2338ae6529c3SQu Wenruo 	 * record it so we can drop it at rbio_orig_end_io().
2339ae6529c3SQu Wenruo 	 */
2340ae6529c3SQu Wenruo 	rbio->generic_bio_cnt = 1;
2341ae6529c3SQu Wenruo 
23425a6ac9eaSMiao Xie 	return rbio;
23435a6ac9eaSMiao Xie }
23445a6ac9eaSMiao Xie 
2345b4ee1782SOmar Sandoval /* Used for both parity scrub and rebuilding a missing device. */
2346b4ee1782SOmar Sandoval void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
23476346f6bfSQu Wenruo 			    unsigned int pgoff, u64 logical)
23485a6ac9eaSMiao Xie {
23496346f6bfSQu Wenruo 	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
23505a6ac9eaSMiao Xie 	int stripe_offset;
23515a6ac9eaSMiao Xie 	int index;
23525a6ac9eaSMiao Xie 
23534c664611SQu Wenruo 	ASSERT(logical >= rbio->bioc->raid_map[0]);
23546346f6bfSQu Wenruo 	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
23555a6ac9eaSMiao Xie 				rbio->stripe_len * rbio->nr_data);
23564c664611SQu Wenruo 	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
23576346f6bfSQu Wenruo 	index = stripe_offset / sectorsize;
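	/*
	 * Illustrative: with a 4K sectorsize and @logical 12K past
	 * raid_map[0], the sector lands in bio_sectors[3].
	 */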
23586346f6bfSQu Wenruo 	rbio->bio_sectors[index].page = page;
23596346f6bfSQu Wenruo 	rbio->bio_sectors[index].pgoff = pgoff;
23605a6ac9eaSMiao Xie }
23615a6ac9eaSMiao Xie 
23625a6ac9eaSMiao Xie /*
23635a6ac9eaSMiao Xie  * We only scrub the parity for which we have correct data on the same
23645a6ac9eaSMiao Xie  * horizontal stripe, so we don't need to allocate pages for all the stripes.
23655a6ac9eaSMiao Xie  */
23665a6ac9eaSMiao Xie static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
23675a6ac9eaSMiao Xie {
23685a6ac9eaSMiao Xie 	int i;
23695a6ac9eaSMiao Xie 	int bit;
23705a6ac9eaSMiao Xie 	int index;
23715a6ac9eaSMiao Xie 	struct page *page;
23725a6ac9eaSMiao Xie 
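	/*
	 * stripe_pages[] is laid out stripe after stripe, stripe_npages
	 * entries each; e.g. (illustrative) with stripe_npages == 16, bit 5
	 * of stripe 2 maps to index 2 * 16 + 5 == 37 below.
	 */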
23735a6ac9eaSMiao Xie 	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
23742c8cdd6eSMiao Xie 		for (i = 0; i < rbio->real_stripes; i++) {
23755a6ac9eaSMiao Xie 			index = i * rbio->stripe_npages + bit;
23765a6ac9eaSMiao Xie 			if (rbio->stripe_pages[index])
23775a6ac9eaSMiao Xie 				continue;
23785a6ac9eaSMiao Xie 
2379b0ee5e1eSDavid Sterba 			page = alloc_page(GFP_NOFS);
23805a6ac9eaSMiao Xie 			if (!page)
23815a6ac9eaSMiao Xie 				return -ENOMEM;
23825a6ac9eaSMiao Xie 			rbio->stripe_pages[index] = page;
23835a6ac9eaSMiao Xie 		}
23845a6ac9eaSMiao Xie 	}
2385eb357060SQu Wenruo 	index_stripe_sectors(rbio);
23865a6ac9eaSMiao Xie 	return 0;
23875a6ac9eaSMiao Xie }
23885a6ac9eaSMiao Xie 
23895a6ac9eaSMiao Xie static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
23905a6ac9eaSMiao Xie 					 int need_check)
23915a6ac9eaSMiao Xie {
23924c664611SQu Wenruo 	struct btrfs_io_context *bioc = rbio->bioc;
239346900662SQu Wenruo 	const u32 sectorsize = bioc->fs_info->sectorsize;
23941389053eSKees Cook 	void **pointers = rbio->finish_pointers;
23951389053eSKees Cook 	unsigned long *pbitmap = rbio->finish_pbitmap;
23965a6ac9eaSMiao Xie 	int nr_data = rbio->nr_data;
23975a6ac9eaSMiao Xie 	int stripe;
23983e77605dSQu Wenruo 	int sectornr;
2399c17af965SDavid Sterba 	bool has_qstripe;
240046900662SQu Wenruo 	struct sector_ptr p_sector = { 0 };
240146900662SQu Wenruo 	struct sector_ptr q_sector = { 0 };
24025a6ac9eaSMiao Xie 	struct bio_list bio_list;
24035a6ac9eaSMiao Xie 	struct bio *bio;
240476035976SMiao Xie 	int is_replace = 0;
24055a6ac9eaSMiao Xie 	int ret;
24065a6ac9eaSMiao Xie 
24075a6ac9eaSMiao Xie 	bio_list_init(&bio_list);
24085a6ac9eaSMiao Xie 
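	/* One stripe beyond the data means RAID5 (P only), two mean RAID6 (P + Q). */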
2409c17af965SDavid Sterba 	if (rbio->real_stripes - rbio->nr_data == 1)
2410c17af965SDavid Sterba 		has_qstripe = false;
2411c17af965SDavid Sterba 	else if (rbio->real_stripes - rbio->nr_data == 2)
2412c17af965SDavid Sterba 		has_qstripe = true;
2413c17af965SDavid Sterba 	else
24145a6ac9eaSMiao Xie 		BUG();
24155a6ac9eaSMiao Xie 
24164c664611SQu Wenruo 	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
241776035976SMiao Xie 		is_replace = 1;
24183e77605dSQu Wenruo 		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_nsectors);
241976035976SMiao Xie 	}
242076035976SMiao Xie 
24215a6ac9eaSMiao Xie 	/*
24225a6ac9eaSMiao Xie 	 * Because the higher layers (the scrubber) are unlikely to use
24235a6ac9eaSMiao Xie 	 * this area of the disk again soon, don't cache it.
24245a6ac9eaSMiao Xie 	 */
24265a6ac9eaSMiao Xie 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
24275a6ac9eaSMiao Xie 
24285a6ac9eaSMiao Xie 	if (!need_check)
24295a6ac9eaSMiao Xie 		goto writeback;
24305a6ac9eaSMiao Xie 
243146900662SQu Wenruo 	p_sector.page = alloc_page(GFP_NOFS);
243246900662SQu Wenruo 	if (!p_sector.page)
24335a6ac9eaSMiao Xie 		goto cleanup;
243446900662SQu Wenruo 	p_sector.pgoff = 0;
243546900662SQu Wenruo 	p_sector.uptodate = 1;
24365a6ac9eaSMiao Xie 
2437c17af965SDavid Sterba 	if (has_qstripe) {
2438d70cef0dSIra Weiny 		/* RAID6, allocate and map temp space for the Q stripe */
243946900662SQu Wenruo 		q_sector.page = alloc_page(GFP_NOFS);
244046900662SQu Wenruo 		if (!q_sector.page) {
244146900662SQu Wenruo 			__free_page(p_sector.page);
244246900662SQu Wenruo 			p_sector.page = NULL;
24435a6ac9eaSMiao Xie 			goto cleanup;
24445a6ac9eaSMiao Xie 		}
244546900662SQu Wenruo 		q_sector.pgoff = 0;
244646900662SQu Wenruo 		q_sector.uptodate = 1;
244746900662SQu Wenruo 		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
24485a6ac9eaSMiao Xie 	}
24495a6ac9eaSMiao Xie 
24505a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
24515a6ac9eaSMiao Xie 
2452d70cef0dSIra Weiny 	/* Map the parity stripe just once */
245346900662SQu Wenruo 	pointers[nr_data] = kmap_local_page(p_sector.page);
2454d70cef0dSIra Weiny 
24553e77605dSQu Wenruo 	for_each_set_bit(sectornr, rbio->dbitmap, rbio->stripe_nsectors) {
245646900662SQu Wenruo 		struct sector_ptr *sector;
24575a6ac9eaSMiao Xie 		void *parity;
245846900662SQu Wenruo 
24595a6ac9eaSMiao Xie 		/* First collect one sector from each data stripe */
24605a6ac9eaSMiao Xie 		for (stripe = 0; stripe < nr_data; stripe++) {
246146900662SQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
246246900662SQu Wenruo 			pointers[stripe] = kmap_local_page(sector->page) +
246346900662SQu Wenruo 					   sector->pgoff;
24645a6ac9eaSMiao Xie 		}
24655a6ac9eaSMiao Xie 
2466c17af965SDavid Sterba 		if (has_qstripe) {
2467d70cef0dSIra Weiny 			/* RAID6, call the library function to fill in our P/Q */
246846900662SQu Wenruo 			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
24695a6ac9eaSMiao Xie 						pointers);
24705a6ac9eaSMiao Xie 		} else {
24715a6ac9eaSMiao Xie 			/* raid5 */
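			/*
			 * P = D0 xor D1 xor ...: seed the parity buffer with
			 * the first data sector, then xor in the rest.
			 */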
247246900662SQu Wenruo 			memcpy(pointers[nr_data], pointers[0], sectorsize);
247346900662SQu Wenruo 			run_xor(pointers + 1, nr_data - 1, sectorsize);
24745a6ac9eaSMiao Xie 		}
24755a6ac9eaSMiao Xie 
247601327610SNicholas D Steeves 		/* Compare the on-disk parity against the one just computed; repair if they differ */
247746900662SQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
247846900662SQu Wenruo 		parity = kmap_local_page(sector->page) + sector->pgoff;
247946900662SQu Wenruo 		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
248046900662SQu Wenruo 			memcpy(parity, pointers[rbio->scrubp], sectorsize);
24815a6ac9eaSMiao Xie 		else
24825a6ac9eaSMiao Xie 			/* Parity is correct, no writeback needed */
24833e77605dSQu Wenruo 			bitmap_clear(rbio->dbitmap, sectornr, 1);
248458c1a35cSIra Weiny 		kunmap_local(parity);
24855a6ac9eaSMiao Xie 
248694a0b58dSIra Weiny 		for (stripe = nr_data - 1; stripe >= 0; stripe--)
248794a0b58dSIra Weiny 			kunmap_local(pointers[stripe]);
24885a6ac9eaSMiao Xie 	}
24895a6ac9eaSMiao Xie 
249094a0b58dSIra Weiny 	kunmap_local(pointers[nr_data]);
249146900662SQu Wenruo 	__free_page(p_sector.page);
249246900662SQu Wenruo 	p_sector.page = NULL;
249346900662SQu Wenruo 	if (q_sector.page) {
249494a0b58dSIra Weiny 		kunmap_local(pointers[rbio->real_stripes - 1]);
249546900662SQu Wenruo 		__free_page(q_sector.page);
249646900662SQu Wenruo 		q_sector.page = NULL;
2497d70cef0dSIra Weiny 	}
24985a6ac9eaSMiao Xie 
24995a6ac9eaSMiao Xie writeback:
25005a6ac9eaSMiao Xie 	/*
25015a6ac9eaSMiao Xie 	 * Time to start writing.  Make bios for the parity sectors that
25025a6ac9eaSMiao Xie 	 * need repair (the dbitmap bits still set) and, in the replace
25035a6ac9eaSMiao Xie 	 * case, for the replace target.  Ignore everything else.
25045a6ac9eaSMiao Xie 	 */
25053e77605dSQu Wenruo 	for_each_set_bit(sectornr, rbio->dbitmap, rbio->stripe_nsectors) {
25063e77605dSQu Wenruo 		struct sector_ptr *sector;
25075a6ac9eaSMiao Xie 
25083e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25093e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
25103e77605dSQu Wenruo 					 sectornr, rbio->stripe_len, REQ_OP_WRITE);
25115a6ac9eaSMiao Xie 		if (ret)
25125a6ac9eaSMiao Xie 			goto cleanup;
25135a6ac9eaSMiao Xie 	}
25145a6ac9eaSMiao Xie 
251576035976SMiao Xie 	if (!is_replace)
251676035976SMiao Xie 		goto submit_write;
251776035976SMiao Xie 
25183e77605dSQu Wenruo 	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
25193e77605dSQu Wenruo 		struct sector_ptr *sector;
252076035976SMiao Xie 
25213e77605dSQu Wenruo 		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
25223e77605dSQu Wenruo 		ret = rbio_add_io_sector(rbio, &bio_list, sector,
25234c664611SQu Wenruo 				       bioc->tgtdev_map[rbio->scrubp],
25243e77605dSQu Wenruo 				       sectornr, rbio->stripe_len, REQ_OP_WRITE);
252576035976SMiao Xie 		if (ret)
252676035976SMiao Xie 			goto cleanup;
252776035976SMiao Xie 	}
252876035976SMiao Xie 
252976035976SMiao Xie submit_write:
25305a6ac9eaSMiao Xie 	nr_data = bio_list_size(&bio_list);
25315a6ac9eaSMiao Xie 	if (!nr_data) {
25325a6ac9eaSMiao Xie 		/* Every parity is right */
253358efbc9fSOmar Sandoval 		rbio_orig_end_io(rbio, BLK_STS_OK);
25345a6ac9eaSMiao Xie 		return;
25355a6ac9eaSMiao Xie 	}
25365a6ac9eaSMiao Xie 
25375a6ac9eaSMiao Xie 	atomic_set(&rbio->stripes_pending, nr_data);
25385a6ac9eaSMiao Xie 
2539bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
2540a6111d11SZhao Lei 		bio->bi_end_io = raid_write_end_io;
25414e49ea4aSMike Christie 
25424e49ea4aSMike Christie 		submit_bio(bio);
25435a6ac9eaSMiao Xie 	}
25445a6ac9eaSMiao Xie 	return;
25455a6ac9eaSMiao Xie 
25465a6ac9eaSMiao Xie cleanup:
254758efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2548785884fcSLiu Bo 
2549785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2550785884fcSLiu Bo 		bio_put(bio);
25515a6ac9eaSMiao Xie }
25525a6ac9eaSMiao Xie 
25535a6ac9eaSMiao Xie static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
25545a6ac9eaSMiao Xie {
25555a6ac9eaSMiao Xie 	if (stripe >= 0 && stripe < rbio->nr_data)
25565a6ac9eaSMiao Xie 		return 1;
25575a6ac9eaSMiao Xie 	return 0;
25585a6ac9eaSMiao Xie }
25595a6ac9eaSMiao Xie 
25605a6ac9eaSMiao Xie /*
25615a6ac9eaSMiao Xie  * While we're doing the parity check and repair, we could have errors
25625a6ac9eaSMiao Xie  * in reading sectors off the disk.  This checks for errors, and if we're
25635a6ac9eaSMiao Xie  * not able to read a sector it will trigger parity reconstruction.  The
25645a6ac9eaSMiao Xie  * parity scrub will be finished after we've reconstructed the failed
25655a6ac9eaSMiao Xie  * stripes.
25665a6ac9eaSMiao Xie  */
25675a6ac9eaSMiao Xie static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
25685a6ac9eaSMiao Xie {
25694c664611SQu Wenruo 	if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
25705a6ac9eaSMiao Xie 		goto cleanup;
25715a6ac9eaSMiao Xie 
25725a6ac9eaSMiao Xie 	if (rbio->faila >= 0 || rbio->failb >= 0) {
25735a6ac9eaSMiao Xie 		int dfail = 0, failp = -1;
25745a6ac9eaSMiao Xie 
25755a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->faila))
25765a6ac9eaSMiao Xie 			dfail++;
25775a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->faila))
25785a6ac9eaSMiao Xie 			failp = rbio->faila;
25795a6ac9eaSMiao Xie 
25805a6ac9eaSMiao Xie 		if (is_data_stripe(rbio, rbio->failb))
25815a6ac9eaSMiao Xie 			dfail++;
25825a6ac9eaSMiao Xie 		else if (is_parity_stripe(rbio->failb))
25835a6ac9eaSMiao Xie 			failp = rbio->failb;
25845a6ac9eaSMiao Xie 
25855a6ac9eaSMiao Xie 		/*
25865a6ac9eaSMiao Xie 		 * Because we cannot use the parity being scrubbed to repair
25875a6ac9eaSMiao Xie 		 * the data, our repair capability drops by one: at most
25875a6ac9eaSMiao Xie 		 * max_errors - 1 failed data stripes can be rebuilt.
25885a6ac9eaSMiao Xie 		 * (In the case of RAID5, we cannot repair anything.)
25895a6ac9eaSMiao Xie 		 */
25904c664611SQu Wenruo 		if (dfail > rbio->bioc->max_errors - 1)
25915a6ac9eaSMiao Xie 			goto cleanup;
25925a6ac9eaSMiao Xie 
25935a6ac9eaSMiao Xie 		/*
25945a6ac9eaSMiao Xie 		 * If all the data is good and only the parity is bad,
25955a6ac9eaSMiao Xie 		 * just repair the parity.
25965a6ac9eaSMiao Xie 		 */
25975a6ac9eaSMiao Xie 		if (dfail == 0) {
25985a6ac9eaSMiao Xie 			finish_parity_scrub(rbio, 0);
25995a6ac9eaSMiao Xie 			return;
26005a6ac9eaSMiao Xie 		}
26015a6ac9eaSMiao Xie 
26025a6ac9eaSMiao Xie 		/*
26035a6ac9eaSMiao Xie 		 * Here we have one corrupted data stripe and one corrupted
26045a6ac9eaSMiao Xie 		 * parity on RAID6.  If the corrupted parity is the one being
260501327610SNicholas D Steeves 		 * scrubbed, we can luckily use the other parity to repair the
26065a6ac9eaSMiao Xie 		 * data; otherwise, we cannot repair the data stripe.
26075a6ac9eaSMiao Xie 		 */
26085a6ac9eaSMiao Xie 		if (failp != rbio->scrubp)
26095a6ac9eaSMiao Xie 			goto cleanup;
26105a6ac9eaSMiao Xie 
26115a6ac9eaSMiao Xie 		__raid_recover_end_io(rbio);
26125a6ac9eaSMiao Xie 	} else {
26135a6ac9eaSMiao Xie 		finish_parity_scrub(rbio, 1);
26145a6ac9eaSMiao Xie 	}
26155a6ac9eaSMiao Xie 	return;
26165a6ac9eaSMiao Xie 
26175a6ac9eaSMiao Xie cleanup:
261858efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
26195a6ac9eaSMiao Xie }
26205a6ac9eaSMiao Xie 
26215a6ac9eaSMiao Xie /*
26225a6ac9eaSMiao Xie  * End io for the read phase of the parity scrub.  All the bios here are
26235a6ac9eaSMiao Xie  * physical stripe bios we've read from the disk so we can recalculate
26245a6ac9eaSMiao Xie  * the parity of the stripe.
26255a6ac9eaSMiao Xie  *
26265a6ac9eaSMiao Xie  * This will usually kick off finish_parity_scrub once all the bios are
26275a6ac9eaSMiao Xie  * read in, but it may trigger parity reconstruction if we had any errors
26275a6ac9eaSMiao Xie  * along the way.
26285a6ac9eaSMiao Xie  */
26294246a0b6SChristoph Hellwig static void raid56_parity_scrub_end_io(struct bio *bio)
26305a6ac9eaSMiao Xie {
26315a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio = bio->bi_private;
26325a6ac9eaSMiao Xie 
26334e4cbee9SChristoph Hellwig 	if (bio->bi_status)
26345a6ac9eaSMiao Xie 		fail_bio_stripe(rbio, bio);
26355a6ac9eaSMiao Xie 	else
26365a6ac9eaSMiao Xie 		set_bio_pages_uptodate(bio);
26375a6ac9eaSMiao Xie 
26385a6ac9eaSMiao Xie 	bio_put(bio);
26395a6ac9eaSMiao Xie 
26405a6ac9eaSMiao Xie 	if (!atomic_dec_and_test(&rbio->stripes_pending))
26415a6ac9eaSMiao Xie 		return;
26425a6ac9eaSMiao Xie 
26435a6ac9eaSMiao Xie 	/*
26445a6ac9eaSMiao Xie 	 * This will normally call finish_parity_scrub to start our
26455a6ac9eaSMiao Xie 	 * write, but if there are any failed stripes we'll reconstruct
26465a6ac9eaSMiao Xie 	 * from parity first.
26475a6ac9eaSMiao Xie 	 */
26485a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
26495a6ac9eaSMiao Xie }
26505a6ac9eaSMiao Xie 
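/*
 * Read in every sector the scrub needs (the dbitmap sectors of each stripe
 * that we don't already have), then validate the stripe and repair the
 * parity where it is wrong.
 */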
26515a6ac9eaSMiao Xie static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
26525a6ac9eaSMiao Xie {
26535a6ac9eaSMiao Xie 	int bios_to_read = 0;
26545a6ac9eaSMiao Xie 	struct bio_list bio_list;
26555a6ac9eaSMiao Xie 	int ret;
26563e77605dSQu Wenruo 	int sectornr;
26575a6ac9eaSMiao Xie 	int stripe;
26585a6ac9eaSMiao Xie 	struct bio *bio;
26595a6ac9eaSMiao Xie 
2660785884fcSLiu Bo 	bio_list_init(&bio_list);
2661785884fcSLiu Bo 
26625a6ac9eaSMiao Xie 	ret = alloc_rbio_essential_pages(rbio);
26635a6ac9eaSMiao Xie 	if (ret)
26645a6ac9eaSMiao Xie 		goto cleanup;
26655a6ac9eaSMiao Xie 
26665a6ac9eaSMiao Xie 	atomic_set(&rbio->error, 0);
26675a6ac9eaSMiao Xie 	/*
26685a6ac9eaSMiao Xie 	 * build a list of bios to read all the missing parts of this
26695a6ac9eaSMiao Xie 	 * stripe
26705a6ac9eaSMiao Xie 	 */
26712c8cdd6eSMiao Xie 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
26723e77605dSQu Wenruo 		for_each_set_bit(sectornr, rbio->dbitmap, rbio->stripe_nsectors) {
26733e77605dSQu Wenruo 			struct sector_ptr *sector;
26745a6ac9eaSMiao Xie 			/*
26753e77605dSQu Wenruo 			 * We want to find all the sectors missing from the
26763e77605dSQu Wenruo 			 * rbio and read them from the disk.  If sector_in_rbio()
26773e77605dSQu Wenruo 			 * finds a sector in the bio list we don't need to read
26783e77605dSQu Wenruo 			 * it off the stripe.
26795a6ac9eaSMiao Xie 			 */
26803e77605dSQu Wenruo 			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
26813e77605dSQu Wenruo 			if (sector)
26825a6ac9eaSMiao Xie 				continue;
26835a6ac9eaSMiao Xie 
26843e77605dSQu Wenruo 			sector = rbio_stripe_sector(rbio, stripe, sectornr);
26855a6ac9eaSMiao Xie 			/*
26863e77605dSQu Wenruo 			 * The bio cache may have handed us an uptodate sector.
26873e77605dSQu Wenruo 			 * If so, be happy and use it.
26885a6ac9eaSMiao Xie 			 */
26893e77605dSQu Wenruo 			if (sector->uptodate)
26905a6ac9eaSMiao Xie 				continue;
26915a6ac9eaSMiao Xie 
26923e77605dSQu Wenruo 			ret = rbio_add_io_sector(rbio, &bio_list, sector,
26933e77605dSQu Wenruo 						 stripe, sectornr, rbio->stripe_len,
26943e77605dSQu Wenruo 						 REQ_OP_READ);
26955a6ac9eaSMiao Xie 			if (ret)
26965a6ac9eaSMiao Xie 				goto cleanup;
26975a6ac9eaSMiao Xie 		}
26985a6ac9eaSMiao Xie 	}
26995a6ac9eaSMiao Xie 
27005a6ac9eaSMiao Xie 	bios_to_read = bio_list_size(&bio_list);
27015a6ac9eaSMiao Xie 	if (!bios_to_read) {
27025a6ac9eaSMiao Xie 		/*
27035a6ac9eaSMiao Xie 		 * This can happen if others have merged with us; it
27045a6ac9eaSMiao Xie 		 * means there is nothing left to read.  But if there
27055a6ac9eaSMiao Xie 		 * are missing devices it may not be safe to do the
27065a6ac9eaSMiao Xie 		 * full stripe write yet.
27075a6ac9eaSMiao Xie 		 */
27085a6ac9eaSMiao Xie 		goto finish;
27095a6ac9eaSMiao Xie 	}
27105a6ac9eaSMiao Xie 
27115a6ac9eaSMiao Xie 	/*
27124c664611SQu Wenruo 	 * The bioc may be freed once we submit the last bio. Make sure not to
27134c664611SQu Wenruo 	 * touch it after that.
27145a6ac9eaSMiao Xie 	 */
27155a6ac9eaSMiao Xie 	atomic_set(&rbio->stripes_pending, bios_to_read);
2716bf28a605SNikolay Borisov 	while ((bio = bio_list_pop(&bio_list))) {
27175a6ac9eaSMiao Xie 		bio->bi_end_io = raid56_parity_scrub_end_io;
27185a6ac9eaSMiao Xie 
27196a258d72SQu Wenruo 		btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
27205a6ac9eaSMiao Xie 
27214e49ea4aSMike Christie 		submit_bio(bio);
27225a6ac9eaSMiao Xie 	}
27235a6ac9eaSMiao Xie 	/* the actual write will happen once the reads are done */
27245a6ac9eaSMiao Xie 	return;
27255a6ac9eaSMiao Xie 
27265a6ac9eaSMiao Xie cleanup:
272758efbc9fSOmar Sandoval 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2728785884fcSLiu Bo 
2729785884fcSLiu Bo 	while ((bio = bio_list_pop(&bio_list)))
2730785884fcSLiu Bo 		bio_put(bio);
2731785884fcSLiu Bo 
27325a6ac9eaSMiao Xie 	return;
27335a6ac9eaSMiao Xie 
27345a6ac9eaSMiao Xie finish:
27355a6ac9eaSMiao Xie 	validate_rbio_for_parity_scrub(rbio);
27365a6ac9eaSMiao Xie }
27375a6ac9eaSMiao Xie 
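/* Work queue handler: run the parity scrub for the rbio we were queued with. */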
27385a6ac9eaSMiao Xie static void scrub_parity_work(struct btrfs_work *work)
27395a6ac9eaSMiao Xie {
27405a6ac9eaSMiao Xie 	struct btrfs_raid_bio *rbio;
27415a6ac9eaSMiao Xie 
27425a6ac9eaSMiao Xie 	rbio = container_of(work, struct btrfs_raid_bio, work);
27435a6ac9eaSMiao Xie 	raid56_parity_scrub_stripe(rbio);
27445a6ac9eaSMiao Xie }
27455a6ac9eaSMiao Xie 
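/*
 * lock_stripe_add() returns 0 if we now own the stripe lock, in which case
 * we must start the scrub ourselves; otherwise the rbio has been queued
 * behind (or merged with) the current lock holder.
 */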
27465a6ac9eaSMiao Xie void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
27475a6ac9eaSMiao Xie {
27485a6ac9eaSMiao Xie 	if (!lock_stripe_add(rbio))
2749a81b747dSDavid Sterba 		start_async_work(rbio, scrub_parity_work);
27505a6ac9eaSMiao Xie }
2751b4ee1782SOmar Sandoval 
2752b4ee1782SOmar Sandoval /* The following code is used for dev replace of a missing RAID 5/6 device. */
2753b4ee1782SOmar Sandoval 
2754b4ee1782SOmar Sandoval struct btrfs_raid_bio *
27556a258d72SQu Wenruo raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc,
27566a258d72SQu Wenruo 			  u64 length)
2757b4ee1782SOmar Sandoval {
27586a258d72SQu Wenruo 	struct btrfs_fs_info *fs_info = bioc->fs_info;
2759b4ee1782SOmar Sandoval 	struct btrfs_raid_bio *rbio;
2760b4ee1782SOmar Sandoval 
27614c664611SQu Wenruo 	rbio = alloc_rbio(fs_info, bioc, length);
2762b4ee1782SOmar Sandoval 	if (IS_ERR(rbio))
2763b4ee1782SOmar Sandoval 		return NULL;
2764b4ee1782SOmar Sandoval 
2765b4ee1782SOmar Sandoval 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2766b4ee1782SOmar Sandoval 	bio_list_add(&rbio->bio_list, bio);
2767b4ee1782SOmar Sandoval 	/*
2768b4ee1782SOmar Sandoval 	 * This is a special bio which is used to hold the completion handler
2769b4ee1782SOmar Sandoval 	 * and make the rebuild-missing rbio similar to the other types.
2770b4ee1782SOmar Sandoval 	 */
2771b4ee1782SOmar Sandoval 	ASSERT(!bio->bi_iter.bi_size);
2772b4ee1782SOmar Sandoval 
2773b4ee1782SOmar Sandoval 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2774b4ee1782SOmar Sandoval 	if (rbio->faila == -1) {
2775b4ee1782SOmar Sandoval 		BUG();
2776b4ee1782SOmar Sandoval 		kfree(rbio);
2777b4ee1782SOmar Sandoval 		return NULL;
2778b4ee1782SOmar Sandoval 	}
2779b4ee1782SOmar Sandoval 
2780ae6529c3SQu Wenruo 	/*
27814c664611SQu Wenruo 	 * When we get the bioc, we have already increased bio_counter;
2782ae6529c3SQu Wenruo 	 * record it so we can drop it at rbio_orig_end_io().
2783ae6529c3SQu Wenruo 	 */
2784ae6529c3SQu Wenruo 	rbio->generic_bio_cnt = 1;
2785ae6529c3SQu Wenruo 
2786b4ee1782SOmar Sandoval 	return rbio;
2787b4ee1782SOmar Sandoval }
2788b4ee1782SOmar Sandoval 
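/*
 * As with the scrub submit above: start the rebuild ourselves only if
 * lock_stripe_add() says we obtained the stripe lock right away.
 */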
2789b4ee1782SOmar Sandoval void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2790b4ee1782SOmar Sandoval {
2791b4ee1782SOmar Sandoval 	if (!lock_stripe_add(rbio))
2792e66d8d5aSDavid Sterba 		start_async_work(rbio, read_rebuild_work);
2793b4ee1782SOmar Sandoval }
2794