xref: /linux/fs/btrfs/raid56.c (revision ebcc326316f3d798e9715e5ca1451c3e457b95dd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2012 Fusion-io  All rights reserved.
4  * Copyright (C) 2012 Intel Corp. All rights reserved.
5  */
6 
7 #include <linux/sched.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/raid/pq.h>
12 #include <linux/hash.h>
13 #include <linux/list_sort.h>
14 #include <linux/raid/xor.h>
15 #include <linux/mm.h>
16 #include "ctree.h"
17 #include "disk-io.h"
18 #include "volumes.h"
19 #include "raid56.h"
20 #include "async-thread.h"
21 
22 /* set when additional merges to this rbio are not allowed */
23 #define RBIO_RMW_LOCKED_BIT	1
24 
25 /*
26  * set when this rbio is sitting in the hash, but it is just a cache
27  * of past RMW
28  */
29 #define RBIO_CACHE_BIT		2
30 
31 /*
32  * set when it is safe to trust the stripe_pages for caching
33  */
34 #define RBIO_CACHE_READY_BIT	3
35 
36 #define RBIO_CACHE_SIZE 1024
37 
38 enum btrfs_rbio_ops {
39 	BTRFS_RBIO_WRITE,
40 	BTRFS_RBIO_READ_REBUILD,
41 	BTRFS_RBIO_PARITY_SCRUB,
42 	BTRFS_RBIO_REBUILD_MISSING,
43 };
44 
45 struct btrfs_raid_bio {
46 	struct btrfs_fs_info *fs_info;
47 	struct btrfs_bio *bbio;
48 
49 	/* while we're doing rmw on a stripe
50 	 * we put it into a hash table so we can
51 	 * lock the stripe and merge more rbios
52 	 * into it.
53 	 */
54 	struct list_head hash_list;
55 
56 	/*
57 	 * LRU list for the stripe cache
58 	 */
59 	struct list_head stripe_cache;
60 
61 	/*
62 	 * for scheduling work in the helper threads
63 	 */
64 	struct btrfs_work work;
65 
66 	/*
67 	 * bio list and bio_list_lock are used
68 	 * to add more bios into the stripe
69 	 * in hopes of avoiding the full rmw
70 	 */
71 	struct bio_list bio_list;
72 	spinlock_t bio_list_lock;
73 
74 	/* also protected by the bio_list_lock, the
75 	 * plug list is used by the plugging code
76 	 * to collect partial bios while plugged.  The
77 	 * stripe locking code also uses it to hand off
78 	 * the stripe lock to the next pending IO
79 	 */
80 	struct list_head plug_list;
81 
82 	/*
83 	 * flags that tell us if it is safe to
84 	 * merge with this bio
85 	 */
86 	unsigned long flags;
87 
88 	/* size of each individual stripe on disk */
89 	int stripe_len;
90 
91 	/* number of data stripes (no p/q) */
92 	int nr_data;
93 
94 	int real_stripes;
95 
96 	int stripe_npages;
97 	/*
98 	 * set if we're doing a parity rebuild
99 	 * for a read from higher up, which is handled
100 	 * differently from a parity rebuild as part of
101 	 * rmw
102 	 */
103 	enum btrfs_rbio_ops operation;
104 
105 	/* first bad stripe */
106 	int faila;
107 
108 	/* second bad stripe (for raid6 use) */
109 	int failb;
110 
111 	int scrubp;
112 	/*
113 	 * number of pages needed to represent the full
114 	 * stripe
115 	 */
116 	int nr_pages;
117 
118 	/*
119 	 * size of all the bios in the bio_list.  This
120 	 * helps us decide if the rbio maps to a full
121 	 * stripe or not
122 	 */
123 	int bio_list_bytes;
124 
125 	int generic_bio_cnt;
126 
127 	refcount_t refs;
128 
129 	atomic_t stripes_pending;
130 
131 	atomic_t error;
132 	/*
133 	 * these are two arrays of pointers.  We allocate the
134 	 * rbio big enough to hold them both and setup their
135 	 * locations when the rbio is allocated
136 	 */
137 
138 	/* pointers to pages that we allocated for
139 	 * reading/writing stripes directly from the disk (including P/Q)
140 	 */
141 	struct page **stripe_pages;
142 
143 	/*
144 	 * pointers to the pages in the bio_list.  Stored
145 	 * here for faster lookup
146 	 */
147 	struct page **bio_pages;
148 
149 	/*
150 	 * bitmap to record which horizontal stripe has data
151 	 */
152 	unsigned long *dbitmap;
153 
154 	/* allocated with real_stripes-many pointers for finish_*() calls */
155 	void **finish_pointers;
156 
157 	/* allocated with stripe_npages-many bits for finish_*() calls */
158 	unsigned long *finish_pbitmap;
159 };
160 
161 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
162 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
163 static void rmw_work(struct btrfs_work *work);
164 static void read_rebuild_work(struct btrfs_work *work);
165 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
166 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
167 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
168 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
169 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
170 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
171 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
172 
173 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
174 					 int need_check);
175 static void async_scrub_parity(struct btrfs_raid_bio *rbio);
176 
177 /*
178  * the stripe hash table is used for locking, and to collect
179  * bios in hopes of making a full stripe
180  */
181 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
182 {
183 	struct btrfs_stripe_hash_table *table;
184 	struct btrfs_stripe_hash_table *x;
185 	struct btrfs_stripe_hash *cur;
186 	struct btrfs_stripe_hash *h;
187 	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
188 	int i;
189 	int table_size;
190 
191 	if (info->stripe_hash_table)
192 		return 0;
193 
194 	/*
195 	 * The table is large, starting with order 4 and can go as high as
196 	 * order 7 in case lock debugging is turned on.
197 	 *
198 	 * Try harder to allocate and fall back to vmalloc to lower the chance
199 	 * of a failing mount.
200 	 */
201 	table_size = sizeof(*table) + sizeof(*h) * num_entries;
202 	table = kvzalloc(table_size, GFP_KERNEL);
203 	if (!table)
204 		return -ENOMEM;
205 
206 	spin_lock_init(&table->cache_lock);
207 	INIT_LIST_HEAD(&table->stripe_cache);
208 
209 	h = table->table;
210 
211 	for (i = 0; i < num_entries; i++) {
212 		cur = h + i;
213 		INIT_LIST_HEAD(&cur->hash_list);
214 		spin_lock_init(&cur->lock);
215 	}
216 
217 	x = cmpxchg(&info->stripe_hash_table, NULL, table);
218 	if (x)
219 		kvfree(x);
220 	return 0;
221 }
222 
223 /*
224  * caching an rbio means copying anything from the
225  * bio_pages array into the stripe_pages array.  We
226  * use the page uptodate bit in the stripe cache array
227  * to indicate if it has valid data
228  *
229  * once the caching is done, we set the cache ready
230  * bit.
231  */
232 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
233 {
234 	int i;
235 	char *s;
236 	char *d;
237 	int ret;
238 
239 	ret = alloc_rbio_pages(rbio);
240 	if (ret)
241 		return;
242 
243 	for (i = 0; i < rbio->nr_pages; i++) {
244 		if (!rbio->bio_pages[i])
245 			continue;
246 
247 		s = kmap(rbio->bio_pages[i]);
248 		d = kmap(rbio->stripe_pages[i]);
249 
250 		copy_page(d, s);
251 
252 		kunmap(rbio->bio_pages[i]);
253 		kunmap(rbio->stripe_pages[i]);
254 		SetPageUptodate(rbio->stripe_pages[i]);
255 	}
256 	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
257 }
258 
259 /*
260  * we hash on the first logical address of the stripe
261  */
262 static int rbio_bucket(struct btrfs_raid_bio *rbio)
263 {
264 	u64 num = rbio->bbio->raid_map[0];
265 
266 	/*
267 	 * we shift down quite a bit.  We're using byte
268 	 * addressing, and most of the lower bits are zeros.
269 	 * This tends to upset hash_64, and it consistently
270 	 * returns just one or two different values.
271 	 *
272 	 * shifting off the lower bits fixes things.
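	 *
	 * For example, a full stripe typically starts on a 64KiB boundary, so
	 * the low 16 bits of raid_map[0] are almost always zero; shifting them
	 * off gives hash_64() meaningfully different inputs.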
273 	 */
274 	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
275 }
276 
277 /*
278  * stealing an rbio means taking all the uptodate pages from the stripe
279  * array in the source rbio and putting them into the destination rbio
280  */
281 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
282 {
283 	int i;
284 	struct page *s;
285 	struct page *d;
286 
287 	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
288 		return;
289 
290 	for (i = 0; i < dest->nr_pages; i++) {
291 		s = src->stripe_pages[i];
292 		if (!s || !PageUptodate(s)) {
293 			continue;
294 		}
295 
296 		d = dest->stripe_pages[i];
297 		if (d)
298 			__free_page(d);
299 
300 		dest->stripe_pages[i] = s;
301 		src->stripe_pages[i] = NULL;
302 	}
303 }
304 
305 /*
306  * merging means we take the bio_list from the victim and
307  * splice it into the destination.  The victim should
308  * be discarded afterwards.
309  *
310  * must be called with dest->bio_list_lock held
311  */
312 static void merge_rbio(struct btrfs_raid_bio *dest,
313 		       struct btrfs_raid_bio *victim)
314 {
315 	bio_list_merge(&dest->bio_list, &victim->bio_list);
316 	dest->bio_list_bytes += victim->bio_list_bytes;
317 	dest->generic_bio_cnt += victim->generic_bio_cnt;
318 	bio_list_init(&victim->bio_list);
319 }
320 
321 /*
322  * used to prune items that are in the cache.  The caller
323  * must hold the hash table lock.
324  */
325 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
326 {
327 	int bucket = rbio_bucket(rbio);
328 	struct btrfs_stripe_hash_table *table;
329 	struct btrfs_stripe_hash *h;
330 	int freeit = 0;
331 
332 	/*
333 	 * check the bit again under the hash table lock.
334 	 */
335 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
336 		return;
337 
338 	table = rbio->fs_info->stripe_hash_table;
339 	h = table->table + bucket;
340 
341 	/* hold the lock for the bucket because we may be
342 	 * removing it from the hash table
343 	 */
344 	spin_lock(&h->lock);
345 
346 	/*
347 	 * hold the lock for the bio list because we need
348 	 * to make sure the bio list is empty
349 	 */
350 	spin_lock(&rbio->bio_list_lock);
351 
352 	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
353 		list_del_init(&rbio->stripe_cache);
354 		table->cache_size -= 1;
355 		freeit = 1;
356 
357 		/* if the bio list isn't empty, this rbio is
358 		 * still involved in an IO.  We take it out
359 		 * of the cache list, and drop the ref that
360 		 * was held for the list.
361 		 *
362 		 * If the bio_list was empty, we also remove
363 		 * the rbio from the hash_table, and drop
364 		 * the corresponding ref
365 		 */
366 		if (bio_list_empty(&rbio->bio_list)) {
367 			if (!list_empty(&rbio->hash_list)) {
368 				list_del_init(&rbio->hash_list);
369 				refcount_dec(&rbio->refs);
370 				BUG_ON(!list_empty(&rbio->plug_list));
371 			}
372 		}
373 	}
374 
375 	spin_unlock(&rbio->bio_list_lock);
376 	spin_unlock(&h->lock);
377 
378 	if (freeit)
379 		__free_raid_bio(rbio);
380 }
381 
382 /*
383  * prune a given rbio from the cache
384  */
385 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
386 {
387 	struct btrfs_stripe_hash_table *table;
388 	unsigned long flags;
389 
390 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
391 		return;
392 
393 	table = rbio->fs_info->stripe_hash_table;
394 
395 	spin_lock_irqsave(&table->cache_lock, flags);
396 	__remove_rbio_from_cache(rbio);
397 	spin_unlock_irqrestore(&table->cache_lock, flags);
398 }
399 
400 /*
401  * remove everything in the cache
402  */
403 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
404 {
405 	struct btrfs_stripe_hash_table *table;
406 	unsigned long flags;
407 	struct btrfs_raid_bio *rbio;
408 
409 	table = info->stripe_hash_table;
410 
411 	spin_lock_irqsave(&table->cache_lock, flags);
412 	while (!list_empty(&table->stripe_cache)) {
413 		rbio = list_entry(table->stripe_cache.next,
414 				  struct btrfs_raid_bio,
415 				  stripe_cache);
416 		__remove_rbio_from_cache(rbio);
417 	}
418 	spin_unlock_irqrestore(&table->cache_lock, flags);
419 }
420 
421 /*
422  * remove all cached entries and free the hash table
423  * used by unmount
424  */
425 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
426 {
427 	if (!info->stripe_hash_table)
428 		return;
429 	btrfs_clear_rbio_cache(info);
430 	kvfree(info->stripe_hash_table);
431 	info->stripe_hash_table = NULL;
432 }
433 
434 /*
435  * insert an rbio into the stripe cache.  It
436  * must have already been prepared by calling
437  * cache_rbio_pages
438  *
439  * If this rbio was already cached, it gets
440  * moved to the front of the lru.
441  *
442  * If the size of the rbio cache is too big, we
443  * prune an item.
444  */
445 static void cache_rbio(struct btrfs_raid_bio *rbio)
446 {
447 	struct btrfs_stripe_hash_table *table;
448 	unsigned long flags;
449 
450 	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
451 		return;
452 
453 	table = rbio->fs_info->stripe_hash_table;
454 
455 	spin_lock_irqsave(&table->cache_lock, flags);
456 	spin_lock(&rbio->bio_list_lock);
457 
458 	/* bump our ref if we were not in the list before */
459 	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
460 		refcount_inc(&rbio->refs);
461 
462 	if (!list_empty(&rbio->stripe_cache)) {
463 		list_move(&rbio->stripe_cache, &table->stripe_cache);
464 	} else {
465 		list_add(&rbio->stripe_cache, &table->stripe_cache);
466 		table->cache_size += 1;
467 	}
468 
469 	spin_unlock(&rbio->bio_list_lock);
470 
471 	if (table->cache_size > RBIO_CACHE_SIZE) {
472 		struct btrfs_raid_bio *found;
473 
474 		found = list_entry(table->stripe_cache.prev,
475 				  struct btrfs_raid_bio,
476 				  stripe_cache);
477 
478 		if (found != rbio)
479 			__remove_rbio_from_cache(found);
480 	}
481 
482 	spin_unlock_irqrestore(&table->cache_lock, flags);
483 }
484 
485 /*
486  * helper function to run the xor_blocks api.  It is only
487  * able to do MAX_XOR_BLOCKS at a time, so we need to
488  * loop through.
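 *
 * pages[0] .. pages[src_cnt - 1] are the sources and pages[src_cnt] is the
 * destination they are all xored into.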
489  */
490 static void run_xor(void **pages, int src_cnt, ssize_t len)
491 {
492 	int src_off = 0;
493 	int xor_src_cnt = 0;
494 	void *dest = pages[src_cnt];
495 
496 	while (src_cnt > 0) {
497 		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
498 		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
499 
500 		src_cnt -= xor_src_cnt;
501 		src_off += xor_src_cnt;
502 	}
503 }
504 
505 /*
506  * returns true if the bio list inside this rbio
507  * covers an entire stripe (no rmw required).
508  * Must be called with the bio list lock held, or
509  * at a time when you know it is impossible to add
510  * new bios into the list
511  */
512 static int __rbio_is_full(struct btrfs_raid_bio *rbio)
513 {
514 	unsigned long size = rbio->bio_list_bytes;
515 	int ret = 1;
516 
517 	if (size != rbio->nr_data * rbio->stripe_len)
518 		ret = 0;
519 
520 	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
521 	return ret;
522 }
523 
524 static int rbio_is_full(struct btrfs_raid_bio *rbio)
525 {
526 	unsigned long flags;
527 	int ret;
528 
529 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
530 	ret = __rbio_is_full(rbio);
531 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
532 	return ret;
533 }
534 
535 /*
536  * returns 1 if it is safe to merge two rbios together.
537  * The merging is safe if the two rbios correspond to
538  * the same stripe and if they are both going in the same
539  * direction (read vs write), and if neither one is
540  * locked for final IO
541  *
542  * The caller is responsible for locking such that
543  * rmw_locked is safe to test
544  */
545 static int rbio_can_merge(struct btrfs_raid_bio *last,
546 			  struct btrfs_raid_bio *cur)
547 {
548 	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
549 	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
550 		return 0;
551 
552 	/*
553 	 * we can't merge with cached rbios, since the
554 	 * idea is that when we merge the destination
555 	 * rbio is going to run our IO for us.  We can
556 	 * steal from cached rbios though, other functions
557 	 * handle that.
558 	 */
559 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
560 	    test_bit(RBIO_CACHE_BIT, &cur->flags))
561 		return 0;
562 
563 	if (last->bbio->raid_map[0] !=
564 	    cur->bbio->raid_map[0])
565 		return 0;
566 
567 	/* we can't merge with different operations */
568 	if (last->operation != cur->operation)
569 		return 0;
570 	/*
571 	 * A parity scrub has to read the full stripe from the drive,
572 	 * check and repair the parity and write the new results back.
573 	 *
574 	 * We're not allowed to add any new bios to the
575 	 * bio list here, anyone else that wants to
576 	 * change this stripe needs to do their own rmw.
577 	 */
578 	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
579 		return 0;
580 
581 	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
582 		return 0;
583 
584 	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
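		/*
		 * compare the failed stripes of the two rbios with the indices
		 * normalized into (low, high) order, so the same pair of
		 * failures recorded in either order still matches
		 */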
585 		int fa = last->faila;
586 		int fb = last->failb;
587 		int cur_fa = cur->faila;
588 		int cur_fb = cur->failb;
589 
590 		if (last->faila >= last->failb) {
591 			fa = last->failb;
592 			fb = last->faila;
593 		}
594 
595 		if (cur->faila >= cur->failb) {
596 			cur_fa = cur->failb;
597 			cur_fb = cur->faila;
598 		}
599 
600 		if (fa != cur_fa || fb != cur_fb)
601 			return 0;
602 	}
603 	return 1;
604 }
605 
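/*
 * stripe_pages is laid out stripe-major: all of stripe 0's pages come first,
 * then stripe 1's, and so on, with the P stripe and (for raid6) the Q stripe
 * at the end.  bio_pages uses the same indexing for the data stripes.
 */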
606 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
607 				  int index)
608 {
609 	return stripe * rbio->stripe_npages + index;
610 }
611 
612 /*
613  * these are just the pages from the rbio array, not from anything
614  * the FS sent down to us
615  */
616 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
617 				     int index)
618 {
619 	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
620 }
621 
622 /*
623  * helper to index into the pstripe
624  */
625 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
626 {
627 	return rbio_stripe_page(rbio, rbio->nr_data, index);
628 }
629 
630 /*
631  * helper to index into the qstripe, returns null
632  * if there is no qstripe
633  */
634 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
635 {
636 	if (rbio->nr_data + 1 == rbio->real_stripes)
637 		return NULL;
638 	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
639 }
640 
641 /*
642  * The first stripe in the table for a logical address
643  * has the lock.  rbios are added in one of three ways:
644  *
645  * 1) Nobody has the stripe locked yet.  The rbio is given
646  * the lock and 0 is returned.  The caller must start the IO
647  * themselves.
648  *
649  * 2) Someone has the stripe locked, but we're able to merge
650  * with the lock owner.  The rbio is freed and the IO will
651  * start automatically along with the existing rbio.  1 is returned.
652  *
653  * 3) Someone has the stripe locked, but we're not able to merge.
654  * The rbio is added to the lock owner's plug list, or merged into
655  * an rbio already on the plug list.  When the lock owner unlocks,
656  * the next rbio on the list is run and the IO is started automatically.
657  * 1 is returned
658  *
659  * If we return 0, the caller still owns the rbio and must continue with
660  * IO submission.  If we return 1, the caller must assume the rbio has
661  * already been freed.
662  */
663 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
664 {
665 	int bucket = rbio_bucket(rbio);
666 	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
667 	struct btrfs_raid_bio *cur;
668 	struct btrfs_raid_bio *pending;
669 	unsigned long flags;
670 	struct btrfs_raid_bio *freeit = NULL;
671 	struct btrfs_raid_bio *cache_drop = NULL;
672 	int ret = 0;
673 
674 	spin_lock_irqsave(&h->lock, flags);
675 	list_for_each_entry(cur, &h->hash_list, hash_list) {
676 		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
677 			spin_lock(&cur->bio_list_lock);
678 
679 			/* can we steal this cached rbio's pages? */
680 			if (bio_list_empty(&cur->bio_list) &&
681 			    list_empty(&cur->plug_list) &&
682 			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
683 			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
684 				list_del_init(&cur->hash_list);
685 				refcount_dec(&cur->refs);
686 
687 				steal_rbio(cur, rbio);
688 				cache_drop = cur;
689 				spin_unlock(&cur->bio_list_lock);
690 
691 				goto lockit;
692 			}
693 
694 			/* can we merge into the lock owner? */
695 			if (rbio_can_merge(cur, rbio)) {
696 				merge_rbio(cur, rbio);
697 				spin_unlock(&cur->bio_list_lock);
698 				freeit = rbio;
699 				ret = 1;
700 				goto out;
701 			}
702 
703 
704 			/*
705 			 * we couldn't merge with the running
706 			 * rbio, see if we can merge with the
707 			 * pending ones.  We don't have to
708 			 * check for rmw_locked because there
709 			 * is no way they are inside finish_rmw
710 			 * right now
711 			 */
712 			list_for_each_entry(pending, &cur->plug_list,
713 					    plug_list) {
714 				if (rbio_can_merge(pending, rbio)) {
715 					merge_rbio(pending, rbio);
716 					spin_unlock(&cur->bio_list_lock);
717 					freeit = rbio;
718 					ret = 1;
719 					goto out;
720 				}
721 			}
722 
723 			/* no merging, put us on the tail of the plug list,
724 			 * our rbio will be started when the currently
725 			 * running rbio unlocks
726 			 */
727 			list_add_tail(&rbio->plug_list, &cur->plug_list);
728 			spin_unlock(&cur->bio_list_lock);
729 			ret = 1;
730 			goto out;
731 		}
732 	}
733 lockit:
734 	refcount_inc(&rbio->refs);
735 	list_add(&rbio->hash_list, &h->hash_list);
736 out:
737 	spin_unlock_irqrestore(&h->lock, flags);
738 	if (cache_drop)
739 		remove_rbio_from_cache(cache_drop);
740 	if (freeit)
741 		__free_raid_bio(freeit);
742 	return ret;
743 }
744 
745 /*
746  * called as rmw or parity rebuild is completed.  If the plug list has more
747  * rbios waiting for this stripe, the next one on the list will be started
748  */
749 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
750 {
751 	int bucket;
752 	struct btrfs_stripe_hash *h;
753 	unsigned long flags;
754 	int keep_cache = 0;
755 
756 	bucket = rbio_bucket(rbio);
757 	h = rbio->fs_info->stripe_hash_table->table + bucket;
758 
759 	if (list_empty(&rbio->plug_list))
760 		cache_rbio(rbio);
761 
762 	spin_lock_irqsave(&h->lock, flags);
763 	spin_lock(&rbio->bio_list_lock);
764 
765 	if (!list_empty(&rbio->hash_list)) {
766 		/*
767 		 * if we're still cached and there is no other IO
768 		 * to perform, just leave this rbio here for others
769 		 * to steal from later
770 		 */
771 		if (list_empty(&rbio->plug_list) &&
772 		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
773 			keep_cache = 1;
774 			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
775 			BUG_ON(!bio_list_empty(&rbio->bio_list));
776 			goto done;
777 		}
778 
779 		list_del_init(&rbio->hash_list);
780 		refcount_dec(&rbio->refs);
781 
782 		/*
783 		 * we use the plug list to hold all the rbios
784 		 * waiting for the chance to lock this stripe.
785 		 * hand the lock over to one of them.
786 		 */
787 		if (!list_empty(&rbio->plug_list)) {
788 			struct btrfs_raid_bio *next;
789 			struct list_head *head = rbio->plug_list.next;
790 
791 			next = list_entry(head, struct btrfs_raid_bio,
792 					  plug_list);
793 
794 			list_del_init(&rbio->plug_list);
795 
796 			list_add(&next->hash_list, &h->hash_list);
797 			refcount_inc(&next->refs);
798 			spin_unlock(&rbio->bio_list_lock);
799 			spin_unlock_irqrestore(&h->lock, flags);
800 
801 			if (next->operation == BTRFS_RBIO_READ_REBUILD)
802 				async_read_rebuild(next);
803 			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
804 				steal_rbio(rbio, next);
805 				async_read_rebuild(next);
806 			} else if (next->operation == BTRFS_RBIO_WRITE) {
807 				steal_rbio(rbio, next);
808 				async_rmw_stripe(next);
809 			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
810 				steal_rbio(rbio, next);
811 				async_scrub_parity(next);
812 			}
813 
814 			goto done_nolock;
815 		}
816 	}
817 done:
818 	spin_unlock(&rbio->bio_list_lock);
819 	spin_unlock_irqrestore(&h->lock, flags);
820 
821 done_nolock:
822 	if (!keep_cache)
823 		remove_rbio_from_cache(rbio);
824 }
825 
826 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
827 {
828 	int i;
829 
830 	if (!refcount_dec_and_test(&rbio->refs))
831 		return;
832 
833 	WARN_ON(!list_empty(&rbio->stripe_cache));
834 	WARN_ON(!list_empty(&rbio->hash_list));
835 	WARN_ON(!bio_list_empty(&rbio->bio_list));
836 
837 	for (i = 0; i < rbio->nr_pages; i++) {
838 		if (rbio->stripe_pages[i]) {
839 			__free_page(rbio->stripe_pages[i]);
840 			rbio->stripe_pages[i] = NULL;
841 		}
842 	}
843 
844 	btrfs_put_bbio(rbio->bbio);
845 	kfree(rbio);
846 }
847 
848 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
849 {
850 	struct bio *next;
851 
852 	while (cur) {
853 		next = cur->bi_next;
854 		cur->bi_next = NULL;
855 		cur->bi_status = err;
856 		bio_endio(cur);
857 		cur = next;
858 	}
859 }
860 
861 /*
862  * this frees the rbio and runs through all the bios in the
863  * bio_list and calls end_io on them
864  */
865 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
866 {
867 	struct bio *cur = bio_list_get(&rbio->bio_list);
868 	struct bio *extra;
869 
870 	if (rbio->generic_bio_cnt)
871 		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
872 
873 	/*
874 	 * At this moment, rbio->bio_list is empty, however since rbio does not
875 	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
876 	 * hash list, rbio may be merged with others so that rbio->bio_list
877 	 * becomes non-empty.
878 	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
879 	 * more and we can call bio_endio() on all queued bios.
880 	 */
881 	unlock_stripe(rbio);
882 	extra = bio_list_get(&rbio->bio_list);
883 	__free_raid_bio(rbio);
884 
885 	rbio_endio_bio_list(cur, err);
886 	if (extra)
887 		rbio_endio_bio_list(extra, err);
888 }
889 
890 /*
891  * end io function used by finish_rmw.  When we finally
892  * get here, we've written a full stripe
893  */
894 static void raid_write_end_io(struct bio *bio)
895 {
896 	struct btrfs_raid_bio *rbio = bio->bi_private;
897 	blk_status_t err = bio->bi_status;
898 	int max_errors;
899 
900 	if (err)
901 		fail_bio_stripe(rbio, bio);
902 
903 	bio_put(bio);
904 
905 	if (!atomic_dec_and_test(&rbio->stripes_pending))
906 		return;
907 
908 	err = BLK_STS_OK;
909 
910 	/* OK, we have written all the stripes we need to. */
911 	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
912 		     0 : rbio->bbio->max_errors;
913 	if (atomic_read(&rbio->error) > max_errors)
914 		err = BLK_STS_IOERR;
915 
916 	rbio_orig_end_io(rbio, err);
917 }
918 
919 /*
920  * the read/modify/write code wants to use the original bio for
921  * any pages it included, and then use the rbio for everything
922  * else.  This function decides if a given index (stripe number)
923  * and page number in that stripe fall inside the original bio
924  * or the rbio.
925  *
926  * if you set bio_list_only, you'll get a NULL back for any ranges
927  * that are outside the bio_list
928  *
929  * This doesn't take any refs on anything, you get a bare page pointer
930  * and the caller must bump refs as required.
931  *
932  * You must call index_rbio_pages once before you can trust
933  * the answers from this function.
934  */
935 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
936 				 int index, int pagenr, int bio_list_only)
937 {
938 	int chunk_page;
939 	struct page *p = NULL;
940 
941 	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
942 
943 	spin_lock_irq(&rbio->bio_list_lock);
944 	p = rbio->bio_pages[chunk_page];
945 	spin_unlock_irq(&rbio->bio_list_lock);
946 
947 	if (p || bio_list_only)
948 		return p;
949 
950 	return rbio->stripe_pages[chunk_page];
951 }
952 
953 /*
954  * number of pages we need for the entire stripe across all the
955  * drives
956  */
957 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
958 {
959 	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
960 }
961 
962 /*
963  * allocation and initial setup for the btrfs_raid_bio.  Note that
964  * this does not allocate any pages for rbio->stripe_pages.
965  */
966 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
967 					 struct btrfs_bio *bbio,
968 					 u64 stripe_len)
969 {
970 	struct btrfs_raid_bio *rbio;
971 	int nr_data = 0;
972 	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
973 	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
974 	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
975 	void *p;
976 
977 	rbio = kzalloc(sizeof(*rbio) +
978 		       sizeof(*rbio->stripe_pages) * num_pages +
979 		       sizeof(*rbio->bio_pages) * num_pages +
980 		       sizeof(*rbio->finish_pointers) * real_stripes +
981 		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
982 		       sizeof(*rbio->finish_pbitmap) *
983 				BITS_TO_LONGS(stripe_npages),
984 		       GFP_NOFS);
985 	if (!rbio)
986 		return ERR_PTR(-ENOMEM);
987 
988 	bio_list_init(&rbio->bio_list);
989 	INIT_LIST_HEAD(&rbio->plug_list);
990 	spin_lock_init(&rbio->bio_list_lock);
991 	INIT_LIST_HEAD(&rbio->stripe_cache);
992 	INIT_LIST_HEAD(&rbio->hash_list);
993 	rbio->bbio = bbio;
994 	rbio->fs_info = fs_info;
995 	rbio->stripe_len = stripe_len;
996 	rbio->nr_pages = num_pages;
997 	rbio->real_stripes = real_stripes;
998 	rbio->stripe_npages = stripe_npages;
999 	rbio->faila = -1;
1000 	rbio->failb = -1;
1001 	refcount_set(&rbio->refs, 1);
1002 	atomic_set(&rbio->error, 0);
1003 	atomic_set(&rbio->stripes_pending, 0);
1004 
1005 	/*
1006 	 * the stripe_pages, bio_pages, etc arrays point to the extra
1007 	 * memory we allocated past the end of the rbio
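	 *
	 * roughly:
	 *
	 *   [struct btrfs_raid_bio][stripe_pages][bio_pages]
	 *   [finish_pointers][dbitmap][finish_pbitmap]
	 *
	 * in the same order as the CONSUME_ALLOC() calls below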
1008 	 */
1009 	p = rbio + 1;
1010 #define CONSUME_ALLOC(ptr, count)	do {				\
1011 		ptr = p;						\
1012 		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
1013 	} while (0)
1014 	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1015 	CONSUME_ALLOC(rbio->bio_pages, num_pages);
1016 	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1017 	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1018 	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1019 #undef  CONSUME_ALLOC
1020 
1021 	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1022 		nr_data = real_stripes - 1;
1023 	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1024 		nr_data = real_stripes - 2;
1025 	else
1026 		BUG();
1027 
1028 	rbio->nr_data = nr_data;
1029 	return rbio;
1030 }
1031 
1032 /* allocate pages for all the stripes in the bio, including parity */
1033 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1034 {
1035 	int i;
1036 	struct page *page;
1037 
1038 	for (i = 0; i < rbio->nr_pages; i++) {
1039 		if (rbio->stripe_pages[i])
1040 			continue;
1041 		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1042 		if (!page)
1043 			return -ENOMEM;
1044 		rbio->stripe_pages[i] = page;
1045 	}
1046 	return 0;
1047 }
1048 
1049 /* only allocate pages for p/q stripes */
1050 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1051 {
1052 	int i;
1053 	struct page *page;
1054 
1055 	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1056 
1057 	for (; i < rbio->nr_pages; i++) {
1058 		if (rbio->stripe_pages[i])
1059 			continue;
1060 		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1061 		if (!page)
1062 			return -ENOMEM;
1063 		rbio->stripe_pages[i] = page;
1064 	}
1065 	return 0;
1066 }
1067 
1068 /*
1069  * add a single page from a specific stripe into our list of bios for IO
1070  * this will try to merge into existing bios if possible, and returns
1071  * zero if all went well.
1072  */
1073 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1074 			    struct bio_list *bio_list,
1075 			    struct page *page,
1076 			    int stripe_nr,
1077 			    unsigned long page_index,
1078 			    unsigned long bio_max_len)
1079 {
1080 	struct bio *last = bio_list->tail;
1081 	u64 last_end = 0;
1082 	int ret;
1083 	struct bio *bio;
1084 	struct btrfs_bio_stripe *stripe;
1085 	u64 disk_start;
1086 
1087 	stripe = &rbio->bbio->stripes[stripe_nr];
1088 	disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1089 
1090 	/* if the device is missing, just fail this stripe */
1091 	if (!stripe->dev->bdev)
1092 		return fail_rbio_index(rbio, stripe_nr);
1093 
1094 	/* see if we can add this page onto our existing bio */
1095 	if (last) {
1096 		last_end = (u64)last->bi_iter.bi_sector << 9;
1097 		last_end += last->bi_iter.bi_size;
1098 
1099 		/*
1100 		 * we can't merge these if they are from different
1101 		 * devices or if they are not contiguous
1102 		 */
1103 		if (last_end == disk_start && stripe->dev->bdev &&
1104 		    !last->bi_status &&
1105 		    last->bi_disk == stripe->dev->bdev->bd_disk &&
1106 		    last->bi_partno == stripe->dev->bdev->bd_partno) {
1107 			ret = bio_add_page(last, page, PAGE_SIZE, 0);
1108 			if (ret == PAGE_SIZE)
1109 				return 0;
1110 		}
1111 	}
1112 
1113 	/* put a new bio on the list */
1114 	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1115 	bio->bi_iter.bi_size = 0;
1116 	bio_set_dev(bio, stripe->dev->bdev);
1117 	bio->bi_iter.bi_sector = disk_start >> 9;
1118 
1119 	bio_add_page(bio, page, PAGE_SIZE, 0);
1120 	bio_list_add(bio_list, bio);
1121 	return 0;
1122 }
1123 
1124 /*
1125  * while we're doing the read/modify/write cycle, we could
1126  * have errors in reading pages off the disk.  This checks
1127  * for errors and if we're not able to read the page it'll
1128  * trigger parity reconstruction.  The rmw will be finished
1129  * after we've reconstructed the failed stripes
1130  */
1131 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1132 {
1133 	if (rbio->faila >= 0 || rbio->failb >= 0) {
1134 		BUG_ON(rbio->faila == rbio->real_stripes - 1);
1135 		__raid56_parity_recover(rbio);
1136 	} else {
1137 		finish_rmw(rbio);
1138 	}
1139 }
1140 
1141 /*
1142  * helper function to walk our bio list and populate the bio_pages array with
1143  * the result.  This seems expensive, but it is faster than constantly
1144  * searching through the bio list as we setup the IO in finish_rmw or stripe
1145  * reconstruction.
1146  *
1147  * This must be called before you trust the answers from page_in_rbio
1148  */
1149 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1150 {
1151 	struct bio *bio;
1152 	u64 start;
1153 	unsigned long stripe_offset;
1154 	unsigned long page_index;
1155 
1156 	spin_lock_irq(&rbio->bio_list_lock);
1157 	bio_list_for_each(bio, &rbio->bio_list) {
1158 		struct bio_vec bvec;
1159 		struct bvec_iter iter;
1160 		int i = 0;
1161 
1162 		start = (u64)bio->bi_iter.bi_sector << 9;
1163 		stripe_offset = start - rbio->bbio->raid_map[0];
1164 		page_index = stripe_offset >> PAGE_SHIFT;
1165 
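		/*
		 * a cloned bio may have had its iterator advanced or trimmed;
		 * btrfs saves the iterator for the whole cloned range in
		 * btrfs_io_bio, so restore it before walking the segments
		 */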
1166 		if (bio_flagged(bio, BIO_CLONED))
1167 			bio->bi_iter = btrfs_io_bio(bio)->iter;
1168 
1169 		bio_for_each_segment(bvec, bio, iter) {
1170 			rbio->bio_pages[page_index + i] = bvec.bv_page;
1171 			i++;
1172 		}
1173 	}
1174 	spin_unlock_irq(&rbio->bio_list_lock);
1175 }
1176 
1177 /*
1178  * this is called from one of two situations.  We either
1179  * have a full stripe from the higher layers, or we've read all
1180  * the missing bits off disk.
1181  *
1182  * This will calculate the parity and then send down any
1183  * changed blocks.
1184  */
1185 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1186 {
1187 	struct btrfs_bio *bbio = rbio->bbio;
1188 	void **pointers = rbio->finish_pointers;
1189 	int nr_data = rbio->nr_data;
1190 	int stripe;
1191 	int pagenr;
1192 	int p_stripe = -1;
1193 	int q_stripe = -1;
1194 	struct bio_list bio_list;
1195 	struct bio *bio;
1196 	int ret;
1197 
1198 	bio_list_init(&bio_list);
1199 
1200 	if (rbio->real_stripes - rbio->nr_data == 1) {
1201 		p_stripe = rbio->real_stripes - 1;
1202 	} else if (rbio->real_stripes - rbio->nr_data == 2) {
1203 		p_stripe = rbio->real_stripes - 2;
1204 		q_stripe = rbio->real_stripes - 1;
1205 	} else {
1206 		BUG();
1207 	}
1208 
1209 	/* at this point we either have a full stripe,
1210 	 * or we've read the full stripe from the drive.
1211 	 * recalculate the parity and write the new results.
1212 	 *
1213 	 * We're not allowed to add any new bios to the
1214 	 * bio list here, anyone else that wants to
1215 	 * change this stripe needs to do their own rmw.
1216 	 */
1217 	spin_lock_irq(&rbio->bio_list_lock);
1218 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1219 	spin_unlock_irq(&rbio->bio_list_lock);
1220 
1221 	atomic_set(&rbio->error, 0);
1222 
1223 	/*
1224 	 * now that we've set rmw_locked, run through the
1225 	 * bio list one last time and map the page pointers
1226 	 *
1227 	 * We don't cache full rbios because we're assuming
1228 	 * the higher layers are unlikely to use this area of
1229 	 * the disk again soon.  If they do use it again,
1230 	 * hopefully they will send another full bio.
1231 	 */
1232 	index_rbio_pages(rbio);
1233 	if (!rbio_is_full(rbio))
1234 		cache_rbio_pages(rbio);
1235 	else
1236 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1237 
1238 	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1239 		struct page *p;
1240 		/* first collect one page from each data stripe */
1241 		for (stripe = 0; stripe < nr_data; stripe++) {
1242 			p = page_in_rbio(rbio, stripe, pagenr, 0);
1243 			pointers[stripe] = kmap(p);
1244 		}
1245 
1246 		/* then add the parity stripe */
1247 		p = rbio_pstripe_page(rbio, pagenr);
1248 		SetPageUptodate(p);
1249 		pointers[stripe++] = kmap(p);
1250 
1251 		if (q_stripe != -1) {
1252 
1253 			/*
1254 			 * raid6, add the qstripe and call the
1255 			 * library function to fill in our p/q
1256 			 */
1257 			p = rbio_qstripe_page(rbio, pagenr);
1258 			SetPageUptodate(p);
1259 			pointers[stripe++] = kmap(p);
1260 
1261 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1262 						pointers);
1263 		} else {
1264 			/* raid5 */
1265 			copy_page(pointers[nr_data], pointers[0]);
1266 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1267 		}
1268 
1269 
1270 		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1271 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1272 	}
1273 
1274 	/*
1275 	 * time to start writing.  Make bios for everything from the
1276 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1277 	 * everything else.
1278 	 */
1279 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1280 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1281 			struct page *page;
1282 			if (stripe < rbio->nr_data) {
1283 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1284 				if (!page)
1285 					continue;
1286 			} else {
1287 			       page = rbio_stripe_page(rbio, stripe, pagenr);
1288 			}
1289 
1290 			ret = rbio_add_io_page(rbio, &bio_list,
1291 				       page, stripe, pagenr, rbio->stripe_len);
1292 			if (ret)
1293 				goto cleanup;
1294 		}
1295 	}
1296 
1297 	if (likely(!bbio->num_tgtdevs))
1298 		goto write_data;
1299 
1300 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1301 		if (!bbio->tgtdev_map[stripe])
1302 			continue;
1303 
1304 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1305 			struct page *page;
1306 			if (stripe < rbio->nr_data) {
1307 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1308 				if (!page)
1309 					continue;
1310 			} else {
1311 			       page = rbio_stripe_page(rbio, stripe, pagenr);
1312 			}
1313 
1314 			ret = rbio_add_io_page(rbio, &bio_list, page,
1315 					       rbio->bbio->tgtdev_map[stripe],
1316 					       pagenr, rbio->stripe_len);
1317 			if (ret)
1318 				goto cleanup;
1319 		}
1320 	}
1321 
1322 write_data:
1323 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1324 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1325 
1326 	while (1) {
1327 		bio = bio_list_pop(&bio_list);
1328 		if (!bio)
1329 			break;
1330 
1331 		bio->bi_private = rbio;
1332 		bio->bi_end_io = raid_write_end_io;
1333 		bio->bi_opf = REQ_OP_WRITE;
1334 
1335 		submit_bio(bio);
1336 	}
1337 	return;
1338 
1339 cleanup:
1340 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1341 
1342 	while ((bio = bio_list_pop(&bio_list)))
1343 		bio_put(bio);
1344 }
1345 
1346 /*
1347  * helper to find the stripe number for a given bio.  Used to figure out which
1348  * stripe has failed.  This expects the bio to correspond to a physical disk,
1349  * so it looks up based on physical sector numbers.
1350  */
1351 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1352 			   struct bio *bio)
1353 {
1354 	u64 physical = bio->bi_iter.bi_sector;
1355 	u64 stripe_start;
1356 	int i;
1357 	struct btrfs_bio_stripe *stripe;
1358 
1359 	physical <<= 9;
1360 
1361 	for (i = 0; i < rbio->bbio->num_stripes; i++) {
1362 		stripe = &rbio->bbio->stripes[i];
1363 		stripe_start = stripe->physical;
1364 		if (physical >= stripe_start &&
1365 		    physical < stripe_start + rbio->stripe_len &&
1366 		    stripe->dev->bdev &&
1367 		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
1368 		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
1369 			return i;
1370 		}
1371 	}
1372 	return -1;
1373 }
1374 
1375 /*
1376  * helper to find the stripe number for a given
1377  * bio (before mapping).  Used to figure out which stripe has
1378  * failed.  This looks up based on logical block numbers.
1379  */
1380 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1381 				   struct bio *bio)
1382 {
1383 	u64 logical = bio->bi_iter.bi_sector;
1384 	u64 stripe_start;
1385 	int i;
1386 
1387 	logical <<= 9;
1388 
1389 	for (i = 0; i < rbio->nr_data; i++) {
1390 		stripe_start = rbio->bbio->raid_map[i];
1391 		if (logical >= stripe_start &&
1392 		    logical < stripe_start + rbio->stripe_len) {
1393 			return i;
1394 		}
1395 	}
1396 	return -1;
1397 }
1398 
1399 /*
1400  * returns -EIO if we had too many failures
1401  */
1402 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1403 {
1404 	unsigned long flags;
1405 	int ret = 0;
1406 
1407 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
1408 
1409 	/* we already know this stripe is bad, move on */
1410 	if (rbio->faila == failed || rbio->failb == failed)
1411 		goto out;
1412 
1413 	if (rbio->faila == -1) {
1414 		/* first failure on this rbio */
1415 		rbio->faila = failed;
1416 		atomic_inc(&rbio->error);
1417 	} else if (rbio->failb == -1) {
1418 		/* second failure on this rbio */
1419 		rbio->failb = failed;
1420 		atomic_inc(&rbio->error);
1421 	} else {
1422 		ret = -EIO;
1423 	}
1424 out:
1425 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1426 
1427 	return ret;
1428 }
1429 
1430 /*
1431  * helper to fail a stripe based on a physical disk
1432  * bio.
1433  */
1434 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1435 			   struct bio *bio)
1436 {
1437 	int failed = find_bio_stripe(rbio, bio);
1438 
1439 	if (failed < 0)
1440 		return -EIO;
1441 
1442 	return fail_rbio_index(rbio, failed);
1443 }
1444 
1445 /*
1446  * this sets each page in the bio uptodate.  It should only be used on private
1447  * rbio pages, nothing that comes in from the higher layers
1448  */
1449 static void set_bio_pages_uptodate(struct bio *bio)
1450 {
1451 	struct bio_vec *bvec;
1452 	int i;
1453 
1454 	ASSERT(!bio_flagged(bio, BIO_CLONED));
1455 
1456 	bio_for_each_segment_all(bvec, bio, i)
1457 		SetPageUptodate(bvec->bv_page);
1458 }
1459 
1460 /*
1461  * end io for the read phase of the rmw cycle.  All the bios here are physical
1462  * stripe bios we've read from the disk so we can recalculate the parity of the
1463  * stripe.
1464  *
1465  * This will usually kick off finish_rmw once all the bios are read in, but it
1466  * may trigger parity reconstruction if we had any errors along the way
1467  */
1468 static void raid_rmw_end_io(struct bio *bio)
1469 {
1470 	struct btrfs_raid_bio *rbio = bio->bi_private;
1471 
1472 	if (bio->bi_status)
1473 		fail_bio_stripe(rbio, bio);
1474 	else
1475 		set_bio_pages_uptodate(bio);
1476 
1477 	bio_put(bio);
1478 
1479 	if (!atomic_dec_and_test(&rbio->stripes_pending))
1480 		return;
1481 
1482 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1483 		goto cleanup;
1484 
1485 	/*
1486 	 * this will normally call finish_rmw to start our write
1487 	 * but if there are any failed stripes we'll reconstruct
1488 	 * from parity first
1489 	 */
1490 	validate_rbio_for_rmw(rbio);
1491 	return;
1492 
1493 cleanup:
1494 
1495 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1496 }
1497 
1498 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1499 {
1500 	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
1501 	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1502 }
1503 
1504 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1505 {
1506 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1507 			read_rebuild_work, NULL, NULL);
1508 
1509 	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1510 }
1511 
1512 /*
1513  * the stripe must be locked by the caller.  It will
1514  * unlock after all the writes are done
1515  */
1516 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1517 {
1518 	int bios_to_read = 0;
1519 	struct bio_list bio_list;
1520 	int ret;
1521 	int pagenr;
1522 	int stripe;
1523 	struct bio *bio;
1524 
1525 	bio_list_init(&bio_list);
1526 
1527 	ret = alloc_rbio_pages(rbio);
1528 	if (ret)
1529 		goto cleanup;
1530 
1531 	index_rbio_pages(rbio);
1532 
1533 	atomic_set(&rbio->error, 0);
1534 	/*
1535 	 * build a list of bios to read all the missing parts of this
1536 	 * stripe
1537 	 */
1538 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1539 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1540 			struct page *page;
1541 			/*
1542 			 * we want to find all the pages missing from
1543 			 * the rbio and read them from the disk.  If
1544 			 * page_in_rbio finds a page in the bio list
1545 			 * we don't need to read it off the stripe.
1546 			 */
1547 			page = page_in_rbio(rbio, stripe, pagenr, 1);
1548 			if (page)
1549 				continue;
1550 
1551 			page = rbio_stripe_page(rbio, stripe, pagenr);
1552 			/*
1553 			 * the bio cache may have handed us an uptodate
1554 			 * page.  If so, be happy and use it
1555 			 */
1556 			if (PageUptodate(page))
1557 				continue;
1558 
1559 			ret = rbio_add_io_page(rbio, &bio_list, page,
1560 				       stripe, pagenr, rbio->stripe_len);
1561 			if (ret)
1562 				goto cleanup;
1563 		}
1564 	}
1565 
1566 	bios_to_read = bio_list_size(&bio_list);
1567 	if (!bios_to_read) {
1568 		/*
1569 		 * this can happen if others have merged with
1570 		 * us, it means there is nothing left to read.
1571 		 * But if there are missing devices it may not be
1572 		 * safe to do the full stripe write yet.
1573 		 */
1574 		goto finish;
1575 	}
1576 
1577 	/*
1578 	 * the bbio may be freed once we submit the last bio.  Make sure
1579 	 * not to touch it after that
1580 	 */
1581 	atomic_set(&rbio->stripes_pending, bios_to_read);
1582 	while (1) {
1583 		bio = bio_list_pop(&bio_list);
1584 		if (!bio)
1585 			break;
1586 
1587 		bio->bi_private = rbio;
1588 		bio->bi_end_io = raid_rmw_end_io;
1589 		bio->bi_opf = REQ_OP_READ;
1590 
1591 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1592 
1593 		submit_bio(bio);
1594 	}
1595 	/* the actual write will happen once the reads are done */
1596 	return 0;
1597 
1598 cleanup:
1599 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1600 
1601 	while ((bio = bio_list_pop(&bio_list)))
1602 		bio_put(bio);
1603 
1604 	return -EIO;
1605 
1606 finish:
1607 	validate_rbio_for_rmw(rbio);
1608 	return 0;
1609 }
1610 
1611 /*
1612  * if the upper layers pass in a full stripe, we thank them by only allocating
1613  * enough pages to hold the parity, and sending it all down quickly.
1614  */
1615 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1616 {
1617 	int ret;
1618 
1619 	ret = alloc_rbio_parity_pages(rbio);
1620 	if (ret) {
1621 		__free_raid_bio(rbio);
1622 		return ret;
1623 	}
1624 
1625 	ret = lock_stripe_add(rbio);
1626 	if (ret == 0)
1627 		finish_rmw(rbio);
1628 	return 0;
1629 }
1630 
1631 /*
1632  * partial stripe writes get handed over to async helpers.
1633  * We're really hoping to merge a few more writes into this
1634  * rbio before calculating new parity
1635  */
1636 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1637 {
1638 	int ret;
1639 
1640 	ret = lock_stripe_add(rbio);
1641 	if (ret == 0)
1642 		async_rmw_stripe(rbio);
1643 	return 0;
1644 }
1645 
1646 /*
1647  * sometimes while we're reading from the drive to
1648  * recalculate parity, enough new bios come in to create
1649  * a full stripe.  So we do a check here to see if we can
1650  * go directly to finish_rmw
1651  */
1652 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1653 {
1654 	/* head off into rmw land if we don't have a full stripe */
1655 	if (!rbio_is_full(rbio))
1656 		return partial_stripe_write(rbio);
1657 	return full_stripe_write(rbio);
1658 }
1659 
1660 /*
1661  * We use plugging callbacks to collect full stripes.
1662  * Any time we get a partial stripe write while plugged
1663  * we collect it into a list.  When the unplug comes down,
1664  * we sort the list by logical block number and merge
1665  * everything we can into the same rbios
1666  */
1667 struct btrfs_plug_cb {
1668 	struct blk_plug_cb cb;
1669 	struct btrfs_fs_info *info;
1670 	struct list_head rbio_list;
1671 	struct btrfs_work work;
1672 };
1673 
1674 /*
1675  * rbios on the plug list are sorted for easier merging.
1676  */
1677 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1678 {
1679 	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1680 						 plug_list);
1681 	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1682 						 plug_list);
1683 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1684 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1685 
1686 	if (a_sector < b_sector)
1687 		return -1;
1688 	if (a_sector > b_sector)
1689 		return 1;
1690 	return 0;
1691 }
1692 
1693 static void run_plug(struct btrfs_plug_cb *plug)
1694 {
1695 	struct btrfs_raid_bio *cur;
1696 	struct btrfs_raid_bio *last = NULL;
1697 
1698 	/*
1699 	 * sort our plug list then try to merge
1700 	 * everything we can in hopes of creating full
1701 	 * stripes.
1702 	 */
1703 	list_sort(NULL, &plug->rbio_list, plug_cmp);
1704 	while (!list_empty(&plug->rbio_list)) {
1705 		cur = list_entry(plug->rbio_list.next,
1706 				 struct btrfs_raid_bio, plug_list);
1707 		list_del_init(&cur->plug_list);
1708 
1709 		if (rbio_is_full(cur)) {
1710 			/* we have a full stripe, send it down */
1711 			full_stripe_write(cur);
1712 			continue;
1713 		}
1714 		if (last) {
1715 			if (rbio_can_merge(last, cur)) {
1716 				merge_rbio(last, cur);
1717 				__free_raid_bio(cur);
1718 				continue;
1719 
1720 			}
1721 			__raid56_parity_write(last);
1722 		}
1723 		last = cur;
1724 	}
1725 	if (last) {
1726 		__raid56_parity_write(last);
1727 	}
1728 	kfree(plug);
1729 }
1730 
1731 /*
1732  * if the unplug comes from schedule, we have to push the
1733  * work off to a helper thread
1734  */
1735 static void unplug_work(struct btrfs_work *work)
1736 {
1737 	struct btrfs_plug_cb *plug;
1738 	plug = container_of(work, struct btrfs_plug_cb, work);
1739 	run_plug(plug);
1740 }
1741 
1742 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1743 {
1744 	struct btrfs_plug_cb *plug;
1745 	plug = container_of(cb, struct btrfs_plug_cb, cb);
1746 
1747 	if (from_schedule) {
1748 		btrfs_init_work(&plug->work, btrfs_rmw_helper,
1749 				unplug_work, NULL, NULL);
1750 		btrfs_queue_work(plug->info->rmw_workers,
1751 				 &plug->work);
1752 		return;
1753 	}
1754 	run_plug(plug);
1755 }
1756 
1757 /*
1758  * our main entry point for writes from the rest of the FS.
1759  */
1760 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1761 			struct btrfs_bio *bbio, u64 stripe_len)
1762 {
1763 	struct btrfs_raid_bio *rbio;
1764 	struct btrfs_plug_cb *plug = NULL;
1765 	struct blk_plug_cb *cb;
1766 	int ret;
1767 
1768 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
1769 	if (IS_ERR(rbio)) {
1770 		btrfs_put_bbio(bbio);
1771 		return PTR_ERR(rbio);
1772 	}
1773 	bio_list_add(&rbio->bio_list, bio);
1774 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
1775 	rbio->operation = BTRFS_RBIO_WRITE;
1776 
1777 	btrfs_bio_counter_inc_noblocked(fs_info);
1778 	rbio->generic_bio_cnt = 1;
1779 
1780 	/*
1781 	 * don't plug on full rbios, just get them out the door
1782 	 * as quickly as we can
1783 	 */
1784 	if (rbio_is_full(rbio)) {
1785 		ret = full_stripe_write(rbio);
1786 		if (ret)
1787 			btrfs_bio_counter_dec(fs_info);
1788 		return ret;
1789 	}
1790 
1791 	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1792 	if (cb) {
1793 		plug = container_of(cb, struct btrfs_plug_cb, cb);
1794 		if (!plug->info) {
1795 			plug->info = fs_info;
1796 			INIT_LIST_HEAD(&plug->rbio_list);
1797 		}
1798 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1799 		ret = 0;
1800 	} else {
1801 		ret = __raid56_parity_write(rbio);
1802 		if (ret)
1803 			btrfs_bio_counter_dec(fs_info);
1804 	}
1805 	return ret;
1806 }
1807 
1808 /*
1809  * all parity reconstruction happens here.  We've read in everything
1810  * we can find from the drives and this does the heavy lifting of
1811  * sorting the good from the bad.
1812  */
1813 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1814 {
1815 	int pagenr, stripe;
1816 	void **pointers;
1817 	int faila = -1, failb = -1;
1818 	struct page *page;
1819 	blk_status_t err;
1820 	int i;
1821 
1822 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1823 	if (!pointers) {
1824 		err = BLK_STS_RESOURCE;
1825 		goto cleanup_io;
1826 	}
1827 
1828 	faila = rbio->faila;
1829 	failb = rbio->failb;
1830 
1831 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1832 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1833 		spin_lock_irq(&rbio->bio_list_lock);
1834 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1835 		spin_unlock_irq(&rbio->bio_list_lock);
1836 	}
1837 
1838 	index_rbio_pages(rbio);
1839 
1840 	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1841 		/*
1842 		 * Now we just use the dbitmap to mark the horizontal stripes in
1843 		 * which we have data when doing parity scrub.
1844 		 */
1845 		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1846 		    !test_bit(pagenr, rbio->dbitmap))
1847 			continue;
1848 
1849 		/* setup our array of pointers with pages
1850 		 * from each stripe
1851 		 */
1852 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1853 			/*
1854 			 * if we're rebuilding a read, we have to use
1855 			 * pages from the bio list
1856 			 */
1857 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1858 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1859 			    (stripe == faila || stripe == failb)) {
1860 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1861 			} else {
1862 				page = rbio_stripe_page(rbio, stripe, pagenr);
1863 			}
1864 			pointers[stripe] = kmap(page);
1865 		}
1866 
1867 		/* all raid6 handling here */
1868 		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1869 			/*
1870 			 * single failure, rebuild from parity raid5
1871 			 * style
1872 			 */
1873 			if (failb < 0) {
1874 				if (faila == rbio->nr_data) {
1875 					/*
1876 					 * Just the P stripe has failed, without
1877 					 * a bad data or Q stripe.
1878 					 * TODO, we should redo the xor here.
1879 					 */
1880 					err = BLK_STS_IOERR;
1881 					goto cleanup;
1882 				}
1883 				/*
1884 				 * a single failure in raid6 is rebuilt
1885 				 * in the pstripe code below
1886 				 */
1887 				goto pstripe;
1888 			}
1889 
1890 			/* make sure our ps and qs are in order */
1891 			if (faila > failb) {
1892 				int tmp = failb;
1893 				failb = faila;
1894 				faila = tmp;
1895 			}
1896 
1897 			/* if the q stripe has failed, do a pstripe reconstruction
1898 			 * from the xors.
1899 			 * If both the q stripe and the P stripe have failed, we're
1900 			 * here due to a crc mismatch and we can't give them the
1901 			 * data they want
1902 			 */
1903 			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1904 				if (rbio->bbio->raid_map[faila] ==
1905 				    RAID5_P_STRIPE) {
1906 					err = BLK_STS_IOERR;
1907 					goto cleanup;
1908 				}
1909 				/*
1910 				 * otherwise we have one bad data stripe and
1911 				 * a good P stripe.  raid5!
1912 				 */
1913 				goto pstripe;
1914 			}
1915 
1916 			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1917 				raid6_datap_recov(rbio->real_stripes,
1918 						  PAGE_SIZE, faila, pointers);
1919 			} else {
1920 				raid6_2data_recov(rbio->real_stripes,
1921 						  PAGE_SIZE, faila, failb,
1922 						  pointers);
1923 			}
1924 		} else {
1925 			void *p;
1926 
1927 			/* rebuild from P stripe here (raid5 or raid6) */
1928 			BUG_ON(failb != -1);
1929 pstripe:
1930 			/* Copy parity block into failed block to start with */
1931 			copy_page(pointers[faila], pointers[rbio->nr_data]);
1932 
1933 			/* rearrange the pointer array */
1934 			p = pointers[faila];
1935 			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1936 				pointers[stripe] = pointers[stripe + 1];
1937 			pointers[rbio->nr_data - 1] = p;
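			/*
			 * pointers[] now has the surviving data blocks first
			 * and the failed block's page (holding the parity
			 * copy) last, which is the layout run_xor() expects
			 * for regenerating the missing data in place.
			 */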
1938 
1939 			/* xor in the rest */
1940 			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1941 		}
1942 		/* if we're doing this rebuild as part of an rmw, go through
1943 		 * and set all of our private rbio pages in the
1944 		 * failed stripes as uptodate.  This way finish_rmw will
1945 		 * know they can be trusted.  If this was a read reconstruction,
1946 		 * other endio functions will fiddle the uptodate bits
1947 		 */
1948 		if (rbio->operation == BTRFS_RBIO_WRITE) {
1949 			for (i = 0;  i < rbio->stripe_npages; i++) {
1950 				if (faila != -1) {
1951 					page = rbio_stripe_page(rbio, faila, i);
1952 					SetPageUptodate(page);
1953 				}
1954 				if (failb != -1) {
1955 					page = rbio_stripe_page(rbio, failb, i);
1956 					SetPageUptodate(page);
1957 				}
1958 			}
1959 		}
1960 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1961 			/*
1962 			 * if we're rebuilding a read, we have to use
1963 			 * pages from the bio list
1964 			 */
1965 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1966 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1967 			    (stripe == faila || stripe == failb)) {
1968 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1969 			} else {
1970 				page = rbio_stripe_page(rbio, stripe, pagenr);
1971 			}
1972 			kunmap(page);
1973 		}
1974 	}
1975 
1976 	err = BLK_STS_OK;
1977 cleanup:
1978 	kfree(pointers);
1979 
1980 cleanup_io:
1981 	/*
1982 	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1983 	 * valid rbio which is consistent with ondisk content, thus such a
1984 	 * valid rbio can be cached to avoid further disk reads.
1985 	 */
1986 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1987 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1988 		/*
1989 		 * - In case of two failures, where rbio->failb != -1:
1990 		 *
1991 		 *   Do not cache this rbio since the above read reconstruction
1992 		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
1993 		 *   changed the content of some stripes so that they no longer
1994 		 *   match the on-disk content; otherwise a later write/recover
1995 		 *   may steal stripe_pages from this rbio and end up with
1996 		 *   corruption or rebuild failures.
1997 		 *
1998 		 * - In case of single failure, where rbio->failb == -1:
1999 		 *
2000 		 *   Cache this rbio iff the above read reconstruction is
2001 		 *   executed without problems.
2002 		 */
2003 		if (err == BLK_STS_OK && rbio->failb < 0)
2004 			cache_rbio_pages(rbio);
2005 		else
2006 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2007 
2008 		rbio_orig_end_io(rbio, err);
2009 	} else if (err == BLK_STS_OK) {
2010 		rbio->faila = -1;
2011 		rbio->failb = -1;
2012 
2013 		if (rbio->operation == BTRFS_RBIO_WRITE)
2014 			finish_rmw(rbio);
2015 		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2016 			finish_parity_scrub(rbio, 0);
2017 		else
2018 			BUG();
2019 	} else {
2020 		rbio_orig_end_io(rbio, err);
2021 	}
2022 }
2023 
2024 /*
2025  * This is called only for stripes we've read from disk to
2026  * reconstruct the parity.
2027  */
2028 static void raid_recover_end_io(struct bio *bio)
2029 {
2030 	struct btrfs_raid_bio *rbio = bio->bi_private;
2031 
2032 	/*
2033 	 * we only read stripe pages off the disk, set them
2034 	 * up to date if there were no errors
2035 	 */
2036 	if (bio->bi_status)
2037 		fail_bio_stripe(rbio, bio);
2038 	else
2039 		set_bio_pages_uptodate(bio);
2040 	bio_put(bio);
2041 
2042 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2043 		return;
2044 
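	/*
	 * All reads are done.  If we saw more failures than this raid
	 * profile can tolerate, give up; otherwise run the reconstruction.
	 */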
2045 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2046 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2047 	else
2048 		__raid_recover_end_io(rbio);
2049 }
2050 
2051 /*
2052  * reads everything we need off the disk to reconstruct
2053  * the parity. endio handlers trigger final reconstruction
2054  * when the IO is done.
2055  *
2056  * This is used both for reads from the higher layers and for
2057  * parity construction required to finish a rmw cycle.
2058  */
2059 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2060 {
2061 	int bios_to_read = 0;
2062 	struct bio_list bio_list;
2063 	int ret;
2064 	int pagenr;
2065 	int stripe;
2066 	struct bio *bio;
2067 
2068 	bio_list_init(&bio_list);
2069 
2070 	ret = alloc_rbio_pages(rbio);
2071 	if (ret)
2072 		goto cleanup;
2073 
2074 	atomic_set(&rbio->error, 0);
2075 
2076 	/*
2077 	 * read everything that hasn't failed.  Thanks to the
2078 	 * stripe cache, it is possible that some or all of these
2079 	 * pages are going to be uptodate.
2080 	 */
2081 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
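		/*
		 * Skip the stripes we already know are bad and count them
		 * as errors, so the max_errors checks here and in the endio
		 * handler stay consistent.
		 */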
2082 		if (rbio->faila == stripe || rbio->failb == stripe) {
2083 			atomic_inc(&rbio->error);
2084 			continue;
2085 		}
2086 
2087 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2088 			struct page *p;
2089 
2090 			/*
2091 			 * the rmw code may have already read this
2092 			 * page in
2093 			 */
2094 			p = rbio_stripe_page(rbio, stripe, pagenr);
2095 			if (PageUptodate(p))
2096 				continue;
2097 
2098 			ret = rbio_add_io_page(rbio, &bio_list,
2099 				       rbio_stripe_page(rbio, stripe, pagenr),
2100 				       stripe, pagenr, rbio->stripe_len);
2101 			if (ret < 0)
2102 				goto cleanup;
2103 		}
2104 	}
2105 
2106 	bios_to_read = bio_list_size(&bio_list);
2107 	if (!bios_to_read) {
2108 		/*
2109 		 * we might have no bios to read just because the pages
2110 		 * were up to date, or we might have no bios to read because
2111 		 * the devices were gone.
2112 		 */
2113 		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2114 			__raid_recover_end_io(rbio);
2115 			goto out;
2116 		} else {
2117 			goto cleanup;
2118 		}
2119 	}
2120 
2121 	/*
2122 	 * the bbio may be freed once we submit the last bio.  Make sure
2123 	 * not to touch it after that
2124 	 */
2125 	atomic_set(&rbio->stripes_pending, bios_to_read);
2126 	while (1) {
2127 		bio = bio_list_pop(&bio_list);
2128 		if (!bio)
2129 			break;
2130 
2131 		bio->bi_private = rbio;
2132 		bio->bi_end_io = raid_recover_end_io;
2133 		bio->bi_opf = REQ_OP_READ;
2134 
2135 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2136 
2137 		submit_bio(bio);
2138 	}
2139 out:
2140 	return 0;
2141 
2142 cleanup:
2143 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2144 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2145 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2146 
2147 	while ((bio = bio_list_pop(&bio_list)))
2148 		bio_put(bio);
2149 
2150 	return -EIO;
2151 }
2152 
2153 /*
2154  * the main entry point for reads from the higher layers.  This
2155  * is really only called when the normal read path had a failure,
2156  * so we assume the bio they send down corresponds to a failed part
2157  * of the drive.
2158  */
2159 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2160 			  struct btrfs_bio *bbio, u64 stripe_len,
2161 			  int mirror_num, int generic_io)
2162 {
2163 	struct btrfs_raid_bio *rbio;
2164 	int ret;
2165 
2166 	if (generic_io) {
2167 		ASSERT(bbio->mirror_num == mirror_num);
2168 		btrfs_io_bio(bio)->mirror_num = mirror_num;
2169 	}
2170 
2171 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2172 	if (IS_ERR(rbio)) {
2173 		if (generic_io)
2174 			btrfs_put_bbio(bbio);
2175 		return PTR_ERR(rbio);
2176 	}
2177 
2178 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2179 	bio_list_add(&rbio->bio_list, bio);
2180 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
2181 
2182 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2183 	if (rbio->faila == -1) {
2184 		btrfs_warn(fs_info,
2185 	"%s could not find the bad stripe in raid56 so we cannot recover it (bio has logical %llu len %llu, bbio has map_type %llu)",
2186 			   __func__, (u64)bio->bi_iter.bi_sector << 9,
2187 			   (u64)bio->bi_iter.bi_size, bbio->map_type);
2188 		if (generic_io)
2189 			btrfs_put_bbio(bbio);
2190 		kfree(rbio);
2191 		return -EIO;
2192 	}
2193 
2194 	if (generic_io) {
2195 		btrfs_bio_counter_inc_noblocked(fs_info);
2196 		rbio->generic_bio_cnt = 1;
2197 	} else {
2198 		btrfs_get_bbio(bbio);
2199 	}
2200 
2201 	/*
2202 	 * Loop retry:
2203 	 * for 'mirror_num == 2', reconstruct from all other stripes.
2204 	 * for 'mirror_num > 2', select a different stripe to fail on every retry.
2205 	 */
2206 	if (mirror_num > 2) {
2207 		/*
2208 		 * 'mirror_num == 3' is to fail the p stripe and
2209 		 * reconstruct from the q stripe.  'mirror_num > 3' is to
2210 		 * fail a data stripe and reconstruct from the p+q stripes.
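		 *
		 * For example, with real_stripes == 6 (4 data + P + Q),
		 * mirror_num == 3 gives failb == 4 (the P stripe) and
		 * mirror_num == 4 gives failb == 3 (the last data stripe);
		 * failb is shifted down below if it lands on or before faila.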
2211 		 */
2212 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
2213 		ASSERT(rbio->failb > 0);
2214 		if (rbio->failb <= rbio->faila)
2215 			rbio->failb--;
2216 	}
2217 
2218 	ret = lock_stripe_add(rbio);
2219 
2220 	/*
2221 	 * __raid56_parity_recover will end the bio with
2222 	 * any errors it hits.  We don't want to return
2223 	 * its error value up the stack because our caller
2224 	 * will end up calling bio_endio with any nonzero
2225 	 * return
2226 	 */
2227 	if (ret == 0)
2228 		__raid56_parity_recover(rbio);
2229 	/*
2230 	 * our rbio has been added to the list of
2231 	 * rbios that will be handled after the
2232 	 * current lock owner is done
2233 	 */
2234 	return 0;
2235 
2236 }
2237 
2238 static void rmw_work(struct btrfs_work *work)
2239 {
2240 	struct btrfs_raid_bio *rbio;
2241 
2242 	rbio = container_of(work, struct btrfs_raid_bio, work);
2243 	raid56_rmw_stripe(rbio);
2244 }
2245 
2246 static void read_rebuild_work(struct btrfs_work *work)
2247 {
2248 	struct btrfs_raid_bio *rbio;
2249 
2250 	rbio = container_of(work, struct btrfs_raid_bio, work);
2251 	__raid56_parity_recover(rbio);
2252 }
2253 
2254 /*
2255  * The following code is used to scrub/replace the parity stripe
2256  *
2257  * Caller must have already increased bio_counter for getting @bbio.
2258  *
2259  * Note: We need to make sure that all the pages added to the scrub/replace
2260  * raid bio are correct and are not changed during the scrub/replace, i.e.
2261  * those pages only hold metadata or file data covered by a checksum.
2262  */
2263 
2264 struct btrfs_raid_bio *
2265 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2266 			       struct btrfs_bio *bbio, u64 stripe_len,
2267 			       struct btrfs_device *scrub_dev,
2268 			       unsigned long *dbitmap, int stripe_nsectors)
2269 {
2270 	struct btrfs_raid_bio *rbio;
2271 	int i;
2272 
2273 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2274 	if (IS_ERR(rbio))
2275 		return NULL;
2276 	bio_list_add(&rbio->bio_list, bio);
2277 	/*
2278 	 * This is a special bio which is used to hold the completion handler
2279 	 * and make the scrub rbio similar to the other types
2280 	 */
2281 	ASSERT(!bio->bi_iter.bi_size);
2282 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2283 
2284 	/*
2285 	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2286 	 * to the end position, so this search can start from the first parity
2287 	 * stripe.
2288 	 */
2289 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2290 		if (bbio->stripes[i].dev == scrub_dev) {
2291 			rbio->scrubp = i;
2292 			break;
2293 		}
2294 	}
2295 	ASSERT(i < rbio->real_stripes);
2296 
2297 	/* For now we only support a sectorsize equal to PAGE_SIZE */
2298 	ASSERT(fs_info->sectorsize == PAGE_SIZE);
2299 	ASSERT(rbio->stripe_npages == stripe_nsectors);
2300 	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2301 
2302 	/*
2303 	 * We have already increased bio_counter when getting bbio, record it
2304 	 * so we can free it at rbio_orig_end_io().
2305 	 */
2306 	rbio->generic_bio_cnt = 1;
2307 
2308 	return rbio;
2309 }
2310 
2311 /* Used for both parity scrub and missing. */
2312 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2313 			    u64 logical)
2314 {
2315 	int stripe_offset;
2316 	int index;
2317 
2318 	ASSERT(logical >= rbio->bbio->raid_map[0]);
2319 	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2320 				rbio->stripe_len * rbio->nr_data);
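	/*
	 * bio_pages[] is indexed by the page's position within the data
	 * portion of the full stripe, which starts at raid_map[0].
	 */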
2321 	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2322 	index = stripe_offset >> PAGE_SHIFT;
2323 	rbio->bio_pages[index] = page;
2324 }
2325 
2326 /*
2327  * We only scrub the parity for the horizontal stripes where we have correct
2328  * data, so we don't need to allocate all pages for all the stripes.
2329  */
2330 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2331 {
2332 	int i;
2333 	int bit;
2334 	int index;
2335 	struct page *page;
2336 
2337 	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2338 		for (i = 0; i < rbio->real_stripes; i++) {
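			/*
			 * stripe_pages[] is laid out stripe by stripe, so page
			 * 'bit' of stripe 'i' lives at i * stripe_npages + bit.
			 */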
2339 			index = i * rbio->stripe_npages + bit;
2340 			if (rbio->stripe_pages[index])
2341 				continue;
2342 
2343 			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2344 			if (!page)
2345 				return -ENOMEM;
2346 			rbio->stripe_pages[index] = page;
2347 		}
2348 	}
2349 	return 0;
2350 }
2351 
2352 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2353 					 int need_check)
2354 {
2355 	struct btrfs_bio *bbio = rbio->bbio;
2356 	void **pointers = rbio->finish_pointers;
2357 	unsigned long *pbitmap = rbio->finish_pbitmap;
2358 	int nr_data = rbio->nr_data;
2359 	int stripe;
2360 	int pagenr;
2361 	int p_stripe = -1;
2362 	int q_stripe = -1;
2363 	struct page *p_page = NULL;
2364 	struct page *q_page = NULL;
2365 	struct bio_list bio_list;
2366 	struct bio *bio;
2367 	int is_replace = 0;
2368 	int ret;
2369 
2370 	bio_list_init(&bio_list);
2371 
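	/* one extra stripe means RAID5 (P only), two means RAID6 (P and Q) */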
2372 	if (rbio->real_stripes - rbio->nr_data == 1) {
2373 		p_stripe = rbio->real_stripes - 1;
2374 	} else if (rbio->real_stripes - rbio->nr_data == 2) {
2375 		p_stripe = rbio->real_stripes - 2;
2376 		q_stripe = rbio->real_stripes - 1;
2377 	} else {
2378 		BUG();
2379 	}
2380 
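	/*
	 * For a replace we also write the scrubbed parity to the replace
	 * target.  Snapshot dbitmap into pbitmap now, because bits are
	 * cleared from dbitmap below whenever the on-disk parity turns out
	 * to be correct, while the replace target still needs those pages.
	 */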
2381 	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2382 		is_replace = 1;
2383 		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2384 	}
2385 
2386 	/*
2387 	 * The higher layers (the scrubber) are unlikely to
2388 	 * use this area of the disk again soon, so don't cache
2389 	 * it.
2390 	 */
2391 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2392 
2393 	if (!need_check)
2394 		goto writeback;
2395 
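	/*
	 * Generate the expected parity into scratch pages (p_page and
	 * q_page) so it can be compared against the parity we read from
	 * disk, which stays untouched in the stripe pages until we decide
	 * to repair it.
	 */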
2396 	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2397 	if (!p_page)
2398 		goto cleanup;
2399 	SetPageUptodate(p_page);
2400 
2401 	if (q_stripe != -1) {
2402 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2403 		if (!q_page) {
2404 			__free_page(p_page);
2405 			goto cleanup;
2406 		}
2407 		SetPageUptodate(q_page);
2408 	}
2409 
2410 	atomic_set(&rbio->error, 0);
2411 
2412 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2413 		struct page *p;
2414 		void *parity;
2415 		/* first collect one page from each data stripe */
2416 		for (stripe = 0; stripe < nr_data; stripe++) {
2417 			p = page_in_rbio(rbio, stripe, pagenr, 0);
2418 			pointers[stripe] = kmap(p);
2419 		}
2420 
2421 		/* then add the parity stripe */
2422 		pointers[stripe++] = kmap(p_page);
2423 
2424 		if (q_stripe != -1) {
2425 
2426 			/*
2427 			 * raid6, add the qstripe and call the
2428 			 * library function to fill in our p/q
2429 			 */
2430 			pointers[stripe++] = kmap(q_page);
2431 
2432 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2433 						pointers);
2434 		} else {
2435 			/* raid5 */
2436 			copy_page(pointers[nr_data], pointers[0]);
2437 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2438 		}
2439 
2440 		/* Check scrubbing parity and repair it */
2441 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2442 		parity = kmap(p);
2443 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2444 			copy_page(parity, pointers[rbio->scrubp]);
2445 		else
2446 			/* Parity is correct, no need to write it back */
2447 			bitmap_clear(rbio->dbitmap, pagenr, 1);
2448 		kunmap(p);
2449 
2450 		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2451 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2452 	}
2453 
2454 	__free_page(p_page);
2455 	if (q_page)
2456 		__free_page(q_page);
2457 
2458 writeback:
2459 	/*
2460 	 * time to start writing.  Make bios for the pages of the parity
2461 	 * stripe we are scrubbing (plus the replace target, if any) and
2462 	 * ignore everything else.
2463 	 */
2464 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2465 		struct page *page;
2466 
2467 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2468 		ret = rbio_add_io_page(rbio, &bio_list,
2469 			       page, rbio->scrubp, pagenr, rbio->stripe_len);
2470 		if (ret)
2471 			goto cleanup;
2472 	}
2473 
2474 	if (!is_replace)
2475 		goto submit_write;
2476 
2477 	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2478 		struct page *page;
2479 
2480 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2481 		ret = rbio_add_io_page(rbio, &bio_list, page,
2482 				       bbio->tgtdev_map[rbio->scrubp],
2483 				       pagenr, rbio->stripe_len);
2484 		if (ret)
2485 			goto cleanup;
2486 	}
2487 
2488 submit_write:
2489 	nr_data = bio_list_size(&bio_list);
2490 	if (!nr_data) {
2491 		/* All the parity pages are correct, nothing to write back */
2492 		rbio_orig_end_io(rbio, BLK_STS_OK);
2493 		return;
2494 	}
2495 
2496 	atomic_set(&rbio->stripes_pending, nr_data);
2497 
2498 	while (1) {
2499 		bio = bio_list_pop(&bio_list);
2500 		if (!bio)
2501 			break;
2502 
2503 		bio->bi_private = rbio;
2504 		bio->bi_end_io = raid_write_end_io;
2505 		bio->bi_opf = REQ_OP_WRITE;
2506 
2507 		submit_bio(bio);
2508 	}
2509 	return;
2510 
2511 cleanup:
2512 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2513 
2514 	while ((bio = bio_list_pop(&bio_list)))
2515 		bio_put(bio);
2516 }
2517 
2518 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2519 {
2520 	if (stripe >= 0 && stripe < rbio->nr_data)
2521 		return 1;
2522 	return 0;
2523 }
2524 
2525 /*
2526  * While we're doing the parity check and repair, we could have errors
2527  * in reading pages off the disk.  This checks for errors and if we're
2528  * not able to read the page it'll trigger parity reconstruction.  The
2529  * parity scrub will be finished after we've reconstructed the failed
2530  * stripes
2531  */
2532 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2533 {
2534 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2535 		goto cleanup;
2536 
2537 	if (rbio->faila >= 0 || rbio->failb >= 0) {
2538 		int dfail = 0, failp = -1;
2539 
2540 		if (is_data_stripe(rbio, rbio->faila))
2541 			dfail++;
2542 		else if (is_parity_stripe(rbio->faila))
2543 			failp = rbio->faila;
2544 
2545 		if (is_data_stripe(rbio, rbio->failb))
2546 			dfail++;
2547 		else if (is_parity_stripe(rbio->failb))
2548 			failp = rbio->failb;
2549 
2550 		/*
2551 		 * We cannot use the parity stripe that is being scrubbed
2552 		 * to repair data, so our repair capability is reduced.
2553 		 * (In the case of RAID5, we cannot repair anything.)
2554 		 */
2555 		if (dfail > rbio->bbio->max_errors - 1)
2556 			goto cleanup;
2557 
2558 		/*
2559 		 * If all the data is good and only the parity is bad,
2560 		 * just repair the parity.
2561 		 */
2562 		if (dfail == 0) {
2563 			finish_parity_scrub(rbio, 0);
2564 			return;
2565 		}
2566 
2567 		/*
2568 		 * Here we have one corrupted data stripe and one corrupted
2569 		 * parity stripe on RAID6.  If the corrupted parity is the one
2570 		 * being scrubbed, we can luckily use the other parity to
2571 		 * repair the data; otherwise we cannot repair the data stripe.
2572 		 */
2573 		if (failp != rbio->scrubp)
2574 			goto cleanup;
2575 
2576 		__raid_recover_end_io(rbio);
2577 	} else {
2578 		finish_parity_scrub(rbio, 1);
2579 	}
2580 	return;
2581 
2582 cleanup:
2583 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2584 }
2585 
2586 /*
2587  * end io for the read phase of the parity scrub.  All the bios here are
2588  * physical stripe bios we've read from the disk so we can recalculate the
2589  * parity of the stripe.
2590  *
2591  * This will usually kick off finish_parity_scrub once all the bios are read
2592  * in, but it may trigger parity reconstruction if we had errors along the way
2593  */
2594 static void raid56_parity_scrub_end_io(struct bio *bio)
2595 {
2596 	struct btrfs_raid_bio *rbio = bio->bi_private;
2597 
2598 	if (bio->bi_status)
2599 		fail_bio_stripe(rbio, bio);
2600 	else
2601 		set_bio_pages_uptodate(bio);
2602 
2603 	bio_put(bio);
2604 
2605 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2606 		return;
2607 
2608 	/*
2609 	 * this will normally call finish_parity_scrub to start our write
2610 	 * but if there are any failed stripes we'll reconstruct
2611 	 * from parity first
2612 	 */
2613 	validate_rbio_for_parity_scrub(rbio);
2614 }
2615 
2616 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2617 {
2618 	int bios_to_read = 0;
2619 	struct bio_list bio_list;
2620 	int ret;
2621 	int pagenr;
2622 	int stripe;
2623 	struct bio *bio;
2624 
2625 	bio_list_init(&bio_list);
2626 
2627 	ret = alloc_rbio_essential_pages(rbio);
2628 	if (ret)
2629 		goto cleanup;
2630 
2631 	atomic_set(&rbio->error, 0);
2632 	/*
2633 	 * build a list of bios to read all the missing parts of this
2634 	 * stripe
2635 	 */
2636 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2637 		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2638 			struct page *page;
2639 			/*
2640 			 * we want to find all the pages missing from
2641 			 * the rbio and read them from the disk.  If
2642 			 * page_in_rbio finds a page in the bio list
2643 			 * we don't need to read it off the stripe.
2644 			 */
2645 			page = page_in_rbio(rbio, stripe, pagenr, 1);
2646 			if (page)
2647 				continue;
2648 
2649 			page = rbio_stripe_page(rbio, stripe, pagenr);
2650 			/*
2651 			 * the bio cache may have handed us an uptodate
2652 			 * page.  If so, be happy and use it
2653 			 */
2654 			if (PageUptodate(page))
2655 				continue;
2656 
2657 			ret = rbio_add_io_page(rbio, &bio_list, page,
2658 				       stripe, pagenr, rbio->stripe_len);
2659 			if (ret)
2660 				goto cleanup;
2661 		}
2662 	}
2663 
2664 	bios_to_read = bio_list_size(&bio_list);
2665 	if (!bios_to_read) {
2666 		/*
2667 		 * this can happen if others have merged with
2668 		 * us; it means there is nothing left to read.
2669 		 * But if there are missing devices it may not be
2670 		 * safe to do the full stripe write yet.
2671 		 */
2672 		goto finish;
2673 	}
2674 
2675 	/*
2676 	 * the bbio may be freed once we submit the last bio.  Make sure
2677 	 * not to touch it after that
2678 	 */
2679 	atomic_set(&rbio->stripes_pending, bios_to_read);
2680 	while (1) {
2681 		bio = bio_list_pop(&bio_list);
2682 		if (!bio)
2683 			break;
2684 
2685 		bio->bi_private = rbio;
2686 		bio->bi_end_io = raid56_parity_scrub_end_io;
2687 		bio->bi_opf = REQ_OP_READ;
2688 
2689 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2690 
2691 		submit_bio(bio);
2692 	}
2693 	/* the actual write will happen once the reads are done */
2694 	return;
2695 
2696 cleanup:
2697 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2698 
2699 	while ((bio = bio_list_pop(&bio_list)))
2700 		bio_put(bio);
2701 
2702 	return;
2703 
2704 finish:
2705 	validate_rbio_for_parity_scrub(rbio);
2706 }
2707 
2708 static void scrub_parity_work(struct btrfs_work *work)
2709 {
2710 	struct btrfs_raid_bio *rbio;
2711 
2712 	rbio = container_of(work, struct btrfs_raid_bio, work);
2713 	raid56_parity_scrub_stripe(rbio);
2714 }
2715 
2716 static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2717 {
2718 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2719 			scrub_parity_work, NULL, NULL);
2720 
2721 	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2722 }
2723 
2724 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2725 {
2726 	if (!lock_stripe_add(rbio))
2727 		async_scrub_parity(rbio);
2728 }
2729 
2730 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2731 
2732 struct btrfs_raid_bio *
2733 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2734 			  struct btrfs_bio *bbio, u64 length)
2735 {
2736 	struct btrfs_raid_bio *rbio;
2737 
2738 	rbio = alloc_rbio(fs_info, bbio, length);
2739 	if (IS_ERR(rbio))
2740 		return NULL;
2741 
2742 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2743 	bio_list_add(&rbio->bio_list, bio);
2744 	/*
2745 	 * This is a special bio which is used to hold the completion handler
2746 	 * and make this rbio similar to the other types
2747 	 */
2748 	ASSERT(!bio->bi_iter.bi_size);
2749 
2750 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2751 	if (rbio->faila == -1) {
2752 		BUG();
2753 		kfree(rbio);
2754 		return NULL;
2755 	}
2756 
2757 	/*
2758 	 * When we get bbio, we have already increased bio_counter, record it
2759 	 * so we can free it at rbio_orig_end_io()
2760 	 */
2761 	rbio->generic_bio_cnt = 1;
2762 
2763 	return rbio;
2764 }
2765 
2766 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2767 {
2768 	if (!lock_stripe_add(rbio))
2769 		async_read_rebuild(rbio);
2770 }
2771