xref: /linux/drivers/md/raid10.c (revision 616355cc818c6ddadc393fdfd4491f94458cb715)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * raid10.c : Multiple Devices driver for Linux
4  *
5  * Copyright (C) 2000-2004 Neil Brown
6  *
7  * RAID-10 support for md.
8  *
9  * Based on code in raid1.c.  See raid1.c for further copyright information.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14 #include <linux/blkdev.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 #include <linux/ratelimit.h>
18 #include <linux/kthread.h>
19 #include <linux/raid/md_p.h>
20 #include <trace/events/block.h>
21 #include "md.h"
22 #include "raid10.h"
23 #include "raid0.h"
24 #include "md-bitmap.h"
25 
26 /*
27  * RAID10 provides a combination of RAID0 and RAID1 functionality.
28  * The layout of data is defined by
29  *    chunk_size
30  *    raid_disks
31  *    near_copies (stored in low byte of layout)
32  *    far_copies (stored in second byte of layout)
33  *    far_offset (stored in bit 16 of layout )
34  *    use_far_sets (stored in bit 17 of layout )
35  *    use_far_sets_bugfixed (stored in bit 18 of layout )
36  *
37  * The data to be stored is divided into chunks using chunksize.  Each device
38  * is divided into far_copies sections.   In each section, chunks are laid out
39  * in a style similar to raid0, but near_copies copies of each chunk are stored
40  * (each on a different drive).  The starting device for each section is offset
41  * near_copies from the starting device of the previous section.  Thus there
42  * are (near_copies * far_copies) of each chunk, and each is on a different
43  * drive.  near_copies and far_copies must be at least one, and their product
44  * is at most raid_disks.
45  *
46  * If far_offset is true, then the far_copies are handled a bit differently.
47  * The copies are still in different stripes, but instead of being very far
48  * apart on disk, they are in adjacent stripes.
49  *
50  * The far and offset algorithms are handled slightly differently if
51  * 'use_far_sets' is true.  In this case, the array's devices are grouped into
52  * sets that are (near_copies * far_copies) in size.  The far copied stripes
53  * are still shifted by 'near_copies' devices, but this shifting stays confined
54  * to the set rather than the entire array.  This is done to improve the number
55  * of device combinations that can fail without causing the array to fail.
56  * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
57  * on a device):
58  *    A B C D    A B C D E
59  *      ...         ...
60  *    D A B C    E A B C D
61  * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
62  *    [A B] [C D]    [A B] [C D E]
63  *    |...| |...|    |...| | ... |
64  *    [B A] [D C]    [B A] [E C D]
65  */
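/*
 * Worked example of the geometry above (illustrative numbers only, not
 * taken from any particular array): with raid_disks = 4, near_copies = 2,
 * far_copies = 2 and far_offset = 0, each chunk is stored
 * near_copies * far_copies = 4 times, once on every device.  Each device
 * holds far_copies = 2 sections; the near copies of a chunk sit on
 * adjacent devices in the first section, and the far copies repeat that
 * pattern in the second section shifted by near_copies devices.  The
 * usable capacity is the raw capacity divided by near_copies * far_copies.
 */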
66 
67 static void allow_barrier(struct r10conf *conf);
68 static void lower_barrier(struct r10conf *conf);
69 static int _enough(struct r10conf *conf, int previous, int ignore);
70 static int enough(struct r10conf *conf, int ignore);
71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
72 				int *skipped);
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
74 static void end_reshape_write(struct bio *bio);
75 static void end_reshape(struct r10conf *conf);
76 
77 #define raid10_log(md, fmt, args...)				\
78 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
79 
80 #include "raid1-10.c"
81 
82 /*
83  * For a resync bio, the r10bio pointer can be retrieved from the per-bio
84  * 'struct resync_pages'.
85  */
86 static inline struct r10bio *get_resync_r10bio(struct bio *bio)
87 {
88 	return get_resync_pages(bio)->raid_bio;
89 }
90 
91 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
92 {
93 	struct r10conf *conf = data;
94 	int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
95 
96 	/* allocate a r10bio with room for raid_disks entries in the
97 	 * bios array */
98 	return kzalloc(size, gfp_flags);
99 }
100 
101 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
102 /* amount of memory to reserve for resync requests */
103 #define RESYNC_WINDOW (1024*1024)
104 /* maximum number of concurrent requests, memory permitting */
105 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
106 #define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
107 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
108 
109 /*
110  * When performing a resync, we need to read and compare, so
111  * we need as many pages as there are copies.
112  * When performing a recovery, we need 2 bios, one for read,
113  * one for write (we recover only one drive per r10buf)
114  *
115  */
116 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
117 {
118 	struct r10conf *conf = data;
119 	struct r10bio *r10_bio;
120 	struct bio *bio;
121 	int j;
122 	int nalloc, nalloc_rp;
123 	struct resync_pages *rps;
124 
125 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
126 	if (!r10_bio)
127 		return NULL;
128 
129 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
130 	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
131 		nalloc = conf->copies; /* resync */
132 	else
133 		nalloc = 2; /* recovery */
134 
135 	/* allocate once for all bios */
136 	if (!conf->have_replacement)
137 		nalloc_rp = nalloc;
138 	else
139 		nalloc_rp = nalloc * 2;
140 	rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
141 	if (!rps)
142 		goto out_free_r10bio;
143 
144 	/*
145 	 * Allocate bios.
146 	 */
147 	for (j = nalloc ; j-- ; ) {
148 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
149 		if (!bio)
150 			goto out_free_bio;
151 		r10_bio->devs[j].bio = bio;
152 		if (!conf->have_replacement)
153 			continue;
154 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
155 		if (!bio)
156 			goto out_free_bio;
157 		r10_bio->devs[j].repl_bio = bio;
158 	}
159 	/*
160 	 * Allocate RESYNC_PAGES data pages and attach them
161 	 * where needed.
162 	 */
163 	for (j = 0; j < nalloc; j++) {
164 		struct bio *rbio = r10_bio->devs[j].repl_bio;
165 		struct resync_pages *rp, *rp_repl;
166 
167 		rp = &rps[j];
168 		if (rbio)
169 			rp_repl = &rps[nalloc + j];
170 
171 		bio = r10_bio->devs[j].bio;
172 
173 		if (!j || test_bit(MD_RECOVERY_SYNC,
174 				   &conf->mddev->recovery)) {
175 			if (resync_alloc_pages(rp, gfp_flags))
176 				goto out_free_pages;
177 		} else {
178 			memcpy(rp, &rps[0], sizeof(*rp));
179 			resync_get_all_pages(rp);
180 		}
181 
182 		rp->raid_bio = r10_bio;
183 		bio->bi_private = rp;
184 		if (rbio) {
185 			memcpy(rp_repl, rp, sizeof(*rp));
186 			rbio->bi_private = rp_repl;
187 		}
188 	}
189 
190 	return r10_bio;
191 
192 out_free_pages:
193 	while (--j >= 0)
194 		resync_free_pages(&rps[j]);
195 
196 	j = 0;
197 out_free_bio:
198 	for ( ; j < nalloc; j++) {
199 		if (r10_bio->devs[j].bio)
200 			bio_put(r10_bio->devs[j].bio);
201 		if (r10_bio->devs[j].repl_bio)
202 			bio_put(r10_bio->devs[j].repl_bio);
203 	}
204 	kfree(rps);
205 out_free_r10bio:
206 	rbio_pool_free(r10_bio, conf);
207 	return NULL;
208 }
209 
210 static void r10buf_pool_free(void *__r10_bio, void *data)
211 {
212 	struct r10conf *conf = data;
213 	struct r10bio *r10bio = __r10_bio;
214 	int j;
215 	struct resync_pages *rp = NULL;
216 
217 	for (j = conf->copies; j--; ) {
218 		struct bio *bio = r10bio->devs[j].bio;
219 
220 		if (bio) {
221 			rp = get_resync_pages(bio);
222 			resync_free_pages(rp);
223 			bio_put(bio);
224 		}
225 
226 		bio = r10bio->devs[j].repl_bio;
227 		if (bio)
228 			bio_put(bio);
229 	}
230 
231 	/* resync pages array stored in the 1st bio's .bi_private */
232 	kfree(rp);
233 
234 	rbio_pool_free(r10bio, conf);
235 }
236 
237 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
238 {
239 	int i;
240 
241 	for (i = 0; i < conf->geo.raid_disks; i++) {
242 		struct bio **bio = & r10_bio->devs[i].bio;
243 		if (!BIO_SPECIAL(*bio))
244 			bio_put(*bio);
245 		*bio = NULL;
246 		bio = &r10_bio->devs[i].repl_bio;
247 		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
248 			bio_put(*bio);
249 		*bio = NULL;
250 	}
251 }
252 
253 static void free_r10bio(struct r10bio *r10_bio)
254 {
255 	struct r10conf *conf = r10_bio->mddev->private;
256 
257 	put_all_bios(conf, r10_bio);
258 	mempool_free(r10_bio, &conf->r10bio_pool);
259 }
260 
261 static void put_buf(struct r10bio *r10_bio)
262 {
263 	struct r10conf *conf = r10_bio->mddev->private;
264 
265 	mempool_free(r10_bio, &conf->r10buf_pool);
266 
267 	lower_barrier(conf);
268 }
269 
270 static void reschedule_retry(struct r10bio *r10_bio)
271 {
272 	unsigned long flags;
273 	struct mddev *mddev = r10_bio->mddev;
274 	struct r10conf *conf = mddev->private;
275 
276 	spin_lock_irqsave(&conf->device_lock, flags);
277 	list_add(&r10_bio->retry_list, &conf->retry_list);
278 	conf->nr_queued ++;
279 	spin_unlock_irqrestore(&conf->device_lock, flags);
280 
281 	/* wake up frozen array... */
282 	wake_up(&conf->wait_barrier);
283 
284 	md_wakeup_thread(mddev->thread);
285 }
286 
287 /*
288  * raid_end_bio_io() is called when we have finished servicing a mirrored
289  * operation and are ready to return a success/failure code to the buffer
290  * cache layer.
291  */
292 static void raid_end_bio_io(struct r10bio *r10_bio)
293 {
294 	struct bio *bio = r10_bio->master_bio;
295 	struct r10conf *conf = r10_bio->mddev->private;
296 
297 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
298 		bio->bi_status = BLK_STS_IOERR;
299 
300 	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
301 		bio_end_io_acct(bio, r10_bio->start_time);
302 	bio_endio(bio);
303 	/*
304 	 * Wake up any possible resync thread that waits for the device
305 	 * to go idle.
306 	 */
307 	allow_barrier(conf);
308 
309 	free_r10bio(r10_bio);
310 }
311 
312 /*
313  * Update disk head position estimator based on IRQ completion info.
314  */
315 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
316 {
317 	struct r10conf *conf = r10_bio->mddev->private;
318 
319 	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
320 		r10_bio->devs[slot].addr + (r10_bio->sectors);
321 }
322 
323 /*
324  * Find the disk number which triggered given bio
325  */
326 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
327 			 struct bio *bio, int *slotp, int *replp)
328 {
329 	int slot;
330 	int repl = 0;
331 
332 	for (slot = 0; slot < conf->geo.raid_disks; slot++) {
333 		if (r10_bio->devs[slot].bio == bio)
334 			break;
335 		if (r10_bio->devs[slot].repl_bio == bio) {
336 			repl = 1;
337 			break;
338 		}
339 	}
340 
341 	update_head_pos(slot, r10_bio);
342 
343 	if (slotp)
344 		*slotp = slot;
345 	if (replp)
346 		*replp = repl;
347 	return r10_bio->devs[slot].devnum;
348 }
349 
350 static void raid10_end_read_request(struct bio *bio)
351 {
352 	int uptodate = !bio->bi_status;
353 	struct r10bio *r10_bio = bio->bi_private;
354 	int slot;
355 	struct md_rdev *rdev;
356 	struct r10conf *conf = r10_bio->mddev->private;
357 
358 	slot = r10_bio->read_slot;
359 	rdev = r10_bio->devs[slot].rdev;
360 	/*
361 	 * this branch is our 'one mirror IO has finished' event handler:
362 	 */
363 	update_head_pos(slot, r10_bio);
364 
365 	if (uptodate) {
366 		/*
367 		 * Set R10BIO_Uptodate in our master bio, so that
368 		 * we will return a good error code to the higher
369 		 * levels even if IO on some other mirrored buffer fails.
370 		 *
371 		 * The 'master' represents the composite IO operation to
372 		 * user-side. So if something waits for IO, then it will
373 		 * wait for the 'master' bio.
374 		 */
375 		set_bit(R10BIO_Uptodate, &r10_bio->state);
376 	} else {
377 		/* If all other devices that store this block have
378 		 * failed, we want to return the error upwards rather
379 		 * than fail the last device.  Here we redefine
380 		 * "uptodate" to mean "Don't want to retry"
381 		 */
382 		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
383 			     rdev->raid_disk))
384 			uptodate = 1;
385 	}
386 	if (uptodate) {
387 		raid_end_bio_io(r10_bio);
388 		rdev_dec_pending(rdev, conf->mddev);
389 	} else {
390 		/*
391 		 * oops, read error - keep the refcount on the rdev
392 		 */
393 		char b[BDEVNAME_SIZE];
394 		pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
395 				   mdname(conf->mddev),
396 				   bdevname(rdev->bdev, b),
397 				   (unsigned long long)r10_bio->sector);
398 		set_bit(R10BIO_ReadError, &r10_bio->state);
399 		reschedule_retry(r10_bio);
400 	}
401 }
402 
403 static void close_write(struct r10bio *r10_bio)
404 {
405 	/* clear the bitmap if all writes complete successfully */
406 	md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
407 			   r10_bio->sectors,
408 			   !test_bit(R10BIO_Degraded, &r10_bio->state),
409 			   0);
410 	md_write_end(r10_bio->mddev);
411 }
412 
413 static void one_write_done(struct r10bio *r10_bio)
414 {
415 	if (atomic_dec_and_test(&r10_bio->remaining)) {
416 		if (test_bit(R10BIO_WriteError, &r10_bio->state))
417 			reschedule_retry(r10_bio);
418 		else {
419 			close_write(r10_bio);
420 			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
421 				reschedule_retry(r10_bio);
422 			else
423 				raid_end_bio_io(r10_bio);
424 		}
425 	}
426 }
427 
428 static void raid10_end_write_request(struct bio *bio)
429 {
430 	struct r10bio *r10_bio = bio->bi_private;
431 	int dev;
432 	int dec_rdev = 1;
433 	struct r10conf *conf = r10_bio->mddev->private;
434 	int slot, repl;
435 	struct md_rdev *rdev = NULL;
436 	struct bio *to_put = NULL;
437 	bool discard_error;
438 
439 	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
440 
441 	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
442 
443 	if (repl)
444 		rdev = conf->mirrors[dev].replacement;
445 	if (!rdev) {
446 		smp_rmb();
447 		repl = 0;
448 		rdev = conf->mirrors[dev].rdev;
449 	}
450 	/*
451 	 * this branch is our 'one mirror IO has finished' event handler:
452 	 */
453 	if (bio->bi_status && !discard_error) {
454 		if (repl)
455 			/* Never record new bad blocks to replacement,
456 			 * just fail it.
457 			 */
458 			md_error(rdev->mddev, rdev);
459 		else {
460 			set_bit(WriteErrorSeen,	&rdev->flags);
461 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
462 				set_bit(MD_RECOVERY_NEEDED,
463 					&rdev->mddev->recovery);
464 
465 			dec_rdev = 0;
466 			if (test_bit(FailFast, &rdev->flags) &&
467 			    (bio->bi_opf & MD_FAILFAST)) {
468 				md_error(rdev->mddev, rdev);
469 			}
470 
471 			/*
472 			 * When the device is faulty, it is not necessary to
473 			 * handle write error.
474 			 */
475 			if (!test_bit(Faulty, &rdev->flags))
476 				set_bit(R10BIO_WriteError, &r10_bio->state);
477 			else {
478 				/* Fail the request */
479 				set_bit(R10BIO_Degraded, &r10_bio->state);
480 				r10_bio->devs[slot].bio = NULL;
481 				to_put = bio;
482 				dec_rdev = 1;
483 			}
484 		}
485 	} else {
486 		/*
487 		 * Set R10BIO_Uptodate in our master bio, so that
488 		 * we will return a good error code to the higher
489 		 * levels even if IO on some other mirrored buffer fails.
490 		 *
491 		 * The 'master' represents the composite IO operation to
492 		 * user-side. So if something waits for IO, then it will
493 		 * wait for the 'master' bio.
494 		 */
495 		sector_t first_bad;
496 		int bad_sectors;
497 
498 		/*
499 		 * Do not set R10BIO_Uptodate if the current device is
500 		 * rebuilding or Faulty. This is because we cannot use
501 		 * such a device for properly reading the data back (we could
502 		 * potentially use it, if the current write would have fallen
503 		 * before rdev->recovery_offset, but for simplicity we don't
504 		 * check this here).
505 		 */
506 		if (test_bit(In_sync, &rdev->flags) &&
507 		    !test_bit(Faulty, &rdev->flags))
508 			set_bit(R10BIO_Uptodate, &r10_bio->state);
509 
510 		/* Maybe we can clear some bad blocks. */
511 		if (is_badblock(rdev,
512 				r10_bio->devs[slot].addr,
513 				r10_bio->sectors,
514 				&first_bad, &bad_sectors) && !discard_error) {
515 			bio_put(bio);
516 			if (repl)
517 				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
518 			else
519 				r10_bio->devs[slot].bio = IO_MADE_GOOD;
520 			dec_rdev = 0;
521 			set_bit(R10BIO_MadeGood, &r10_bio->state);
522 		}
523 	}
524 
525 	/*
526 	 *
527 	 * Let's see if all mirrored write operations have finished
528 	 * already.
529 	 */
530 	one_write_done(r10_bio);
531 	if (dec_rdev)
532 		rdev_dec_pending(rdev, conf->mddev);
533 	if (to_put)
534 		bio_put(to_put);
535 }
536 
537 /*
538  * RAID10 layout manager
539  * As well as the chunksize and raid_disks count, there are two
540  * parameters: near_copies and far_copies.
541  * near_copies * far_copies must be <= raid_disks.
542  * Normally one of these will be 1.
543  * If both are 1, we get raid0.
544  * If near_copies == raid_disks, we get raid1.
545  *
546  * Chunks are laid out in raid0 style with near_copies copies of the
547  * first chunk, followed by near_copies copies of the next chunk and
548  * so on.
549  * If far_copies > 1, then after 1/far_copies of the array has been assigned
550  * as described above, we start again with a device offset of near_copies.
551  * So we effectively have another copy of the whole array further down all
552  * the drives, but with blocks on different drives.
553  * With this layout, a block is never stored twice on the same device.
554  *
555  * raid10_find_phys finds the sector offset of a given virtual sector
556  * on each device that it is on.
557  *
558  * raid10_find_virt does the reverse mapping, from a device and a
559  * sector offset to a virtual address
560  */
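/*
 * A minimal sketch of the forward mapping for the simple near-only case
 * (far_copies == 1, far_offset == 0); variable names here are purely
 * illustrative, and the real code below also handles far copies and far
 * sets:
 *
 *	chunk  = virt_sector >> chunk_shift;
 *	offset = virt_sector &  chunk_mask;
 *	chunk *= near_copies;
 *	stripe = chunk / raid_disks;
 *	dev    = chunk % raid_disks;
 *	dev_sector = (stripe << chunk_shift) + offset;
 *
 * The near copies then land on dev, dev+1, ... (wrapping modulo
 * raid_disks), all at dev_sector, with the sector advanced by one chunk
 * whenever the device number wraps.
 */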
561 
562 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
563 {
564 	int n,f;
565 	sector_t sector;
566 	sector_t chunk;
567 	sector_t stripe;
568 	int dev;
569 	int slot = 0;
570 	int last_far_set_start, last_far_set_size;
571 
572 	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
573 	last_far_set_start *= geo->far_set_size;
574 
575 	last_far_set_size = geo->far_set_size;
576 	last_far_set_size += (geo->raid_disks % geo->far_set_size);
577 
578 	/* now calculate first sector/dev */
579 	chunk = r10bio->sector >> geo->chunk_shift;
580 	sector = r10bio->sector & geo->chunk_mask;
581 
582 	chunk *= geo->near_copies;
583 	stripe = chunk;
584 	dev = sector_div(stripe, geo->raid_disks);
585 	if (geo->far_offset)
586 		stripe *= geo->far_copies;
587 
588 	sector += stripe << geo->chunk_shift;
589 
590 	/* and calculate all the others */
591 	for (n = 0; n < geo->near_copies; n++) {
592 		int d = dev;
593 		int set;
594 		sector_t s = sector;
595 		r10bio->devs[slot].devnum = d;
596 		r10bio->devs[slot].addr = s;
597 		slot++;
598 
599 		for (f = 1; f < geo->far_copies; f++) {
600 			set = d / geo->far_set_size;
601 			d += geo->near_copies;
602 
603 			if ((geo->raid_disks % geo->far_set_size) &&
604 			    (d > last_far_set_start)) {
605 				d -= last_far_set_start;
606 				d %= last_far_set_size;
607 				d += last_far_set_start;
608 			} else {
609 				d %= geo->far_set_size;
610 				d += geo->far_set_size * set;
611 			}
612 			s += geo->stride;
613 			r10bio->devs[slot].devnum = d;
614 			r10bio->devs[slot].addr = s;
615 			slot++;
616 		}
617 		dev++;
618 		if (dev >= geo->raid_disks) {
619 			dev = 0;
620 			sector += (geo->chunk_mask + 1);
621 		}
622 	}
623 }
624 
625 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
626 {
627 	struct geom *geo = &conf->geo;
628 
629 	if (conf->reshape_progress != MaxSector &&
630 	    ((r10bio->sector >= conf->reshape_progress) !=
631 	     conf->mddev->reshape_backwards)) {
632 		set_bit(R10BIO_Previous, &r10bio->state);
633 		geo = &conf->prev;
634 	} else
635 		clear_bit(R10BIO_Previous, &r10bio->state);
636 
637 	__raid10_find_phys(geo, r10bio);
638 }
639 
640 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
641 {
642 	sector_t offset, chunk, vchunk;
643 	/* Never use conf->prev as this is only called during resync
644 	 * or recovery, so reshape isn't happening
645 	 */
646 	struct geom *geo = &conf->geo;
647 	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
648 	int far_set_size = geo->far_set_size;
649 	int last_far_set_start;
650 
651 	if (geo->raid_disks % geo->far_set_size) {
652 		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
653 		last_far_set_start *= geo->far_set_size;
654 
655 		if (dev >= last_far_set_start) {
656 			far_set_size = geo->far_set_size;
657 			far_set_size += (geo->raid_disks % geo->far_set_size);
658 			far_set_start = last_far_set_start;
659 		}
660 	}
661 
662 	offset = sector & geo->chunk_mask;
663 	if (geo->far_offset) {
664 		int fc;
665 		chunk = sector >> geo->chunk_shift;
666 		fc = sector_div(chunk, geo->far_copies);
667 		dev -= fc * geo->near_copies;
668 		if (dev < far_set_start)
669 			dev += far_set_size;
670 	} else {
671 		while (sector >= geo->stride) {
672 			sector -= geo->stride;
673 			if (dev < (geo->near_copies + far_set_start))
674 				dev += far_set_size - geo->near_copies;
675 			else
676 				dev -= geo->near_copies;
677 		}
678 		chunk = sector >> geo->chunk_shift;
679 	}
680 	vchunk = chunk * geo->raid_disks + dev;
681 	sector_div(vchunk, geo->near_copies);
682 	return (vchunk << geo->chunk_shift) + offset;
683 }
684 
685 /*
686  * This routine returns the disk from which the requested read should
687  * be done. There is a per-array 'next expected sequential IO' sector
688  * number - if this matches on the next IO then we use the last disk.
689  * There is also a per-disk 'last known head position' sector that is
690  * maintained from IRQ context; both the normal and the resync IO
691  * completion handlers update this position correctly. If there is no
692  * perfect sequential match then we pick the disk whose head is closest.
693  *
694  * If there are 2 mirrors in the same 2 devices, performance degrades
695  * because position is mirror, not device based.
696  *
697  * The rdev for the device selected will have nr_pending incremented.
698  */
699 
700 /*
701  * FIXME: possibly should rethink readbalancing and do it differently
702  * depending on near_copies / far_copies geometry.
703  */
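/*
 * Rough order of preference applied below (a summary of the logic, not a
 * separate policy): skip faulty, blocked or not-yet-recovered slots and
 * honour known bad blocks; on rotational devices prefer the copy whose
 * recorded head position is closest to the request (an idle disk counts
 * as distance 0 for 'near' layouts, and 'far' layouts always prefer the
 * lowest address); if the array contains any non-rotational device,
 * prefer the non-rotational copy with the fewest pending requests instead.
 */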
704 static struct md_rdev *read_balance(struct r10conf *conf,
705 				    struct r10bio *r10_bio,
706 				    int *max_sectors)
707 {
708 	const sector_t this_sector = r10_bio->sector;
709 	int disk, slot;
710 	int sectors = r10_bio->sectors;
711 	int best_good_sectors;
712 	sector_t new_distance, best_dist;
713 	struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
714 	int do_balance;
715 	int best_dist_slot, best_pending_slot;
716 	bool has_nonrot_disk = false;
717 	unsigned int min_pending;
718 	struct geom *geo = &conf->geo;
719 
720 	raid10_find_phys(conf, r10_bio);
721 	rcu_read_lock();
722 	best_dist_slot = -1;
723 	min_pending = UINT_MAX;
724 	best_dist_rdev = NULL;
725 	best_pending_rdev = NULL;
726 	best_dist = MaxSector;
727 	best_good_sectors = 0;
728 	do_balance = 1;
729 	clear_bit(R10BIO_FailFast, &r10_bio->state);
730 	/*
731 	 * Check if we can balance. We can balance on the whole
732 	 * device if no resync is going on (recovery is ok), or below
733 	 * the resync window. We take the first readable disk when
734 	 * above the resync window.
735 	 */
736 	if ((conf->mddev->recovery_cp < MaxSector
737 	     && (this_sector + sectors >= conf->next_resync)) ||
738 	    (mddev_is_clustered(conf->mddev) &&
739 	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
740 					    this_sector + sectors)))
741 		do_balance = 0;
742 
743 	for (slot = 0; slot < conf->copies ; slot++) {
744 		sector_t first_bad;
745 		int bad_sectors;
746 		sector_t dev_sector;
747 		unsigned int pending;
748 		bool nonrot;
749 
750 		if (r10_bio->devs[slot].bio == IO_BLOCKED)
751 			continue;
752 		disk = r10_bio->devs[slot].devnum;
753 		rdev = rcu_dereference(conf->mirrors[disk].replacement);
754 		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
755 		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
756 			rdev = rcu_dereference(conf->mirrors[disk].rdev);
757 		if (rdev == NULL ||
758 		    test_bit(Faulty, &rdev->flags))
759 			continue;
760 		if (!test_bit(In_sync, &rdev->flags) &&
761 		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
762 			continue;
763 
764 		dev_sector = r10_bio->devs[slot].addr;
765 		if (is_badblock(rdev, dev_sector, sectors,
766 				&first_bad, &bad_sectors)) {
767 			if (best_dist < MaxSector)
768 				/* Already have a better slot */
769 				continue;
770 			if (first_bad <= dev_sector) {
771 				/* Cannot read here.  If this is the
772 				 * 'primary' device, then we must not read
773 				 * beyond 'bad_sectors' from another device.
774 				 */
775 				bad_sectors -= (dev_sector - first_bad);
776 				if (!do_balance && sectors > bad_sectors)
777 					sectors = bad_sectors;
778 				if (best_good_sectors > sectors)
779 					best_good_sectors = sectors;
780 			} else {
781 				sector_t good_sectors =
782 					first_bad - dev_sector;
783 				if (good_sectors > best_good_sectors) {
784 					best_good_sectors = good_sectors;
785 					best_dist_slot = slot;
786 					best_dist_rdev = rdev;
787 				}
788 				if (!do_balance)
789 					/* Must read from here */
790 					break;
791 			}
792 			continue;
793 		} else
794 			best_good_sectors = sectors;
795 
796 		if (!do_balance)
797 			break;
798 
799 		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
800 		has_nonrot_disk |= nonrot;
801 		pending = atomic_read(&rdev->nr_pending);
802 		if (min_pending > pending && nonrot) {
803 			min_pending = pending;
804 			best_pending_slot = slot;
805 			best_pending_rdev = rdev;
806 		}
807 
808 		if (best_dist_slot >= 0)
809 			/* At least 2 disks to choose from so failfast is OK */
810 			set_bit(R10BIO_FailFast, &r10_bio->state);
811 		/* This optimisation is debatable, and completely destroys
812 		 * sequential read speed for 'far copies' arrays.  So only
813 		 * keep it for 'near' arrays, and review those later.
814 		 */
815 		if (geo->near_copies > 1 && !pending)
816 			new_distance = 0;
817 
818 		/* for far > 1 always use the lowest address */
819 		else if (geo->far_copies > 1)
820 			new_distance = r10_bio->devs[slot].addr;
821 		else
822 			new_distance = abs(r10_bio->devs[slot].addr -
823 					   conf->mirrors[disk].head_position);
824 
825 		if (new_distance < best_dist) {
826 			best_dist = new_distance;
827 			best_dist_slot = slot;
828 			best_dist_rdev = rdev;
829 		}
830 	}
831 	if (slot >= conf->copies) {
832 		if (has_nonrot_disk) {
833 			slot = best_pending_slot;
834 			rdev = best_pending_rdev;
835 		} else {
836 			slot = best_dist_slot;
837 			rdev = best_dist_rdev;
838 		}
839 	}
840 
841 	if (slot >= 0) {
842 		atomic_inc(&rdev->nr_pending);
843 		r10_bio->read_slot = slot;
844 	} else
845 		rdev = NULL;
846 	rcu_read_unlock();
847 	*max_sectors = best_good_sectors;
848 
849 	return rdev;
850 }
851 
852 static void flush_pending_writes(struct r10conf *conf)
853 {
854 	/* Any writes that have been queued but are awaiting
855 	 * bitmap updates get flushed here.
856 	 */
857 	spin_lock_irq(&conf->device_lock);
858 
859 	if (conf->pending_bio_list.head) {
860 		struct blk_plug plug;
861 		struct bio *bio;
862 
863 		bio = bio_list_get(&conf->pending_bio_list);
864 		conf->pending_count = 0;
865 		spin_unlock_irq(&conf->device_lock);
866 
867 		/*
868 		 * As this is called in a wait_event() loop (see freeze_array),
869 		 * current->state might be TASK_UNINTERRUPTIBLE which will
870 		 * cause a warning when we prepare to wait again.  As it is
871 		 * rare that this path is taken, it is perfectly safe to force
872 		 * us to go around the wait_event() loop again, so the warning
873 		 * is a false-positive. Silence the warning by resetting
874 		 * thread state
875 		 */
876 		__set_current_state(TASK_RUNNING);
877 
878 		blk_start_plug(&plug);
879 		/* flush any pending bitmap writes to disk
880 		 * before proceeding w/ I/O */
881 		md_bitmap_unplug(conf->mddev->bitmap);
882 		wake_up(&conf->wait_barrier);
883 
884 		while (bio) { /* submit pending writes */
885 			struct bio *next = bio->bi_next;
886 			struct md_rdev *rdev = (void*)bio->bi_bdev;
887 			bio->bi_next = NULL;
888 			bio_set_dev(bio, rdev->bdev);
889 			if (test_bit(Faulty, &rdev->flags)) {
890 				bio_io_error(bio);
891 			} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
892 					    !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
893 				/* Just ignore it */
894 				bio_endio(bio);
895 			else
896 				submit_bio_noacct(bio);
897 			bio = next;
898 		}
899 		blk_finish_plug(&plug);
900 	} else
901 		spin_unlock_irq(&conf->device_lock);
902 }
903 
904 /* Barriers....
905  * Sometimes we need to suspend IO while we do something else,
906  * either some resync/recovery, or reconfigure the array.
907  * To do this we raise a 'barrier'.
908  * The 'barrier' is a counter that can be raised multiple times
909  * to count how many activities are happening which preclude
910  * normal IO.
911  * We can only raise the barrier if there is no pending IO.
912  * i.e. if nr_pending == 0.
913  * We choose only to raise the barrier if no-one is waiting for the
914  * barrier to go down.  This means that as soon as an IO request
915  * is ready, no other operations which require a barrier will start
916  * until the IO request has had a chance.
917  *
918  * So: regular IO calls 'wait_barrier'.  When that returns there
919  *    is no background IO happening.  It must arrange to call
920  *    allow_barrier when it has finished its IO.
921  * background IO calls must call raise_barrier.  Once that returns
922  *    there is no normal IO happening.  It must arrange to call
923  *    lower_barrier when the particular background IO completes.
924  */
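/*
 * The expected pairing, shown schematically (callers and ordering here are
 * only illustrative):
 *
 *	regular IO				background resync/recovery
 *	----------				--------------------------
 *	wait_barrier(conf, ...);		raise_barrier(conf, force);
 *	... submit the bios ...			... handle one sync chunk ...
 *	allow_barrier(conf);			lower_barrier(conf);
 *
 * freeze_array()/unfreeze_array() below additionally quiesce both kinds
 * of IO while a failed request is being handled.
 */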
925 
926 static void raise_barrier(struct r10conf *conf, int force)
927 {
928 	BUG_ON(force && !conf->barrier);
929 	spin_lock_irq(&conf->resync_lock);
930 
931 	/* Wait until no block IO is waiting (unless 'force') */
932 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
933 			    conf->resync_lock);
934 
935 	/* block any new IO from starting */
936 	conf->barrier++;
937 
938 	/* Now wait for all pending IO to complete */
939 	wait_event_lock_irq(conf->wait_barrier,
940 			    !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
941 			    conf->resync_lock);
942 
943 	spin_unlock_irq(&conf->resync_lock);
944 }
945 
946 static void lower_barrier(struct r10conf *conf)
947 {
948 	unsigned long flags;
949 	spin_lock_irqsave(&conf->resync_lock, flags);
950 	conf->barrier--;
951 	spin_unlock_irqrestore(&conf->resync_lock, flags);
952 	wake_up(&conf->wait_barrier);
953 }
954 
955 static bool wait_barrier(struct r10conf *conf, bool nowait)
956 {
957 	bool ret = true;
958 
959 	spin_lock_irq(&conf->resync_lock);
960 	if (conf->barrier) {
961 		struct bio_list *bio_list = current->bio_list;
962 		conf->nr_waiting++;
963 		/* Wait for the barrier to drop.
964 		 * However if there are already pending
965 		 * requests (preventing the barrier from
966 		 * rising completely), and the
967 		 * pre-process bio queue isn't empty,
968 		 * then don't wait, as we need to empty
969 		 * that queue to get the nr_pending
970 		 * count down.
971 		 */
972 		/* Return false when nowait flag is set */
973 		if (nowait) {
974 			ret = false;
975 		} else {
976 			raid10_log(conf->mddev, "wait barrier");
977 			wait_event_lock_irq(conf->wait_barrier,
978 					    !conf->barrier ||
979 					    (atomic_read(&conf->nr_pending) &&
980 					     bio_list &&
981 					     (!bio_list_empty(&bio_list[0]) ||
982 					      !bio_list_empty(&bio_list[1]))) ||
983 					     /* move on if recovery thread is
984 					      * blocked by us
985 					      */
986 					     (conf->mddev->thread->tsk == current &&
987 					      test_bit(MD_RECOVERY_RUNNING,
988 						       &conf->mddev->recovery) &&
989 					      conf->nr_queued > 0),
990 					    conf->resync_lock);
991 		}
992 		conf->nr_waiting--;
993 		if (!conf->nr_waiting)
994 			wake_up(&conf->wait_barrier);
995 	}
996 	/* Only increment nr_pending when we wait */
997 	if (ret)
998 		atomic_inc(&conf->nr_pending);
999 	spin_unlock_irq(&conf->resync_lock);
1000 	return ret;
1001 }
1002 
1003 static void allow_barrier(struct r10conf *conf)
1004 {
1005 	if ((atomic_dec_and_test(&conf->nr_pending)) ||
1006 			(conf->array_freeze_pending))
1007 		wake_up(&conf->wait_barrier);
1008 }
1009 
1010 static void freeze_array(struct r10conf *conf, int extra)
1011 {
1012 	/* stop syncio and normal IO and wait for everything to
1013 	 * go quiet.
1014 	 * We increment barrier and nr_waiting, and then
1015 	 * wait until nr_pending matches nr_queued+extra.
1016 	 * This is called in the context of one normal IO request
1017 	 * that has failed. Thus any sync request that might be pending
1018 	 * will be blocked by nr_pending, and we need to wait for
1019 	 * pending IO requests to complete or be queued for re-try.
1020 	 * Thus the number queued (nr_queued) plus this request (extra)
1021 	 * must match the number of pending IOs (nr_pending) before
1022 	 * we continue.
1023 	 */
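	/*
	 * Illustrative numbers: with two requests already parked on
	 * conf->retry_list (nr_queued == 2) and this failed request counted
	 * as extra == 1, the wait below completes once nr_pending drops to 3,
	 * i.e. every remaining pending request is either queued for retry or
	 * is the one being handled.
	 */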
1024 	spin_lock_irq(&conf->resync_lock);
1025 	conf->array_freeze_pending++;
1026 	conf->barrier++;
1027 	conf->nr_waiting++;
1028 	wait_event_lock_irq_cmd(conf->wait_barrier,
1029 				atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
1030 				conf->resync_lock,
1031 				flush_pending_writes(conf));
1032 
1033 	conf->array_freeze_pending--;
1034 	spin_unlock_irq(&conf->resync_lock);
1035 }
1036 
1037 static void unfreeze_array(struct r10conf *conf)
1038 {
1039 	/* reverse the effect of the freeze */
1040 	spin_lock_irq(&conf->resync_lock);
1041 	conf->barrier--;
1042 	conf->nr_waiting--;
1043 	wake_up(&conf->wait_barrier);
1044 	spin_unlock_irq(&conf->resync_lock);
1045 }
1046 
1047 static sector_t choose_data_offset(struct r10bio *r10_bio,
1048 				   struct md_rdev *rdev)
1049 {
1050 	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1051 	    test_bit(R10BIO_Previous, &r10_bio->state))
1052 		return rdev->data_offset;
1053 	else
1054 		return rdev->new_data_offset;
1055 }
1056 
1057 struct raid10_plug_cb {
1058 	struct blk_plug_cb	cb;
1059 	struct bio_list		pending;
1060 	int			pending_cnt;
1061 };
1062 
1063 static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1064 {
1065 	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1066 						   cb);
1067 	struct mddev *mddev = plug->cb.data;
1068 	struct r10conf *conf = mddev->private;
1069 	struct bio *bio;
1070 
1071 	if (from_schedule || current->bio_list) {
1072 		spin_lock_irq(&conf->device_lock);
1073 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1074 		conf->pending_count += plug->pending_cnt;
1075 		spin_unlock_irq(&conf->device_lock);
1076 		wake_up(&conf->wait_barrier);
1077 		md_wakeup_thread(mddev->thread);
1078 		kfree(plug);
1079 		return;
1080 	}
1081 
1082 	/* we aren't scheduling, so we can do the write-out directly. */
1083 	bio = bio_list_get(&plug->pending);
1084 	md_bitmap_unplug(mddev->bitmap);
1085 	wake_up(&conf->wait_barrier);
1086 
1087 	while (bio) { /* submit pending writes */
1088 		struct bio *next = bio->bi_next;
1089 		struct md_rdev *rdev = (void*)bio->bi_bdev;
1090 		bio->bi_next = NULL;
1091 		bio_set_dev(bio, rdev->bdev);
1092 		if (test_bit(Faulty, &rdev->flags)) {
1093 			bio_io_error(bio);
1094 		} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
1095 				    !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
1096 			/* Just ignore it */
1097 			bio_endio(bio);
1098 		else
1099 			submit_bio_noacct(bio);
1100 		bio = next;
1101 	}
1102 	kfree(plug);
1103 }
1104 
1105 /*
1106  * 1. Register the new request and wait if the reconstruction thread has put
1107  * up a barrier for new requests. Continue immediately if no resync is
1108  * currently active.
1109  * 2. If the IO spans the reshape position, wait for the reshape to pass.
1110  */
1111 static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1112 				 struct bio *bio, sector_t sectors)
1113 {
1114 	/* Bail out if REQ_NOWAIT is set for the bio */
1115 	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
1116 		bio_wouldblock_error(bio);
1117 		return false;
1118 	}
1119 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1120 	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1121 	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1122 		allow_barrier(conf);
1123 		if (bio->bi_opf & REQ_NOWAIT) {
1124 			bio_wouldblock_error(bio);
1125 			return false;
1126 		}
1127 		raid10_log(conf->mddev, "wait reshape");
1128 		wait_event(conf->wait_barrier,
1129 			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1130 			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1131 			   sectors);
1132 		wait_barrier(conf, false);
1133 	}
1134 	return true;
1135 }
1136 
1137 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1138 				struct r10bio *r10_bio)
1139 {
1140 	struct r10conf *conf = mddev->private;
1141 	struct bio *read_bio;
1142 	const int op = bio_op(bio);
1143 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1144 	int max_sectors;
1145 	struct md_rdev *rdev;
1146 	char b[BDEVNAME_SIZE];
1147 	int slot = r10_bio->read_slot;
1148 	struct md_rdev *err_rdev = NULL;
1149 	gfp_t gfp = GFP_NOIO;
1150 
1151 	if (slot >= 0 && r10_bio->devs[slot].rdev) {
1152 		/*
1153 		 * This is an error retry, but we cannot
1154 		 * safely dereference the rdev in the r10_bio,
1155 		 * we must use the one in conf.
1156 		 * If it has already been disconnected (unlikely)
1157 		 * we lose the device name in error messages.
1158 		 */
1159 		int disk;
1160 		/*
1161 		 * As we are blocking raid10, it is a little safer to
1162 		 * use __GFP_HIGH.
1163 		 */
1164 		gfp = GFP_NOIO | __GFP_HIGH;
1165 
1166 		rcu_read_lock();
1167 		disk = r10_bio->devs[slot].devnum;
1168 		err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
1169 		if (err_rdev)
1170 			bdevname(err_rdev->bdev, b);
1171 		else {
1172 			strcpy(b, "???");
1173 			/* This never gets dereferenced */
1174 			err_rdev = r10_bio->devs[slot].rdev;
1175 		}
1176 		rcu_read_unlock();
1177 	}
1178 
1179 	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
1180 		return;
1181 	rdev = read_balance(conf, r10_bio, &max_sectors);
1182 	if (!rdev) {
1183 		if (err_rdev) {
1184 			pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
1185 					    mdname(mddev), b,
1186 					    (unsigned long long)r10_bio->sector);
1187 		}
1188 		raid_end_bio_io(r10_bio);
1189 		return;
1190 	}
1191 	if (err_rdev)
1192 		pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
1193 				   mdname(mddev),
1194 				   bdevname(rdev->bdev, b),
1195 				   (unsigned long long)r10_bio->sector);
1196 	if (max_sectors < bio_sectors(bio)) {
1197 		struct bio *split = bio_split(bio, max_sectors,
1198 					      gfp, &conf->bio_split);
1199 		bio_chain(split, bio);
1200 		allow_barrier(conf);
1201 		submit_bio_noacct(bio);
1202 		wait_barrier(conf, false);
1203 		bio = split;
1204 		r10_bio->master_bio = bio;
1205 		r10_bio->sectors = max_sectors;
1206 	}
1207 	slot = r10_bio->read_slot;
1208 
1209 	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
1210 		r10_bio->start_time = bio_start_io_acct(bio);
1211 	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
1212 
1213 	r10_bio->devs[slot].bio = read_bio;
1214 	r10_bio->devs[slot].rdev = rdev;
1215 
1216 	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1217 		choose_data_offset(r10_bio, rdev);
1218 	read_bio->bi_end_io = raid10_end_read_request;
1219 	bio_set_op_attrs(read_bio, op, do_sync);
1220 	if (test_bit(FailFast, &rdev->flags) &&
1221 	    test_bit(R10BIO_FailFast, &r10_bio->state))
1222 	        read_bio->bi_opf |= MD_FAILFAST;
1223 	read_bio->bi_private = r10_bio;
1224 
1225 	if (mddev->gendisk)
1226 	        trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
1227 	                              r10_bio->sector);
1228 	submit_bio_noacct(read_bio);
1229 	return;
1230 }
1231 
1232 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1233 				  struct bio *bio, bool replacement,
1234 				  int n_copy)
1235 {
1236 	const int op = bio_op(bio);
1237 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1238 	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
1239 	unsigned long flags;
1240 	struct blk_plug_cb *cb;
1241 	struct raid10_plug_cb *plug = NULL;
1242 	struct r10conf *conf = mddev->private;
1243 	struct md_rdev *rdev;
1244 	int devnum = r10_bio->devs[n_copy].devnum;
1245 	struct bio *mbio;
1246 
1247 	if (replacement) {
1248 		rdev = conf->mirrors[devnum].replacement;
1249 		if (rdev == NULL) {
1250 			/* Replacement just got moved to main 'rdev' */
1251 			smp_mb();
1252 			rdev = conf->mirrors[devnum].rdev;
1253 		}
1254 	} else
1255 		rdev = conf->mirrors[devnum].rdev;
1256 
1257 	mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
1258 	if (replacement)
1259 		r10_bio->devs[n_copy].repl_bio = mbio;
1260 	else
1261 		r10_bio->devs[n_copy].bio = mbio;
1262 
1263 	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
1264 				   choose_data_offset(r10_bio, rdev));
1265 	mbio->bi_end_io	= raid10_end_write_request;
1266 	bio_set_op_attrs(mbio, op, do_sync | do_fua);
1267 	if (!replacement && test_bit(FailFast,
1268 				     &conf->mirrors[devnum].rdev->flags)
1269 			 && enough(conf, devnum))
1270 		mbio->bi_opf |= MD_FAILFAST;
1271 	mbio->bi_private = r10_bio;
1272 
1273 	if (conf->mddev->gendisk)
1274 		trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
1275 				      r10_bio->sector);
1276 	/* flush_pending_writes() needs access to the rdev so...*/
1277 	mbio->bi_bdev = (void *)rdev;
1278 
1279 	atomic_inc(&r10_bio->remaining);
1280 
1281 	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
1282 	if (cb)
1283 		plug = container_of(cb, struct raid10_plug_cb, cb);
1284 	else
1285 		plug = NULL;
1286 	if (plug) {
1287 		bio_list_add(&plug->pending, mbio);
1288 		plug->pending_cnt++;
1289 	} else {
1290 		spin_lock_irqsave(&conf->device_lock, flags);
1291 		bio_list_add(&conf->pending_bio_list, mbio);
1292 		conf->pending_count++;
1293 		spin_unlock_irqrestore(&conf->device_lock, flags);
1294 		md_wakeup_thread(mddev->thread);
1295 	}
1296 }
1297 
1298 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
1299 {
1300 	int i;
1301 	struct r10conf *conf = mddev->private;
1302 	struct md_rdev *blocked_rdev;
1303 
1304 retry_wait:
1305 	blocked_rdev = NULL;
1306 	rcu_read_lock();
1307 	for (i = 0; i < conf->copies; i++) {
1308 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1309 		struct md_rdev *rrdev = rcu_dereference(
1310 			conf->mirrors[i].replacement);
1311 		if (rdev == rrdev)
1312 			rrdev = NULL;
1313 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1314 			atomic_inc(&rdev->nr_pending);
1315 			blocked_rdev = rdev;
1316 			break;
1317 		}
1318 		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1319 			atomic_inc(&rrdev->nr_pending);
1320 			blocked_rdev = rrdev;
1321 			break;
1322 		}
1323 
1324 		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1325 			sector_t first_bad;
1326 			sector_t dev_sector = r10_bio->devs[i].addr;
1327 			int bad_sectors;
1328 			int is_bad;
1329 
1330 			/*
1331 			 * A discard request doesn't care about the write result,
1332 			 * so it doesn't need to wait for a blocked disk here.
1333 			 */
1334 			if (!r10_bio->sectors)
1335 				continue;
1336 
1337 			is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
1338 					     &first_bad, &bad_sectors);
1339 			if (is_bad < 0) {
1340 				/*
1341 				 * Mustn't write here until the bad block
1342 				 * is acknowledged
1343 				 */
1344 				atomic_inc(&rdev->nr_pending);
1345 				set_bit(BlockedBadBlocks, &rdev->flags);
1346 				blocked_rdev = rdev;
1347 				break;
1348 			}
1349 		}
1350 	}
1351 	rcu_read_unlock();
1352 
1353 	if (unlikely(blocked_rdev)) {
1354 		/* Have to wait for this device to get unblocked, then retry */
1355 		allow_barrier(conf);
1356 		raid10_log(conf->mddev, "%s wait rdev %d blocked",
1357 				__func__, blocked_rdev->raid_disk);
1358 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1359 		wait_barrier(conf, false);
1360 		goto retry_wait;
1361 	}
1362 }
1363 
1364 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1365 				 struct r10bio *r10_bio)
1366 {
1367 	struct r10conf *conf = mddev->private;
1368 	int i;
1369 	sector_t sectors;
1370 	int max_sectors;
1371 
1372 	if ((mddev_is_clustered(mddev) &&
1373 	     md_cluster_ops->area_resyncing(mddev, WRITE,
1374 					    bio->bi_iter.bi_sector,
1375 					    bio_end_sector(bio)))) {
1376 		DEFINE_WAIT(w);
1377 		/* Bail out if REQ_NOWAIT is set for the bio */
1378 		if (bio->bi_opf & REQ_NOWAIT) {
1379 			bio_wouldblock_error(bio);
1380 			return;
1381 		}
1382 		for (;;) {
1383 			prepare_to_wait(&conf->wait_barrier,
1384 					&w, TASK_IDLE);
1385 			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1386 				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
1387 				break;
1388 			schedule();
1389 		}
1390 		finish_wait(&conf->wait_barrier, &w);
1391 	}
1392 
1393 	sectors = r10_bio->sectors;
1394 	if (!regular_request_wait(mddev, conf, bio, sectors))
1395 		return;
1396 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1397 	    (mddev->reshape_backwards
1398 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1399 		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1400 	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1401 		bio->bi_iter.bi_sector < conf->reshape_progress))) {
1402 		/* Need to update reshape_position in metadata */
1403 		mddev->reshape_position = conf->reshape_progress;
1404 		set_mask_bits(&mddev->sb_flags, 0,
1405 			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1406 		md_wakeup_thread(mddev->thread);
1407 		if (bio->bi_opf & REQ_NOWAIT) {
1408 			allow_barrier(conf);
1409 			bio_wouldblock_error(bio);
1410 			return;
1411 		}
1412 		raid10_log(conf->mddev, "wait reshape metadata");
1413 		wait_event(mddev->sb_wait,
1414 			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1415 
1416 		conf->reshape_safe = mddev->reshape_position;
1417 	}
1418 
1419 	/* first select target devices under rcu_lock and
1420 	 * inc refcount on their rdev.  Record them by setting
1421 	 * bios[x] to bio
1422 	 * If there are known/acknowledged bad blocks on any device
1423 	 * on which we have seen a write error, we want to avoid
1424 	 * writing to those blocks.  This potentially requires several
1425 	 * writes to write around the bad blocks.  Each set of writes
1426 	 * gets its own r10_bio with a set of bios attached.
1427 	 */
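	/*
	 * Illustrative example of the write-around: for a 64-sector write
	 * where one copy has an acknowledged bad range starting 16 sectors
	 * into the request, max_sectors is clipped to 16 below, and the
	 * remaining 48 sectors are resubmitted via bio_split()/bio_chain()
	 * as a separate request.
	 */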
1428 
1429 	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1430 	raid10_find_phys(conf, r10_bio);
1431 
1432 	wait_blocked_dev(mddev, r10_bio);
1433 
1434 	rcu_read_lock();
1435 	max_sectors = r10_bio->sectors;
1436 
1437 	for (i = 0;  i < conf->copies; i++) {
1438 		int d = r10_bio->devs[i].devnum;
1439 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1440 		struct md_rdev *rrdev = rcu_dereference(
1441 			conf->mirrors[d].replacement);
1442 		if (rdev == rrdev)
1443 			rrdev = NULL;
1444 		if (rdev && (test_bit(Faulty, &rdev->flags)))
1445 			rdev = NULL;
1446 		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1447 			rrdev = NULL;
1448 
1449 		r10_bio->devs[i].bio = NULL;
1450 		r10_bio->devs[i].repl_bio = NULL;
1451 
1452 		if (!rdev && !rrdev) {
1453 			set_bit(R10BIO_Degraded, &r10_bio->state);
1454 			continue;
1455 		}
1456 		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1457 			sector_t first_bad;
1458 			sector_t dev_sector = r10_bio->devs[i].addr;
1459 			int bad_sectors;
1460 			int is_bad;
1461 
1462 			is_bad = is_badblock(rdev, dev_sector, max_sectors,
1463 					     &first_bad, &bad_sectors);
1464 			if (is_bad && first_bad <= dev_sector) {
1465 				/* Cannot write here at all */
1466 				bad_sectors -= (dev_sector - first_bad);
1467 				if (bad_sectors < max_sectors)
1468 					/* Mustn't write more than bad_sectors
1469 					 * to other devices yet
1470 					 */
1471 					max_sectors = bad_sectors;
1472 				/* We don't set R10BIO_Degraded as that
1473 				 * only applies if the disk is missing,
1474 				 * so it might be re-added, and we want to
1475 				 * know to recover this chunk.
1476 				 * In this case the device is here, and the
1477 				 * fact that this chunk is not in-sync is
1478 				 * recorded in the bad block log.
1479 				 */
1480 				continue;
1481 			}
1482 			if (is_bad) {
1483 				int good_sectors = first_bad - dev_sector;
1484 				if (good_sectors < max_sectors)
1485 					max_sectors = good_sectors;
1486 			}
1487 		}
1488 		if (rdev) {
1489 			r10_bio->devs[i].bio = bio;
1490 			atomic_inc(&rdev->nr_pending);
1491 		}
1492 		if (rrdev) {
1493 			r10_bio->devs[i].repl_bio = bio;
1494 			atomic_inc(&rrdev->nr_pending);
1495 		}
1496 	}
1497 	rcu_read_unlock();
1498 
1499 	if (max_sectors < r10_bio->sectors)
1500 		r10_bio->sectors = max_sectors;
1501 
1502 	if (r10_bio->sectors < bio_sectors(bio)) {
1503 		struct bio *split = bio_split(bio, r10_bio->sectors,
1504 					      GFP_NOIO, &conf->bio_split);
1505 		bio_chain(split, bio);
1506 		allow_barrier(conf);
1507 		submit_bio_noacct(bio);
1508 		wait_barrier(conf, false);
1509 		bio = split;
1510 		r10_bio->master_bio = bio;
1511 	}
1512 
1513 	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
1514 		r10_bio->start_time = bio_start_io_acct(bio);
1515 	atomic_set(&r10_bio->remaining, 1);
1516 	md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1517 
1518 	for (i = 0; i < conf->copies; i++) {
1519 		if (r10_bio->devs[i].bio)
1520 			raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1521 		if (r10_bio->devs[i].repl_bio)
1522 			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1523 	}
1524 	one_write_done(r10_bio);
1525 }
1526 
1527 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1528 {
1529 	struct r10conf *conf = mddev->private;
1530 	struct r10bio *r10_bio;
1531 
1532 	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1533 
1534 	r10_bio->master_bio = bio;
1535 	r10_bio->sectors = sectors;
1536 
1537 	r10_bio->mddev = mddev;
1538 	r10_bio->sector = bio->bi_iter.bi_sector;
1539 	r10_bio->state = 0;
1540 	r10_bio->read_slot = -1;
1541 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
1542 			conf->geo.raid_disks);
1543 
1544 	if (bio_data_dir(bio) == READ)
1545 		raid10_read_request(mddev, bio, r10_bio);
1546 	else
1547 		raid10_write_request(mddev, bio, r10_bio);
1548 }
1549 
1550 static void raid_end_discard_bio(struct r10bio *r10bio)
1551 {
1552 	struct r10conf *conf = r10bio->mddev->private;
1553 	struct r10bio *first_r10bio;
1554 
1555 	while (atomic_dec_and_test(&r10bio->remaining)) {
1556 
1557 		allow_barrier(conf);
1558 
1559 		if (!test_bit(R10BIO_Discard, &r10bio->state)) {
1560 			first_r10bio = (struct r10bio *)r10bio->master_bio;
1561 			free_r10bio(r10bio);
1562 			r10bio = first_r10bio;
1563 		} else {
1564 			md_write_end(r10bio->mddev);
1565 			bio_endio(r10bio->master_bio);
1566 			free_r10bio(r10bio);
1567 			break;
1568 		}
1569 	}
1570 }
1571 
1572 static void raid10_end_discard_request(struct bio *bio)
1573 {
1574 	struct r10bio *r10_bio = bio->bi_private;
1575 	struct r10conf *conf = r10_bio->mddev->private;
1576 	struct md_rdev *rdev = NULL;
1577 	int dev;
1578 	int slot, repl;
1579 
1580 	/*
1581 	 * We don't care about the return value of the discard bio
1582 	 */
1583 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
1584 		set_bit(R10BIO_Uptodate, &r10_bio->state);
1585 
1586 	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1587 	if (repl)
1588 		rdev = conf->mirrors[dev].replacement;
1589 	if (!rdev) {
1590 		/*
1591 		 * raid10_remove_disk uses smp_mb to make sure rdev is set to
1592 		 * replacement before setting replacement to NULL. It can read
1593 		 * rdev first without barrier protection even if replacement is NULL
1594 		 */
1595 		smp_rmb();
1596 		rdev = conf->mirrors[dev].rdev;
1597 	}
1598 
1599 	raid_end_discard_bio(r10_bio);
1600 	rdev_dec_pending(rdev, conf->mddev);
1601 }
1602 
1603 /*
1604  * There are some limitations on handling a discard bio:
1605  * 1st, the discard size must be bigger than stripe_size*2.
1606  * 2nd, if the discard bio spans the reshape progress, we use the old way to
1607  * handle the discard bio.
1608  */
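/*
 * Worked example of the stripe_size*2 limit (illustrative numbers): for a
 * 'near 2' layout with raid_disks = 4 and a 64KiB (128-sector) chunk,
 * stripe_data_disks = 4 / 2 = 2 and stripe_size = 2 * 128 = 256 sectors,
 * so only discards of at least 512 sectors (256KiB) are split and mapped
 * per-device here; smaller discards are left to the regular request path.
 */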
1609 static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
1610 {
1611 	struct r10conf *conf = mddev->private;
1612 	struct geom *geo = &conf->geo;
1613 	int far_copies = geo->far_copies;
1614 	bool first_copy = true;
1615 	struct r10bio *r10_bio, *first_r10bio;
1616 	struct bio *split;
1617 	int disk;
1618 	sector_t chunk;
1619 	unsigned int stripe_size;
1620 	unsigned int stripe_data_disks;
1621 	sector_t split_size;
1622 	sector_t bio_start, bio_end;
1623 	sector_t first_stripe_index, last_stripe_index;
1624 	sector_t start_disk_offset;
1625 	unsigned int start_disk_index;
1626 	sector_t end_disk_offset;
1627 	unsigned int end_disk_index;
1628 	unsigned int remainder;
1629 
1630 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1631 		return -EAGAIN;
1632 
1633 	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
1634 		bio_wouldblock_error(bio);
1635 		return 0;
1636 	}
1637 	wait_barrier(conf, false);
1638 
1639 	/*
1640 	 * Check reshape again to avoid a reshape happening after checking
1641 	 * MD_RECOVERY_RESHAPE and before wait_barrier
1642 	 */
1643 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1644 		goto out;
1645 
1646 	if (geo->near_copies)
1647 		stripe_data_disks = geo->raid_disks / geo->near_copies +
1648 					geo->raid_disks % geo->near_copies;
1649 	else
1650 		stripe_data_disks = geo->raid_disks;
1651 
1652 	stripe_size = stripe_data_disks << geo->chunk_shift;
1653 
1654 	bio_start = bio->bi_iter.bi_sector;
1655 	bio_end = bio_end_sector(bio);
1656 
1657 	/*
1658 	 * A discard bio may be smaller than the stripe size, or may cross a
1659 	 * stripe boundary while the discard region is larger than one stripe.
1660 	 * For the far offset layout, if the discard region is not aligned with
1661 	 * the stripe size, there is a hole when we submit the discard bio to a
1662 	 * member disk. For simplicity, we only handle discard bios whose
1663 	 * discard region is bigger than stripe_size * 2.
1664 	 */
1665 	if (bio_sectors(bio) < stripe_size*2)
1666 		goto out;
1667 
1668 	/*
1669 	 * Keep the bio aligned with the stripe size.
1670 	 */
1671 	div_u64_rem(bio_start, stripe_size, &remainder);
1672 	if (remainder) {
1673 		split_size = stripe_size - remainder;
1674 		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1675 		bio_chain(split, bio);
1676 		allow_barrier(conf);
1677 		/* Resend the first split part */
1678 		submit_bio_noacct(split);
1679 		wait_barrier(conf, false);
1680 	}
1681 	div_u64_rem(bio_end, stripe_size, &remainder);
1682 	if (remainder) {
1683 		split_size = bio_sectors(bio) - remainder;
1684 		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1685 		bio_chain(split, bio);
1686 		allow_barrier(conf);
1687 		/* Resend the second split part */
1688 		submit_bio_noacct(bio);
1689 		bio = split;
1690 		wait_barrier(conf, false);
1691 	}
1692 
1693 	bio_start = bio->bi_iter.bi_sector;
1694 	bio_end = bio_end_sector(bio);
1695 
1696 	/*
1697 	 * Raid10 uses the chunk as the unit to store data. It's similar to raid0.
1698 	 * One stripe contains the chunks from all member disks (one chunk from
1699 	 * each disk at the same LBA). For layout details, see 'man md 4'.
1700 	 */
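	/*
	 * Illustrative example of the index math below (made-up numbers,
	 * far_offset == 0): with raid_disks = 4, near_copies = 2 and a
	 * 128-sector chunk (chunk_shift = 7), bio_start = 1000 lies in
	 * logical chunk 7 at offset 104; chunk * near_copies = 14, so
	 * first_stripe_index = 14 / 4 = 3, start_disk_index = 14 % 4 = 2 and
	 * start_disk_offset = 104 + (3 << 7) = 488 sectors into the disk.
	 */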
1701 	chunk = bio_start >> geo->chunk_shift;
1702 	chunk *= geo->near_copies;
1703 	first_stripe_index = chunk;
1704 	start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
1705 	if (geo->far_offset)
1706 		first_stripe_index *= geo->far_copies;
1707 	start_disk_offset = (bio_start & geo->chunk_mask) +
1708 				(first_stripe_index << geo->chunk_shift);
1709 
1710 	chunk = bio_end >> geo->chunk_shift;
1711 	chunk *= geo->near_copies;
1712 	last_stripe_index = chunk;
1713 	end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
1714 	if (geo->far_offset)
1715 		last_stripe_index *= geo->far_copies;
1716 	end_disk_offset = (bio_end & geo->chunk_mask) +
1717 				(last_stripe_index << geo->chunk_shift);
1718 
1719 retry_discard:
1720 	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1721 	r10_bio->mddev = mddev;
1722 	r10_bio->state = 0;
1723 	r10_bio->sectors = 0;
1724 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1725 	wait_blocked_dev(mddev, r10_bio);
1726 
1727 	/*
	 * The far layout needs more than one r10bio to cover all regions.
	 * Inspired by raid10_sync_request, we use the first r10bio->master_bio
	 * to record the discard bio, while every other r10bio->master_bio
	 * records the first r10bio.  The first r10bio is only released after
	 * all the other r10bios finish, and the discard bio completes only
	 * when the first r10bio finishes.
1733 	 */
1734 	if (first_copy) {
1735 		r10_bio->master_bio = bio;
1736 		set_bit(R10BIO_Discard, &r10_bio->state);
1737 		first_copy = false;
1738 		first_r10bio = r10_bio;
1739 	} else
1740 		r10_bio->master_bio = (struct bio *)first_r10bio;
1741 
1742 	/*
	 * First select target devices under rcu_read_lock() and
	 * increment the refcount on their rdev.  Record them by
	 * setting bios[x] to bio.
1746 	 */
1747 	rcu_read_lock();
1748 	for (disk = 0; disk < geo->raid_disks; disk++) {
1749 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
1750 		struct md_rdev *rrdev = rcu_dereference(
1751 			conf->mirrors[disk].replacement);
1752 
1753 		r10_bio->devs[disk].bio = NULL;
1754 		r10_bio->devs[disk].repl_bio = NULL;
1755 
1756 		if (rdev && (test_bit(Faulty, &rdev->flags)))
1757 			rdev = NULL;
1758 		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1759 			rrdev = NULL;
1760 		if (!rdev && !rrdev)
1761 			continue;
1762 
1763 		if (rdev) {
1764 			r10_bio->devs[disk].bio = bio;
1765 			atomic_inc(&rdev->nr_pending);
1766 		}
1767 		if (rrdev) {
1768 			r10_bio->devs[disk].repl_bio = bio;
1769 			atomic_inc(&rrdev->nr_pending);
1770 		}
1771 	}
1772 	rcu_read_unlock();
1773 
1774 	atomic_set(&r10_bio->remaining, 1);
1775 	for (disk = 0; disk < geo->raid_disks; disk++) {
1776 		sector_t dev_start, dev_end;
1777 		struct bio *mbio, *rbio = NULL;
1778 
1779 		/*
		 * Now calculate the start and end address for each disk.
		 * The space between dev_start and dev_end is the discard region.
		 *
		 * For dev_start, there are three cases to consider:
		 * 1st, the disk is before start_disk_index; think of it as being
		 * in the next stripe, so dev_start is the start address of the
		 * next stripe.
		 * 2nd, the disk is after start_disk_index; it is in the same
		 * stripe as the first disk.
		 * 3rd, the disk is start_disk_index itself; use start_disk_offset
		 * directly.
1790 		 */
1791 		if (disk < start_disk_index)
1792 			dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
1793 		else if (disk > start_disk_index)
1794 			dev_start = first_stripe_index * mddev->chunk_sectors;
1795 		else
1796 			dev_start = start_disk_offset;
1797 
1798 		if (disk < end_disk_index)
1799 			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
1800 		else if (disk > end_disk_index)
1801 			dev_end = last_stripe_index * mddev->chunk_sectors;
1802 		else
1803 			dev_end = end_disk_offset;
1804 
1805 		/*
		 * Only discard bios whose size is >= the stripe size are handled,
		 * so dev_end > dev_start always holds.
		 * There is no need to take the rcu lock to get rdev here; we
		 * already incremented rdev->nr_pending in the first loop.
1810 		 */
1811 		if (r10_bio->devs[disk].bio) {
1812 			struct md_rdev *rdev = conf->mirrors[disk].rdev;
1813 			mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1814 					       &mddev->bio_set);
1815 			mbio->bi_end_io = raid10_end_discard_request;
1816 			mbio->bi_private = r10_bio;
1817 			r10_bio->devs[disk].bio = mbio;
1818 			r10_bio->devs[disk].devnum = disk;
1819 			atomic_inc(&r10_bio->remaining);
1820 			md_submit_discard_bio(mddev, rdev, mbio,
1821 					dev_start + choose_data_offset(r10_bio, rdev),
1822 					dev_end - dev_start);
1823 			bio_endio(mbio);
1824 		}
1825 		if (r10_bio->devs[disk].repl_bio) {
1826 			struct md_rdev *rrdev = conf->mirrors[disk].replacement;
1827 			rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1828 					       &mddev->bio_set);
1829 			rbio->bi_end_io = raid10_end_discard_request;
1830 			rbio->bi_private = r10_bio;
1831 			r10_bio->devs[disk].repl_bio = rbio;
1832 			r10_bio->devs[disk].devnum = disk;
1833 			atomic_inc(&r10_bio->remaining);
1834 			md_submit_discard_bio(mddev, rrdev, rbio,
1835 					dev_start + choose_data_offset(r10_bio, rrdev),
1836 					dev_end - dev_start);
1837 			bio_endio(rbio);
1838 		}
1839 	}
1840 
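	/*
	 * For a plain 'far' layout (far_offset == 0) each additional far copy
	 * lives geo->stride sectors further into every member device, so shift
	 * the stripe indexes and disk offsets by one stride and issue another
	 * round of discards for the next copy.
	 */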
1841 	if (!geo->far_offset && --far_copies) {
1842 		first_stripe_index += geo->stride >> geo->chunk_shift;
1843 		start_disk_offset += geo->stride;
1844 		last_stripe_index += geo->stride >> geo->chunk_shift;
1845 		end_disk_offset += geo->stride;
1846 		atomic_inc(&first_r10bio->remaining);
1847 		raid_end_discard_bio(r10_bio);
1848 		wait_barrier(conf, false);
1849 		goto retry_discard;
1850 	}
1851 
1852 	raid_end_discard_bio(r10_bio);
1853 
1854 	return 0;
1855 out:
1856 	allow_barrier(conf);
1857 	return -EAGAIN;
1858 }
1859 
1860 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1861 {
1862 	struct r10conf *conf = mddev->private;
1863 	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1864 	int chunk_sects = chunk_mask + 1;
1865 	int sectors = bio_sectors(bio);
1866 
1867 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1868 	    && md_flush_request(mddev, bio))
1869 		return true;
1870 
1871 	if (!md_write_start(mddev, bio))
1872 		return false;
1873 
1874 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1875 		if (!raid10_handle_discard(mddev, bio))
1876 			return true;
1877 
1878 	/*
1879 	 * If this request crosses a chunk boundary, we need to split
1880 	 * it.
1881 	 */
1882 	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1883 		     sectors > chunk_sects
1884 		     && (conf->geo.near_copies < conf->geo.raid_disks
1885 			 || conf->prev.near_copies <
1886 			 conf->prev.raid_disks)))
1887 		sectors = chunk_sects -
1888 			(bio->bi_iter.bi_sector &
1889 			 (chunk_sects - 1));
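	/*
	 * Only the part of the bio up to the chunk boundary is handled in this
	 * pass; the read/write paths under __make_request() are assumed to
	 * split the bio at 'sectors' and resubmit the remainder.
	 */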
1890 	__make_request(mddev, bio, sectors);
1891 
1892 	/* In case raid10d snuck in to freeze_array */
1893 	wake_up(&conf->wait_barrier);
1894 	return true;
1895 }
1896 
1897 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1898 {
1899 	struct r10conf *conf = mddev->private;
1900 	int i;
1901 
1902 	if (conf->geo.near_copies < conf->geo.raid_disks)
1903 		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1904 	if (conf->geo.near_copies > 1)
1905 		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1906 	if (conf->geo.far_copies > 1) {
1907 		if (conf->geo.far_offset)
1908 			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1909 		else
1910 			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1911 		if (conf->geo.far_set_size != conf->geo.raid_disks)
1912 			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1913 	}
1914 	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1915 					conf->geo.raid_disks - mddev->degraded);
1916 	rcu_read_lock();
1917 	for (i = 0; i < conf->geo.raid_disks; i++) {
1918 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1919 		seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1920 	}
1921 	rcu_read_unlock();
1922 	seq_printf(seq, "]");
1923 }
1924 
1925 /* check if there are enough drives for
 * every block to appear on at least one.
1927  * Don't consider the device numbered 'ignore'
1928  * as we might be about to remove it.
1929  */
1930 static int _enough(struct r10conf *conf, int previous, int ignore)
1931 {
1932 	int first = 0;
1933 	int has_enough = 0;
1934 	int disks, ncopies;
1935 	if (previous) {
1936 		disks = conf->prev.raid_disks;
1937 		ncopies = conf->prev.near_copies;
1938 	} else {
1939 		disks = conf->geo.raid_disks;
1940 		ncopies = conf->geo.near_copies;
1941 	}
1942 
1943 	rcu_read_lock();
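	/*
	 * Walk windows of 'copies' consecutive devices, advancing the window
	 * start by near_copies each time; if any window has no usable In_sync
	 * device other than 'ignore', some block may have lost all its copies.
	 */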
1944 	do {
1945 		int n = conf->copies;
1946 		int cnt = 0;
1947 		int this = first;
1948 		while (n--) {
1949 			struct md_rdev *rdev;
1950 			if (this != ignore &&
1951 			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1952 			    test_bit(In_sync, &rdev->flags))
1953 				cnt++;
1954 			this = (this+1) % disks;
1955 		}
1956 		if (cnt == 0)
1957 			goto out;
1958 		first = (first + ncopies) % disks;
1959 	} while (first != 0);
1960 	has_enough = 1;
1961 out:
1962 	rcu_read_unlock();
1963 	return has_enough;
1964 }
1965 
1966 static int enough(struct r10conf *conf, int ignore)
1967 {
1968 	/* when calling 'enough', both 'prev' and 'geo' must
1969 	 * be stable.
1970 	 * This is ensured if ->reconfig_mutex or ->device_lock
1971 	 * is held.
1972 	 */
1973 	return _enough(conf, 0, ignore) &&
1974 		_enough(conf, 1, ignore);
1975 }
1976 
1977 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1978 {
1979 	char b[BDEVNAME_SIZE];
1980 	struct r10conf *conf = mddev->private;
1981 	unsigned long flags;
1982 
1983 	/*
	 * If it is not operational, then we have already marked it as dead;
	 * else if it is the last working disk and "fail_last_dev" is false,
	 * ignore the error and let the next level up know;
	 * else mark the drive as failed.
1988 	 */
1989 	spin_lock_irqsave(&conf->device_lock, flags);
1990 	if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1991 	    && !enough(conf, rdev->raid_disk)) {
1992 		/*
1993 		 * Don't fail the drive, just return an IO error.
1994 		 */
1995 		spin_unlock_irqrestore(&conf->device_lock, flags);
1996 		return;
1997 	}
1998 	if (test_and_clear_bit(In_sync, &rdev->flags))
1999 		mddev->degraded++;
2000 	/*
2001 	 * If recovery is running, make sure it aborts.
2002 	 */
2003 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2004 	set_bit(Blocked, &rdev->flags);
2005 	set_bit(Faulty, &rdev->flags);
2006 	set_mask_bits(&mddev->sb_flags, 0,
2007 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2008 	spin_unlock_irqrestore(&conf->device_lock, flags);
2009 	pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
2010 		"md/raid10:%s: Operation continuing on %d devices.\n",
2011 		mdname(mddev), bdevname(rdev->bdev, b),
2012 		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
2013 }
2014 
2015 static void print_conf(struct r10conf *conf)
2016 {
2017 	int i;
2018 	struct md_rdev *rdev;
2019 
2020 	pr_debug("RAID10 conf printout:\n");
2021 	if (!conf) {
2022 		pr_debug("(!conf)\n");
2023 		return;
2024 	}
2025 	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2026 		 conf->geo.raid_disks);
2027 
	/* This is only called with ->reconfig_mutex held, so
2029 	 * rcu protection of rdev is not needed */
2030 	for (i = 0; i < conf->geo.raid_disks; i++) {
2031 		char b[BDEVNAME_SIZE];
2032 		rdev = conf->mirrors[i].rdev;
2033 		if (rdev)
2034 			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
2035 				 i, !test_bit(In_sync, &rdev->flags),
2036 				 !test_bit(Faulty, &rdev->flags),
2037 				 bdevname(rdev->bdev,b));
2038 	}
2039 }
2040 
2041 static void close_sync(struct r10conf *conf)
2042 {
2043 	wait_barrier(conf, false);
2044 	allow_barrier(conf);
2045 
2046 	mempool_exit(&conf->r10buf_pool);
2047 }
2048 
2049 static int raid10_spare_active(struct mddev *mddev)
2050 {
2051 	int i;
2052 	struct r10conf *conf = mddev->private;
2053 	struct raid10_info *tmp;
2054 	int count = 0;
2055 	unsigned long flags;
2056 
2057 	/*
2058 	 * Find all non-in_sync disks within the RAID10 configuration
2059 	 * and mark them in_sync
2060 	 */
2061 	for (i = 0; i < conf->geo.raid_disks; i++) {
2062 		tmp = conf->mirrors + i;
2063 		if (tmp->replacement
2064 		    && tmp->replacement->recovery_offset == MaxSector
2065 		    && !test_bit(Faulty, &tmp->replacement->flags)
2066 		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
2067 			/* Replacement has just become active */
2068 			if (!tmp->rdev
2069 			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
2070 				count++;
2071 			if (tmp->rdev) {
2072 				/* Replaced device not technically faulty,
2073 				 * but we need to be sure it gets removed
2074 				 * and never re-added.
2075 				 */
2076 				set_bit(Faulty, &tmp->rdev->flags);
2077 				sysfs_notify_dirent_safe(
2078 					tmp->rdev->sysfs_state);
2079 			}
2080 			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
2081 		} else if (tmp->rdev
2082 			   && tmp->rdev->recovery_offset == MaxSector
2083 			   && !test_bit(Faulty, &tmp->rdev->flags)
2084 			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
2085 			count++;
2086 			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
2087 		}
2088 	}
2089 	spin_lock_irqsave(&conf->device_lock, flags);
2090 	mddev->degraded -= count;
2091 	spin_unlock_irqrestore(&conf->device_lock, flags);
2092 
2093 	print_conf(conf);
2094 	return count;
2095 }
2096 
2097 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2098 {
2099 	struct r10conf *conf = mddev->private;
2100 	int err = -EEXIST;
2101 	int mirror;
2102 	int first = 0;
2103 	int last = conf->geo.raid_disks - 1;
2104 
2105 	if (mddev->recovery_cp < MaxSector)
2106 		/* only hot-add to in-sync arrays, as recovery is
2107 		 * very different from resync
2108 		 */
2109 		return -EBUSY;
2110 	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2111 		return -EINVAL;
2112 
2113 	if (md_integrity_add_rdev(rdev, mddev))
2114 		return -ENXIO;
2115 
2116 	if (rdev->raid_disk >= 0)
2117 		first = last = rdev->raid_disk;
2118 
2119 	if (rdev->saved_raid_disk >= first &&
2120 	    rdev->saved_raid_disk < conf->geo.raid_disks &&
2121 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2122 		mirror = rdev->saved_raid_disk;
2123 	else
2124 		mirror = first;
2125 	for ( ; mirror <= last ; mirror++) {
2126 		struct raid10_info *p = &conf->mirrors[mirror];
2127 		if (p->recovery_disabled == mddev->recovery_disabled)
2128 			continue;
2129 		if (p->rdev) {
2130 			if (!test_bit(WantReplacement, &p->rdev->flags) ||
2131 			    p->replacement != NULL)
2132 				continue;
2133 			clear_bit(In_sync, &rdev->flags);
2134 			set_bit(Replacement, &rdev->flags);
2135 			rdev->raid_disk = mirror;
2136 			err = 0;
2137 			if (mddev->gendisk)
2138 				disk_stack_limits(mddev->gendisk, rdev->bdev,
2139 						  rdev->data_offset << 9);
2140 			conf->fullsync = 1;
2141 			rcu_assign_pointer(p->replacement, rdev);
2142 			break;
2143 		}
2144 
2145 		if (mddev->gendisk)
2146 			disk_stack_limits(mddev->gendisk, rdev->bdev,
2147 					  rdev->data_offset << 9);
2148 
2149 		p->head_position = 0;
2150 		p->recovery_disabled = mddev->recovery_disabled - 1;
2151 		rdev->raid_disk = mirror;
2152 		err = 0;
2153 		if (rdev->saved_raid_disk != mirror)
2154 			conf->fullsync = 1;
2155 		rcu_assign_pointer(p->rdev, rdev);
2156 		break;
2157 	}
2158 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
2159 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
2160 
2161 	print_conf(conf);
2162 	return err;
2163 }
2164 
2165 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2166 {
2167 	struct r10conf *conf = mddev->private;
2168 	int err = 0;
2169 	int number = rdev->raid_disk;
2170 	struct md_rdev **rdevp;
2171 	struct raid10_info *p = conf->mirrors + number;
2172 
2173 	print_conf(conf);
2174 	if (rdev == p->rdev)
2175 		rdevp = &p->rdev;
2176 	else if (rdev == p->replacement)
2177 		rdevp = &p->replacement;
2178 	else
2179 		return 0;
2180 
2181 	if (test_bit(In_sync, &rdev->flags) ||
2182 	    atomic_read(&rdev->nr_pending)) {
2183 		err = -EBUSY;
2184 		goto abort;
2185 	}
2186 	/* Only remove non-faulty devices if recovery
2187 	 * is not possible.
2188 	 */
2189 	if (!test_bit(Faulty, &rdev->flags) &&
2190 	    mddev->recovery_disabled != p->recovery_disabled &&
2191 	    (!p->replacement || p->replacement == rdev) &&
2192 	    number < conf->geo.raid_disks &&
2193 	    enough(conf, -1)) {
2194 		err = -EBUSY;
2195 		goto abort;
2196 	}
2197 	*rdevp = NULL;
2198 	if (!test_bit(RemoveSynchronized, &rdev->flags)) {
2199 		synchronize_rcu();
2200 		if (atomic_read(&rdev->nr_pending)) {
2201 			/* lost the race, try later */
2202 			err = -EBUSY;
2203 			*rdevp = rdev;
2204 			goto abort;
2205 		}
2206 	}
2207 	if (p->replacement) {
2208 		/* We must have just cleared 'rdev' */
2209 		p->rdev = p->replacement;
2210 		clear_bit(Replacement, &p->replacement->flags);
2211 		smp_mb(); /* Make sure other CPUs may see both as identical
2212 			   * but will never see neither -- if they are careful.
2213 			   */
2214 		p->replacement = NULL;
2215 	}
2216 
2217 	clear_bit(WantReplacement, &rdev->flags);
2218 	err = md_integrity_register(mddev);
2219 
2220 abort:
2221 
2222 	print_conf(conf);
2223 	return err;
2224 }
2225 
2226 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2227 {
2228 	struct r10conf *conf = r10_bio->mddev->private;
2229 
2230 	if (!bio->bi_status)
2231 		set_bit(R10BIO_Uptodate, &r10_bio->state);
2232 	else
2233 		/* The write handler will notice the lack of
2234 		 * R10BIO_Uptodate and record any errors etc
2235 		 */
2236 		atomic_add(r10_bio->sectors,
2237 			   &conf->mirrors[d].rdev->corrected_errors);
2238 
2239 	/* for reconstruct, we always reschedule after a read.
2240 	 * for resync, only after all reads
2241 	 */
2242 	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2243 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2244 	    atomic_dec_and_test(&r10_bio->remaining)) {
2245 		/* we have read all the blocks,
2246 		 * do the comparison in process context in raid10d
2247 		 */
2248 		reschedule_retry(r10_bio);
2249 	}
2250 }
2251 
2252 static void end_sync_read(struct bio *bio)
2253 {
2254 	struct r10bio *r10_bio = get_resync_r10bio(bio);
2255 	struct r10conf *conf = r10_bio->mddev->private;
2256 	int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2257 
2258 	__end_sync_read(r10_bio, bio, d);
2259 }
2260 
2261 static void end_reshape_read(struct bio *bio)
2262 {
2263 	/* reshape read bio isn't allocated from r10buf_pool */
2264 	struct r10bio *r10_bio = bio->bi_private;
2265 
2266 	__end_sync_read(r10_bio, bio, r10_bio->read_slot);
2267 }
2268 
2269 static void end_sync_request(struct r10bio *r10_bio)
2270 {
2271 	struct mddev *mddev = r10_bio->mddev;
2272 
2273 	while (atomic_dec_and_test(&r10_bio->remaining)) {
2274 		if (r10_bio->master_bio == NULL) {
2275 			/* the primary of several recovery bios */
2276 			sector_t s = r10_bio->sectors;
2277 			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2278 			    test_bit(R10BIO_WriteError, &r10_bio->state))
2279 				reschedule_retry(r10_bio);
2280 			else
2281 				put_buf(r10_bio);
2282 			md_done_sync(mddev, s, 1);
2283 			break;
2284 		} else {
2285 			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2286 			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2287 			    test_bit(R10BIO_WriteError, &r10_bio->state))
2288 				reschedule_retry(r10_bio);
2289 			else
2290 				put_buf(r10_bio);
2291 			r10_bio = r10_bio2;
2292 		}
2293 	}
2294 }
2295 
2296 static void end_sync_write(struct bio *bio)
2297 {
2298 	struct r10bio *r10_bio = get_resync_r10bio(bio);
2299 	struct mddev *mddev = r10_bio->mddev;
2300 	struct r10conf *conf = mddev->private;
2301 	int d;
2302 	sector_t first_bad;
2303 	int bad_sectors;
2304 	int slot;
2305 	int repl;
2306 	struct md_rdev *rdev = NULL;
2307 
2308 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2309 	if (repl)
2310 		rdev = conf->mirrors[d].replacement;
2311 	else
2312 		rdev = conf->mirrors[d].rdev;
2313 
2314 	if (bio->bi_status) {
2315 		if (repl)
2316 			md_error(mddev, rdev);
2317 		else {
2318 			set_bit(WriteErrorSeen, &rdev->flags);
2319 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
2320 				set_bit(MD_RECOVERY_NEEDED,
2321 					&rdev->mddev->recovery);
2322 			set_bit(R10BIO_WriteError, &r10_bio->state);
2323 		}
2324 	} else if (is_badblock(rdev,
2325 			     r10_bio->devs[slot].addr,
2326 			     r10_bio->sectors,
2327 			     &first_bad, &bad_sectors))
2328 		set_bit(R10BIO_MadeGood, &r10_bio->state);
2329 
2330 	rdev_dec_pending(rdev, mddev);
2331 
2332 	end_sync_request(r10_bio);
2333 }
2334 
2335 /*
 * Note: sync and recovery are handled very differently for raid10.
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write.  The lowest numbered
 * drive is authoritative.
 * However, requests come for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
2344  * This means that a physical address can span multiple chunks, so we may
2345  * have to submit multiple io requests for a single sync request.
2346  */
2347 /*
2348  * We check if all blocks are in-sync and only write to blocks that
2349  * aren't in sync
2350  */
2351 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2352 {
2353 	struct r10conf *conf = mddev->private;
2354 	int i, first;
2355 	struct bio *tbio, *fbio;
2356 	int vcnt;
2357 	struct page **tpages, **fpages;
2358 
2359 	atomic_set(&r10_bio->remaining, 1);
2360 
2361 	/* find the first device with a block */
2362 	for (i=0; i<conf->copies; i++)
2363 		if (!r10_bio->devs[i].bio->bi_status)
2364 			break;
2365 
2366 	if (i == conf->copies)
2367 		goto done;
2368 
2369 	first = i;
2370 	fbio = r10_bio->devs[i].bio;
2371 	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2372 	fbio->bi_iter.bi_idx = 0;
2373 	fpages = get_resync_pages(fbio)->pages;
2374 
2375 	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
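	/*
	 * vcnt is the number of page-sized vec entries covering the request,
	 * e.g. with 4KiB pages r10_bio->sectors == 17 gives vcnt == 3 (two
	 * full pages plus a 512-byte tail).
	 */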
2376 	/* now find blocks with errors */
2377 	for (i=0 ; i < conf->copies ; i++) {
2378 		int  j, d;
2379 		struct md_rdev *rdev;
2380 		struct resync_pages *rp;
2381 
2382 		tbio = r10_bio->devs[i].bio;
2383 
2384 		if (tbio->bi_end_io != end_sync_read)
2385 			continue;
2386 		if (i == first)
2387 			continue;
2388 
2389 		tpages = get_resync_pages(tbio)->pages;
2390 		d = r10_bio->devs[i].devnum;
2391 		rdev = conf->mirrors[d].rdev;
2392 		if (!r10_bio->devs[i].bio->bi_status) {
2393 			/* We know that the bi_io_vec layout is the same for
2394 			 * both 'first' and 'i', so we just compare them.
2395 			 * All vec entries are PAGE_SIZE;
2396 			 */
2397 			int sectors = r10_bio->sectors;
2398 			for (j = 0; j < vcnt; j++) {
2399 				int len = PAGE_SIZE;
2400 				if (sectors < (len / 512))
2401 					len = sectors * 512;
2402 				if (memcmp(page_address(fpages[j]),
2403 					   page_address(tpages[j]),
2404 					   len))
2405 					break;
2406 				sectors -= len/512;
2407 			}
2408 			if (j == vcnt)
2409 				continue;
2410 			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2411 			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2412 				/* Don't fix anything. */
2413 				continue;
2414 		} else if (test_bit(FailFast, &rdev->flags)) {
2415 			/* Just give up on this device */
2416 			md_error(rdev->mddev, rdev);
2417 			continue;
2418 		}
2419 		/* Ok, we need to write this bio, either to correct an
2420 		 * inconsistency or to correct an unreadable block.
2421 		 * First we need to fixup bv_offset, bv_len and
2422 		 * bi_vecs, as the read request might have corrupted these
2423 		 */
2424 		rp = get_resync_pages(tbio);
2425 		bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2426 
2427 		md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2428 
2429 		rp->raid_bio = r10_bio;
2430 		tbio->bi_private = rp;
2431 		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2432 		tbio->bi_end_io = end_sync_write;
2433 
2434 		bio_copy_data(tbio, fbio);
2435 
2436 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2437 		atomic_inc(&r10_bio->remaining);
2438 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2439 
2440 		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2441 			tbio->bi_opf |= MD_FAILFAST;
2442 		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2443 		submit_bio_noacct(tbio);
2444 	}
2445 
2446 	/* Now write out to any replacement devices
2447 	 * that are active
2448 	 */
2449 	for (i = 0; i < conf->copies; i++) {
2450 		int d;
2451 
2452 		tbio = r10_bio->devs[i].repl_bio;
2453 		if (!tbio || !tbio->bi_end_io)
2454 			continue;
2455 		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2456 		    && r10_bio->devs[i].bio != fbio)
2457 			bio_copy_data(tbio, fbio);
2458 		d = r10_bio->devs[i].devnum;
2459 		atomic_inc(&r10_bio->remaining);
2460 		md_sync_acct(conf->mirrors[d].replacement->bdev,
2461 			     bio_sectors(tbio));
2462 		submit_bio_noacct(tbio);
2463 	}
2464 
2465 done:
2466 	if (atomic_dec_and_test(&r10_bio->remaining)) {
2467 		md_done_sync(mddev, r10_bio->sectors, 1);
2468 		put_buf(r10_bio);
2469 	}
2470 }
2471 
2472 /*
2473  * Now for the recovery code.
2474  * Recovery happens across physical sectors.
 * We recover all non-in_sync drives by finding the virtual address of
 * each, and then choosing a working drive that also has that virt address.
 * There is a separate r10_bio for each non-in_sync drive.
 * Only the first two slots are in use: the first for reading,
 * the second for writing.
2481  */
2482 static void fix_recovery_read_error(struct r10bio *r10_bio)
2483 {
2484 	/* We got a read error during recovery.
2485 	 * We repeat the read in smaller page-sized sections.
2486 	 * If a read succeeds, write it to the new device or record
2487 	 * a bad block if we cannot.
2488 	 * If a read fails, record a bad block on both old and
2489 	 * new devices.
2490 	 */
2491 	struct mddev *mddev = r10_bio->mddev;
2492 	struct r10conf *conf = mddev->private;
2493 	struct bio *bio = r10_bio->devs[0].bio;
2494 	sector_t sect = 0;
2495 	int sectors = r10_bio->sectors;
2496 	int idx = 0;
2497 	int dr = r10_bio->devs[0].devnum;
2498 	int dw = r10_bio->devs[1].devnum;
2499 	struct page **pages = get_resync_pages(bio)->pages;
2500 
2501 	while (sectors) {
2502 		int s = sectors;
2503 		struct md_rdev *rdev;
2504 		sector_t addr;
2505 		int ok;
2506 
2507 		if (s > (PAGE_SIZE>>9))
2508 			s = PAGE_SIZE >> 9;
2509 
2510 		rdev = conf->mirrors[dr].rdev;
		addr = r10_bio->devs[0].addr + sect;
2512 		ok = sync_page_io(rdev,
2513 				  addr,
2514 				  s << 9,
2515 				  pages[idx],
2516 				  REQ_OP_READ, 0, false);
2517 		if (ok) {
2518 			rdev = conf->mirrors[dw].rdev;
2519 			addr = r10_bio->devs[1].addr + sect;
2520 			ok = sync_page_io(rdev,
2521 					  addr,
2522 					  s << 9,
2523 					  pages[idx],
2524 					  REQ_OP_WRITE, 0, false);
2525 			if (!ok) {
2526 				set_bit(WriteErrorSeen, &rdev->flags);
2527 				if (!test_and_set_bit(WantReplacement,
2528 						      &rdev->flags))
2529 					set_bit(MD_RECOVERY_NEEDED,
2530 						&rdev->mddev->recovery);
2531 			}
2532 		}
2533 		if (!ok) {
2534 			/* We don't worry if we cannot set a bad block -
2535 			 * it really is bad so there is no loss in not
2536 			 * recording it yet
2537 			 */
2538 			rdev_set_badblocks(rdev, addr, s, 0);
2539 
2540 			if (rdev != conf->mirrors[dw].rdev) {
2541 				/* need bad block on destination too */
2542 				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2543 				addr = r10_bio->devs[1].addr + sect;
2544 				ok = rdev_set_badblocks(rdev2, addr, s, 0);
2545 				if (!ok) {
2546 					/* just abort the recovery */
2547 					pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2548 						  mdname(mddev));
2549 
2550 					conf->mirrors[dw].recovery_disabled
2551 						= mddev->recovery_disabled;
2552 					set_bit(MD_RECOVERY_INTR,
2553 						&mddev->recovery);
2554 					break;
2555 				}
2556 			}
2557 		}
2558 
2559 		sectors -= s;
2560 		sect += s;
2561 		idx++;
2562 	}
2563 }
2564 
2565 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2566 {
2567 	struct r10conf *conf = mddev->private;
2568 	int d;
2569 	struct bio *wbio, *wbio2;
2570 
2571 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2572 		fix_recovery_read_error(r10_bio);
2573 		end_sync_request(r10_bio);
2574 		return;
2575 	}
2576 
2577 	/*
2578 	 * share the pages with the first bio
2579 	 * and submit the write request
2580 	 */
2581 	d = r10_bio->devs[1].devnum;
2582 	wbio = r10_bio->devs[1].bio;
2583 	wbio2 = r10_bio->devs[1].repl_bio;
2584 	/* Need to test wbio2->bi_end_io before we call
2585 	 * submit_bio_noacct as if the former is NULL,
2586 	 * the latter is free to free wbio2.
2587 	 */
2588 	if (wbio2 && !wbio2->bi_end_io)
2589 		wbio2 = NULL;
2590 	if (wbio->bi_end_io) {
2591 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2592 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2593 		submit_bio_noacct(wbio);
2594 	}
2595 	if (wbio2) {
2596 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2597 		md_sync_acct(conf->mirrors[d].replacement->bdev,
2598 			     bio_sectors(wbio2));
2599 		submit_bio_noacct(wbio2);
2600 	}
2601 }
2602 
2603 /*
 * Used by fix_read_error() to decay the per-rdev read_errors.
 * We halve the read error count for every hour that has elapsed
 * since the last recorded read error.
2608  */
2609 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2610 {
2611 	long cur_time_mon;
2612 	unsigned long hours_since_last;
2613 	unsigned int read_errors = atomic_read(&rdev->read_errors);
2614 
2615 	cur_time_mon = ktime_get_seconds();
2616 
2617 	if (rdev->last_read_error == 0) {
2618 		/* first time we've seen a read error */
2619 		rdev->last_read_error = cur_time_mon;
2620 		return;
2621 	}
2622 
2623 	hours_since_last = (long)(cur_time_mon -
2624 			    rdev->last_read_error) / 3600;
2625 
2626 	rdev->last_read_error = cur_time_mon;
2627 
2628 	/*
2629 	 * if hours_since_last is > the number of bits in read_errors
2630 	 * just set read errors to 0. We do this to avoid
2631 	 * overflowing the shift of read_errors by hours_since_last.
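	 * For example, read_errors == 40 with hours_since_last == 3 decays
	 * to 40 >> 3 == 5.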
2632 	 */
2633 	if (hours_since_last >= 8 * sizeof(read_errors))
2634 		atomic_set(&rdev->read_errors, 0);
2635 	else
2636 		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2637 }
2638 
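/*
 * Returns 1 if the I/O succeeded, 0 if it failed (after recording a bad
 * block or failing the device), and -1 if the range overlaps a known bad
 * block (for writes, only when WriteErrorSeen is set) so the I/O is not
 * attempted.
 */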
2639 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2640 			    int sectors, struct page *page, int rw)
2641 {
2642 	sector_t first_bad;
2643 	int bad_sectors;
2644 
2645 	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2646 	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2647 		return -1;
2648 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
2649 		/* success */
2650 		return 1;
2651 	if (rw == WRITE) {
2652 		set_bit(WriteErrorSeen, &rdev->flags);
2653 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2654 			set_bit(MD_RECOVERY_NEEDED,
2655 				&rdev->mddev->recovery);
2656 	}
2657 	/* need to record an error - either for the block or the device */
2658 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2659 		md_error(rdev->mddev, rdev);
2660 	return 0;
2661 }
2662 
2663 /*
2664  * This is a kernel thread which:
2665  *
2666  *	1.	Retries failed read operations on working mirrors.
2667  *	2.	Updates the raid superblock when problems encounter.
2668  *	3.	Performs writes following reads for array synchronising.
2669  */
2670 
2671 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2672 {
2673 	int sect = 0; /* Offset from r10_bio->sector */
2674 	int sectors = r10_bio->sectors;
2675 	struct md_rdev *rdev;
2676 	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2677 	int d = r10_bio->devs[r10_bio->read_slot].devnum;
2678 
2679 	/* still own a reference to this rdev, so it cannot
2680 	 * have been cleared recently.
2681 	 */
2682 	rdev = conf->mirrors[d].rdev;
2683 
2684 	if (test_bit(Faulty, &rdev->flags))
2685 		/* drive has already been failed, just ignore any
2686 		   more fix_read_error() attempts */
2687 		return;
2688 
2689 	check_decay_read_errors(mddev, rdev);
2690 	atomic_inc(&rdev->read_errors);
2691 	if (atomic_read(&rdev->read_errors) > max_read_errors) {
2692 		char b[BDEVNAME_SIZE];
2693 		bdevname(rdev->bdev, b);
2694 
2695 		pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
2696 			  mdname(mddev), b,
2697 			  atomic_read(&rdev->read_errors), max_read_errors);
2698 		pr_notice("md/raid10:%s: %s: Failing raid device\n",
2699 			  mdname(mddev), b);
2700 		md_error(mddev, rdev);
2701 		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2702 		return;
2703 	}
2704 
2705 	while(sectors) {
2706 		int s = sectors;
2707 		int sl = r10_bio->read_slot;
2708 		int success = 0;
2709 		int start;
2710 
2711 		if (s > (PAGE_SIZE>>9))
2712 			s = PAGE_SIZE >> 9;
2713 
2714 		rcu_read_lock();
2715 		do {
2716 			sector_t first_bad;
2717 			int bad_sectors;
2718 
2719 			d = r10_bio->devs[sl].devnum;
2720 			rdev = rcu_dereference(conf->mirrors[d].rdev);
2721 			if (rdev &&
2722 			    test_bit(In_sync, &rdev->flags) &&
2723 			    !test_bit(Faulty, &rdev->flags) &&
2724 			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2725 					&first_bad, &bad_sectors) == 0) {
2726 				atomic_inc(&rdev->nr_pending);
2727 				rcu_read_unlock();
2728 				success = sync_page_io(rdev,
2729 						       r10_bio->devs[sl].addr +
2730 						       sect,
2731 						       s<<9,
2732 						       conf->tmppage,
2733 						       REQ_OP_READ, 0, false);
2734 				rdev_dec_pending(rdev, mddev);
2735 				rcu_read_lock();
2736 				if (success)
2737 					break;
2738 			}
2739 			sl++;
2740 			if (sl == conf->copies)
2741 				sl = 0;
2742 		} while (!success && sl != r10_bio->read_slot);
2743 		rcu_read_unlock();
2744 
2745 		if (!success) {
2746 			/* Cannot read from anywhere, just mark the block
2747 			 * as bad on the first device to discourage future
2748 			 * reads.
2749 			 */
2750 			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2751 			rdev = conf->mirrors[dn].rdev;
2752 
2753 			if (!rdev_set_badblocks(
2754 				    rdev,
2755 				    r10_bio->devs[r10_bio->read_slot].addr
2756 				    + sect,
2757 				    s, 0)) {
2758 				md_error(mddev, rdev);
2759 				r10_bio->devs[r10_bio->read_slot].bio
2760 					= IO_BLOCKED;
2761 			}
2762 			break;
2763 		}
2764 
2765 		start = sl;
2766 		/* write it back and re-read */
2767 		rcu_read_lock();
2768 		while (sl != r10_bio->read_slot) {
2769 			char b[BDEVNAME_SIZE];
2770 
2771 			if (sl==0)
2772 				sl = conf->copies;
2773 			sl--;
2774 			d = r10_bio->devs[sl].devnum;
2775 			rdev = rcu_dereference(conf->mirrors[d].rdev);
2776 			if (!rdev ||
2777 			    test_bit(Faulty, &rdev->flags) ||
2778 			    !test_bit(In_sync, &rdev->flags))
2779 				continue;
2780 
2781 			atomic_inc(&rdev->nr_pending);
2782 			rcu_read_unlock();
2783 			if (r10_sync_page_io(rdev,
2784 					     r10_bio->devs[sl].addr +
2785 					     sect,
2786 					     s, conf->tmppage, WRITE)
2787 			    == 0) {
2788 				/* Well, this device is dead */
2789 				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
2790 					  mdname(mddev), s,
2791 					  (unsigned long long)(
2792 						  sect +
2793 						  choose_data_offset(r10_bio,
2794 								     rdev)),
2795 					  bdevname(rdev->bdev, b));
2796 				pr_notice("md/raid10:%s: %s: failing drive\n",
2797 					  mdname(mddev),
2798 					  bdevname(rdev->bdev, b));
2799 			}
2800 			rdev_dec_pending(rdev, mddev);
2801 			rcu_read_lock();
2802 		}
2803 		sl = start;
2804 		while (sl != r10_bio->read_slot) {
2805 			char b[BDEVNAME_SIZE];
2806 
2807 			if (sl==0)
2808 				sl = conf->copies;
2809 			sl--;
2810 			d = r10_bio->devs[sl].devnum;
2811 			rdev = rcu_dereference(conf->mirrors[d].rdev);
2812 			if (!rdev ||
2813 			    test_bit(Faulty, &rdev->flags) ||
2814 			    !test_bit(In_sync, &rdev->flags))
2815 				continue;
2816 
2817 			atomic_inc(&rdev->nr_pending);
2818 			rcu_read_unlock();
2819 			switch (r10_sync_page_io(rdev,
2820 					     r10_bio->devs[sl].addr +
2821 					     sect,
2822 					     s, conf->tmppage,
2823 						 READ)) {
2824 			case 0:
2825 				/* Well, this device is dead */
2826 				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
2827 				       mdname(mddev), s,
2828 				       (unsigned long long)(
2829 					       sect +
2830 					       choose_data_offset(r10_bio, rdev)),
2831 				       bdevname(rdev->bdev, b));
2832 				pr_notice("md/raid10:%s: %s: failing drive\n",
2833 				       mdname(mddev),
2834 				       bdevname(rdev->bdev, b));
2835 				break;
2836 			case 1:
2837 				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
2838 				       mdname(mddev), s,
2839 				       (unsigned long long)(
2840 					       sect +
2841 					       choose_data_offset(r10_bio, rdev)),
2842 				       bdevname(rdev->bdev, b));
2843 				atomic_add(s, &rdev->corrected_errors);
2844 			}
2845 
2846 			rdev_dec_pending(rdev, mddev);
2847 			rcu_read_lock();
2848 		}
2849 		rcu_read_unlock();
2850 
2851 		sectors -= s;
2852 		sect += s;
2853 	}
2854 }
2855 
2856 static int narrow_write_error(struct r10bio *r10_bio, int i)
2857 {
2858 	struct bio *bio = r10_bio->master_bio;
2859 	struct mddev *mddev = r10_bio->mddev;
2860 	struct r10conf *conf = mddev->private;
2861 	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2862 	/* bio has the data to be written to slot 'i' where
2863 	 * we just recently had a write error.
2864 	 * We repeatedly clone the bio and trim down to one block,
2865 	 * then try the write.  Where the write fails we record
2866 	 * a bad block.
2867 	 * It is conceivable that the bio doesn't exactly align with
2868 	 * blocks.  We must handle this.
2869 	 *
2870 	 * We currently own a reference to the rdev.
2871 	 */
2872 
2873 	int block_sectors;
2874 	sector_t sector;
2875 	int sectors;
2876 	int sect_to_write = r10_bio->sectors;
2877 	int ok = 1;
2878 
2879 	if (rdev->badblocks.shift < 0)
2880 		return 0;
2881 
2882 	block_sectors = roundup(1 << rdev->badblocks.shift,
2883 				bdev_logical_block_size(rdev->bdev) >> 9);
2884 	sector = r10_bio->sector;
2885 	sectors = ((r10_bio->sector + block_sectors)
2886 		   & ~(sector_t)(block_sectors - 1))
2887 		- sector;
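	/*
	 * For example, with block_sectors == 8 and r10_bio->sector == 21 the
	 * first pass writes 3 sectors (21..23) so that every later pass is
	 * aligned to an 8-sector badblock boundary.
	 */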
2888 
2889 	while (sect_to_write) {
2890 		struct bio *wbio;
2891 		sector_t wsector;
2892 		if (sectors > sect_to_write)
2893 			sectors = sect_to_write;
2894 		/* Write at 'sector' for 'sectors' */
2895 		wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2896 				       &mddev->bio_set);
2897 		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2898 		wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2899 		wbio->bi_iter.bi_sector = wsector +
2900 				   choose_data_offset(r10_bio, rdev);
2901 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2902 
2903 		if (submit_bio_wait(wbio) < 0)
2904 			/* Failure! */
2905 			ok = rdev_set_badblocks(rdev, wsector,
2906 						sectors, 0)
2907 				&& ok;
2908 
2909 		bio_put(wbio);
2910 		sect_to_write -= sectors;
2911 		sector += sectors;
2912 		sectors = block_sectors;
2913 	}
2914 	return ok;
2915 }
2916 
2917 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2918 {
2919 	int slot = r10_bio->read_slot;
2920 	struct bio *bio;
2921 	struct r10conf *conf = mddev->private;
2922 	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2923 
2924 	/* we got a read error. Maybe the drive is bad.  Maybe just
2925 	 * the block and we can fix it.
2926 	 * We freeze all other IO, and try reading the block from
2927 	 * other devices.  When we find one, we re-write
2928 	 * and check it that fixes the read error.
2929 	 * This is all done synchronously while the array is
2930 	 * frozen.
2931 	 */
2932 	bio = r10_bio->devs[slot].bio;
2933 	bio_put(bio);
2934 	r10_bio->devs[slot].bio = NULL;
2935 
2936 	if (mddev->ro)
2937 		r10_bio->devs[slot].bio = IO_BLOCKED;
2938 	else if (!test_bit(FailFast, &rdev->flags)) {
2939 		freeze_array(conf, 1);
2940 		fix_read_error(conf, mddev, r10_bio);
2941 		unfreeze_array(conf);
2942 	} else
2943 		md_error(mddev, rdev);
2944 
2945 	rdev_dec_pending(rdev, mddev);
2946 	allow_barrier(conf);
2947 	r10_bio->state = 0;
2948 	raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
2949 }
2950 
2951 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2952 {
2953 	/* Some sort of write request has finished and it
2954 	 * succeeded in writing where we thought there was a
2955 	 * bad block.  So forget the bad block.
2956 	 * Or possibly if failed and we need to record
2957 	 * a bad block.
2958 	 */
2959 	int m;
2960 	struct md_rdev *rdev;
2961 
2962 	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2963 	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2964 		for (m = 0; m < conf->copies; m++) {
2965 			int dev = r10_bio->devs[m].devnum;
2966 			rdev = conf->mirrors[dev].rdev;
2967 			if (r10_bio->devs[m].bio == NULL ||
2968 				r10_bio->devs[m].bio->bi_end_io == NULL)
2969 				continue;
2970 			if (!r10_bio->devs[m].bio->bi_status) {
2971 				rdev_clear_badblocks(
2972 					rdev,
2973 					r10_bio->devs[m].addr,
2974 					r10_bio->sectors, 0);
2975 			} else {
2976 				if (!rdev_set_badblocks(
2977 					    rdev,
2978 					    r10_bio->devs[m].addr,
2979 					    r10_bio->sectors, 0))
2980 					md_error(conf->mddev, rdev);
2981 			}
2982 			rdev = conf->mirrors[dev].replacement;
2983 			if (r10_bio->devs[m].repl_bio == NULL ||
2984 				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2985 				continue;
2986 
2987 			if (!r10_bio->devs[m].repl_bio->bi_status) {
2988 				rdev_clear_badblocks(
2989 					rdev,
2990 					r10_bio->devs[m].addr,
2991 					r10_bio->sectors, 0);
2992 			} else {
2993 				if (!rdev_set_badblocks(
2994 					    rdev,
2995 					    r10_bio->devs[m].addr,
2996 					    r10_bio->sectors, 0))
2997 					md_error(conf->mddev, rdev);
2998 			}
2999 		}
3000 		put_buf(r10_bio);
3001 	} else {
3002 		bool fail = false;
3003 		for (m = 0; m < conf->copies; m++) {
3004 			int dev = r10_bio->devs[m].devnum;
3005 			struct bio *bio = r10_bio->devs[m].bio;
3006 			rdev = conf->mirrors[dev].rdev;
3007 			if (bio == IO_MADE_GOOD) {
3008 				rdev_clear_badblocks(
3009 					rdev,
3010 					r10_bio->devs[m].addr,
3011 					r10_bio->sectors, 0);
3012 				rdev_dec_pending(rdev, conf->mddev);
3013 			} else if (bio != NULL && bio->bi_status) {
3014 				fail = true;
3015 				if (!narrow_write_error(r10_bio, m)) {
3016 					md_error(conf->mddev, rdev);
3017 					set_bit(R10BIO_Degraded,
3018 						&r10_bio->state);
3019 				}
3020 				rdev_dec_pending(rdev, conf->mddev);
3021 			}
3022 			bio = r10_bio->devs[m].repl_bio;
3023 			rdev = conf->mirrors[dev].replacement;
3024 			if (rdev && bio == IO_MADE_GOOD) {
3025 				rdev_clear_badblocks(
3026 					rdev,
3027 					r10_bio->devs[m].addr,
3028 					r10_bio->sectors, 0);
3029 				rdev_dec_pending(rdev, conf->mddev);
3030 			}
3031 		}
3032 		if (fail) {
3033 			spin_lock_irq(&conf->device_lock);
3034 			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
3035 			conf->nr_queued++;
3036 			spin_unlock_irq(&conf->device_lock);
3037 			/*
3038 			 * In case freeze_array() is waiting for condition
3039 			 * nr_pending == nr_queued + extra to be true.
3040 			 */
3041 			wake_up(&conf->wait_barrier);
3042 			md_wakeup_thread(conf->mddev->thread);
3043 		} else {
3044 			if (test_bit(R10BIO_WriteError,
3045 				     &r10_bio->state))
3046 				close_write(r10_bio);
3047 			raid_end_bio_io(r10_bio);
3048 		}
3049 	}
3050 }
3051 
3052 static void raid10d(struct md_thread *thread)
3053 {
3054 	struct mddev *mddev = thread->mddev;
3055 	struct r10bio *r10_bio;
3056 	unsigned long flags;
3057 	struct r10conf *conf = mddev->private;
3058 	struct list_head *head = &conf->retry_list;
3059 	struct blk_plug plug;
3060 
3061 	md_check_recovery(mddev);
3062 
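	/*
	 * First finish any bios whose writes failed: they sit on
	 * bio_end_io_list until MD_SB_CHANGE_PENDING clears, i.e. until the
	 * superblock recording the failure has been written, so the error is
	 * not reported upwards before the device state is durable.
	 */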
3063 	if (!list_empty_careful(&conf->bio_end_io_list) &&
3064 	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3065 		LIST_HEAD(tmp);
3066 		spin_lock_irqsave(&conf->device_lock, flags);
3067 		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3068 			while (!list_empty(&conf->bio_end_io_list)) {
3069 				list_move(conf->bio_end_io_list.prev, &tmp);
3070 				conf->nr_queued--;
3071 			}
3072 		}
3073 		spin_unlock_irqrestore(&conf->device_lock, flags);
3074 		while (!list_empty(&tmp)) {
3075 			r10_bio = list_first_entry(&tmp, struct r10bio,
3076 						   retry_list);
3077 			list_del(&r10_bio->retry_list);
3078 			if (mddev->degraded)
3079 				set_bit(R10BIO_Degraded, &r10_bio->state);
3080 
3081 			if (test_bit(R10BIO_WriteError,
3082 				     &r10_bio->state))
3083 				close_write(r10_bio);
3084 			raid_end_bio_io(r10_bio);
3085 		}
3086 	}
3087 
3088 	blk_start_plug(&plug);
3089 	for (;;) {
3090 
3091 		flush_pending_writes(conf);
3092 
3093 		spin_lock_irqsave(&conf->device_lock, flags);
3094 		if (list_empty(head)) {
3095 			spin_unlock_irqrestore(&conf->device_lock, flags);
3096 			break;
3097 		}
3098 		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
3099 		list_del(head->prev);
3100 		conf->nr_queued--;
3101 		spin_unlock_irqrestore(&conf->device_lock, flags);
3102 
3103 		mddev = r10_bio->mddev;
3104 		conf = mddev->private;
3105 		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3106 		    test_bit(R10BIO_WriteError, &r10_bio->state))
3107 			handle_write_completed(conf, r10_bio);
3108 		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3109 			reshape_request_write(mddev, r10_bio);
3110 		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3111 			sync_request_write(mddev, r10_bio);
3112 		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3113 			recovery_request_write(mddev, r10_bio);
3114 		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3115 			handle_read_error(mddev, r10_bio);
3116 		else
3117 			WARN_ON_ONCE(1);
3118 
3119 		cond_resched();
3120 		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
3121 			md_check_recovery(mddev);
3122 	}
3123 	blk_finish_plug(&plug);
3124 }
3125 
3126 static int init_resync(struct r10conf *conf)
3127 {
3128 	int ret, buffs, i;
3129 
3130 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
3131 	BUG_ON(mempool_initialized(&conf->r10buf_pool));
3132 	conf->have_replacement = 0;
3133 	for (i = 0; i < conf->geo.raid_disks; i++)
3134 		if (conf->mirrors[i].replacement)
3135 			conf->have_replacement = 1;
3136 	ret = mempool_init(&conf->r10buf_pool, buffs,
3137 			   r10buf_pool_alloc, r10buf_pool_free, conf);
3138 	if (ret)
3139 		return ret;
3140 	conf->next_resync = 0;
3141 	return 0;
3142 }
3143 
3144 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
3145 {
3146 	struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
3147 	struct rsync_pages *rp;
3148 	struct bio *bio;
3149 	int nalloc;
3150 	int i;
3151 
3152 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3153 	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3154 		nalloc = conf->copies; /* resync */
3155 	else
3156 		nalloc = 2; /* recovery */
3157 
3158 	for (i = 0; i < nalloc; i++) {
3159 		bio = r10bio->devs[i].bio;
3160 		rp = bio->bi_private;
3161 		bio_reset(bio, NULL, 0);
3162 		bio->bi_private = rp;
3163 		bio = r10bio->devs[i].repl_bio;
3164 		if (bio) {
3165 			rp = bio->bi_private;
3166 			bio_reset(bio, NULL, 0);
3167 			bio->bi_private = rp;
3168 		}
3169 	}
3170 	return r10bio;
3171 }
3172 
3173 /*
3174  * Set cluster_sync_high since we need other nodes to add the
3175  * range [cluster_sync_low, cluster_sync_high] to suspend list.
3176  */
3177 static void raid10_set_cluster_sync_high(struct r10conf *conf)
3178 {
3179 	sector_t window_size;
3180 	int extra_chunk, chunks;
3181 
3182 	/*
	 * First, here we define a "stripe" as a unit that spans all
	 * member devices once, so we get the number of chunks as
	 * raid_disks / near_copies.  Otherwise, if near_copies is
	 * close to raid_disks, the resync window would increase
	 * linearly with raid_disks, which means we would suspend a
	 * really large IO window when it is not necessary.  If
	 * raid_disks is not divisible by near_copies, an extra chunk
	 * is needed to ensure the whole "stripe" is covered.
3192 	 */
3193 
3194 	chunks = conf->geo.raid_disks / conf->geo.near_copies;
3195 	if (conf->geo.raid_disks % conf->geo.near_copies == 0)
3196 		extra_chunk = 0;
3197 	else
3198 		extra_chunk = 1;
3199 	window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
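	/*
	 * Example (hypothetical geometry): raid_disks == 5, near_copies == 2
	 * and 512-sector chunks give chunks == 2 plus one extra chunk, i.e.
	 * window_size == 3 * 512 == 1536 sectors before the 32M minimum below
	 * is applied.
	 */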
3200 
3201 	/*
3202 	 * At least use a 32M window to align with raid1's resync window
3203 	 */
3204 	window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
3205 			CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
3206 
3207 	conf->cluster_sync_high = conf->cluster_sync_low + window_size;
3208 }
3209 
3210 /*
3211  * perform a "sync" on one "block"
3212  *
3213  * We need to make sure that no normal I/O request - particularly write
3214  * requests - conflict with active sync requests.
3215  *
3216  * This is achieved by tracking pending requests and a 'barrier' concept
3217  * that can be installed to exclude normal IO requests.
3218  *
3219  * Resync and recovery are handled very differently.
3220  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
3221  *
3222  * For resync, we iterate over virtual addresses, read all copies,
3223  * and update if there are differences.  If only one copy is live,
3224  * skip it.
3225  * For recovery, we iterate over physical addresses, read a good
3226  * value for each non-in_sync drive, and over-write.
3227  *
3228  * So, for recovery we may have several outstanding complex requests for a
3229  * given address, one for each out-of-sync device.  We model this by allocating
3230  * a number of r10_bio structures, one for each out-of-sync device.
 * As we set up these structures, we collect all bios together into a list
3232  * which we then process collectively to add pages, and then process again
3233  * to pass to submit_bio_noacct.
3234  *
3235  * The r10_bio structures are linked using a borrowed master_bio pointer.
3236  * This link is counted in ->remaining.  When the r10_bio that points to NULL
3237  * has its remaining count decremented to 0, the whole complex operation
3238  * is complete.
3239  *
3240  */
3241 
3242 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3243 			     int *skipped)
3244 {
3245 	struct r10conf *conf = mddev->private;
3246 	struct r10bio *r10_bio;
3247 	struct bio *biolist = NULL, *bio;
3248 	sector_t max_sector, nr_sectors;
3249 	int i;
3250 	int max_sync;
3251 	sector_t sync_blocks;
3252 	sector_t sectors_skipped = 0;
3253 	int chunks_skipped = 0;
3254 	sector_t chunk_mask = conf->geo.chunk_mask;
3255 	int page_idx = 0;
3256 
3257 	if (!mempool_initialized(&conf->r10buf_pool))
3258 		if (init_resync(conf))
3259 			return 0;
3260 
3261 	/*
3262 	 * Allow skipping a full rebuild for incremental assembly
3263 	 * of a clean array, like RAID1 does.
3264 	 */
3265 	if (mddev->bitmap == NULL &&
3266 	    mddev->recovery_cp == MaxSector &&
3267 	    mddev->reshape_position == MaxSector &&
3268 	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
3269 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3270 	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
3271 	    conf->fullsync == 0) {
3272 		*skipped = 1;
3273 		return mddev->dev_sectors - sector_nr;
3274 	}
3275 
3276  skipped:
3277 	max_sector = mddev->dev_sectors;
3278 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
3279 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3280 		max_sector = mddev->resync_max_sectors;
3281 	if (sector_nr >= max_sector) {
3282 		conf->cluster_sync_low = 0;
3283 		conf->cluster_sync_high = 0;
3284 
3285 		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunks (there can
		 * be several when recovering multiple devices),
		 * as we may have started syncing them but not finished.
3289 		 * We can find the current address in
3290 		 * mddev->curr_resync, but for recovery,
3291 		 * we need to convert that to several
3292 		 * virtual addresses.
3293 		 */
3294 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3295 			end_reshape(conf);
3296 			close_sync(conf);
3297 			return 0;
3298 		}
3299 
3300 		if (mddev->curr_resync < max_sector) { /* aborted */
3301 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3302 				md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3303 						   &sync_blocks, 1);
3304 			else for (i = 0; i < conf->geo.raid_disks; i++) {
3305 				sector_t sect =
3306 					raid10_find_virt(conf, mddev->curr_resync, i);
3307 				md_bitmap_end_sync(mddev->bitmap, sect,
3308 						   &sync_blocks, 1);
3309 			}
3310 		} else {
3311 			/* completed sync */
3312 			if ((!mddev->bitmap || conf->fullsync)
3313 			    && conf->have_replacement
3314 			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3315 				/* Completed a full sync so the replacements
3316 				 * are now fully recovered.
3317 				 */
3318 				rcu_read_lock();
3319 				for (i = 0; i < conf->geo.raid_disks; i++) {
3320 					struct md_rdev *rdev =
3321 						rcu_dereference(conf->mirrors[i].replacement);
3322 					if (rdev)
3323 						rdev->recovery_offset = MaxSector;
3324 				}
3325 				rcu_read_unlock();
3326 			}
3327 			conf->fullsync = 0;
3328 		}
3329 		md_bitmap_close_sync(mddev->bitmap);
3330 		close_sync(conf);
3331 		*skipped = 1;
3332 		return sectors_skipped;
3333 	}
3334 
3335 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3336 		return reshape_request(mddev, sector_nr, skipped);
3337 
3338 	if (chunks_skipped >= conf->geo.raid_disks) {
3339 		/* if there has been nothing to do on any drive,
3340 		 * then there is nothing to do at all..
3341 		 */
3342 		*skipped = 1;
3343 		return (max_sector - sector_nr) + sectors_skipped;
3344 	}
3345 
3346 	if (max_sector > mddev->resync_max)
3347 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
3348 
3349 	/* make sure whole request will fit in a chunk - if chunks
3350 	 * are meaningful
3351 	 */
3352 	if (conf->geo.near_copies < conf->geo.raid_disks &&
3353 	    max_sector > (sector_nr | chunk_mask))
3354 		max_sector = (sector_nr | chunk_mask) + 1;
3355 
3356 	/*
3357 	 * If there is non-resync activity waiting for a turn, then let it
	 * through before starting on this new sync request.
3359 	 */
3360 	if (conf->nr_waiting)
3361 		schedule_timeout_uninterruptible(1);
3362 
3363 	/* Again, very different code for resync and recovery.
3364 	 * Both must result in an r10bio with a list of bios that
3365 	 * have bi_end_io, bi_sector, bi_bdev set,
3366 	 * and bi_private set to the r10bio.
3367 	 * For recovery, we may actually create several r10bios
3368 	 * with 2 bios in each, that correspond to the bios in the main one.
3369 	 * In this case, the subordinate r10bios link back through a
3370 	 * borrowed master_bio pointer, and the counter in the master
3371 	 * includes a ref from each subordinate.
3372 	 */
3373 	/* First, we decide what to do and set ->bi_end_io
	 * to end_sync_read if we want to read, and
3375 	 * end_sync_write if we will want to write.
3376 	 */
3377 
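	/*
	 * max_sync is the most we handle per iteration: one resync buffer,
	 * which with 4KiB pages is 128 sectors (64KiB).
	 */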
3378 	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3379 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3380 		/* recovery... the complicated one */
3381 		int j;
3382 		r10_bio = NULL;
3383 
3384 		for (i = 0 ; i < conf->geo.raid_disks; i++) {
3385 			int still_degraded;
3386 			struct r10bio *rb2;
3387 			sector_t sect;
3388 			int must_sync;
3389 			int any_working;
3390 			int need_recover = 0;
3391 			int need_replace = 0;
3392 			struct raid10_info *mirror = &conf->mirrors[i];
3393 			struct md_rdev *mrdev, *mreplace;
3394 
3395 			rcu_read_lock();
3396 			mrdev = rcu_dereference(mirror->rdev);
3397 			mreplace = rcu_dereference(mirror->replacement);
3398 
3399 			if (mrdev != NULL &&
3400 			    !test_bit(Faulty, &mrdev->flags) &&
3401 			    !test_bit(In_sync, &mrdev->flags))
3402 				need_recover = 1;
3403 			if (mreplace != NULL &&
3404 			    !test_bit(Faulty, &mreplace->flags))
3405 				need_replace = 1;
3406 
3407 			if (!need_recover && !need_replace) {
3408 				rcu_read_unlock();
3409 				continue;
3410 			}
3411 
3412 			still_degraded = 0;
3413 			/* want to reconstruct this device */
3414 			rb2 = r10_bio;
3415 			sect = raid10_find_virt(conf, sector_nr, i);
3416 			if (sect >= mddev->resync_max_sectors) {
3417 				/* last stripe is not complete - don't
3418 				 * try to recover this sector.
3419 				 */
3420 				rcu_read_unlock();
3421 				continue;
3422 			}
3423 			if (mreplace && test_bit(Faulty, &mreplace->flags))
3424 				mreplace = NULL;
3425 			/* Unless we are doing a full sync, or a replacement
3426 			 * we only need to recover the block if it is set in
3427 			 * the bitmap
3428 			 */
3429 			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3430 							 &sync_blocks, 1);
3431 			if (sync_blocks < max_sync)
3432 				max_sync = sync_blocks;
3433 			if (!must_sync &&
3434 			    mreplace == NULL &&
3435 			    !conf->fullsync) {
3436 				/* yep, skip the sync_blocks here, but don't assume
3437 				 * that there will never be anything to do here
3438 				 */
3439 				chunks_skipped = -1;
3440 				rcu_read_unlock();
3441 				continue;
3442 			}
3443 			atomic_inc(&mrdev->nr_pending);
3444 			if (mreplace)
3445 				atomic_inc(&mreplace->nr_pending);
3446 			rcu_read_unlock();
3447 
3448 			r10_bio = raid10_alloc_init_r10buf(conf);
3449 			r10_bio->state = 0;
3450 			raise_barrier(conf, rb2 != NULL);
3451 			atomic_set(&r10_bio->remaining, 0);
3452 
3453 			r10_bio->master_bio = (struct bio*)rb2;
3454 			if (rb2)
3455 				atomic_inc(&rb2->remaining);
3456 			r10_bio->mddev = mddev;
3457 			set_bit(R10BIO_IsRecover, &r10_bio->state);
3458 			r10_bio->sector = sect;
3459 
3460 			raid10_find_phys(conf, r10_bio);
3461 
3462 			/* Need to check if the array will still be
3463 			 * degraded
3464 			 */
3465 			rcu_read_lock();
3466 			for (j = 0; j < conf->geo.raid_disks; j++) {
3467 				struct md_rdev *rdev = rcu_dereference(
3468 					conf->mirrors[j].rdev);
3469 				if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3470 					still_degraded = 1;
3471 					break;
3472 				}
3473 			}
3474 
3475 			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3476 							 &sync_blocks, still_degraded);
3477 
3478 			any_working = 0;
3479 			for (j=0; j<conf->copies;j++) {
3480 				int k;
3481 				int d = r10_bio->devs[j].devnum;
3482 				sector_t from_addr, to_addr;
3483 				struct md_rdev *rdev =
3484 					rcu_dereference(conf->mirrors[d].rdev);
3485 				sector_t sector, first_bad;
3486 				int bad_sectors;
3487 				if (!rdev ||
3488 				    !test_bit(In_sync, &rdev->flags))
3489 					continue;
3490 				/* This is where we read from */
3491 				any_working = 1;
3492 				sector = r10_bio->devs[j].addr;
3493 
3494 				if (is_badblock(rdev, sector, max_sync,
3495 						&first_bad, &bad_sectors)) {
3496 					if (first_bad > sector)
3497 						max_sync = first_bad - sector;
3498 					else {
3499 						bad_sectors -= (sector
3500 								- first_bad);
3501 						if (max_sync > bad_sectors)
3502 							max_sync = bad_sectors;
3503 						continue;
3504 					}
3505 				}
3506 				bio = r10_bio->devs[0].bio;
3507 				bio->bi_next = biolist;
3508 				biolist = bio;
3509 				bio->bi_end_io = end_sync_read;
3510 				bio_set_op_attrs(bio, REQ_OP_READ, 0);
3511 				if (test_bit(FailFast, &rdev->flags))
3512 					bio->bi_opf |= MD_FAILFAST;
3513 				from_addr = r10_bio->devs[j].addr;
3514 				bio->bi_iter.bi_sector = from_addr +
3515 					rdev->data_offset;
3516 				bio_set_dev(bio, rdev->bdev);
3517 				atomic_inc(&rdev->nr_pending);
3518 				/* and we write to 'i' (if not in_sync) */
3519 
3520 				for (k=0; k<conf->copies; k++)
3521 					if (r10_bio->devs[k].devnum == i)
3522 						break;
3523 				BUG_ON(k == conf->copies);
3524 				to_addr = r10_bio->devs[k].addr;
3525 				r10_bio->devs[0].devnum = d;
3526 				r10_bio->devs[0].addr = from_addr;
3527 				r10_bio->devs[1].devnum = i;
3528 				r10_bio->devs[1].addr = to_addr;
3529 
3530 				if (need_recover) {
3531 					bio = r10_bio->devs[1].bio;
3532 					bio->bi_next = biolist;
3533 					biolist = bio;
3534 					bio->bi_end_io = end_sync_write;
3535 					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3536 					bio->bi_iter.bi_sector = to_addr
3537 						+ mrdev->data_offset;
3538 					bio_set_dev(bio, mrdev->bdev);
3539 					atomic_inc(&r10_bio->remaining);
3540 				} else
3541 					r10_bio->devs[1].bio->bi_end_io = NULL;
3542 
3543 				/* and maybe write to replacement */
3544 				bio = r10_bio->devs[1].repl_bio;
3545 				if (bio)
3546 					bio->bi_end_io = NULL;
3547 				/* Note: if need_replace, then bio
3548 				 * cannot be NULL as r10buf_pool_alloc will
3549 				 * have allocated it.
3550 				 */
3551 				if (!need_replace)
3552 					break;
3553 				bio->bi_next = biolist;
3554 				biolist = bio;
3555 				bio->bi_end_io = end_sync_write;
3556 				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3557 				bio->bi_iter.bi_sector = to_addr +
3558 					mreplace->data_offset;
3559 				bio_set_dev(bio, mreplace->bdev);
3560 				atomic_inc(&r10_bio->remaining);
3561 				break;
3562 			}
3563 			rcu_read_unlock();
3564 			if (j == conf->copies) {
3565 				/* Cannot recover, so abort the recovery or
3566 				 * record a bad block */
3567 				if (any_working) {
3568 					/* problem is that there are bad blocks
3569 					 * on other device(s)
3570 					 */
3571 					int k;
3572 					for (k = 0; k < conf->copies; k++)
3573 						if (r10_bio->devs[k].devnum == i)
3574 							break;
3575 					if (!test_bit(In_sync,
3576 						      &mrdev->flags)
3577 					    && !rdev_set_badblocks(
3578 						    mrdev,
3579 						    r10_bio->devs[k].addr,
3580 						    max_sync, 0))
3581 						any_working = 0;
3582 					if (mreplace &&
3583 					    !rdev_set_badblocks(
3584 						    mreplace,
3585 						    r10_bio->devs[k].addr,
3586 						    max_sync, 0))
3587 						any_working = 0;
3588 				}
3589 				if (!any_working)  {
3590 					if (!test_and_set_bit(MD_RECOVERY_INTR,
3591 							      &mddev->recovery))
3592 						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3593 						       mdname(mddev));
3594 					mirror->recovery_disabled
3595 						= mddev->recovery_disabled;
3596 				}
3597 				put_buf(r10_bio);
3598 				if (rb2)
3599 					atomic_dec(&rb2->remaining);
3600 				r10_bio = rb2;
3601 				rdev_dec_pending(mrdev, mddev);
3602 				if (mreplace)
3603 					rdev_dec_pending(mreplace, mddev);
3604 				break;
3605 			}
3606 			rdev_dec_pending(mrdev, mddev);
3607 			if (mreplace)
3608 				rdev_dec_pending(mreplace, mddev);
3609 			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3610 				/* Only want this if there is elsewhere to
3611 				 * read from. 'j' is currently the first
3612 				 * readable copy.
3613 				 */
3614 				int targets = 1;
3615 				for (; j < conf->copies; j++) {
3616 					int d = r10_bio->devs[j].devnum;
3617 					if (conf->mirrors[d].rdev &&
3618 					    test_bit(In_sync,
3619 						      &conf->mirrors[d].rdev->flags))
3620 						targets++;
3621 				}
3622 				if (targets == 1)
3623 					r10_bio->devs[0].bio->bi_opf
3624 						&= ~MD_FAILFAST;
3625 			}
3626 		}
3627 		if (biolist == NULL) {
3628 			while (r10_bio) {
3629 				struct r10bio *rb2 = r10_bio;
3630 				r10_bio = (struct r10bio*) rb2->master_bio;
3631 				rb2->master_bio = NULL;
3632 				put_buf(rb2);
3633 			}
3634 			goto giveup;
3635 		}
3636 	} else {
3637 		/* resync. Schedule a read for every block at this virt offset */
3638 		int count = 0;
3639 
3640 		/*
3641 		 * curr_resync_completed may not have been updated in time,
3642 		 * and cluster_sync_low is set based on it, so check against
3643 		 * "sector_nr + 2 * RESYNC_SECTORS" as a safety margin, which
3644 		 * ensures curr_resync_completed gets updated in
3645 		 * md_bitmap_cond_end_sync.
3646 		 */
3647 		md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3648 					mddev_is_clustered(mddev) &&
3649 					(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3650 
3651 		if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3652 					  &sync_blocks, mddev->degraded) &&
3653 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3654 						 &mddev->recovery)) {
3655 			/* We can skip this block */
3656 			*skipped = 1;
3657 			return sync_blocks + sectors_skipped;
3658 		}
3659 		if (sync_blocks < max_sync)
3660 			max_sync = sync_blocks;
3661 		r10_bio = raid10_alloc_init_r10buf(conf);
3662 		r10_bio->state = 0;
3663 
3664 		r10_bio->mddev = mddev;
3665 		atomic_set(&r10_bio->remaining, 0);
3666 		raise_barrier(conf, 0);
3667 		conf->next_resync = sector_nr;
3668 
3669 		r10_bio->master_bio = NULL;
3670 		r10_bio->sector = sector_nr;
3671 		set_bit(R10BIO_IsSync, &r10_bio->state);
3672 		raid10_find_phys(conf, r10_bio);
3673 		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3674 
3675 		for (i = 0; i < conf->copies; i++) {
3676 			int d = r10_bio->devs[i].devnum;
3677 			sector_t first_bad, sector;
3678 			int bad_sectors;
3679 			struct md_rdev *rdev;
3680 
3681 			if (r10_bio->devs[i].repl_bio)
3682 				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3683 
3684 			bio = r10_bio->devs[i].bio;
3685 			bio->bi_status = BLK_STS_IOERR;
3686 			rcu_read_lock();
3687 			rdev = rcu_dereference(conf->mirrors[d].rdev);
3688 			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3689 				rcu_read_unlock();
3690 				continue;
3691 			}
3692 			sector = r10_bio->devs[i].addr;
3693 			if (is_badblock(rdev, sector, max_sync,
3694 					&first_bad, &bad_sectors)) {
3695 				if (first_bad > sector)
3696 					max_sync = first_bad - sector;
3697 				else {
3698 					bad_sectors -= (sector - first_bad);
3699 					if (max_sync > bad_sectors)
3700 						max_sync = bad_sectors;
3701 					rcu_read_unlock();
3702 					continue;
3703 				}
3704 			}
3705 			atomic_inc(&rdev->nr_pending);
3706 			atomic_inc(&r10_bio->remaining);
3707 			bio->bi_next = biolist;
3708 			biolist = bio;
3709 			bio->bi_end_io = end_sync_read;
3710 			bio_set_op_attrs(bio, REQ_OP_READ, 0);
3711 			if (test_bit(FailFast, &rdev->flags))
3712 				bio->bi_opf |= MD_FAILFAST;
3713 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3714 			bio_set_dev(bio, rdev->bdev);
3715 			count++;
3716 
3717 			rdev = rcu_dereference(conf->mirrors[d].replacement);
3718 			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3719 				rcu_read_unlock();
3720 				continue;
3721 			}
3722 			atomic_inc(&rdev->nr_pending);
3723 
3724 			/* Need to set up for writing to the replacement */
3725 			bio = r10_bio->devs[i].repl_bio;
3726 			bio->bi_status = BLK_STS_IOERR;
3727 
3728 			sector = r10_bio->devs[i].addr;
3729 			bio->bi_next = biolist;
3730 			biolist = bio;
3731 			bio->bi_end_io = end_sync_write;
3732 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3733 			if (test_bit(FailFast, &rdev->flags))
3734 				bio->bi_opf |= MD_FAILFAST;
3735 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3736 			bio_set_dev(bio, rdev->bdev);
3737 			count++;
3738 			rcu_read_unlock();
3739 		}
3740 
3741 		if (count < 2) {
3742 			for (i=0; i<conf->copies; i++) {
3743 				int d = r10_bio->devs[i].devnum;
3744 				if (r10_bio->devs[i].bio->bi_end_io)
3745 					rdev_dec_pending(conf->mirrors[d].rdev,
3746 							 mddev);
3747 				if (r10_bio->devs[i].repl_bio &&
3748 				    r10_bio->devs[i].repl_bio->bi_end_io)
3749 					rdev_dec_pending(
3750 						conf->mirrors[d].replacement,
3751 						mddev);
3752 			}
3753 			put_buf(r10_bio);
3754 			biolist = NULL;
3755 			goto giveup;
3756 		}
3757 	}
3758 
3759 	nr_sectors = 0;
3760 	if (sector_nr + max_sync < max_sector)
3761 		max_sector = sector_nr + max_sync;
3762 	do {
3763 		struct page *page;
3764 		int len = PAGE_SIZE;
3765 		if (sector_nr + (len>>9) > max_sector)
3766 			len = (max_sector - sector_nr) << 9;
3767 		if (len == 0)
3768 			break;
3769 		for (bio= biolist ; bio ; bio=bio->bi_next) {
3770 			struct resync_pages *rp = get_resync_pages(bio);
3771 			page = resync_fetch_page(rp, page_idx);
3772 			/*
3773 			 * won't fail because the vec table is big enough
3774 			 * to hold all these pages
3775 			 */
3776 			bio_add_page(bio, page, len, 0);
3777 		}
3778 		nr_sectors += len>>9;
3779 		sector_nr += len>>9;
3780 	} while (++page_idx < RESYNC_PAGES);
3781 	r10_bio->sectors = nr_sectors;
3782 
3783 	if (mddev_is_clustered(mddev) &&
3784 	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3785 		/* It is resync not recovery */
3786 		if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3787 			conf->cluster_sync_low = mddev->curr_resync_completed;
3788 			raid10_set_cluster_sync_high(conf);
3789 			/* Send resync message */
3790 			md_cluster_ops->resync_info_update(mddev,
3791 						conf->cluster_sync_low,
3792 						conf->cluster_sync_high);
3793 		}
3794 	} else if (mddev_is_clustered(mddev)) {
3795 		/* This is recovery not resync */
3796 		sector_t sect_va1, sect_va2;
3797 		bool broadcast_msg = false;
3798 
3799 		for (i = 0; i < conf->geo.raid_disks; i++) {
3800 			/*
3801 			 * sector_nr is a device address for recovery, so we
3802 			 * need to translate it to an array address before
3803 			 * comparing it with cluster_sync_high.
3804 			 */
3805 			sect_va1 = raid10_find_virt(conf, sector_nr, i);
3806 
3807 			if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3808 				broadcast_msg = true;
3809 				/*
3810 				 * curr_resync_completed is similar to
3811 				 * sector_nr, so make the same translation.
3812 				 */
3813 				sect_va2 = raid10_find_virt(conf,
3814 					mddev->curr_resync_completed, i);
3815 
3816 				if (conf->cluster_sync_low == 0 ||
3817 				    conf->cluster_sync_low > sect_va2)
3818 					conf->cluster_sync_low = sect_va2;
3819 			}
3820 		}
3821 		if (broadcast_msg) {
3822 			raid10_set_cluster_sync_high(conf);
3823 			md_cluster_ops->resync_info_update(mddev,
3824 						conf->cluster_sync_low,
3825 						conf->cluster_sync_high);
3826 		}
3827 	}
3828 
3829 	while (biolist) {
3830 		bio = biolist;
3831 		biolist = biolist->bi_next;
3832 
3833 		bio->bi_next = NULL;
3834 		r10_bio = get_resync_r10bio(bio);
3835 		r10_bio->sectors = nr_sectors;
3836 
3837 		if (bio->bi_end_io == end_sync_read) {
3838 			md_sync_acct_bio(bio, nr_sectors);
3839 			bio->bi_status = 0;
3840 			submit_bio_noacct(bio);
3841 		}
3842 	}
3843 
3844 	if (sectors_skipped)
3845 		/* pretend they weren't skipped; it makes
3846 		 * no important difference in this case
3847 		 */
3848 		md_done_sync(mddev, sectors_skipped, 1);
3849 
3850 	return sectors_skipped + nr_sectors;
3851  giveup:
3852 	/* There is nowhere to write: all non-sync
3853 	 * drives must be failed or in resync, or all drives
3854 	 * have a bad block, so try the next chunk...
3855 	 */
3856 	if (sector_nr + max_sync < max_sector)
3857 		max_sector = sector_nr + max_sync;
3858 
3859 	sectors_skipped += (max_sector - sector_nr);
3860 	chunks_skipped ++;
3861 	sector_nr = max_sector;
3862 	goto skipped;
3863 }
3864 
3865 static sector_t
3866 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3867 {
3868 	sector_t size;
3869 	struct r10conf *conf = mddev->private;
3870 
3871 	if (!raid_disks)
3872 		raid_disks = min(conf->geo.raid_disks,
3873 				 conf->prev.raid_disks);
3874 	if (!sectors)
3875 		sectors = conf->dev_sectors;
3876 
3877 	size = sectors >> conf->geo.chunk_shift;
3878 	sector_div(size, conf->geo.far_copies);
3879 	size = size * raid_disks;
3880 	sector_div(size, conf->geo.near_copies);
3881 
3882 	return size << conf->geo.chunk_shift;
3883 }
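/*
 * Illustrative walk-through of the arithmetic above (the geometry and sizes
 * are assumed example values, not anything mandated by this driver):
 *
 *   geo: raid_disks = 4, near_copies = 2, far_copies = 1,
 *        chunk = 1024 sectors  =>  chunk_shift = 10
 *   per-device sectors = 1048576 (512 MiB)
 *
 *   1048576 >> 10       = 1024    chunks per device
 *   / far_copies (1)    = 1024
 *   * raid_disks (4)    = 4096
 *   / near_copies (2)   = 2048    distinct data chunks in the array
 *   2048 << 10          = 2097152 sectors, i.e. a 1 GiB array, matching
 *                         4 x 512 MiB devices storing two copies of all data.
 */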
3884 
3885 static void calc_sectors(struct r10conf *conf, sector_t size)
3886 {
3887 	/* Calculate the number of sectors-per-device that will
3888 	 * actually be used, and set conf->dev_sectors and
3889 	 * conf->stride
3890 	 */
3891 
3892 	size = size >> conf->geo.chunk_shift;
3893 	sector_div(size, conf->geo.far_copies);
3894 	size = size * conf->geo.raid_disks;
3895 	sector_div(size, conf->geo.near_copies);
3896 	/* 'size' is now the number of chunks in the array */
3897 	/* calculate "used chunks per device" */
3898 	size = size * conf->copies;
3899 
3900 	/* We need to round up when dividing by raid_disks to
3901 	 * get the stride size.
3902 	 */
3903 	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3904 
3905 	conf->dev_sectors = size << conf->geo.chunk_shift;
3906 
3907 	if (conf->geo.far_offset)
3908 		conf->geo.stride = 1 << conf->geo.chunk_shift;
3909 	else {
3910 		sector_div(size, conf->geo.far_copies);
3911 		conf->geo.stride = size << conf->geo.chunk_shift;
3912 	}
3913 }
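/*
 * Illustrative example of calc_sectors() with assumed values, chosen to show
 * the 'far' stride calculation:
 *
 *   geo: raid_disks = 2, near_copies = 1, far_copies = 2 (copies = 2),
 *        far_offset = 0, chunk = 1024 sectors  =>  chunk_shift = 10
 *   size passed in (per-device sectors) = 1048576
 *
 *   1048576 >> 10               = 1024  chunks per device
 *   / far_copies (2)            =  512
 *   * raid_disks (2)            = 1024
 *   / near_copies (1)           = 1024  chunks in the array
 *   * copies (2)                = 2048  device chunks needed
 *   DIV_ROUND_UP(2048, 2)       = 1024  used chunks per device
 *   dev_sectors                 = 1024 << 10 = 1048576
 *
 *   far_offset is 0, so stride = (1024 / far_copies) << 10 = 524288 sectors:
 *   each device is split into two half-device sections, one per far copy.
 */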
3914 
3915 enum geo_type {geo_new, geo_old, geo_start};
3916 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3917 {
3918 	int nc, fc, fo;
3919 	int layout, chunk, disks;
3920 	switch (new) {
3921 	case geo_old:
3922 		layout = mddev->layout;
3923 		chunk = mddev->chunk_sectors;
3924 		disks = mddev->raid_disks - mddev->delta_disks;
3925 		break;
3926 	case geo_new:
3927 		layout = mddev->new_layout;
3928 		chunk = mddev->new_chunk_sectors;
3929 		disks = mddev->raid_disks;
3930 		break;
3931 	default: /* avoid 'may be unused' warnings */
3932 	case geo_start: /* new when starting reshape - raid_disks not
3933 			 * updated yet. */
3934 		layout = mddev->new_layout;
3935 		chunk = mddev->new_chunk_sectors;
3936 		disks = mddev->raid_disks + mddev->delta_disks;
3937 		break;
3938 	}
3939 	if (layout >> 19)
3940 		return -1;
3941 	if (chunk < (PAGE_SIZE >> 9) ||
3942 	    !is_power_of_2(chunk))
3943 		return -2;
3944 	nc = layout & 255;
3945 	fc = (layout >> 8) & 255;
3946 	fo = layout & (1<<16);
3947 	geo->raid_disks = disks;
3948 	geo->near_copies = nc;
3949 	geo->far_copies = fc;
3950 	geo->far_offset = fo;
3951 	switch (layout >> 17) {
3952 	case 0:	/* original layout.  simple but not always optimal */
3953 		geo->far_set_size = disks;
3954 		break;
3955 	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
3956 		 * actually using this, but leave code here just in case. */
3957 		geo->far_set_size = disks/fc;
3958 		WARN(geo->far_set_size < fc,
3959 		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
3960 		break;
3961 	case 2: /* "improved" layout fixed to match documentation */
3962 		geo->far_set_size = fc * nc;
3963 		break;
3964 	default: /* Not a valid layout */
3965 		return -1;
3966 	}
3967 	geo->chunk_mask = chunk - 1;
3968 	geo->chunk_shift = ffz(~chunk);
3969 	return nc*fc;
3970 }
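/*
 * Example of the layout decoding above, using an assumed layout value:
 * layout 0x102 gives nc = 0x102 & 255 = 2, fc = (0x102 >> 8) & 255 = 1,
 * fo = 0 and no far-set bits (layout >> 17 == 0, so far_set_size = disks),
 * i.e. the common "near=2" arrangement, and setup_geo() returns nc * fc = 2.
 * With chunk = 1024 sectors, chunk_mask = 0x3ff and
 * chunk_shift = ffz(~1024) = 10.
 */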
3971 
3972 static struct r10conf *setup_conf(struct mddev *mddev)
3973 {
3974 	struct r10conf *conf = NULL;
3975 	int err = -EINVAL;
3976 	struct geom geo;
3977 	int copies;
3978 
3979 	copies = setup_geo(&geo, mddev, geo_new);
3980 
3981 	if (copies == -2) {
3982 		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3983 			mdname(mddev), PAGE_SIZE);
3984 		goto out;
3985 	}
3986 
3987 	if (copies < 2 || copies > mddev->raid_disks) {
3988 		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3989 			mdname(mddev), mddev->new_layout);
3990 		goto out;
3991 	}
3992 
3993 	err = -ENOMEM;
3994 	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3995 	if (!conf)
3996 		goto out;
3997 
3998 	/* FIXME calc properly */
3999 	conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
4000 				sizeof(struct raid10_info),
4001 				GFP_KERNEL);
4002 	if (!conf->mirrors)
4003 		goto out;
4004 
4005 	conf->tmppage = alloc_page(GFP_KERNEL);
4006 	if (!conf->tmppage)
4007 		goto out;
4008 
4009 	conf->geo = geo;
4010 	conf->copies = copies;
4011 	err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
4012 			   rbio_pool_free, conf);
4013 	if (err)
4014 		goto out;
4015 
4016 	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
4017 	if (err)
4018 		goto out;
4019 
4020 	calc_sectors(conf, mddev->dev_sectors);
4021 	if (mddev->reshape_position == MaxSector) {
4022 		conf->prev = conf->geo;
4023 		conf->reshape_progress = MaxSector;
4024 	} else {
4025 		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
4026 			err = -EINVAL;
4027 			goto out;
4028 		}
4029 		conf->reshape_progress = mddev->reshape_position;
4030 		if (conf->prev.far_offset)
4031 			conf->prev.stride = 1 << conf->prev.chunk_shift;
4032 		else
4033 			/* far_copies must be 1 */
4034 			conf->prev.stride = conf->dev_sectors;
4035 	}
4036 	conf->reshape_safe = conf->reshape_progress;
4037 	spin_lock_init(&conf->device_lock);
4038 	INIT_LIST_HEAD(&conf->retry_list);
4039 	INIT_LIST_HEAD(&conf->bio_end_io_list);
4040 
4041 	spin_lock_init(&conf->resync_lock);
4042 	init_waitqueue_head(&conf->wait_barrier);
4043 	atomic_set(&conf->nr_pending, 0);
4044 
4045 	err = -ENOMEM;
4046 	conf->thread = md_register_thread(raid10d, mddev, "raid10");
4047 	if (!conf->thread)
4048 		goto out;
4049 
4050 	conf->mddev = mddev;
4051 	return conf;
4052 
4053  out:
4054 	if (conf) {
4055 		mempool_exit(&conf->r10bio_pool);
4056 		kfree(conf->mirrors);
4057 		safe_put_page(conf->tmppage);
4058 		bioset_exit(&conf->bio_split);
4059 		kfree(conf);
4060 	}
4061 	return ERR_PTR(err);
4062 }
4063 
4064 static void raid10_set_io_opt(struct r10conf *conf)
4065 {
4066 	int raid_disks = conf->geo.raid_disks;
4067 
4068 	if (!(conf->geo.raid_disks % conf->geo.near_copies))
4069 		raid_disks /= conf->geo.near_copies;
4070 	blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
4071 			 raid_disks);
4072 }
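/*
 * For instance (assumed, illustrative numbers): with raid_disks = 4,
 * near_copies = 2 and chunk_sectors = 1024 (512 KiB chunks), raid_disks
 * divides evenly by near_copies, so the optimal I/O size advertised above is
 * 512 KiB * (4 / 2) = 1 MiB - one stripe's worth of distinct data.
 */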
4073 
4074 static int raid10_run(struct mddev *mddev)
4075 {
4076 	struct r10conf *conf;
4077 	int i, disk_idx;
4078 	struct raid10_info *disk;
4079 	struct md_rdev *rdev;
4080 	sector_t size;
4081 	sector_t min_offset_diff = 0;
4082 	int first = 1;
4083 	bool discard_supported = false;
4084 
4085 	if (mddev_init_writes_pending(mddev) < 0)
4086 		return -ENOMEM;
4087 
4088 	if (mddev->private == NULL) {
4089 		conf = setup_conf(mddev);
4090 		if (IS_ERR(conf))
4091 			return PTR_ERR(conf);
4092 		mddev->private = conf;
4093 	}
4094 	conf = mddev->private;
4095 	if (!conf)
4096 		goto out;
4097 
4098 	if (mddev_is_clustered(conf->mddev)) {
4099 		int fc, fo;
4100 
4101 		fc = (mddev->layout >> 8) & 255;
4102 		fo = mddev->layout & (1<<16);
4103 		if (fc > 1 || fo > 0) {
4104 			pr_err("only near layout is supported by clustered"
4105 				" raid10\n");
4106 			goto out_free_conf;
4107 		}
4108 	}
4109 
4110 	mddev->thread = conf->thread;
4111 	conf->thread = NULL;
4112 
4113 	if (mddev->queue) {
4114 		blk_queue_max_discard_sectors(mddev->queue,
4115 					      UINT_MAX);
4116 		blk_queue_max_write_same_sectors(mddev->queue, 0);
4117 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
4118 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
4119 		raid10_set_io_opt(conf);
4120 	}
4121 
4122 	rdev_for_each(rdev, mddev) {
4123 		long long diff;
4124 
4125 		disk_idx = rdev->raid_disk;
4126 		if (disk_idx < 0)
4127 			continue;
4128 		if (disk_idx >= conf->geo.raid_disks &&
4129 		    disk_idx >= conf->prev.raid_disks)
4130 			continue;
4131 		disk = conf->mirrors + disk_idx;
4132 
4133 		if (test_bit(Replacement, &rdev->flags)) {
4134 			if (disk->replacement)
4135 				goto out_free_conf;
4136 			disk->replacement = rdev;
4137 		} else {
4138 			if (disk->rdev)
4139 				goto out_free_conf;
4140 			disk->rdev = rdev;
4141 		}
4142 		diff = (rdev->new_data_offset - rdev->data_offset);
4143 		if (!mddev->reshape_backwards)
4144 			diff = -diff;
4145 		if (diff < 0)
4146 			diff = 0;
4147 		if (first || diff < min_offset_diff)
4148 			min_offset_diff = diff;
4149 
4150 		if (mddev->gendisk)
4151 			disk_stack_limits(mddev->gendisk, rdev->bdev,
4152 					  rdev->data_offset << 9);
4153 
4154 		disk->head_position = 0;
4155 
4156 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
4157 			discard_supported = true;
4158 		first = 0;
4159 	}
4160 
4161 	if (mddev->queue) {
4162 		if (discard_supported)
4163 			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
4164 						mddev->queue);
4165 		else
4166 			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
4167 						  mddev->queue);
4168 	}
4169 	/* need to check that every block has at least one working mirror */
4170 	if (!enough(conf, -1)) {
4171 		pr_err("md/raid10:%s: not enough operational mirrors.\n",
4172 		       mdname(mddev));
4173 		goto out_free_conf;
4174 	}
4175 
4176 	if (conf->reshape_progress != MaxSector) {
4177 		/* must ensure that shape change is supported */
4178 		if (conf->geo.far_copies != 1 &&
4179 		    conf->geo.far_offset == 0)
4180 			goto out_free_conf;
4181 		if (conf->prev.far_copies != 1 &&
4182 		    conf->prev.far_offset == 0)
4183 			goto out_free_conf;
4184 	}
4185 
4186 	mddev->degraded = 0;
4187 	for (i = 0;
4188 	     i < conf->geo.raid_disks
4189 		     || i < conf->prev.raid_disks;
4190 	     i++) {
4191 
4192 		disk = conf->mirrors + i;
4193 
4194 		if (!disk->rdev && disk->replacement) {
4195 			/* The replacement is all we have - use it */
4196 			disk->rdev = disk->replacement;
4197 			disk->replacement = NULL;
4198 			clear_bit(Replacement, &disk->rdev->flags);
4199 		}
4200 
4201 		if (!disk->rdev ||
4202 		    !test_bit(In_sync, &disk->rdev->flags)) {
4203 			disk->head_position = 0;
4204 			mddev->degraded++;
4205 			if (disk->rdev &&
4206 			    disk->rdev->saved_raid_disk < 0)
4207 				conf->fullsync = 1;
4208 		}
4209 
4210 		if (disk->replacement &&
4211 		    !test_bit(In_sync, &disk->replacement->flags) &&
4212 		    disk->replacement->saved_raid_disk < 0) {
4213 			conf->fullsync = 1;
4214 		}
4215 
4216 		disk->recovery_disabled = mddev->recovery_disabled - 1;
4217 	}
4218 
4219 	if (mddev->recovery_cp != MaxSector)
4220 		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
4221 			  mdname(mddev));
4222 	pr_info("md/raid10:%s: active with %d out of %d devices\n",
4223 		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
4224 		conf->geo.raid_disks);
4225 	/*
4226 	 * Ok, everything is just fine now
4227 	 */
4228 	mddev->dev_sectors = conf->dev_sectors;
4229 	size = raid10_size(mddev, 0, 0);
4230 	md_set_array_sectors(mddev, size);
4231 	mddev->resync_max_sectors = size;
4232 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
4233 
4234 	if (md_integrity_register(mddev))
4235 		goto out_free_conf;
4236 
4237 	if (conf->reshape_progress != MaxSector) {
4238 		unsigned long before_length, after_length;
4239 
4240 		before_length = ((1 << conf->prev.chunk_shift) *
4241 				 conf->prev.far_copies);
4242 		after_length = ((1 << conf->geo.chunk_shift) *
4243 				conf->geo.far_copies);
4244 
4245 		if (max(before_length, after_length) > min_offset_diff) {
4246 			/* This cannot work */
4247 			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
4248 			goto out_free_conf;
4249 		}
4250 		conf->offset_diff = min_offset_diff;
4251 
4252 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4253 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4254 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4255 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4256 		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4257 							"reshape");
4258 		if (!mddev->sync_thread)
4259 			goto out_free_conf;
4260 	}
4261 
4262 	return 0;
4263 
4264 out_free_conf:
4265 	md_unregister_thread(&mddev->thread);
4266 	mempool_exit(&conf->r10bio_pool);
4267 	safe_put_page(conf->tmppage);
4268 	kfree(conf->mirrors);
4269 	kfree(conf);
4270 	mddev->private = NULL;
4271 out:
4272 	return -EIO;
4273 }
4274 
4275 static void raid10_free(struct mddev *mddev, void *priv)
4276 {
4277 	struct r10conf *conf = priv;
4278 
4279 	mempool_exit(&conf->r10bio_pool);
4280 	safe_put_page(conf->tmppage);
4281 	kfree(conf->mirrors);
4282 	kfree(conf->mirrors_old);
4283 	kfree(conf->mirrors_new);
4284 	bioset_exit(&conf->bio_split);
4285 	kfree(conf);
4286 }
4287 
4288 static void raid10_quiesce(struct mddev *mddev, int quiesce)
4289 {
4290 	struct r10conf *conf = mddev->private;
4291 
4292 	if (quiesce)
4293 		raise_barrier(conf, 0);
4294 	else
4295 		lower_barrier(conf);
4296 }
4297 
4298 static int raid10_resize(struct mddev *mddev, sector_t sectors)
4299 {
4300 	/* Resize of 'far' arrays is not supported.
4301 	 * For 'near' and 'offset' arrays we can set the
4302 	 * number of sectors used to be an appropriate multiple
4303 	 * of the chunk size.
4304 	 * For 'offset', this is far_copies*chunksize.
4305 	 * For 'near' the multiplier is the LCM of
4306 	 * near_copies and raid_disks.
4307 	 * So if far_copies > 1 && !far_offset, fail.
4308 	 * Else find LCM(raid_disks, near_copies)*far_copies and
4309 	 * multiply by chunk_size.  Then round to this number.
4310 	 * This is mostly done by raid10_size()
4311 	 */
4312 	struct r10conf *conf = mddev->private;
4313 	sector_t oldsize, size;
4314 
4315 	if (mddev->reshape_position != MaxSector)
4316 		return -EBUSY;
4317 
4318 	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
4319 		return -EINVAL;
4320 
4321 	oldsize = raid10_size(mddev, 0, 0);
4322 	size = raid10_size(mddev, sectors, 0);
4323 	if (mddev->external_size &&
4324 	    mddev->array_sectors > size)
4325 		return -EINVAL;
4326 	if (mddev->bitmap) {
4327 		int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
4328 		if (ret)
4329 			return ret;
4330 	}
4331 	md_set_array_sectors(mddev, size);
4332 	if (sectors > mddev->dev_sectors &&
4333 	    mddev->recovery_cp > oldsize) {
4334 		mddev->recovery_cp = oldsize;
4335 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4336 	}
4337 	calc_sectors(conf, sectors);
4338 	mddev->dev_sectors = conf->dev_sectors;
4339 	mddev->resync_max_sectors = size;
4340 	return 0;
4341 }
4342 
4343 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4344 {
4345 	struct md_rdev *rdev;
4346 	struct r10conf *conf;
4347 
4348 	if (mddev->degraded > 0) {
4349 		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4350 			mdname(mddev));
4351 		return ERR_PTR(-EINVAL);
4352 	}
4353 	sector_div(size, devs);
4354 
4355 	/* Set new parameters */
4356 	mddev->new_level = 10;
4357 	/* new layout: far_copies = 1, near_copies = 2 */
4358 	mddev->new_layout = (1<<8) + 2;
4359 	mddev->new_chunk_sectors = mddev->chunk_sectors;
4360 	mddev->delta_disks = mddev->raid_disks;
4361 	mddev->raid_disks *= 2;
4362 	/* make sure it will not be marked as dirty */
4363 	mddev->recovery_cp = MaxSector;
4364 	mddev->dev_sectors = size;
4365 
4366 	conf = setup_conf(mddev);
4367 	if (!IS_ERR(conf)) {
4368 		rdev_for_each(rdev, mddev)
4369 			if (rdev->raid_disk >= 0) {
4370 				rdev->new_raid_disk = rdev->raid_disk * 2;
4371 				rdev->sectors = size;
4372 			}
4373 		conf->barrier = 1;
4374 	}
4375 
4376 	return conf;
4377 }
4378 
4379 static void *raid10_takeover(struct mddev *mddev)
4380 {
4381 	struct r0conf *raid0_conf;
4382 
4383 	/* raid10 can take over:
4384 	 *  raid0 - providing it has only two drives
4385 	 */
4386 	if (mddev->level == 0) {
4387 		/* for raid0 takeover only one zone is supported */
4388 		raid0_conf = mddev->private;
4389 		if (raid0_conf->nr_strip_zones > 1) {
4390 			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4391 				mdname(mddev));
4392 			return ERR_PTR(-EINVAL);
4393 		}
4394 		return raid10_takeover_raid0(mddev,
4395 			raid0_conf->strip_zone->zone_end,
4396 			raid0_conf->strip_zone->nb_dev);
4397 	}
4398 	return ERR_PTR(-EINVAL);
4399 }
4400 
4401 static int raid10_check_reshape(struct mddev *mddev)
4402 {
4403 	/* Called when there is a request to change
4404 	 * - layout (to ->new_layout)
4405 	 * - chunk size (to ->new_chunk_sectors)
4406 	 * - raid_disks (by delta_disks)
4407 	 * or when trying to restart a reshape that was ongoing.
4408 	 *
4409 	 * We need to validate the request and possibly allocate
4410 	 * space if that might be an issue later.
4411 	 *
4412 	 * Currently we reject any reshape of a 'far' mode array,
4413 	 * allow chunk size to change if new is generally acceptable,
4414 	 * allow raid_disks to increase, and allow
4415 	 * a switch between 'near' mode and 'offset' mode.
4416 	 */
4417 	struct r10conf *conf = mddev->private;
4418 	struct geom geo;
4419 
4420 	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4421 		return -EINVAL;
4422 
4423 	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4424 		/* mustn't change number of copies */
4425 		return -EINVAL;
4426 	if (geo.far_copies > 1 && !geo.far_offset)
4427 		/* Cannot switch to 'far' mode */
4428 		return -EINVAL;
4429 
4430 	if (mddev->array_sectors & geo.chunk_mask)
4431 			/* not factor of array size */
4432 			return -EINVAL;
4433 
4434 	if (!enough(conf, -1))
4435 		return -EINVAL;
4436 
4437 	kfree(conf->mirrors_new);
4438 	conf->mirrors_new = NULL;
4439 	if (mddev->delta_disks > 0) {
4440 		/* allocate new 'mirrors' list */
4441 		conf->mirrors_new =
4442 			kcalloc(mddev->raid_disks + mddev->delta_disks,
4443 				sizeof(struct raid10_info),
4444 				GFP_KERNEL);
4445 		if (!conf->mirrors_new)
4446 			return -ENOMEM;
4447 	}
4448 	return 0;
4449 }
4450 
4451 /*
4452  * Need to check if array has failed when deciding whether to:
4453  *  - start an array
4454  *  - remove non-faulty devices
4455  *  - add a spare
4456  *  - allow a reshape
4457  * This determination is simple when no reshape is happening.
4458  * However if there is a reshape, we need to carefully check
4459  * both the before and after sections.
4460  * This is because some failed devices may only affect one
4461  * of the two sections, and some non-in_sync devices may
4462  * be insync in the section most affected by failed devices.
4463  */
4464 static int calc_degraded(struct r10conf *conf)
4465 {
4466 	int degraded, degraded2;
4467 	int i;
4468 
4469 	rcu_read_lock();
4470 	degraded = 0;
4471 	/* 'prev' section first */
4472 	for (i = 0; i < conf->prev.raid_disks; i++) {
4473 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4474 		if (!rdev || test_bit(Faulty, &rdev->flags))
4475 			degraded++;
4476 		else if (!test_bit(In_sync, &rdev->flags))
4477 			/* Once we can reduce the number of devices in
4478 			 * an array, this might not need to contribute to
4479 			 * 'degraded'.  For now it does.
4480 			 */
4481 			degraded++;
4482 	}
4483 	rcu_read_unlock();
4484 	if (conf->geo.raid_disks == conf->prev.raid_disks)
4485 		return degraded;
4486 	rcu_read_lock();
4487 	degraded2 = 0;
4488 	for (i = 0; i < conf->geo.raid_disks; i++) {
4489 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4490 		if (!rdev || test_bit(Faulty, &rdev->flags))
4491 			degraded2++;
4492 		else if (!test_bit(In_sync, &rdev->flags)) {
4493 			/* If reshape is increasing the number of devices,
4494 			 * this section has already been recovered, so
4495 			 * it doesn't contribute to degraded.
4496 			 * else it does.
4497 			 */
4498 			if (conf->geo.raid_disks <= conf->prev.raid_disks)
4499 				degraded2++;
4500 		}
4501 	}
4502 	rcu_read_unlock();
4503 	if (degraded2 > degraded)
4504 		return degraded2;
4505 	return degraded;
4506 }
4507 
4508 static int raid10_start_reshape(struct mddev *mddev)
4509 {
4510 	/* A 'reshape' has been requested. This commits
4511 	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
4512 	 * This also checks if there are enough spares and adds them
4513 	 * to the array.
4514 	 * We currently require enough spares to make the final
4515 	 * array non-degraded.  We also require that the difference
4516 	 * between old and new data_offset - on each device - is
4517 	 * enough that we never risk over-writing.
4518 	 */
4519 
4520 	unsigned long before_length, after_length;
4521 	sector_t min_offset_diff = 0;
4522 	int first = 1;
4523 	struct geom new;
4524 	struct r10conf *conf = mddev->private;
4525 	struct md_rdev *rdev;
4526 	int spares = 0;
4527 	int ret;
4528 
4529 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4530 		return -EBUSY;
4531 
4532 	if (setup_geo(&new, mddev, geo_start) != conf->copies)
4533 		return -EINVAL;
4534 
4535 	before_length = ((1 << conf->prev.chunk_shift) *
4536 			 conf->prev.far_copies);
4537 	after_length = ((1 << conf->geo.chunk_shift) *
4538 			conf->geo.far_copies);
4539 
4540 	rdev_for_each(rdev, mddev) {
4541 		if (!test_bit(In_sync, &rdev->flags)
4542 		    && !test_bit(Faulty, &rdev->flags))
4543 			spares++;
4544 		if (rdev->raid_disk >= 0) {
4545 			long long diff = (rdev->new_data_offset
4546 					  - rdev->data_offset);
4547 			if (!mddev->reshape_backwards)
4548 				diff = -diff;
4549 			if (diff < 0)
4550 				diff = 0;
4551 			if (first || diff < min_offset_diff)
4552 				min_offset_diff = diff;
4553 			first = 0;
4554 		}
4555 	}
4556 
4557 	if (max(before_length, after_length) > min_offset_diff)
4558 		return -EINVAL;
4559 
4560 	if (spares < mddev->delta_disks)
4561 		return -EINVAL;
4562 
4563 	conf->offset_diff = min_offset_diff;
4564 	spin_lock_irq(&conf->device_lock);
4565 	if (conf->mirrors_new) {
4566 		memcpy(conf->mirrors_new, conf->mirrors,
4567 		       sizeof(struct raid10_info)*conf->prev.raid_disks);
4568 		smp_mb();
4569 		kfree(conf->mirrors_old);
4570 		conf->mirrors_old = conf->mirrors;
4571 		conf->mirrors = conf->mirrors_new;
4572 		conf->mirrors_new = NULL;
4573 	}
4574 	setup_geo(&conf->geo, mddev, geo_start);
4575 	smp_mb();
4576 	if (mddev->reshape_backwards) {
4577 		sector_t size = raid10_size(mddev, 0, 0);
4578 		if (size < mddev->array_sectors) {
4579 			spin_unlock_irq(&conf->device_lock);
4580 			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4581 				mdname(mddev));
4582 			return -EINVAL;
4583 		}
4584 		mddev->resync_max_sectors = size;
4585 		conf->reshape_progress = size;
4586 	} else
4587 		conf->reshape_progress = 0;
4588 	conf->reshape_safe = conf->reshape_progress;
4589 	spin_unlock_irq(&conf->device_lock);
4590 
4591 	if (mddev->delta_disks && mddev->bitmap) {
4592 		struct mdp_superblock_1 *sb = NULL;
4593 		sector_t oldsize, newsize;
4594 
4595 		oldsize = raid10_size(mddev, 0, 0);
4596 		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4597 
4598 		if (!mddev_is_clustered(mddev)) {
4599 			ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4600 			if (ret)
4601 				goto abort;
4602 			else
4603 				goto out;
4604 		}
4605 
4606 		rdev_for_each(rdev, mddev) {
4607 			if (rdev->raid_disk > -1 &&
4608 			    !test_bit(Faulty, &rdev->flags))
4609 				sb = page_address(rdev->sb_page);
4610 		}
4611 
4612 		/*
4613 		 * Some node is already performing the reshape, so there is no
4614 		 * need to call md_bitmap_resize again since it will be called
4615 		 * when the BITMAP_RESIZE msg is received.
4616 		 */
4617 		if ((sb && (le32_to_cpu(sb->feature_map) &
4618 			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4619 			goto out;
4620 
4621 		ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4622 		if (ret)
4623 			goto abort;
4624 
4625 		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4626 		if (ret) {
4627 			md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
4628 			goto abort;
4629 		}
4630 	}
4631 out:
4632 	if (mddev->delta_disks > 0) {
4633 		rdev_for_each(rdev, mddev)
4634 			if (rdev->raid_disk < 0 &&
4635 			    !test_bit(Faulty, &rdev->flags)) {
4636 				if (raid10_add_disk(mddev, rdev) == 0) {
4637 					if (rdev->raid_disk >=
4638 					    conf->prev.raid_disks)
4639 						set_bit(In_sync, &rdev->flags);
4640 					else
4641 						rdev->recovery_offset = 0;
4642 
4643 					/* Failure here is OK */
4644 					sysfs_link_rdev(mddev, rdev);
4645 				}
4646 			} else if (rdev->raid_disk >= conf->prev.raid_disks
4647 				   && !test_bit(Faulty, &rdev->flags)) {
4648 				/* This is a spare that was manually added */
4649 				set_bit(In_sync, &rdev->flags);
4650 			}
4651 	}
4652 	/* When a reshape changes the number of devices,
4653 	 * ->degraded is measured against the larger of the
4654 	 * pre and post numbers.
4655 	 */
4656 	spin_lock_irq(&conf->device_lock);
4657 	mddev->degraded = calc_degraded(conf);
4658 	spin_unlock_irq(&conf->device_lock);
4659 	mddev->raid_disks = conf->geo.raid_disks;
4660 	mddev->reshape_position = conf->reshape_progress;
4661 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4662 
4663 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4664 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4665 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4666 	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4667 	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4668 
4669 	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4670 						"reshape");
4671 	if (!mddev->sync_thread) {
4672 		ret = -EAGAIN;
4673 		goto abort;
4674 	}
4675 	conf->reshape_checkpoint = jiffies;
4676 	md_wakeup_thread(mddev->sync_thread);
4677 	md_new_event();
4678 	return 0;
4679 
4680 abort:
4681 	mddev->recovery = 0;
4682 	spin_lock_irq(&conf->device_lock);
4683 	conf->geo = conf->prev;
4684 	mddev->raid_disks = conf->geo.raid_disks;
4685 	rdev_for_each(rdev, mddev)
4686 		rdev->new_data_offset = rdev->data_offset;
4687 	smp_wmb();
4688 	conf->reshape_progress = MaxSector;
4689 	conf->reshape_safe = MaxSector;
4690 	mddev->reshape_position = MaxSector;
4691 	spin_unlock_irq(&conf->device_lock);
4692 	return ret;
4693 }
4694 
4695 /* Calculate the last device-address that could contain
4696  * any block from the chunk that includes the array-address 's'
4697  * and report the next address.
4698  * i.e. the address returned will be chunk-aligned and after
4699  * any data that is in the chunk containing 's'.
4700  */
4701 static sector_t last_dev_address(sector_t s, struct geom *geo)
4702 {
4703 	s = (s | geo->chunk_mask) + 1;
4704 	s >>= geo->chunk_shift;
4705 	s *= geo->near_copies;
4706 	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4707 	s *= geo->far_copies;
4708 	s <<= geo->chunk_shift;
4709 	return s;
4710 }
4711 
4712 /* Calculate the first device-address that could contain
4713  * any block from the chunk that includes the array-address 's'.
4714  * This too will be the start of a chunk
4715  */
4716 static sector_t first_dev_address(sector_t s, struct geom *geo)
4717 {
4718 	s >>= geo->chunk_shift;
4719 	s *= geo->near_copies;
4720 	sector_div(s, geo->raid_disks);
4721 	s *= geo->far_copies;
4722 	s <<= geo->chunk_shift;
4723 	return s;
4724 }
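/*
 * Worked example for the two helpers above, with an assumed geometry of
 * raid_disks = 4, near_copies = 2, far_copies = 1, chunk = 1024 sectors
 * (chunk_mask = 1023, chunk_shift = 10), and array address s = 5000, which
 * lies in chunk 4:
 *
 *   last_dev_address:  (5000 | 1023) + 1 = 5120 -> 5 chunks; * near_copies
 *                      = 10; DIV_ROUND_UP(10, 4) = 3; * far_copies = 3;
 *                      << 10 = 3072, so no block of that chunk can live at
 *                      or beyond device address 3072.
 *   first_dev_address: 5000 >> 10 = 4; * near_copies = 8; / raid_disks = 2;
 *                      * far_copies = 2; << 10 = 2048, the first device
 *                      address that could hold a block of that chunk.
 */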
4725 
4726 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4727 				int *skipped)
4728 {
4729 	/* We simply copy at most one chunk (smallest of old and new)
4730 	 * at a time, possibly less if that exceeds RESYNC_PAGES,
4731 	 * or we hit a bad block or something.
4732 	 * This might mean we pause for normal IO in the middle of
4733 	 * a chunk, but that is not a problem as mddev->reshape_position
4734 	 * can record any location.
4735 	 *
4736 	 * If we will want to write to a location that isn't
4737 	 * yet recorded as 'safe' (i.e. in metadata on disk) then
4738 	 * we need to flush all reshape requests and update the metadata.
4739 	 *
4740 	 * When reshaping forwards (e.g. to more devices), we interpret
4741 	 * 'safe' as the earliest block which might not have been copied
4742 	 * down yet.  We divide this by previous stripe size and multiply
4743 	 * by previous stripe length to get lowest device offset that we
4744 	 * cannot write to yet.
4745 	 * We interpret 'sector_nr' as an address that we want to write to.
4746 	 * From this we use last_dev_address() to find where we might
4747 	 * write to, and first_dev_address() on the 'safe' position.
4748 	 * If this 'next' write position is after the 'safe' position,
4749 	 * we must update the metadata to increase the 'safe' position.
4750 	 *
4751 	 * When reshaping backwards, we round in the opposite direction
4752 	 * and perform the reverse test:  next write position must not be
4753 	 * less than current safe position.
4754 	 *
4755 	 * In all this the minimum difference in data offsets
4756 	 * (conf->offset_diff - always positive) allows a bit of slack,
4757 	 * so next can be after 'safe', but not by more than offset_diff
4758 	 *
4759 	 * We need to prepare all the bios here before we start any IO
4760 	 * to ensure the size we choose is acceptable to all devices.
4761 	 * That means one for each copy for write-out and an extra one for
4762 	 * read-in.
4763 	 * We store the read-in bio in ->master_bio and the others in
4764 	 * ->devs[x].bio and ->devs[x].repl_bio.
4765 	 */
4766 	struct r10conf *conf = mddev->private;
4767 	struct r10bio *r10_bio;
4768 	sector_t next, safe, last;
4769 	int max_sectors;
4770 	int nr_sectors;
4771 	int s;
4772 	struct md_rdev *rdev;
4773 	int need_flush = 0;
4774 	struct bio *blist;
4775 	struct bio *bio, *read_bio;
4776 	int sectors_done = 0;
4777 	struct page **pages;
4778 
4779 	if (sector_nr == 0) {
4780 		/* If restarting in the middle, skip the initial sectors */
4781 		if (mddev->reshape_backwards &&
4782 		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4783 			sector_nr = (raid10_size(mddev, 0, 0)
4784 				     - conf->reshape_progress);
4785 		} else if (!mddev->reshape_backwards &&
4786 			   conf->reshape_progress > 0)
4787 			sector_nr = conf->reshape_progress;
4788 		if (sector_nr) {
4789 			mddev->curr_resync_completed = sector_nr;
4790 			sysfs_notify_dirent_safe(mddev->sysfs_completed);
4791 			*skipped = 1;
4792 			return sector_nr;
4793 		}
4794 	}
4795 
4796 	/* We don't use sector_nr to track where we are up to
4797 	 * as that doesn't work well for ->reshape_backwards.
4798 	 * So just use ->reshape_progress.
4799 	 */
4800 	if (mddev->reshape_backwards) {
4801 		/* 'next' is the earliest device address that we might
4802 		 * write to for this chunk in the new layout
4803 		 */
4804 		next = first_dev_address(conf->reshape_progress - 1,
4805 					 &conf->geo);
4806 
4807 		/* 'safe' is the last device address that we might read from
4808 		 * in the old layout after a restart
4809 		 */
4810 		safe = last_dev_address(conf->reshape_safe - 1,
4811 					&conf->prev);
4812 
4813 		if (next + conf->offset_diff < safe)
4814 			need_flush = 1;
4815 
4816 		last = conf->reshape_progress - 1;
4817 		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4818 					       & conf->prev.chunk_mask);
4819 		if (sector_nr + RESYNC_SECTORS < last)
4820 			sector_nr = last + 1 - RESYNC_SECTORS;
4821 	} else {
4822 		/* 'next' is after the last device address that we
4823 		 * might write to for this chunk in the new layout
4824 		 */
4825 		next = last_dev_address(conf->reshape_progress, &conf->geo);
4826 
4827 		/* 'safe' is the earliest device address that we might
4828 		 * read from in the old layout after a restart
4829 		 */
4830 		safe = first_dev_address(conf->reshape_safe, &conf->prev);
4831 
4832 		/* Need to update metadata if 'next' might be beyond 'safe'
4833 		 * as that would possibly corrupt data
4834 		 */
4835 		if (next > safe + conf->offset_diff)
4836 			need_flush = 1;
4837 
4838 		sector_nr = conf->reshape_progress;
4839 		last  = sector_nr | (conf->geo.chunk_mask
4840 				     & conf->prev.chunk_mask);
4841 
4842 		if (sector_nr + RESYNC_SECTORS <= last)
4843 			last = sector_nr + RESYNC_SECTORS - 1;
4844 	}
4845 
4846 	if (need_flush ||
4847 	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4848 		/* Need to update reshape_position in metadata */
4849 		wait_barrier(conf, false);
4850 		mddev->reshape_position = conf->reshape_progress;
4851 		if (mddev->reshape_backwards)
4852 			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4853 				- conf->reshape_progress;
4854 		else
4855 			mddev->curr_resync_completed = conf->reshape_progress;
4856 		conf->reshape_checkpoint = jiffies;
4857 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4858 		md_wakeup_thread(mddev->thread);
4859 		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4860 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4861 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4862 			allow_barrier(conf);
4863 			return sectors_done;
4864 		}
4865 		conf->reshape_safe = mddev->reshape_position;
4866 		allow_barrier(conf);
4867 	}
4868 
4869 	raise_barrier(conf, 0);
4870 read_more:
4871 	/* Now schedule reads for blocks from sector_nr to last */
4872 	r10_bio = raid10_alloc_init_r10buf(conf);
4873 	r10_bio->state = 0;
4874 	raise_barrier(conf, 1);
4875 	atomic_set(&r10_bio->remaining, 0);
4876 	r10_bio->mddev = mddev;
4877 	r10_bio->sector = sector_nr;
4878 	set_bit(R10BIO_IsReshape, &r10_bio->state);
4879 	r10_bio->sectors = last - sector_nr + 1;
4880 	rdev = read_balance(conf, r10_bio, &max_sectors);
4881 	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4882 
4883 	if (!rdev) {
4884 		/* Cannot read from here, so need to record bad blocks
4885 		 * on all the target devices.
4886 		 */
4887 		// FIXME
4888 		mempool_free(r10_bio, &conf->r10buf_pool);
4889 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4890 		return sectors_done;
4891 	}
4892 
4893 	read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
4894 				    GFP_KERNEL, &mddev->bio_set);
4895 	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4896 			       + rdev->data_offset);
4897 	read_bio->bi_private = r10_bio;
4898 	read_bio->bi_end_io = end_reshape_read;
4899 	r10_bio->master_bio = read_bio;
4900 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4901 
4902 	/*
4903 	 * Broadcast the RESYNC message to other nodes so that they do not
4904 	 * write to the region, to avoid conflicts.
4905 	 */
4906 	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4907 		struct mdp_superblock_1 *sb = NULL;
4908 		int sb_reshape_pos = 0;
4909 
4910 		conf->cluster_sync_low = sector_nr;
4911 		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4912 		sb = page_address(rdev->sb_page);
4913 		if (sb) {
4914 			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4915 			/*
4916 			 * Set cluster_sync_low again if the next reshape address
4917 			 * is less than cluster_sync_low, since we can't update
4918 			 * cluster_sync_low until that reshape has finished.
4919 			 */
4920 			if (sb_reshape_pos < conf->cluster_sync_low)
4921 				conf->cluster_sync_low = sb_reshape_pos;
4922 		}
4923 
4924 		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4925 							  conf->cluster_sync_high);
4926 	}
4927 
4928 	/* Now find the locations in the new layout */
4929 	__raid10_find_phys(&conf->geo, r10_bio);
4930 
4931 	blist = read_bio;
4932 	read_bio->bi_next = NULL;
4933 
4934 	rcu_read_lock();
4935 	for (s = 0; s < conf->copies*2; s++) {
4936 		struct bio *b;
4937 		int d = r10_bio->devs[s/2].devnum;
4938 		struct md_rdev *rdev2;
4939 		if (s&1) {
4940 			rdev2 = rcu_dereference(conf->mirrors[d].replacement);
4941 			b = r10_bio->devs[s/2].repl_bio;
4942 		} else {
4943 			rdev2 = rcu_dereference(conf->mirrors[d].rdev);
4944 			b = r10_bio->devs[s/2].bio;
4945 		}
4946 		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4947 			continue;
4948 
4949 		bio_set_dev(b, rdev2->bdev);
4950 		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4951 			rdev2->new_data_offset;
4952 		b->bi_end_io = end_reshape_write;
4953 		bio_set_op_attrs(b, REQ_OP_WRITE, 0);
4954 		b->bi_next = blist;
4955 		blist = b;
4956 	}
4957 
4958 	/* Now add as many pages as possible to all of these bios. */
4959 
4960 	nr_sectors = 0;
4961 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4962 	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4963 		struct page *page = pages[s / (PAGE_SIZE >> 9)];
4964 		int len = (max_sectors - s) << 9;
4965 		if (len > PAGE_SIZE)
4966 			len = PAGE_SIZE;
4967 		for (bio = blist; bio ; bio = bio->bi_next) {
4968 			/*
4969 			 * won't fail because the vec table is big enough
4970 			 * to hold all these pages
4971 			 */
4972 			bio_add_page(bio, page, len, 0);
4973 		}
4974 		sector_nr += len >> 9;
4975 		nr_sectors += len >> 9;
4976 	}
4977 	rcu_read_unlock();
4978 	r10_bio->sectors = nr_sectors;
4979 
4980 	/* Now submit the read */
4981 	md_sync_acct_bio(read_bio, r10_bio->sectors);
4982 	atomic_inc(&r10_bio->remaining);
4983 	read_bio->bi_next = NULL;
4984 	submit_bio_noacct(read_bio);
4985 	sectors_done += nr_sectors;
4986 	if (sector_nr <= last)
4987 		goto read_more;
4988 
4989 	lower_barrier(conf);
4990 
4991 	/* Now that we have done the whole section we can
4992 	 * update reshape_progress
4993 	 */
4994 	if (mddev->reshape_backwards)
4995 		conf->reshape_progress -= sectors_done;
4996 	else
4997 		conf->reshape_progress += sectors_done;
4998 
4999 	return sectors_done;
5000 }
5001 
5002 static void end_reshape_request(struct r10bio *r10_bio);
5003 static int handle_reshape_read_error(struct mddev *mddev,
5004 				     struct r10bio *r10_bio);
5005 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
5006 {
5007 	/* Reshape read completed.  Hopefully we have a block
5008 	 * to write out.
5009 	 * If we got a read error then we do sync 1-page reads from
5010 	 * elsewhere until we find the data - or give up.
5011 	 */
5012 	struct r10conf *conf = mddev->private;
5013 	int s;
5014 
5015 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
5016 		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
5017 			/* Reshape has been aborted */
5018 			md_done_sync(mddev, r10_bio->sectors, 0);
5019 			return;
5020 		}
5021 
5022 	/* We definitely have the data in the pages, schedule the
5023 	 * writes.
5024 	 */
5025 	atomic_set(&r10_bio->remaining, 1);
5026 	for (s = 0; s < conf->copies*2; s++) {
5027 		struct bio *b;
5028 		int d = r10_bio->devs[s/2].devnum;
5029 		struct md_rdev *rdev;
5030 		rcu_read_lock();
5031 		if (s&1) {
5032 			rdev = rcu_dereference(conf->mirrors[d].replacement);
5033 			b = r10_bio->devs[s/2].repl_bio;
5034 		} else {
5035 			rdev = rcu_dereference(conf->mirrors[d].rdev);
5036 			b = r10_bio->devs[s/2].bio;
5037 		}
5038 		if (!rdev || test_bit(Faulty, &rdev->flags)) {
5039 			rcu_read_unlock();
5040 			continue;
5041 		}
5042 		atomic_inc(&rdev->nr_pending);
5043 		rcu_read_unlock();
5044 		md_sync_acct_bio(b, r10_bio->sectors);
5045 		atomic_inc(&r10_bio->remaining);
5046 		b->bi_next = NULL;
5047 		submit_bio_noacct(b);
5048 	}
5049 	end_reshape_request(r10_bio);
5050 }
5051 
5052 static void end_reshape(struct r10conf *conf)
5053 {
5054 	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
5055 		return;
5056 
5057 	spin_lock_irq(&conf->device_lock);
5058 	conf->prev = conf->geo;
5059 	md_finish_reshape(conf->mddev);
5060 	smp_wmb();
5061 	conf->reshape_progress = MaxSector;
5062 	conf->reshape_safe = MaxSector;
5063 	spin_unlock_irq(&conf->device_lock);
5064 
5065 	if (conf->mddev->queue)
5066 		raid10_set_io_opt(conf);
5067 	conf->fullsync = 0;
5068 }
5069 
5070 static void raid10_update_reshape_pos(struct mddev *mddev)
5071 {
5072 	struct r10conf *conf = mddev->private;
5073 	sector_t lo, hi;
5074 
5075 	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
5076 	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
5077 	    || mddev->reshape_position == MaxSector)
5078 		conf->reshape_progress = mddev->reshape_position;
5079 	else
5080 		WARN_ON_ONCE(1);
5081 }
5082 
5083 static int handle_reshape_read_error(struct mddev *mddev,
5084 				     struct r10bio *r10_bio)
5085 {
5086 	/* Use sync reads to get the blocks from somewhere else */
5087 	int sectors = r10_bio->sectors;
5088 	struct r10conf *conf = mddev->private;
5089 	struct r10bio *r10b;
5090 	int slot = 0;
5091 	int idx = 0;
5092 	struct page **pages;
5093 
5094 	r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
5095 	if (!r10b) {
5096 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5097 		return -ENOMEM;
5098 	}
5099 
5100 	/* reshape IOs share pages from .devs[0].bio */
5101 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5102 
5103 	r10b->sector = r10_bio->sector;
5104 	__raid10_find_phys(&conf->prev, r10b);
5105 
5106 	while (sectors) {
5107 		int s = sectors;
5108 		int success = 0;
5109 		int first_slot = slot;
5110 
5111 		if (s > (PAGE_SIZE >> 9))
5112 			s = PAGE_SIZE >> 9;
5113 
5114 		rcu_read_lock();
5115 		while (!success) {
5116 			int d = r10b->devs[slot].devnum;
5117 			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
5118 			sector_t addr;
5119 			if (rdev == NULL ||
5120 			    test_bit(Faulty, &rdev->flags) ||
5121 			    !test_bit(In_sync, &rdev->flags))
5122 				goto failed;
5123 
5124 			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
5125 			atomic_inc(&rdev->nr_pending);
5126 			rcu_read_unlock();
5127 			success = sync_page_io(rdev,
5128 					       addr,
5129 					       s << 9,
5130 					       pages[idx],
5131 					       REQ_OP_READ, 0, false);
5132 			rdev_dec_pending(rdev, mddev);
5133 			rcu_read_lock();
5134 			if (success)
5135 				break;
5136 		failed:
5137 			slot++;
5138 			if (slot >= conf->copies)
5139 				slot = 0;
5140 			if (slot == first_slot)
5141 				break;
5142 		}
5143 		rcu_read_unlock();
5144 		if (!success) {
5145 			/* couldn't read this block, must give up */
5146 			set_bit(MD_RECOVERY_INTR,
5147 				&mddev->recovery);
5148 			kfree(r10b);
5149 			return -EIO;
5150 		}
5151 		sectors -= s;
5152 		idx++;
5153 	}
5154 	kfree(r10b);
5155 	return 0;
5156 }
5157 
5158 static void end_reshape_write(struct bio *bio)
5159 {
5160 	struct r10bio *r10_bio = get_resync_r10bio(bio);
5161 	struct mddev *mddev = r10_bio->mddev;
5162 	struct r10conf *conf = mddev->private;
5163 	int d;
5164 	int slot;
5165 	int repl;
5166 	struct md_rdev *rdev = NULL;
5167 
5168 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
5169 	if (repl)
5170 		rdev = conf->mirrors[d].replacement;
5171 	if (!rdev) {
5172 		smp_mb();
5173 		rdev = conf->mirrors[d].rdev;
5174 	}
5175 
5176 	if (bio->bi_status) {
5177 		/* FIXME should record badblock */
5178 		md_error(mddev, rdev);
5179 	}
5180 
5181 	rdev_dec_pending(rdev, mddev);
5182 	end_reshape_request(r10_bio);
5183 }
5184 
5185 static void end_reshape_request(struct r10bio *r10_bio)
5186 {
5187 	if (!atomic_dec_and_test(&r10_bio->remaining))
5188 		return;
5189 	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
5190 	bio_put(r10_bio->master_bio);
5191 	put_buf(r10_bio);
5192 }
5193 
5194 static void raid10_finish_reshape(struct mddev *mddev)
5195 {
5196 	struct r10conf *conf = mddev->private;
5197 
5198 	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5199 		return;
5200 
5201 	if (mddev->delta_disks > 0) {
5202 		if (mddev->recovery_cp > mddev->resync_max_sectors) {
5203 			mddev->recovery_cp = mddev->resync_max_sectors;
5204 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5205 		}
5206 		mddev->resync_max_sectors = mddev->array_sectors;
5207 	} else {
5208 		int d;
5209 		rcu_read_lock();
5210 		for (d = conf->geo.raid_disks ;
5211 		     d < conf->geo.raid_disks - mddev->delta_disks;
5212 		     d++) {
5213 			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
5214 			if (rdev)
5215 				clear_bit(In_sync, &rdev->flags);
5216 			rdev = rcu_dereference(conf->mirrors[d].replacement);
5217 			if (rdev)
5218 				clear_bit(In_sync, &rdev->flags);
5219 		}
5220 		rcu_read_unlock();
5221 	}
5222 	mddev->layout = mddev->new_layout;
5223 	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
5224 	mddev->reshape_position = MaxSector;
5225 	mddev->delta_disks = 0;
5226 	mddev->reshape_backwards = 0;
5227 }
5228 
5229 static struct md_personality raid10_personality =
5230 {
5231 	.name		= "raid10",
5232 	.level		= 10,
5233 	.owner		= THIS_MODULE,
5234 	.make_request	= raid10_make_request,
5235 	.run		= raid10_run,
5236 	.free		= raid10_free,
5237 	.status		= raid10_status,
5238 	.error_handler	= raid10_error,
5239 	.hot_add_disk	= raid10_add_disk,
5240 	.hot_remove_disk= raid10_remove_disk,
5241 	.spare_active	= raid10_spare_active,
5242 	.sync_request	= raid10_sync_request,
5243 	.quiesce	= raid10_quiesce,
5244 	.size		= raid10_size,
5245 	.resize		= raid10_resize,
5246 	.takeover	= raid10_takeover,
5247 	.check_reshape	= raid10_check_reshape,
5248 	.start_reshape	= raid10_start_reshape,
5249 	.finish_reshape	= raid10_finish_reshape,
5250 	.update_reshape_pos = raid10_update_reshape_pos,
5251 };
5252 
5253 static int __init raid_init(void)
5254 {
5255 	return register_md_personality(&raid10_personality);
5256 }
5257 
5258 static void raid_exit(void)
5259 {
5260 	unregister_md_personality(&raid10_personality);
5261 }
5262 
5263 module_init(raid_init);
5264 module_exit(raid_exit);
5265 MODULE_LICENSE("GPL");
5266 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
5267 MODULE_ALIAS("md-personality-9"); /* RAID10 */
5268 MODULE_ALIAS("md-raid10");
5269 MODULE_ALIAS("md-level-10");
5270