xref: /linux/drivers/md/raid1.c (revision 3e7819886281e077e82006fe4804b0d6b0f5643b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * raid1.c : Multiple Devices driver for Linux
4  *
5  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
6  *
7  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
8  *
9  * RAID-1 management functions.
10  *
11  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
12  *
13  * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk>
14  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
15  *
16  * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
17  * bitmapped intelligence in resync:
18  *
19  *      - bitmap marked during normal i/o
20  *      - bitmap used to skip nondirty blocks during sync
21  *
22  * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
23  * - persistent bitmap code
24  */
25 
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/blkdev.h>
29 #include <linux/module.h>
30 #include <linux/seq_file.h>
31 #include <linux/ratelimit.h>
32 #include <linux/interval_tree_generic.h>
33 
34 #include <trace/events/block.h>
35 
36 #include "md.h"
37 #include "raid1.h"
38 #include "md-bitmap.h"
39 
40 #define UNSUPPORTED_MDDEV_FLAGS		\
41 	((1L << MD_HAS_JOURNAL) |	\
42 	 (1L << MD_JOURNAL_CLEAN) |	\
43 	 (1L << MD_HAS_PPL) |		\
44 	 (1L << MD_HAS_MULTIPLE_PPLS))
45 
46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
48 
49 #define RAID_1_10_NAME "raid1"
50 #include "raid1-10.c"
51 
52 #define START(node) ((node)->start)
53 #define LAST(node) ((node)->last)
54 INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
55 		     START, LAST, static inline, raid1_rb);
56 
57 static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
58 				struct serial_info *si, int idx)
59 {
60 	unsigned long flags;
61 	int ret = 0;
62 	sector_t lo = r1_bio->sector;
63 	sector_t hi = lo + r1_bio->sectors;
64 	struct serial_in_rdev *serial = &rdev->serial[idx];
65 
66 	spin_lock_irqsave(&serial->serial_lock, flags);
67 	/* collision happened */
68 	if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
69 		ret = -EBUSY;
70 	else {
71 		si->start = lo;
72 		si->last = hi;
73 		raid1_rb_insert(si, &serial->serial_rb);
74 	}
75 	spin_unlock_irqrestore(&serial->serial_lock, flags);
76 
77 	return ret;
78 }
79 
80 static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
81 {
82 	struct mddev *mddev = rdev->mddev;
83 	struct serial_info *si;
84 	int idx = sector_to_idx(r1_bio->sector);
85 	struct serial_in_rdev *serial = &rdev->serial[idx];
86 
87 	if (WARN_ON(!mddev->serial_info_pool))
88 		return;
89 	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
90 	wait_event(serial->serial_io_wait,
91 		   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
92 }
93 
94 static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
95 {
96 	struct serial_info *si;
97 	unsigned long flags;
98 	int found = 0;
99 	struct mddev *mddev = rdev->mddev;
100 	int idx = sector_to_idx(lo);
101 	struct serial_in_rdev *serial = &rdev->serial[idx];
102 
103 	spin_lock_irqsave(&serial->serial_lock, flags);
104 	for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
105 	     si; si = raid1_rb_iter_next(si, lo, hi)) {
106 		if (si->start == lo && si->last == hi) {
107 			raid1_rb_remove(si, &serial->serial_rb);
108 			mempool_free(si, mddev->serial_info_pool);
109 			found = 1;
110 			break;
111 		}
112 	}
113 	if (!found)
114 		WARN(1, "The write IO is not recorded for serialization\n");
115 	spin_unlock_irqrestore(&serial->serial_lock, flags);
116 	wake_up(&serial->serial_io_wait);
117 }
118 
119 /*
120  * for resync bio, r1bio pointer can be retrieved from the per-bio
121  * 'struct resync_pages'.
122  */
123 static inline struct r1bio *get_resync_r1bio(struct bio *bio)
124 {
125 	return get_resync_pages(bio)->raid_bio;
126 }
127 
128 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
129 {
130 	struct pool_info *pi = data;
131 	int size = offsetof(struct r1bio, bios[pi->raid_disks]);
132 
133 	/* allocate a r1bio with room for raid_disks entries in the bios array */
134 	return kzalloc(size, gfp_flags);
135 }
136 
137 #define RESYNC_DEPTH 32
138 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
139 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
140 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
141 #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
142 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
143 
144 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
145 {
146 	struct pool_info *pi = data;
147 	struct r1bio *r1_bio;
148 	struct bio *bio;
149 	int need_pages;
150 	int j;
151 	struct resync_pages *rps;
152 
153 	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
154 	if (!r1_bio)
155 		return NULL;
156 
157 	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
158 			    gfp_flags);
159 	if (!rps)
160 		goto out_free_r1bio;
161 
162 	/*
163 	 * Allocate bios : 1 for reading, n-1 for writing
164 	 */
165 	for (j = pi->raid_disks ; j-- ; ) {
166 		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
167 		if (!bio)
168 			goto out_free_bio;
169 		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
170 		r1_bio->bios[j] = bio;
171 	}
172 	/*
173 	 * Allocate RESYNC_PAGES data pages and attach them to
174 	 * the first bio.
175 	 * If this is a user-requested check/repair, allocate
176 	 * RESYNC_PAGES for each bio.
177 	 */
178 	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
179 		need_pages = pi->raid_disks;
180 	else
181 		need_pages = 1;
182 	for (j = 0; j < pi->raid_disks; j++) {
183 		struct resync_pages *rp = &rps[j];
184 
185 		bio = r1_bio->bios[j];
186 
187 		if (j < need_pages) {
188 			if (resync_alloc_pages(rp, gfp_flags))
189 				goto out_free_pages;
190 		} else {
191 			memcpy(rp, &rps[0], sizeof(*rp));
192 			resync_get_all_pages(rp);
193 		}
194 
195 		rp->raid_bio = r1_bio;
196 		bio->bi_private = rp;
197 	}
198 
199 	r1_bio->master_bio = NULL;
200 
201 	return r1_bio;
202 
203 out_free_pages:
204 	while (--j >= 0)
205 		resync_free_pages(&rps[j]);
206 
207 out_free_bio:
208 	while (++j < pi->raid_disks) {
209 		bio_uninit(r1_bio->bios[j]);
210 		kfree(r1_bio->bios[j]);
211 	}
212 	kfree(rps);
213 
214 out_free_r1bio:
215 	rbio_pool_free(r1_bio, data);
216 	return NULL;
217 }
218 
219 static void r1buf_pool_free(void *__r1_bio, void *data)
220 {
221 	struct pool_info *pi = data;
222 	int i;
223 	struct r1bio *r1bio = __r1_bio;
224 	struct resync_pages *rp = NULL;
225 
226 	for (i = pi->raid_disks; i--; ) {
227 		rp = get_resync_pages(r1bio->bios[i]);
228 		resync_free_pages(rp);
229 		bio_uninit(r1bio->bios[i]);
230 		kfree(r1bio->bios[i]);
231 	}
232 
233 	/* resync pages array stored in the 1st bio's .bi_private */
234 	kfree(rp);
235 
236 	rbio_pool_free(r1bio, data);
237 }
238 
239 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
240 {
241 	int i;
242 
243 	for (i = 0; i < conf->raid_disks * 2; i++) {
244 		struct bio **bio = r1_bio->bios + i;
245 		if (!BIO_SPECIAL(*bio))
246 			bio_put(*bio);
247 		*bio = NULL;
248 	}
249 }
250 
251 static void free_r1bio(struct r1bio *r1_bio)
252 {
253 	struct r1conf *conf = r1_bio->mddev->private;
254 
255 	put_all_bios(conf, r1_bio);
256 	mempool_free(r1_bio, &conf->r1bio_pool);
257 }
258 
259 static void put_buf(struct r1bio *r1_bio)
260 {
261 	struct r1conf *conf = r1_bio->mddev->private;
262 	sector_t sect = r1_bio->sector;
263 	int i;
264 
265 	for (i = 0; i < conf->raid_disks * 2; i++) {
266 		struct bio *bio = r1_bio->bios[i];
267 		if (bio->bi_end_io)
268 			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
269 	}
270 
271 	mempool_free(r1_bio, &conf->r1buf_pool);
272 
273 	lower_barrier(conf, sect);
274 }
275 
276 static void reschedule_retry(struct r1bio *r1_bio)
277 {
278 	unsigned long flags;
279 	struct mddev *mddev = r1_bio->mddev;
280 	struct r1conf *conf = mddev->private;
281 	int idx;
282 
283 	idx = sector_to_idx(r1_bio->sector);
284 	spin_lock_irqsave(&conf->device_lock, flags);
285 	list_add(&r1_bio->retry_list, &conf->retry_list);
286 	atomic_inc(&conf->nr_queued[idx]);
287 	spin_unlock_irqrestore(&conf->device_lock, flags);
288 
289 	wake_up(&conf->wait_barrier);
290 	md_wakeup_thread(mddev->thread);
291 }
292 
293 /*
294  * raid_end_bio_io() is called when we have finished servicing a mirrored
295  * operation and are ready to return a success/failure code to the buffer
296  * cache layer.
297  */
298 static void call_bio_endio(struct r1bio *r1_bio)
299 {
300 	struct bio *bio = r1_bio->master_bio;
301 
302 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
303 		bio->bi_status = BLK_STS_IOERR;
304 
305 	bio_endio(bio);
306 }
307 
308 static void raid_end_bio_io(struct r1bio *r1_bio)
309 {
310 	struct bio *bio = r1_bio->master_bio;
311 	struct r1conf *conf = r1_bio->mddev->private;
312 	sector_t sector = r1_bio->sector;
313 
314 	/* if nobody has done the final endio yet, do it now */
315 	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
316 		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
317 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
318 			 (unsigned long long) bio->bi_iter.bi_sector,
319 			 (unsigned long long) bio_end_sector(bio) - 1);
320 
321 		call_bio_endio(r1_bio);
322 	}
323 
324 	free_r1bio(r1_bio);
325 	/*
326 	 * Wake up any possible resync thread that waits for the device
327 	 * to go idle.  All I/Os, even write-behind writes, are done.
328 	 */
329 	allow_barrier(conf, sector);
330 }
331 
332 /*
333  * Update disk head position estimator based on IRQ completion info.
334  */
335 static inline void update_head_pos(int disk, struct r1bio *r1_bio)
336 {
337 	struct r1conf *conf = r1_bio->mddev->private;
338 
339 	conf->mirrors[disk].head_position =
340 		r1_bio->sector + (r1_bio->sectors);
341 }
342 
343 /*
344  * Find the disk number which triggered given bio
345  */
346 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
347 {
348 	int mirror;
349 	struct r1conf *conf = r1_bio->mddev->private;
350 	int raid_disks = conf->raid_disks;
351 
352 	for (mirror = 0; mirror < raid_disks * 2; mirror++)
353 		if (r1_bio->bios[mirror] == bio)
354 			break;
355 
356 	BUG_ON(mirror == raid_disks * 2);
357 	update_head_pos(mirror, r1_bio);
358 
359 	return mirror;
360 }
361 
362 static void raid1_end_read_request(struct bio *bio)
363 {
364 	int uptodate = !bio->bi_status;
365 	struct r1bio *r1_bio = bio->bi_private;
366 	struct r1conf *conf = r1_bio->mddev->private;
367 	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
368 
369 	/*
370 	 * this branch is our 'one mirror IO has finished' event handler:
371 	 */
372 	update_head_pos(r1_bio->read_disk, r1_bio);
373 
374 	if (uptodate)
375 		set_bit(R1BIO_Uptodate, &r1_bio->state);
376 	else if (test_bit(FailFast, &rdev->flags) &&
377 		 test_bit(R1BIO_FailFast, &r1_bio->state))
378 		/* This was a fail-fast read so we definitely
379 		 * want to retry */
380 		;
381 	else {
382 		/* If all other devices have failed, we want to return
383 		 * the error upwards rather than fail the last device.
384 		 * Here we redefine "uptodate" to mean "Don't want to retry"
385 		 */
386 		unsigned long flags;
387 		spin_lock_irqsave(&conf->device_lock, flags);
388 		if (r1_bio->mddev->degraded == conf->raid_disks ||
389 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
390 		     test_bit(In_sync, &rdev->flags)))
391 			uptodate = 1;
392 		spin_unlock_irqrestore(&conf->device_lock, flags);
393 	}
394 
395 	if (uptodate) {
396 		raid_end_bio_io(r1_bio);
397 		rdev_dec_pending(rdev, conf->mddev);
398 	} else {
399 		/*
400 		 * oops, read error:
401 		 */
402 		pr_err_ratelimited("md/raid1:%s: %pg: rescheduling sector %llu\n",
403 				   mdname(conf->mddev),
404 				   rdev->bdev,
405 				   (unsigned long long)r1_bio->sector);
406 		set_bit(R1BIO_ReadError, &r1_bio->state);
407 		reschedule_retry(r1_bio);
408 		/* don't drop the reference on read_disk yet */
409 	}
410 }
411 
412 static void close_write(struct r1bio *r1_bio)
413 {
414 	/* it really is the end of this request */
415 	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
416 		bio_free_pages(r1_bio->behind_master_bio);
417 		bio_put(r1_bio->behind_master_bio);
418 		r1_bio->behind_master_bio = NULL;
419 	}
420 	/* clear the bitmap if all writes complete successfully */
421 	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
422 			   r1_bio->sectors,
423 			   !test_bit(R1BIO_Degraded, &r1_bio->state),
424 			   test_bit(R1BIO_BehindIO, &r1_bio->state));
425 	md_write_end(r1_bio->mddev);
426 }
427 
428 static void r1_bio_write_done(struct r1bio *r1_bio)
429 {
430 	if (!atomic_dec_and_test(&r1_bio->remaining))
431 		return;
432 
433 	if (test_bit(R1BIO_WriteError, &r1_bio->state))
434 		reschedule_retry(r1_bio);
435 	else {
436 		close_write(r1_bio);
437 		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
438 			reschedule_retry(r1_bio);
439 		else
440 			raid_end_bio_io(r1_bio);
441 	}
442 }
443 
444 static void raid1_end_write_request(struct bio *bio)
445 {
446 	struct r1bio *r1_bio = bio->bi_private;
447 	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
448 	struct r1conf *conf = r1_bio->mddev->private;
449 	struct bio *to_put = NULL;
450 	int mirror = find_bio_disk(r1_bio, bio);
451 	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
452 	bool discard_error;
453 	sector_t lo = r1_bio->sector;
454 	sector_t hi = r1_bio->sector + r1_bio->sectors;
455 
456 	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
457 
458 	/*
459 	 * 'one mirror IO has finished' event handler:
460 	 */
461 	if (bio->bi_status && !discard_error) {
462 		set_bit(WriteErrorSeen,	&rdev->flags);
463 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
464 			set_bit(MD_RECOVERY_NEEDED, &
465 				conf->mddev->recovery);
466 
467 		if (test_bit(FailFast, &rdev->flags) &&
468 		    (bio->bi_opf & MD_FAILFAST) &&
469 		    /* We never try FailFast to WriteMostly devices */
470 		    !test_bit(WriteMostly, &rdev->flags)) {
471 			md_error(r1_bio->mddev, rdev);
472 		}
473 
474 		/*
475 		 * When the device is faulty, it is not necessary to
476 		 * handle write error.
477 		 */
478 		if (!test_bit(Faulty, &rdev->flags))
479 			set_bit(R1BIO_WriteError, &r1_bio->state);
480 		else {
481 			/* Fail the request */
482 			set_bit(R1BIO_Degraded, &r1_bio->state);
483 			/* Finished with this branch */
484 			r1_bio->bios[mirror] = NULL;
485 			to_put = bio;
486 		}
487 	} else {
488 		/*
489 		 * Set R1BIO_Uptodate in our master bio, so that we
490 		 * will return a good error code for to the higher
491 		 * levels even if IO on some other mirrored buffer
492 		 * fails.
493 		 *
494 		 * The 'master' represents the composite IO operation
495 		 * to user-side. So if something waits for IO, then it
496 		 * will wait for the 'master' bio.
497 		 */
498 		r1_bio->bios[mirror] = NULL;
499 		to_put = bio;
500 		/*
501 		 * Do not set R1BIO_Uptodate if the current device is
502 		 * rebuilding or Faulty. This is because we cannot use
503 		 * such device for properly reading the data back (we could
504 		 * potentially use it, if the current write would have felt
505 		 * before rdev->recovery_offset, but for simplicity we don't
506 		 * check this here.
507 		 */
508 		if (test_bit(In_sync, &rdev->flags) &&
509 		    !test_bit(Faulty, &rdev->flags))
510 			set_bit(R1BIO_Uptodate, &r1_bio->state);
511 
512 		/* Maybe we can clear some bad blocks. */
513 		if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
514 		    !discard_error) {
515 			r1_bio->bios[mirror] = IO_MADE_GOOD;
516 			set_bit(R1BIO_MadeGood, &r1_bio->state);
517 		}
518 	}
519 
520 	if (behind) {
521 		if (test_bit(CollisionCheck, &rdev->flags))
522 			remove_serial(rdev, lo, hi);
523 		if (test_bit(WriteMostly, &rdev->flags))
524 			atomic_dec(&r1_bio->behind_remaining);
525 
526 		/*
527 		 * In behind mode, we ACK the master bio once the I/O
528 		 * has safely reached all non-writemostly
529 		 * disks. Setting the Returned bit ensures that this
530 		 * gets done only once -- we don't ever want to return
531 		 * -EIO here, instead we'll wait
532 		 */
533 		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
534 		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
535 			/* Maybe we can return now */
536 			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
537 				struct bio *mbio = r1_bio->master_bio;
538 				pr_debug("raid1: behind end write sectors"
539 					 " %llu-%llu\n",
540 					 (unsigned long long) mbio->bi_iter.bi_sector,
541 					 (unsigned long long) bio_end_sector(mbio) - 1);
542 				call_bio_endio(r1_bio);
543 			}
544 		}
545 	} else if (rdev->mddev->serialize_policy)
546 		remove_serial(rdev, lo, hi);
547 	if (r1_bio->bios[mirror] == NULL)
548 		rdev_dec_pending(rdev, conf->mddev);
549 
550 	/*
551 	 * Let's see if all mirrored write operations have finished
552 	 * already.
553 	 */
554 	r1_bio_write_done(r1_bio);
555 
556 	if (to_put)
557 		bio_put(to_put);
558 }
559 
560 static sector_t align_to_barrier_unit_end(sector_t start_sector,
561 					  sector_t sectors)
562 {
563 	sector_t len;
564 
565 	WARN_ON(sectors == 0);
566 	/*
567 	 * len is the number of sectors from start_sector to end of the
568 	 * barrier unit which start_sector belongs to.
569 	 */
570 	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
571 	      start_sector;
572 
573 	if (len > sectors)
574 		len = sectors;
575 
576 	return len;
577 }
578 
579 static void update_read_sectors(struct r1conf *conf, int disk,
580 				sector_t this_sector, int len)
581 {
582 	struct raid1_info *info = &conf->mirrors[disk];
583 
584 	atomic_inc(&info->rdev->nr_pending);
585 	if (info->next_seq_sect != this_sector)
586 		info->seq_start = this_sector;
587 	info->next_seq_sect = this_sector + len;
588 }
589 
590 static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
591 			     int *max_sectors)
592 {
593 	sector_t this_sector = r1_bio->sector;
594 	int len = r1_bio->sectors;
595 	int disk;
596 
597 	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
598 		struct md_rdev *rdev;
599 		int read_len;
600 
601 		if (r1_bio->bios[disk] == IO_BLOCKED)
602 			continue;
603 
604 		rdev = conf->mirrors[disk].rdev;
605 		if (!rdev || test_bit(Faulty, &rdev->flags))
606 			continue;
607 
608 		/* choose the first disk even if it has some bad blocks. */
609 		read_len = raid1_check_read_range(rdev, this_sector, &len);
610 		if (read_len > 0) {
611 			update_read_sectors(conf, disk, this_sector, read_len);
612 			*max_sectors = read_len;
613 			return disk;
614 		}
615 	}
616 
617 	return -1;
618 }
619 
620 static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
621 			  int *max_sectors)
622 {
623 	sector_t this_sector = r1_bio->sector;
624 	int best_disk = -1;
625 	int best_len = 0;
626 	int disk;
627 
628 	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
629 		struct md_rdev *rdev;
630 		int len;
631 		int read_len;
632 
633 		if (r1_bio->bios[disk] == IO_BLOCKED)
634 			continue;
635 
636 		rdev = conf->mirrors[disk].rdev;
637 		if (!rdev || test_bit(Faulty, &rdev->flags) ||
638 		    test_bit(WriteMostly, &rdev->flags))
639 			continue;
640 
641 		/* keep track of the disk with the most readable sectors. */
642 		len = r1_bio->sectors;
643 		read_len = raid1_check_read_range(rdev, this_sector, &len);
644 		if (read_len > best_len) {
645 			best_disk = disk;
646 			best_len = read_len;
647 		}
648 	}
649 
650 	if (best_disk != -1) {
651 		*max_sectors = best_len;
652 		update_read_sectors(conf, best_disk, this_sector, best_len);
653 	}
654 
655 	return best_disk;
656 }
657 
658 static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
659 			    int *max_sectors)
660 {
661 	sector_t this_sector = r1_bio->sector;
662 	int bb_disk = -1;
663 	int bb_read_len = 0;
664 	int disk;
665 
666 	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
667 		struct md_rdev *rdev;
668 		int len;
669 		int read_len;
670 
671 		if (r1_bio->bios[disk] == IO_BLOCKED)
672 			continue;
673 
674 		rdev = conf->mirrors[disk].rdev;
675 		if (!rdev || test_bit(Faulty, &rdev->flags) ||
676 		    !test_bit(WriteMostly, &rdev->flags))
677 			continue;
678 
679 		/* there are no bad blocks, we can use this disk */
680 		len = r1_bio->sectors;
681 		read_len = raid1_check_read_range(rdev, this_sector, &len);
682 		if (read_len == r1_bio->sectors) {
683 			update_read_sectors(conf, disk, this_sector, read_len);
684 			return disk;
685 		}
686 
687 		/*
688 		 * there are partial bad blocks, choose the rdev with largest
689 		 * read length.
690 		 */
691 		if (read_len > bb_read_len) {
692 			bb_disk = disk;
693 			bb_read_len = read_len;
694 		}
695 	}
696 
697 	if (bb_disk != -1) {
698 		*max_sectors = bb_read_len;
699 		update_read_sectors(conf, bb_disk, this_sector, bb_read_len);
700 	}
701 
702 	return bb_disk;
703 }
704 
705 static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio)
706 {
707 	/* TODO: address issues with this check and concurrency. */
708 	return conf->mirrors[disk].next_seq_sect == r1_bio->sector ||
709 	       conf->mirrors[disk].head_position == r1_bio->sector;
710 }
711 
712 /*
713  * If buffered sequential IO size exceeds optimal iosize, check if there is idle
714  * disk. If yes, choose the idle disk.
715  */
716 static bool should_choose_next(struct r1conf *conf, int disk)
717 {
718 	struct raid1_info *mirror = &conf->mirrors[disk];
719 	int opt_iosize;
720 
721 	if (!test_bit(Nonrot, &mirror->rdev->flags))
722 		return false;
723 
724 	opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9;
725 	return opt_iosize > 0 && mirror->seq_start != MaxSector &&
726 	       mirror->next_seq_sect > opt_iosize &&
727 	       mirror->next_seq_sect - opt_iosize >= mirror->seq_start;
728 }
729 
730 static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio)
731 {
732 	if (!rdev || test_bit(Faulty, &rdev->flags))
733 		return false;
734 
735 	/* still in recovery */
736 	if (!test_bit(In_sync, &rdev->flags) &&
737 	    rdev->recovery_offset < r1_bio->sector + r1_bio->sectors)
738 		return false;
739 
740 	/* don't read from slow disk unless have to */
741 	if (test_bit(WriteMostly, &rdev->flags))
742 		return false;
743 
744 	/* don't split IO for bad blocks unless have to */
745 	if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors))
746 		return false;
747 
748 	return true;
749 }
750 
751 struct read_balance_ctl {
752 	sector_t closest_dist;
753 	int closest_dist_disk;
754 	int min_pending;
755 	int min_pending_disk;
756 	int sequential_disk;
757 	int readable_disks;
758 };
759 
760 static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio)
761 {
762 	int disk;
763 	struct read_balance_ctl ctl = {
764 		.closest_dist_disk      = -1,
765 		.closest_dist           = MaxSector,
766 		.min_pending_disk       = -1,
767 		.min_pending            = UINT_MAX,
768 		.sequential_disk	= -1,
769 	};
770 
771 	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
772 		struct md_rdev *rdev;
773 		sector_t dist;
774 		unsigned int pending;
775 
776 		if (r1_bio->bios[disk] == IO_BLOCKED)
777 			continue;
778 
779 		rdev = conf->mirrors[disk].rdev;
780 		if (!rdev_readable(rdev, r1_bio))
781 			continue;
782 
783 		/* At least two disks to choose from so failfast is OK */
784 		if (ctl.readable_disks++ == 1)
785 			set_bit(R1BIO_FailFast, &r1_bio->state);
786 
787 		pending = atomic_read(&rdev->nr_pending);
788 		dist = abs(r1_bio->sector - conf->mirrors[disk].head_position);
789 
790 		/* Don't change to another disk for sequential reads */
791 		if (is_sequential(conf, disk, r1_bio)) {
792 			if (!should_choose_next(conf, disk))
793 				return disk;
794 
795 			/*
796 			 * Add 'pending' to avoid choosing this disk if
797 			 * there is other idle disk.
798 			 */
799 			pending++;
800 			/*
801 			 * If there is no other idle disk, this disk
802 			 * will be chosen.
803 			 */
804 			ctl.sequential_disk = disk;
805 		}
806 
807 		if (ctl.min_pending > pending) {
808 			ctl.min_pending = pending;
809 			ctl.min_pending_disk = disk;
810 		}
811 
812 		if (ctl.closest_dist > dist) {
813 			ctl.closest_dist = dist;
814 			ctl.closest_dist_disk = disk;
815 		}
816 	}
817 
818 	/*
819 	 * sequential IO size exceeds optimal iosize, however, there is no other
820 	 * idle disk, so choose the sequential disk.
821 	 */
822 	if (ctl.sequential_disk != -1 && ctl.min_pending != 0)
823 		return ctl.sequential_disk;
824 
825 	/*
826 	 * If all disks are rotational, choose the closest disk. If any disk is
827 	 * non-rotational, choose the disk with less pending request even the
828 	 * disk is rotational, which might/might not be optimal for raids with
829 	 * mixed ratation/non-rotational disks depending on workload.
830 	 */
831 	if (ctl.min_pending_disk != -1 &&
832 	    (READ_ONCE(conf->nonrot_disks) || ctl.min_pending == 0))
833 		return ctl.min_pending_disk;
834 	else
835 		return ctl.closest_dist_disk;
836 }
837 
838 /*
839  * This routine returns the disk from which the requested read should be done.
840  *
841  * 1) If resync is in progress, find the first usable disk and use it even if it
842  * has some bad blocks.
843  *
844  * 2) Now that there is no resync, loop through all disks and skipping slow
845  * disks and disks with bad blocks for now. Only pay attention to key disk
846  * choice.
847  *
848  * 3) If we've made it this far, now look for disks with bad blocks and choose
849  * the one with most number of sectors.
850  *
851  * 4) If we are all the way at the end, we have no choice but to use a disk even
852  * if it is write mostly.
853  *
854  * The rdev for the device selected will have nr_pending incremented.
855  */
856 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio,
857 			int *max_sectors)
858 {
859 	int disk;
860 
861 	clear_bit(R1BIO_FailFast, &r1_bio->state);
862 
863 	if (raid1_should_read_first(conf->mddev, r1_bio->sector,
864 				    r1_bio->sectors))
865 		return choose_first_rdev(conf, r1_bio, max_sectors);
866 
867 	disk = choose_best_rdev(conf, r1_bio);
868 	if (disk >= 0) {
869 		*max_sectors = r1_bio->sectors;
870 		update_read_sectors(conf, disk, r1_bio->sector,
871 				    r1_bio->sectors);
872 		return disk;
873 	}
874 
875 	/*
876 	 * If we are here it means we didn't find a perfectly good disk so
877 	 * now spend a bit more time trying to find one with the most good
878 	 * sectors.
879 	 */
880 	disk = choose_bb_rdev(conf, r1_bio, max_sectors);
881 	if (disk >= 0)
882 		return disk;
883 
884 	return choose_slow_rdev(conf, r1_bio, max_sectors);
885 }
886 
887 static void wake_up_barrier(struct r1conf *conf)
888 {
889 	if (wq_has_sleeper(&conf->wait_barrier))
890 		wake_up(&conf->wait_barrier);
891 }
892 
893 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
894 {
895 	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
896 	raid1_prepare_flush_writes(conf->mddev->bitmap);
897 	wake_up_barrier(conf);
898 
899 	while (bio) { /* submit pending writes */
900 		struct bio *next = bio->bi_next;
901 
902 		raid1_submit_write(bio);
903 		bio = next;
904 		cond_resched();
905 	}
906 }
907 
908 static void flush_pending_writes(struct r1conf *conf)
909 {
910 	/* Any writes that have been queued but are awaiting
911 	 * bitmap updates get flushed here.
912 	 */
913 	spin_lock_irq(&conf->device_lock);
914 
915 	if (conf->pending_bio_list.head) {
916 		struct blk_plug plug;
917 		struct bio *bio;
918 
919 		bio = bio_list_get(&conf->pending_bio_list);
920 		spin_unlock_irq(&conf->device_lock);
921 
922 		/*
923 		 * As this is called in a wait_event() loop (see freeze_array),
924 		 * current->state might be TASK_UNINTERRUPTIBLE which will
925 		 * cause a warning when we prepare to wait again.  As it is
926 		 * rare that this path is taken, it is perfectly safe to force
927 		 * us to go around the wait_event() loop again, so the warning
928 		 * is a false-positive.  Silence the warning by resetting
929 		 * thread state
930 		 */
931 		__set_current_state(TASK_RUNNING);
932 		blk_start_plug(&plug);
933 		flush_bio_list(conf, bio);
934 		blk_finish_plug(&plug);
935 	} else
936 		spin_unlock_irq(&conf->device_lock);
937 }
938 
939 /* Barriers....
940  * Sometimes we need to suspend IO while we do something else,
941  * either some resync/recovery, or reconfigure the array.
942  * To do this we raise a 'barrier'.
943  * The 'barrier' is a counter that can be raised multiple times
944  * to count how many activities are happening which preclude
945  * normal IO.
946  * We can only raise the barrier if there is no pending IO.
947  * i.e. if nr_pending == 0.
948  * We choose only to raise the barrier if no-one is waiting for the
949  * barrier to go down.  This means that as soon as an IO request
950  * is ready, no other operations which require a barrier will start
951  * until the IO request has had a chance.
952  *
953  * So: regular IO calls 'wait_barrier'.  When that returns there
954  *    is no backgroup IO happening,  It must arrange to call
955  *    allow_barrier when it has finished its IO.
956  * backgroup IO calls must call raise_barrier.  Once that returns
957  *    there is no normal IO happeing.  It must arrange to call
958  *    lower_barrier when the particular background IO completes.
959  *
960  * If resync/recovery is interrupted, returns -EINTR;
961  * Otherwise, returns 0.
962  */
963 static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
964 {
965 	int idx = sector_to_idx(sector_nr);
966 
967 	spin_lock_irq(&conf->resync_lock);
968 
969 	/* Wait until no block IO is waiting */
970 	wait_event_lock_irq(conf->wait_barrier,
971 			    !atomic_read(&conf->nr_waiting[idx]),
972 			    conf->resync_lock);
973 
974 	/* block any new IO from starting */
975 	atomic_inc(&conf->barrier[idx]);
976 	/*
977 	 * In raise_barrier() we firstly increase conf->barrier[idx] then
978 	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
979 	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
980 	 * A memory barrier here to make sure conf->nr_pending[idx] won't
981 	 * be fetched before conf->barrier[idx] is increased. Otherwise
982 	 * there will be a race between raise_barrier() and _wait_barrier().
983 	 */
984 	smp_mb__after_atomic();
985 
986 	/* For these conditions we must wait:
987 	 * A: while the array is in frozen state
988 	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
989 	 *    existing in corresponding I/O barrier bucket.
990 	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches
991 	 *    max resync count which allowed on current I/O barrier bucket.
992 	 */
993 	wait_event_lock_irq(conf->wait_barrier,
994 			    (!conf->array_frozen &&
995 			     !atomic_read(&conf->nr_pending[idx]) &&
996 			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
997 				test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
998 			    conf->resync_lock);
999 
1000 	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
1001 		atomic_dec(&conf->barrier[idx]);
1002 		spin_unlock_irq(&conf->resync_lock);
1003 		wake_up(&conf->wait_barrier);
1004 		return -EINTR;
1005 	}
1006 
1007 	atomic_inc(&conf->nr_sync_pending);
1008 	spin_unlock_irq(&conf->resync_lock);
1009 
1010 	return 0;
1011 }
1012 
1013 static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
1014 {
1015 	int idx = sector_to_idx(sector_nr);
1016 
1017 	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
1018 
1019 	atomic_dec(&conf->barrier[idx]);
1020 	atomic_dec(&conf->nr_sync_pending);
1021 	wake_up(&conf->wait_barrier);
1022 }
1023 
1024 static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
1025 {
1026 	bool ret = true;
1027 
1028 	/*
1029 	 * We need to increase conf->nr_pending[idx] very early here,
1030 	 * then raise_barrier() can be blocked when it waits for
1031 	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
1032 	 * conf->resync_lock when there is no barrier raised in same
1033 	 * barrier unit bucket. Also if the array is frozen, I/O
1034 	 * should be blocked until array is unfrozen.
1035 	 */
1036 	atomic_inc(&conf->nr_pending[idx]);
1037 	/*
1038 	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
1039 	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
1040 	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
1041 	 * barrier is necessary here to make sure conf->barrier[idx] won't be
1042 	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
1043 	 * will be a race between _wait_barrier() and raise_barrier().
1044 	 */
1045 	smp_mb__after_atomic();
1046 
1047 	/*
1048 	 * Don't worry about checking two atomic_t variables at same time
1049 	 * here. If during we check conf->barrier[idx], the array is
1050 	 * frozen (conf->array_frozen is 1), and chonf->barrier[idx] is
1051 	 * 0, it is safe to return and make the I/O continue. Because the
1052 	 * array is frozen, all I/O returned here will eventually complete
1053 	 * or be queued, no race will happen. See code comment in
1054 	 * frozen_array().
1055 	 */
1056 	if (!READ_ONCE(conf->array_frozen) &&
1057 	    !atomic_read(&conf->barrier[idx]))
1058 		return ret;
1059 
1060 	/*
1061 	 * After holding conf->resync_lock, conf->nr_pending[idx]
1062 	 * should be decreased before waiting for barrier to drop.
1063 	 * Otherwise, we may encounter a race condition because
1064 	 * raise_barrer() might be waiting for conf->nr_pending[idx]
1065 	 * to be 0 at same time.
1066 	 */
1067 	spin_lock_irq(&conf->resync_lock);
1068 	atomic_inc(&conf->nr_waiting[idx]);
1069 	atomic_dec(&conf->nr_pending[idx]);
1070 	/*
1071 	 * In case freeze_array() is waiting for
1072 	 * get_unqueued_pending() == extra
1073 	 */
1074 	wake_up_barrier(conf);
1075 	/* Wait for the barrier in same barrier unit bucket to drop. */
1076 
1077 	/* Return false when nowait flag is set */
1078 	if (nowait) {
1079 		ret = false;
1080 	} else {
1081 		wait_event_lock_irq(conf->wait_barrier,
1082 				!conf->array_frozen &&
1083 				!atomic_read(&conf->barrier[idx]),
1084 				conf->resync_lock);
1085 		atomic_inc(&conf->nr_pending[idx]);
1086 	}
1087 
1088 	atomic_dec(&conf->nr_waiting[idx]);
1089 	spin_unlock_irq(&conf->resync_lock);
1090 	return ret;
1091 }
1092 
1093 static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
1094 {
1095 	int idx = sector_to_idx(sector_nr);
1096 	bool ret = true;
1097 
1098 	/*
1099 	 * Very similar to _wait_barrier(). The difference is, for read
1100 	 * I/O we don't need wait for sync I/O, but if the whole array
1101 	 * is frozen, the read I/O still has to wait until the array is
1102 	 * unfrozen. Since there is no ordering requirement with
1103 	 * conf->barrier[idx] here, memory barrier is unnecessary as well.
1104 	 */
1105 	atomic_inc(&conf->nr_pending[idx]);
1106 
1107 	if (!READ_ONCE(conf->array_frozen))
1108 		return ret;
1109 
1110 	spin_lock_irq(&conf->resync_lock);
1111 	atomic_inc(&conf->nr_waiting[idx]);
1112 	atomic_dec(&conf->nr_pending[idx]);
1113 	/*
1114 	 * In case freeze_array() is waiting for
1115 	 * get_unqueued_pending() == extra
1116 	 */
1117 	wake_up_barrier(conf);
1118 	/* Wait for array to be unfrozen */
1119 
1120 	/* Return false when nowait flag is set */
1121 	if (nowait) {
1122 		/* Return false when nowait flag is set */
1123 		ret = false;
1124 	} else {
1125 		wait_event_lock_irq(conf->wait_barrier,
1126 				!conf->array_frozen,
1127 				conf->resync_lock);
1128 		atomic_inc(&conf->nr_pending[idx]);
1129 	}
1130 
1131 	atomic_dec(&conf->nr_waiting[idx]);
1132 	spin_unlock_irq(&conf->resync_lock);
1133 	return ret;
1134 }
1135 
1136 static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
1137 {
1138 	int idx = sector_to_idx(sector_nr);
1139 
1140 	return _wait_barrier(conf, idx, nowait);
1141 }
1142 
1143 static void _allow_barrier(struct r1conf *conf, int idx)
1144 {
1145 	atomic_dec(&conf->nr_pending[idx]);
1146 	wake_up_barrier(conf);
1147 }
1148 
1149 static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1150 {
1151 	int idx = sector_to_idx(sector_nr);
1152 
1153 	_allow_barrier(conf, idx);
1154 }
1155 
1156 /* conf->resync_lock should be held */
1157 static int get_unqueued_pending(struct r1conf *conf)
1158 {
1159 	int idx, ret;
1160 
1161 	ret = atomic_read(&conf->nr_sync_pending);
1162 	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1163 		ret += atomic_read(&conf->nr_pending[idx]) -
1164 			atomic_read(&conf->nr_queued[idx]);
1165 
1166 	return ret;
1167 }
1168 
1169 static void freeze_array(struct r1conf *conf, int extra)
1170 {
1171 	/* Stop sync I/O and normal I/O and wait for everything to
1172 	 * go quiet.
1173 	 * This is called in two situations:
1174 	 * 1) management command handlers (reshape, remove disk, quiesce).
1175 	 * 2) one normal I/O request failed.
1176 
1177 	 * After array_frozen is set to 1, new sync IO will be blocked at
1178 	 * raise_barrier(), and new normal I/O will blocked at _wait_barrier()
1179 	 * or wait_read_barrier(). The flying I/Os will either complete or be
1180 	 * queued. When everything goes quite, there are only queued I/Os left.
1181 
1182 	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
1183 	 * barrier bucket index which this I/O request hits. When all sync and
1184 	 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
1185 	 * of all conf->nr_queued[]. But normal I/O failure is an exception,
1186 	 * in handle_read_error(), we may call freeze_array() before trying to
1187 	 * fix the read error. In this case, the error read I/O is not queued,
1188 	 * so get_unqueued_pending() == 1.
1189 	 *
1190 	 * Therefore before this function returns, we need to wait until
1191 	 * get_unqueued_pendings(conf) gets equal to extra. For
1192 	 * normal I/O context, extra is 1, in rested situations extra is 0.
1193 	 */
1194 	spin_lock_irq(&conf->resync_lock);
1195 	conf->array_frozen = 1;
1196 	mddev_add_trace_msg(conf->mddev, "raid1 wait freeze");
1197 	wait_event_lock_irq_cmd(
1198 		conf->wait_barrier,
1199 		get_unqueued_pending(conf) == extra,
1200 		conf->resync_lock,
1201 		flush_pending_writes(conf));
1202 	spin_unlock_irq(&conf->resync_lock);
1203 }
1204 static void unfreeze_array(struct r1conf *conf)
1205 {
1206 	/* reverse the effect of the freeze */
1207 	spin_lock_irq(&conf->resync_lock);
1208 	conf->array_frozen = 0;
1209 	spin_unlock_irq(&conf->resync_lock);
1210 	wake_up(&conf->wait_barrier);
1211 }
1212 
1213 static void alloc_behind_master_bio(struct r1bio *r1_bio,
1214 					   struct bio *bio)
1215 {
1216 	int size = bio->bi_iter.bi_size;
1217 	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1218 	int i = 0;
1219 	struct bio *behind_bio = NULL;
1220 
1221 	behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
1222 				      &r1_bio->mddev->bio_set);
1223 
1224 	/* discard op, we don't support writezero/writesame yet */
1225 	if (!bio_has_data(bio)) {
1226 		behind_bio->bi_iter.bi_size = size;
1227 		goto skip_copy;
1228 	}
1229 
1230 	while (i < vcnt && size) {
1231 		struct page *page;
1232 		int len = min_t(int, PAGE_SIZE, size);
1233 
1234 		page = alloc_page(GFP_NOIO);
1235 		if (unlikely(!page))
1236 			goto free_pages;
1237 
1238 		if (!bio_add_page(behind_bio, page, len, 0)) {
1239 			put_page(page);
1240 			goto free_pages;
1241 		}
1242 
1243 		size -= len;
1244 		i++;
1245 	}
1246 
1247 	bio_copy_data(behind_bio, bio);
1248 skip_copy:
1249 	r1_bio->behind_master_bio = behind_bio;
1250 	set_bit(R1BIO_BehindIO, &r1_bio->state);
1251 
1252 	return;
1253 
1254 free_pages:
1255 	pr_debug("%dB behind alloc failed, doing sync I/O\n",
1256 		 bio->bi_iter.bi_size);
1257 	bio_free_pages(behind_bio);
1258 	bio_put(behind_bio);
1259 }
1260 
1261 static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1262 {
1263 	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1264 						  cb);
1265 	struct mddev *mddev = plug->cb.data;
1266 	struct r1conf *conf = mddev->private;
1267 	struct bio *bio;
1268 
1269 	if (from_schedule) {
1270 		spin_lock_irq(&conf->device_lock);
1271 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1272 		spin_unlock_irq(&conf->device_lock);
1273 		wake_up_barrier(conf);
1274 		md_wakeup_thread(mddev->thread);
1275 		kfree(plug);
1276 		return;
1277 	}
1278 
1279 	/* we aren't scheduling, so we can do the write-out directly. */
1280 	bio = bio_list_get(&plug->pending);
1281 	flush_bio_list(conf, bio);
1282 	kfree(plug);
1283 }
1284 
1285 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1286 {
1287 	r1_bio->master_bio = bio;
1288 	r1_bio->sectors = bio_sectors(bio);
1289 	r1_bio->state = 0;
1290 	r1_bio->mddev = mddev;
1291 	r1_bio->sector = bio->bi_iter.bi_sector;
1292 }
1293 
1294 static inline struct r1bio *
1295 alloc_r1bio(struct mddev *mddev, struct bio *bio)
1296 {
1297 	struct r1conf *conf = mddev->private;
1298 	struct r1bio *r1_bio;
1299 
1300 	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1301 	/* Ensure no bio records IO_BLOCKED */
1302 	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1303 	init_r1bio(r1_bio, mddev, bio);
1304 	return r1_bio;
1305 }
1306 
1307 static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1308 			       int max_read_sectors, struct r1bio *r1_bio)
1309 {
1310 	struct r1conf *conf = mddev->private;
1311 	struct raid1_info *mirror;
1312 	struct bio *read_bio;
1313 	struct bitmap *bitmap = mddev->bitmap;
1314 	const enum req_op op = bio_op(bio);
1315 	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1316 	int max_sectors;
1317 	int rdisk;
1318 	bool r1bio_existed = !!r1_bio;
1319 	char b[BDEVNAME_SIZE];
1320 
1321 	/*
1322 	 * If r1_bio is set, we are blocking the raid1d thread
1323 	 * so there is a tiny risk of deadlock.  So ask for
1324 	 * emergency memory if needed.
1325 	 */
1326 	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1327 
1328 	if (r1bio_existed) {
1329 		/* Need to get the block device name carefully */
1330 		struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
1331 
1332 		if (rdev)
1333 			snprintf(b, sizeof(b), "%pg", rdev->bdev);
1334 		else
1335 			strcpy(b, "???");
1336 	}
1337 
1338 	/*
1339 	 * Still need barrier for READ in case that whole
1340 	 * array is frozen.
1341 	 */
1342 	if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
1343 				bio->bi_opf & REQ_NOWAIT)) {
1344 		bio_wouldblock_error(bio);
1345 		return;
1346 	}
1347 
1348 	if (!r1_bio)
1349 		r1_bio = alloc_r1bio(mddev, bio);
1350 	else
1351 		init_r1bio(r1_bio, mddev, bio);
1352 	r1_bio->sectors = max_read_sectors;
1353 
1354 	/*
1355 	 * make_request() can abort the operation when read-ahead is being
1356 	 * used and no empty request is available.
1357 	 */
1358 	rdisk = read_balance(conf, r1_bio, &max_sectors);
1359 
1360 	if (rdisk < 0) {
1361 		/* couldn't find anywhere to read from */
1362 		if (r1bio_existed) {
1363 			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1364 					    mdname(mddev),
1365 					    b,
1366 					    (unsigned long long)r1_bio->sector);
1367 		}
1368 		raid_end_bio_io(r1_bio);
1369 		return;
1370 	}
1371 	mirror = conf->mirrors + rdisk;
1372 
1373 	if (r1bio_existed)
1374 		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %pg\n",
1375 				    mdname(mddev),
1376 				    (unsigned long long)r1_bio->sector,
1377 				    mirror->rdev->bdev);
1378 
1379 	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1380 	    bitmap) {
1381 		/*
1382 		 * Reading from a write-mostly device must take care not to
1383 		 * over-take any writes that are 'behind'
1384 		 */
1385 		mddev_add_trace_msg(mddev, "raid1 wait behind writes");
1386 		wait_event(bitmap->behind_wait,
1387 			   atomic_read(&bitmap->behind_writes) == 0);
1388 	}
1389 
1390 	if (max_sectors < bio_sectors(bio)) {
1391 		struct bio *split = bio_split(bio, max_sectors,
1392 					      gfp, &conf->bio_split);
1393 		bio_chain(split, bio);
1394 		submit_bio_noacct(bio);
1395 		bio = split;
1396 		r1_bio->master_bio = bio;
1397 		r1_bio->sectors = max_sectors;
1398 	}
1399 
1400 	r1_bio->read_disk = rdisk;
1401 	if (!r1bio_existed) {
1402 		md_account_bio(mddev, &bio);
1403 		r1_bio->master_bio = bio;
1404 	}
1405 	read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
1406 				   &mddev->bio_set);
1407 
1408 	r1_bio->bios[rdisk] = read_bio;
1409 
1410 	read_bio->bi_iter.bi_sector = r1_bio->sector +
1411 		mirror->rdev->data_offset;
1412 	read_bio->bi_end_io = raid1_end_read_request;
1413 	read_bio->bi_opf = op | do_sync;
1414 	if (test_bit(FailFast, &mirror->rdev->flags) &&
1415 	    test_bit(R1BIO_FailFast, &r1_bio->state))
1416 	        read_bio->bi_opf |= MD_FAILFAST;
1417 	read_bio->bi_private = r1_bio;
1418 	mddev_trace_remap(mddev, read_bio, r1_bio->sector);
1419 	submit_bio_noacct(read_bio);
1420 }
1421 
1422 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1423 				int max_write_sectors)
1424 {
1425 	struct r1conf *conf = mddev->private;
1426 	struct r1bio *r1_bio;
1427 	int i, disks;
1428 	struct bitmap *bitmap = mddev->bitmap;
1429 	unsigned long flags;
1430 	struct md_rdev *blocked_rdev;
1431 	int first_clone;
1432 	int max_sectors;
1433 	bool write_behind = false;
1434 	bool is_discard = (bio_op(bio) == REQ_OP_DISCARD);
1435 
1436 	if (mddev_is_clustered(mddev) &&
1437 	     md_cluster_ops->area_resyncing(mddev, WRITE,
1438 		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1439 
1440 		DEFINE_WAIT(w);
1441 		if (bio->bi_opf & REQ_NOWAIT) {
1442 			bio_wouldblock_error(bio);
1443 			return;
1444 		}
1445 		for (;;) {
1446 			prepare_to_wait(&conf->wait_barrier,
1447 					&w, TASK_IDLE);
1448 			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1449 							bio->bi_iter.bi_sector,
1450 							bio_end_sector(bio)))
1451 				break;
1452 			schedule();
1453 		}
1454 		finish_wait(&conf->wait_barrier, &w);
1455 	}
1456 
1457 	/*
1458 	 * Register the new request and wait if the reconstruction
1459 	 * thread has put up a bar for new requests.
1460 	 * Continue immediately if no resync is active currently.
1461 	 */
1462 	if (!wait_barrier(conf, bio->bi_iter.bi_sector,
1463 				bio->bi_opf & REQ_NOWAIT)) {
1464 		bio_wouldblock_error(bio);
1465 		return;
1466 	}
1467 
1468  retry_write:
1469 	r1_bio = alloc_r1bio(mddev, bio);
1470 	r1_bio->sectors = max_write_sectors;
1471 
1472 	/* first select target devices under rcu_lock and
1473 	 * inc refcount on their rdev.  Record them by setting
1474 	 * bios[x] to bio
1475 	 * If there are known/acknowledged bad blocks on any device on
1476 	 * which we have seen a write error, we want to avoid writing those
1477 	 * blocks.
1478 	 * This potentially requires several writes to write around
1479 	 * the bad blocks.  Each set of writes gets it's own r1bio
1480 	 * with a set of bios attached.
1481 	 */
1482 
1483 	disks = conf->raid_disks * 2;
1484 	blocked_rdev = NULL;
1485 	max_sectors = r1_bio->sectors;
1486 	for (i = 0;  i < disks; i++) {
1487 		struct md_rdev *rdev = conf->mirrors[i].rdev;
1488 
1489 		/*
1490 		 * The write-behind io is only attempted on drives marked as
1491 		 * write-mostly, which means we could allocate write behind
1492 		 * bio later.
1493 		 */
1494 		if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags))
1495 			write_behind = true;
1496 
1497 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1498 			atomic_inc(&rdev->nr_pending);
1499 			blocked_rdev = rdev;
1500 			break;
1501 		}
1502 		r1_bio->bios[i] = NULL;
1503 		if (!rdev || test_bit(Faulty, &rdev->flags)) {
1504 			if (i < conf->raid_disks)
1505 				set_bit(R1BIO_Degraded, &r1_bio->state);
1506 			continue;
1507 		}
1508 
1509 		atomic_inc(&rdev->nr_pending);
1510 		if (test_bit(WriteErrorSeen, &rdev->flags)) {
1511 			sector_t first_bad;
1512 			int bad_sectors;
1513 			int is_bad;
1514 
1515 			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1516 					     &first_bad, &bad_sectors);
1517 			if (is_bad < 0) {
1518 				/* mustn't write here until the bad block is
1519 				 * acknowledged*/
1520 				set_bit(BlockedBadBlocks, &rdev->flags);
1521 				blocked_rdev = rdev;
1522 				break;
1523 			}
1524 			if (is_bad && first_bad <= r1_bio->sector) {
1525 				/* Cannot write here at all */
1526 				bad_sectors -= (r1_bio->sector - first_bad);
1527 				if (bad_sectors < max_sectors)
1528 					/* mustn't write more than bad_sectors
1529 					 * to other devices yet
1530 					 */
1531 					max_sectors = bad_sectors;
1532 				rdev_dec_pending(rdev, mddev);
1533 				/* We don't set R1BIO_Degraded as that
1534 				 * only applies if the disk is
1535 				 * missing, so it might be re-added,
1536 				 * and we want to know to recover this
1537 				 * chunk.
1538 				 * In this case the device is here,
1539 				 * and the fact that this chunk is not
1540 				 * in-sync is recorded in the bad
1541 				 * block log
1542 				 */
1543 				continue;
1544 			}
1545 			if (is_bad) {
1546 				int good_sectors = first_bad - r1_bio->sector;
1547 				if (good_sectors < max_sectors)
1548 					max_sectors = good_sectors;
1549 			}
1550 		}
1551 		r1_bio->bios[i] = bio;
1552 	}
1553 
1554 	if (unlikely(blocked_rdev)) {
1555 		/* Wait for this device to become unblocked */
1556 		int j;
1557 
1558 		for (j = 0; j < i; j++)
1559 			if (r1_bio->bios[j])
1560 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1561 		mempool_free(r1_bio, &conf->r1bio_pool);
1562 		allow_barrier(conf, bio->bi_iter.bi_sector);
1563 
1564 		if (bio->bi_opf & REQ_NOWAIT) {
1565 			bio_wouldblock_error(bio);
1566 			return;
1567 		}
1568 		mddev_add_trace_msg(mddev, "raid1 wait rdev %d blocked",
1569 				blocked_rdev->raid_disk);
1570 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1571 		wait_barrier(conf, bio->bi_iter.bi_sector, false);
1572 		goto retry_write;
1573 	}
1574 
1575 	/*
1576 	 * When using a bitmap, we may call alloc_behind_master_bio below.
1577 	 * alloc_behind_master_bio allocates a copy of the data payload a page
1578 	 * at a time and thus needs a new bio that can fit the whole payload
1579 	 * this bio in page sized chunks.
1580 	 */
1581 	if (write_behind && bitmap)
1582 		max_sectors = min_t(int, max_sectors,
1583 				    BIO_MAX_VECS * (PAGE_SIZE >> 9));
1584 	if (max_sectors < bio_sectors(bio)) {
1585 		struct bio *split = bio_split(bio, max_sectors,
1586 					      GFP_NOIO, &conf->bio_split);
1587 		bio_chain(split, bio);
1588 		submit_bio_noacct(bio);
1589 		bio = split;
1590 		r1_bio->master_bio = bio;
1591 		r1_bio->sectors = max_sectors;
1592 	}
1593 
1594 	md_account_bio(mddev, &bio);
1595 	r1_bio->master_bio = bio;
1596 	atomic_set(&r1_bio->remaining, 1);
1597 	atomic_set(&r1_bio->behind_remaining, 0);
1598 
1599 	first_clone = 1;
1600 
1601 	for (i = 0; i < disks; i++) {
1602 		struct bio *mbio = NULL;
1603 		struct md_rdev *rdev = conf->mirrors[i].rdev;
1604 		if (!r1_bio->bios[i])
1605 			continue;
1606 
1607 		if (first_clone) {
1608 			/* do behind I/O ?
1609 			 * Not if there are too many, or cannot
1610 			 * allocate memory, or a reader on WriteMostly
1611 			 * is waiting for behind writes to flush */
1612 			if (bitmap && write_behind &&
1613 			    (atomic_read(&bitmap->behind_writes)
1614 			     < mddev->bitmap_info.max_write_behind) &&
1615 			    !waitqueue_active(&bitmap->behind_wait)) {
1616 				alloc_behind_master_bio(r1_bio, bio);
1617 			}
1618 
1619 			md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1620 					     test_bit(R1BIO_BehindIO, &r1_bio->state));
1621 			first_clone = 0;
1622 		}
1623 
1624 		if (r1_bio->behind_master_bio) {
1625 			mbio = bio_alloc_clone(rdev->bdev,
1626 					       r1_bio->behind_master_bio,
1627 					       GFP_NOIO, &mddev->bio_set);
1628 			if (test_bit(CollisionCheck, &rdev->flags))
1629 				wait_for_serialization(rdev, r1_bio);
1630 			if (test_bit(WriteMostly, &rdev->flags))
1631 				atomic_inc(&r1_bio->behind_remaining);
1632 		} else {
1633 			mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
1634 					       &mddev->bio_set);
1635 
1636 			if (mddev->serialize_policy)
1637 				wait_for_serialization(rdev, r1_bio);
1638 		}
1639 
1640 		r1_bio->bios[i] = mbio;
1641 
1642 		mbio->bi_iter.bi_sector	= (r1_bio->sector + rdev->data_offset);
1643 		mbio->bi_end_io	= raid1_end_write_request;
1644 		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
1645 		if (test_bit(FailFast, &rdev->flags) &&
1646 		    !test_bit(WriteMostly, &rdev->flags) &&
1647 		    conf->raid_disks - mddev->degraded > 1)
1648 			mbio->bi_opf |= MD_FAILFAST;
1649 		mbio->bi_private = r1_bio;
1650 
1651 		atomic_inc(&r1_bio->remaining);
1652 		mddev_trace_remap(mddev, mbio, r1_bio->sector);
1653 		/* flush_pending_writes() needs access to the rdev so...*/
1654 		mbio->bi_bdev = (void *)rdev;
1655 		if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) {
1656 			spin_lock_irqsave(&conf->device_lock, flags);
1657 			bio_list_add(&conf->pending_bio_list, mbio);
1658 			spin_unlock_irqrestore(&conf->device_lock, flags);
1659 			md_wakeup_thread(mddev->thread);
1660 		}
1661 	}
1662 
1663 	r1_bio_write_done(r1_bio);
1664 
1665 	/* In case raid1d snuck in to freeze_array */
1666 	wake_up_barrier(conf);
1667 }
1668 
1669 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1670 {
1671 	sector_t sectors;
1672 
1673 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1674 	    && md_flush_request(mddev, bio))
1675 		return true;
1676 
1677 	/*
1678 	 * There is a limit to the maximum size, but
1679 	 * the read/write handler might find a lower limit
1680 	 * due to bad blocks.  To avoid multiple splits,
1681 	 * we pass the maximum number of sectors down
1682 	 * and let the lower level perform the split.
1683 	 */
1684 	sectors = align_to_barrier_unit_end(
1685 		bio->bi_iter.bi_sector, bio_sectors(bio));
1686 
1687 	if (bio_data_dir(bio) == READ)
1688 		raid1_read_request(mddev, bio, sectors, NULL);
1689 	else {
1690 		md_write_start(mddev,bio);
1691 		raid1_write_request(mddev, bio, sectors);
1692 	}
1693 	return true;
1694 }
1695 
1696 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1697 {
1698 	struct r1conf *conf = mddev->private;
1699 	int i;
1700 
1701 	lockdep_assert_held(&mddev->lock);
1702 
1703 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1704 		   conf->raid_disks - mddev->degraded);
1705 	for (i = 0; i < conf->raid_disks; i++) {
1706 		struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
1707 
1708 		seq_printf(seq, "%s",
1709 			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1710 	}
1711 	seq_printf(seq, "]");
1712 }
1713 
1714 /**
1715  * raid1_error() - RAID1 error handler.
1716  * @mddev: affected md device.
1717  * @rdev: member device to fail.
1718  *
1719  * The routine acknowledges &rdev failure and determines new @mddev state.
1720  * If it failed, then:
1721  *	- &MD_BROKEN flag is set in &mddev->flags.
1722  *	- recovery is disabled.
1723  * Otherwise, it must be degraded:
1724  *	- recovery is interrupted.
1725  *	- &mddev->degraded is bumped.
1726  *
1727  * @rdev is marked as &Faulty excluding case when array is failed and
1728  * &mddev->fail_last_dev is off.
1729  */
1730 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1731 {
1732 	struct r1conf *conf = mddev->private;
1733 	unsigned long flags;
1734 
1735 	spin_lock_irqsave(&conf->device_lock, flags);
1736 
1737 	if (test_bit(In_sync, &rdev->flags) &&
1738 	    (conf->raid_disks - mddev->degraded) == 1) {
1739 		set_bit(MD_BROKEN, &mddev->flags);
1740 
1741 		if (!mddev->fail_last_dev) {
1742 			conf->recovery_disabled = mddev->recovery_disabled;
1743 			spin_unlock_irqrestore(&conf->device_lock, flags);
1744 			return;
1745 		}
1746 	}
1747 	set_bit(Blocked, &rdev->flags);
1748 	if (test_and_clear_bit(In_sync, &rdev->flags))
1749 		mddev->degraded++;
1750 	set_bit(Faulty, &rdev->flags);
1751 	spin_unlock_irqrestore(&conf->device_lock, flags);
1752 	/*
1753 	 * if recovery is running, make sure it aborts.
1754 	 */
1755 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1756 	set_mask_bits(&mddev->sb_flags, 0,
1757 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1758 	pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
1759 		"md/raid1:%s: Operation continuing on %d devices.\n",
1760 		mdname(mddev), rdev->bdev,
1761 		mdname(mddev), conf->raid_disks - mddev->degraded);
1762 }
1763 
1764 static void print_conf(struct r1conf *conf)
1765 {
1766 	int i;
1767 
1768 	pr_debug("RAID1 conf printout:\n");
1769 	if (!conf) {
1770 		pr_debug("(!conf)\n");
1771 		return;
1772 	}
1773 	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1774 		 conf->raid_disks);
1775 
1776 	lockdep_assert_held(&conf->mddev->reconfig_mutex);
1777 	for (i = 0; i < conf->raid_disks; i++) {
1778 		struct md_rdev *rdev = conf->mirrors[i].rdev;
1779 		if (rdev)
1780 			pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
1781 				 i, !test_bit(In_sync, &rdev->flags),
1782 				 !test_bit(Faulty, &rdev->flags),
1783 				 rdev->bdev);
1784 	}
1785 }
1786 
1787 static void close_sync(struct r1conf *conf)
1788 {
1789 	int idx;
1790 
1791 	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1792 		_wait_barrier(conf, idx, false);
1793 		_allow_barrier(conf, idx);
1794 	}
1795 
1796 	mempool_exit(&conf->r1buf_pool);
1797 }
1798 
1799 static int raid1_spare_active(struct mddev *mddev)
1800 {
1801 	int i;
1802 	struct r1conf *conf = mddev->private;
1803 	int count = 0;
1804 	unsigned long flags;
1805 
1806 	/*
1807 	 * Find all failed disks within the RAID1 configuration
1808 	 * and mark them readable.
1809 	 * Called under mddev lock, so rcu protection not needed.
1810 	 * device_lock used to avoid races with raid1_end_read_request
1811 	 * which expects 'In_sync' flags and ->degraded to be consistent.
1812 	 */
1813 	spin_lock_irqsave(&conf->device_lock, flags);
1814 	for (i = 0; i < conf->raid_disks; i++) {
1815 		struct md_rdev *rdev = conf->mirrors[i].rdev;
1816 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1817 		if (repl
1818 		    && !test_bit(Candidate, &repl->flags)
1819 		    && repl->recovery_offset == MaxSector
1820 		    && !test_bit(Faulty, &repl->flags)
1821 		    && !test_and_set_bit(In_sync, &repl->flags)) {
1822 			/* replacement has just become active */
1823 			if (!rdev ||
1824 			    !test_and_clear_bit(In_sync, &rdev->flags))
1825 				count++;
1826 			if (rdev) {
1827 				/* Replaced device not technically
1828 				 * faulty, but we need to be sure
1829 				 * it gets removed and never re-added
1830 				 */
1831 				set_bit(Faulty, &rdev->flags);
1832 				sysfs_notify_dirent_safe(
1833 					rdev->sysfs_state);
1834 			}
1835 		}
1836 		if (rdev
1837 		    && rdev->recovery_offset == MaxSector
1838 		    && !test_bit(Faulty, &rdev->flags)
1839 		    && !test_and_set_bit(In_sync, &rdev->flags)) {
1840 			count++;
1841 			sysfs_notify_dirent_safe(rdev->sysfs_state);
1842 		}
1843 	}
1844 	mddev->degraded -= count;
1845 	spin_unlock_irqrestore(&conf->device_lock, flags);
1846 
1847 	print_conf(conf);
1848 	return count;
1849 }
1850 
1851 static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk,
1852 			   bool replacement)
1853 {
1854 	struct raid1_info *info = conf->mirrors + disk;
1855 
1856 	if (replacement)
1857 		info += conf->raid_disks;
1858 
1859 	if (info->rdev)
1860 		return false;
1861 
1862 	if (bdev_nonrot(rdev->bdev)) {
1863 		set_bit(Nonrot, &rdev->flags);
1864 		WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1);
1865 	}
1866 
1867 	rdev->raid_disk = disk;
1868 	info->head_position = 0;
1869 	info->seq_start = MaxSector;
1870 	WRITE_ONCE(info->rdev, rdev);
1871 
1872 	return true;
1873 }
1874 
1875 static bool raid1_remove_conf(struct r1conf *conf, int disk)
1876 {
1877 	struct raid1_info *info = conf->mirrors + disk;
1878 	struct md_rdev *rdev = info->rdev;
1879 
1880 	if (!rdev || test_bit(In_sync, &rdev->flags) ||
1881 	    atomic_read(&rdev->nr_pending))
1882 		return false;
1883 
1884 	/* Only remove non-faulty devices if recovery is not possible. */
1885 	if (!test_bit(Faulty, &rdev->flags) &&
1886 	    rdev->mddev->recovery_disabled != conf->recovery_disabled &&
1887 	    rdev->mddev->degraded < conf->raid_disks)
1888 		return false;
1889 
1890 	if (test_and_clear_bit(Nonrot, &rdev->flags))
1891 		WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1);
1892 
1893 	WRITE_ONCE(info->rdev, NULL);
1894 	return true;
1895 }
1896 
1897 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1898 {
1899 	struct r1conf *conf = mddev->private;
1900 	int err = -EEXIST;
1901 	int mirror = 0, repl_slot = -1;
1902 	struct raid1_info *p;
1903 	int first = 0;
1904 	int last = conf->raid_disks - 1;
1905 
1906 	if (mddev->recovery_disabled == conf->recovery_disabled)
1907 		return -EBUSY;
1908 
1909 	if (rdev->raid_disk >= 0)
1910 		first = last = rdev->raid_disk;
1911 
1912 	/*
1913 	 * find the disk ... but prefer rdev->saved_raid_disk
1914 	 * if possible.
1915 	 */
1916 	if (rdev->saved_raid_disk >= 0 &&
1917 	    rdev->saved_raid_disk >= first &&
1918 	    rdev->saved_raid_disk < conf->raid_disks &&
1919 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1920 		first = last = rdev->saved_raid_disk;
1921 
1922 	for (mirror = first; mirror <= last; mirror++) {
1923 		p = conf->mirrors + mirror;
1924 		if (!p->rdev) {
1925 			err = mddev_stack_new_rdev(mddev, rdev);
1926 			if (err)
1927 				return err;
1928 
1929 			raid1_add_conf(conf, rdev, mirror, false);
1930 			/* As all devices are equivalent, we don't need a full recovery
1931 			 * if this was recently any drive of the array
1932 			 */
1933 			if (rdev->saved_raid_disk < 0)
1934 				conf->fullsync = 1;
1935 			break;
1936 		}
1937 		if (test_bit(WantReplacement, &p->rdev->flags) &&
1938 		    p[conf->raid_disks].rdev == NULL && repl_slot < 0)
1939 			repl_slot = mirror;
1940 	}
1941 
1942 	if (err && repl_slot >= 0) {
1943 		/* Add this device as a replacement */
1944 		clear_bit(In_sync, &rdev->flags);
1945 		set_bit(Replacement, &rdev->flags);
1946 		raid1_add_conf(conf, rdev, repl_slot, true);
1947 		err = 0;
1948 		conf->fullsync = 1;
1949 	}
1950 
1951 	print_conf(conf);
1952 	return err;
1953 }
1954 
1955 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1956 {
1957 	struct r1conf *conf = mddev->private;
1958 	int err = 0;
1959 	int number = rdev->raid_disk;
1960 	struct raid1_info *p = conf->mirrors + number;
1961 
1962 	if (unlikely(number >= conf->raid_disks))
1963 		goto abort;
1964 
1965 	if (rdev != p->rdev) {
1966 		number += conf->raid_disks;
1967 		p = conf->mirrors + number;
1968 	}
1969 
1970 	print_conf(conf);
1971 	if (rdev == p->rdev) {
1972 		if (!raid1_remove_conf(conf, number)) {
1973 			err = -EBUSY;
1974 			goto abort;
1975 		}
1976 
1977 		if (number < conf->raid_disks &&
1978 		    conf->mirrors[conf->raid_disks + number].rdev) {
1979 			/* We just removed a device that is being replaced.
1980 			 * Move down the replacement.  We drain all IO before
1981 			 * doing this to avoid confusion.
1982 			 */
1983 			struct md_rdev *repl =
1984 				conf->mirrors[conf->raid_disks + number].rdev;
1985 			freeze_array(conf, 0);
1986 			if (atomic_read(&repl->nr_pending)) {
1987 				/* It means that some queued IO of retry_list
1988 				 * hold repl. Thus, we cannot set replacement
1989 				 * as NULL, avoiding rdev NULL pointer
1990 				 * dereference in sync_request_write and
1991 				 * handle_write_finished.
1992 				 */
1993 				err = -EBUSY;
1994 				unfreeze_array(conf);
1995 				goto abort;
1996 			}
1997 			clear_bit(Replacement, &repl->flags);
1998 			WRITE_ONCE(p->rdev, repl);
1999 			conf->mirrors[conf->raid_disks + number].rdev = NULL;
2000 			unfreeze_array(conf);
2001 		}
2002 
2003 		clear_bit(WantReplacement, &rdev->flags);
2004 		err = md_integrity_register(mddev);
2005 	}
2006 abort:
2007 
2008 	print_conf(conf);
2009 	return err;
2010 }
2011 
2012 static void end_sync_read(struct bio *bio)
2013 {
2014 	struct r1bio *r1_bio = get_resync_r1bio(bio);
2015 
2016 	update_head_pos(r1_bio->read_disk, r1_bio);
2017 
2018 	/*
2019 	 * we have read a block, now it needs to be re-written,
2020 	 * or re-read if the read failed.
2021 	 * We don't do much here, just schedule handling by raid1d
2022 	 */
2023 	if (!bio->bi_status)
2024 		set_bit(R1BIO_Uptodate, &r1_bio->state);
2025 
2026 	if (atomic_dec_and_test(&r1_bio->remaining))
2027 		reschedule_retry(r1_bio);
2028 }
2029 
2030 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
2031 {
2032 	sector_t sync_blocks = 0;
2033 	sector_t s = r1_bio->sector;
2034 	long sectors_to_go = r1_bio->sectors;
2035 
2036 	/* make sure these bits don't get cleared. */
2037 	do {
2038 		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
2039 		s += sync_blocks;
2040 		sectors_to_go -= sync_blocks;
2041 	} while (sectors_to_go > 0);
2042 }
2043 
2044 static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
2045 {
2046 	if (atomic_dec_and_test(&r1_bio->remaining)) {
2047 		struct mddev *mddev = r1_bio->mddev;
2048 		int s = r1_bio->sectors;
2049 
2050 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2051 		    test_bit(R1BIO_WriteError, &r1_bio->state))
2052 			reschedule_retry(r1_bio);
2053 		else {
2054 			put_buf(r1_bio);
2055 			md_done_sync(mddev, s, uptodate);
2056 		}
2057 	}
2058 }
2059 
2060 static void end_sync_write(struct bio *bio)
2061 {
2062 	int uptodate = !bio->bi_status;
2063 	struct r1bio *r1_bio = get_resync_r1bio(bio);
2064 	struct mddev *mddev = r1_bio->mddev;
2065 	struct r1conf *conf = mddev->private;
2066 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
2067 
2068 	if (!uptodate) {
2069 		abort_sync_write(mddev, r1_bio);
2070 		set_bit(WriteErrorSeen, &rdev->flags);
2071 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2072 			set_bit(MD_RECOVERY_NEEDED, &
2073 				mddev->recovery);
2074 		set_bit(R1BIO_WriteError, &r1_bio->state);
2075 	} else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
2076 		   !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev,
2077 				      r1_bio->sector, r1_bio->sectors)) {
2078 		set_bit(R1BIO_MadeGood, &r1_bio->state);
2079 	}
2080 
2081 	put_sync_write_buf(r1_bio, uptodate);
2082 }
2083 
2084 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
2085 			   int sectors, struct page *page, blk_opf_t rw)
2086 {
2087 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2088 		/* success */
2089 		return 1;
2090 	if (rw == REQ_OP_WRITE) {
2091 		set_bit(WriteErrorSeen, &rdev->flags);
2092 		if (!test_and_set_bit(WantReplacement,
2093 				      &rdev->flags))
2094 			set_bit(MD_RECOVERY_NEEDED, &
2095 				rdev->mddev->recovery);
2096 	}
2097 	/* need to record an error - either for the block or the device */
2098 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2099 		md_error(rdev->mddev, rdev);
2100 	return 0;
2101 }
2102 
2103 static int fix_sync_read_error(struct r1bio *r1_bio)
2104 {
2105 	/* Try some synchronous reads of other devices to get
2106 	 * good data, much like with normal read errors.  Only
2107 	 * read into the pages we already have so we don't
2108 	 * need to re-issue the read request.
2109 	 * We don't need to freeze the array, because being in an
2110 	 * active sync request, there is no normal IO, and
2111 	 * no overlapping syncs.
2112 	 * We don't need to check is_badblock() again as we
2113 	 * made sure that anything with a bad block in range
2114 	 * will have bi_end_io clear.
2115 	 */
2116 	struct mddev *mddev = r1_bio->mddev;
2117 	struct r1conf *conf = mddev->private;
2118 	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
2119 	struct page **pages = get_resync_pages(bio)->pages;
2120 	sector_t sect = r1_bio->sector;
2121 	int sectors = r1_bio->sectors;
2122 	int idx = 0;
2123 	struct md_rdev *rdev;
2124 
2125 	rdev = conf->mirrors[r1_bio->read_disk].rdev;
2126 	if (test_bit(FailFast, &rdev->flags)) {
2127 		/* Don't try recovering from here - just fail it
2128 		 * ... unless it is the last working device of course */
2129 		md_error(mddev, rdev);
2130 		if (test_bit(Faulty, &rdev->flags))
2131 			/* Don't try to read from here, but make sure
2132 			 * put_buf does it's thing
2133 			 */
2134 			bio->bi_end_io = end_sync_write;
2135 	}
2136 
2137 	while(sectors) {
2138 		int s = sectors;
2139 		int d = r1_bio->read_disk;
2140 		int success = 0;
2141 		int start;
2142 
2143 		if (s > (PAGE_SIZE>>9))
2144 			s = PAGE_SIZE >> 9;
2145 		do {
2146 			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2147 				/* No rcu protection needed here devices
2148 				 * can only be removed when no resync is
2149 				 * active, and resync is currently active
2150 				 */
2151 				rdev = conf->mirrors[d].rdev;
2152 				if (sync_page_io(rdev, sect, s<<9,
2153 						 pages[idx],
2154 						 REQ_OP_READ, false)) {
2155 					success = 1;
2156 					break;
2157 				}
2158 			}
2159 			d++;
2160 			if (d == conf->raid_disks * 2)
2161 				d = 0;
2162 		} while (!success && d != r1_bio->read_disk);
2163 
2164 		if (!success) {
2165 			int abort = 0;
2166 			/* Cannot read from anywhere, this block is lost.
2167 			 * Record a bad block on each device.  If that doesn't
2168 			 * work just disable and interrupt the recovery.
2169 			 * Don't fail devices as that won't really help.
2170 			 */
2171 			pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
2172 					    mdname(mddev), bio->bi_bdev,
2173 					    (unsigned long long)r1_bio->sector);
2174 			for (d = 0; d < conf->raid_disks * 2; d++) {
2175 				rdev = conf->mirrors[d].rdev;
2176 				if (!rdev || test_bit(Faulty, &rdev->flags))
2177 					continue;
2178 				if (!rdev_set_badblocks(rdev, sect, s, 0))
2179 					abort = 1;
2180 			}
2181 			if (abort) {
2182 				conf->recovery_disabled =
2183 					mddev->recovery_disabled;
2184 				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2185 				md_done_sync(mddev, r1_bio->sectors, 0);
2186 				put_buf(r1_bio);
2187 				return 0;
2188 			}
2189 			/* Try next page */
2190 			sectors -= s;
2191 			sect += s;
2192 			idx++;
2193 			continue;
2194 		}
2195 
2196 		start = d;
2197 		/* write it back and re-read */
2198 		while (d != r1_bio->read_disk) {
2199 			if (d == 0)
2200 				d = conf->raid_disks * 2;
2201 			d--;
2202 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2203 				continue;
2204 			rdev = conf->mirrors[d].rdev;
2205 			if (r1_sync_page_io(rdev, sect, s,
2206 					    pages[idx],
2207 					    REQ_OP_WRITE) == 0) {
2208 				r1_bio->bios[d]->bi_end_io = NULL;
2209 				rdev_dec_pending(rdev, mddev);
2210 			}
2211 		}
2212 		d = start;
2213 		while (d != r1_bio->read_disk) {
2214 			if (d == 0)
2215 				d = conf->raid_disks * 2;
2216 			d--;
2217 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2218 				continue;
2219 			rdev = conf->mirrors[d].rdev;
2220 			if (r1_sync_page_io(rdev, sect, s,
2221 					    pages[idx],
2222 					    REQ_OP_READ) != 0)
2223 				atomic_add(s, &rdev->corrected_errors);
2224 		}
2225 		sectors -= s;
2226 		sect += s;
2227 		idx ++;
2228 	}
2229 	set_bit(R1BIO_Uptodate, &r1_bio->state);
2230 	bio->bi_status = 0;
2231 	return 1;
2232 }
2233 
2234 static void process_checks(struct r1bio *r1_bio)
2235 {
2236 	/* We have read all readable devices.  If we haven't
2237 	 * got the block, then there is no hope left.
2238 	 * If we have, then we want to do a comparison
2239 	 * and skip the write if everything is the same.
2240 	 * If any blocks failed to read, then we need to
2241 	 * attempt an over-write
2242 	 */
2243 	struct mddev *mddev = r1_bio->mddev;
2244 	struct r1conf *conf = mddev->private;
2245 	int primary;
2246 	int i;
2247 	int vcnt;
2248 
2249 	/* Fix variable parts of all bios */
2250 	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2251 	for (i = 0; i < conf->raid_disks * 2; i++) {
2252 		blk_status_t status;
2253 		struct bio *b = r1_bio->bios[i];
2254 		struct resync_pages *rp = get_resync_pages(b);
2255 		if (b->bi_end_io != end_sync_read)
2256 			continue;
2257 		/* fixup the bio for reuse, but preserve errno */
2258 		status = b->bi_status;
2259 		bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
2260 		b->bi_status = status;
2261 		b->bi_iter.bi_sector = r1_bio->sector +
2262 			conf->mirrors[i].rdev->data_offset;
2263 		b->bi_end_io = end_sync_read;
2264 		rp->raid_bio = r1_bio;
2265 		b->bi_private = rp;
2266 
2267 		/* initialize bvec table again */
2268 		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2269 	}
2270 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
2271 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2272 		    !r1_bio->bios[primary]->bi_status) {
2273 			r1_bio->bios[primary]->bi_end_io = NULL;
2274 			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2275 			break;
2276 		}
2277 	r1_bio->read_disk = primary;
2278 	for (i = 0; i < conf->raid_disks * 2; i++) {
2279 		int j = 0;
2280 		struct bio *pbio = r1_bio->bios[primary];
2281 		struct bio *sbio = r1_bio->bios[i];
2282 		blk_status_t status = sbio->bi_status;
2283 		struct page **ppages = get_resync_pages(pbio)->pages;
2284 		struct page **spages = get_resync_pages(sbio)->pages;
2285 		struct bio_vec *bi;
2286 		int page_len[RESYNC_PAGES] = { 0 };
2287 		struct bvec_iter_all iter_all;
2288 
2289 		if (sbio->bi_end_io != end_sync_read)
2290 			continue;
2291 		/* Now we can 'fixup' the error value */
2292 		sbio->bi_status = 0;
2293 
2294 		bio_for_each_segment_all(bi, sbio, iter_all)
2295 			page_len[j++] = bi->bv_len;
2296 
2297 		if (!status) {
2298 			for (j = vcnt; j-- ; ) {
2299 				if (memcmp(page_address(ppages[j]),
2300 					   page_address(spages[j]),
2301 					   page_len[j]))
2302 					break;
2303 			}
2304 		} else
2305 			j = 0;
2306 		if (j >= 0)
2307 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2308 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2309 			      && !status)) {
2310 			/* No need to write to this device. */
2311 			sbio->bi_end_io = NULL;
2312 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2313 			continue;
2314 		}
2315 
2316 		bio_copy_data(sbio, pbio);
2317 	}
2318 }
2319 
2320 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2321 {
2322 	struct r1conf *conf = mddev->private;
2323 	int i;
2324 	int disks = conf->raid_disks * 2;
2325 	struct bio *wbio;
2326 
2327 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2328 		/* ouch - failed to read all of that. */
2329 		if (!fix_sync_read_error(r1_bio))
2330 			return;
2331 
2332 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2333 		process_checks(r1_bio);
2334 
2335 	/*
2336 	 * schedule writes
2337 	 */
2338 	atomic_set(&r1_bio->remaining, 1);
2339 	for (i = 0; i < disks ; i++) {
2340 		wbio = r1_bio->bios[i];
2341 		if (wbio->bi_end_io == NULL ||
2342 		    (wbio->bi_end_io == end_sync_read &&
2343 		     (i == r1_bio->read_disk ||
2344 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2345 			continue;
2346 		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2347 			abort_sync_write(mddev, r1_bio);
2348 			continue;
2349 		}
2350 
2351 		wbio->bi_opf = REQ_OP_WRITE;
2352 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2353 			wbio->bi_opf |= MD_FAILFAST;
2354 
2355 		wbio->bi_end_io = end_sync_write;
2356 		atomic_inc(&r1_bio->remaining);
2357 		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2358 
2359 		submit_bio_noacct(wbio);
2360 	}
2361 
2362 	put_sync_write_buf(r1_bio, 1);
2363 }
2364 
2365 /*
2366  * This is a kernel thread which:
2367  *
2368  *	1.	Retries failed read operations on working mirrors.
2369  *	2.	Updates the raid superblock when problems encounter.
2370  *	3.	Performs writes following reads for array synchronising.
2371  */
2372 
2373 static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2374 {
2375 	sector_t sect = r1_bio->sector;
2376 	int sectors = r1_bio->sectors;
2377 	int read_disk = r1_bio->read_disk;
2378 	struct mddev *mddev = conf->mddev;
2379 	struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2380 
2381 	if (exceed_read_errors(mddev, rdev)) {
2382 		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2383 		return;
2384 	}
2385 
2386 	while(sectors) {
2387 		int s = sectors;
2388 		int d = read_disk;
2389 		int success = 0;
2390 		int start;
2391 
2392 		if (s > (PAGE_SIZE>>9))
2393 			s = PAGE_SIZE >> 9;
2394 
2395 		do {
2396 			rdev = conf->mirrors[d].rdev;
2397 			if (rdev &&
2398 			    (test_bit(In_sync, &rdev->flags) ||
2399 			     (!test_bit(Faulty, &rdev->flags) &&
2400 			      rdev->recovery_offset >= sect + s)) &&
2401 			    rdev_has_badblock(rdev, sect, s) == 0) {
2402 				atomic_inc(&rdev->nr_pending);
2403 				if (sync_page_io(rdev, sect, s<<9,
2404 					 conf->tmppage, REQ_OP_READ, false))
2405 					success = 1;
2406 				rdev_dec_pending(rdev, mddev);
2407 				if (success)
2408 					break;
2409 			}
2410 
2411 			d++;
2412 			if (d == conf->raid_disks * 2)
2413 				d = 0;
2414 		} while (d != read_disk);
2415 
2416 		if (!success) {
2417 			/* Cannot read from anywhere - mark it bad */
2418 			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2419 			if (!rdev_set_badblocks(rdev, sect, s, 0))
2420 				md_error(mddev, rdev);
2421 			break;
2422 		}
2423 		/* write it back and re-read */
2424 		start = d;
2425 		while (d != read_disk) {
2426 			if (d==0)
2427 				d = conf->raid_disks * 2;
2428 			d--;
2429 			rdev = conf->mirrors[d].rdev;
2430 			if (rdev &&
2431 			    !test_bit(Faulty, &rdev->flags)) {
2432 				atomic_inc(&rdev->nr_pending);
2433 				r1_sync_page_io(rdev, sect, s,
2434 						conf->tmppage, REQ_OP_WRITE);
2435 				rdev_dec_pending(rdev, mddev);
2436 			}
2437 		}
2438 		d = start;
2439 		while (d != read_disk) {
2440 			if (d==0)
2441 				d = conf->raid_disks * 2;
2442 			d--;
2443 			rdev = conf->mirrors[d].rdev;
2444 			if (rdev &&
2445 			    !test_bit(Faulty, &rdev->flags)) {
2446 				atomic_inc(&rdev->nr_pending);
2447 				if (r1_sync_page_io(rdev, sect, s,
2448 						conf->tmppage, REQ_OP_READ)) {
2449 					atomic_add(s, &rdev->corrected_errors);
2450 					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
2451 						mdname(mddev), s,
2452 						(unsigned long long)(sect +
2453 								     rdev->data_offset),
2454 						rdev->bdev);
2455 				}
2456 				rdev_dec_pending(rdev, mddev);
2457 			}
2458 		}
2459 		sectors -= s;
2460 		sect += s;
2461 	}
2462 }
2463 
2464 static int narrow_write_error(struct r1bio *r1_bio, int i)
2465 {
2466 	struct mddev *mddev = r1_bio->mddev;
2467 	struct r1conf *conf = mddev->private;
2468 	struct md_rdev *rdev = conf->mirrors[i].rdev;
2469 
2470 	/* bio has the data to be written to device 'i' where
2471 	 * we just recently had a write error.
2472 	 * We repeatedly clone the bio and trim down to one block,
2473 	 * then try the write.  Where the write fails we record
2474 	 * a bad block.
2475 	 * It is conceivable that the bio doesn't exactly align with
2476 	 * blocks.  We must handle this somehow.
2477 	 *
2478 	 * We currently own a reference on the rdev.
2479 	 */
2480 
2481 	int block_sectors;
2482 	sector_t sector;
2483 	int sectors;
2484 	int sect_to_write = r1_bio->sectors;
2485 	int ok = 1;
2486 
2487 	if (rdev->badblocks.shift < 0)
2488 		return 0;
2489 
2490 	block_sectors = roundup(1 << rdev->badblocks.shift,
2491 				bdev_logical_block_size(rdev->bdev) >> 9);
2492 	sector = r1_bio->sector;
2493 	sectors = ((sector + block_sectors)
2494 		   & ~(sector_t)(block_sectors - 1))
2495 		- sector;
2496 
2497 	while (sect_to_write) {
2498 		struct bio *wbio;
2499 		if (sectors > sect_to_write)
2500 			sectors = sect_to_write;
2501 		/* Write at 'sector' for 'sectors'*/
2502 
2503 		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2504 			wbio = bio_alloc_clone(rdev->bdev,
2505 					       r1_bio->behind_master_bio,
2506 					       GFP_NOIO, &mddev->bio_set);
2507 		} else {
2508 			wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
2509 					       GFP_NOIO, &mddev->bio_set);
2510 		}
2511 
2512 		wbio->bi_opf = REQ_OP_WRITE;
2513 		wbio->bi_iter.bi_sector = r1_bio->sector;
2514 		wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2515 
2516 		bio_trim(wbio, sector - r1_bio->sector, sectors);
2517 		wbio->bi_iter.bi_sector += rdev->data_offset;
2518 
2519 		if (submit_bio_wait(wbio) < 0)
2520 			/* failure! */
2521 			ok = rdev_set_badblocks(rdev, sector,
2522 						sectors, 0)
2523 				&& ok;
2524 
2525 		bio_put(wbio);
2526 		sect_to_write -= sectors;
2527 		sector += sectors;
2528 		sectors = block_sectors;
2529 	}
2530 	return ok;
2531 }
2532 
2533 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2534 {
2535 	int m;
2536 	int s = r1_bio->sectors;
2537 	for (m = 0; m < conf->raid_disks * 2 ; m++) {
2538 		struct md_rdev *rdev = conf->mirrors[m].rdev;
2539 		struct bio *bio = r1_bio->bios[m];
2540 		if (bio->bi_end_io == NULL)
2541 			continue;
2542 		if (!bio->bi_status &&
2543 		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2544 			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2545 		}
2546 		if (bio->bi_status &&
2547 		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
2548 			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2549 				md_error(conf->mddev, rdev);
2550 		}
2551 	}
2552 	put_buf(r1_bio);
2553 	md_done_sync(conf->mddev, s, 1);
2554 }
2555 
2556 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2557 {
2558 	int m, idx;
2559 	bool fail = false;
2560 
2561 	for (m = 0; m < conf->raid_disks * 2 ; m++)
2562 		if (r1_bio->bios[m] == IO_MADE_GOOD) {
2563 			struct md_rdev *rdev = conf->mirrors[m].rdev;
2564 			rdev_clear_badblocks(rdev,
2565 					     r1_bio->sector,
2566 					     r1_bio->sectors, 0);
2567 			rdev_dec_pending(rdev, conf->mddev);
2568 		} else if (r1_bio->bios[m] != NULL) {
2569 			/* This drive got a write error.  We need to
2570 			 * narrow down and record precise write
2571 			 * errors.
2572 			 */
2573 			fail = true;
2574 			if (!narrow_write_error(r1_bio, m)) {
2575 				md_error(conf->mddev,
2576 					 conf->mirrors[m].rdev);
2577 				/* an I/O failed, we can't clear the bitmap */
2578 				set_bit(R1BIO_Degraded, &r1_bio->state);
2579 			}
2580 			rdev_dec_pending(conf->mirrors[m].rdev,
2581 					 conf->mddev);
2582 		}
2583 	if (fail) {
2584 		spin_lock_irq(&conf->device_lock);
2585 		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2586 		idx = sector_to_idx(r1_bio->sector);
2587 		atomic_inc(&conf->nr_queued[idx]);
2588 		spin_unlock_irq(&conf->device_lock);
2589 		/*
2590 		 * In case freeze_array() is waiting for condition
2591 		 * get_unqueued_pending() == extra to be true.
2592 		 */
2593 		wake_up(&conf->wait_barrier);
2594 		md_wakeup_thread(conf->mddev->thread);
2595 	} else {
2596 		if (test_bit(R1BIO_WriteError, &r1_bio->state))
2597 			close_write(r1_bio);
2598 		raid_end_bio_io(r1_bio);
2599 	}
2600 }
2601 
2602 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2603 {
2604 	struct mddev *mddev = conf->mddev;
2605 	struct bio *bio;
2606 	struct md_rdev *rdev;
2607 	sector_t sector;
2608 
2609 	clear_bit(R1BIO_ReadError, &r1_bio->state);
2610 	/* we got a read error. Maybe the drive is bad.  Maybe just
2611 	 * the block and we can fix it.
2612 	 * We freeze all other IO, and try reading the block from
2613 	 * other devices.  When we find one, we re-write
2614 	 * and check it that fixes the read error.
2615 	 * This is all done synchronously while the array is
2616 	 * frozen
2617 	 */
2618 
2619 	bio = r1_bio->bios[r1_bio->read_disk];
2620 	bio_put(bio);
2621 	r1_bio->bios[r1_bio->read_disk] = NULL;
2622 
2623 	rdev = conf->mirrors[r1_bio->read_disk].rdev;
2624 	if (mddev->ro == 0
2625 	    && !test_bit(FailFast, &rdev->flags)) {
2626 		freeze_array(conf, 1);
2627 		fix_read_error(conf, r1_bio);
2628 		unfreeze_array(conf);
2629 	} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2630 		md_error(mddev, rdev);
2631 	} else {
2632 		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2633 	}
2634 
2635 	rdev_dec_pending(rdev, conf->mddev);
2636 	sector = r1_bio->sector;
2637 	bio = r1_bio->master_bio;
2638 
2639 	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2640 	r1_bio->state = 0;
2641 	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2642 	allow_barrier(conf, sector);
2643 }
2644 
2645 static void raid1d(struct md_thread *thread)
2646 {
2647 	struct mddev *mddev = thread->mddev;
2648 	struct r1bio *r1_bio;
2649 	unsigned long flags;
2650 	struct r1conf *conf = mddev->private;
2651 	struct list_head *head = &conf->retry_list;
2652 	struct blk_plug plug;
2653 	int idx;
2654 
2655 	md_check_recovery(mddev);
2656 
2657 	if (!list_empty_careful(&conf->bio_end_io_list) &&
2658 	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2659 		LIST_HEAD(tmp);
2660 		spin_lock_irqsave(&conf->device_lock, flags);
2661 		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2662 			list_splice_init(&conf->bio_end_io_list, &tmp);
2663 		spin_unlock_irqrestore(&conf->device_lock, flags);
2664 		while (!list_empty(&tmp)) {
2665 			r1_bio = list_first_entry(&tmp, struct r1bio,
2666 						  retry_list);
2667 			list_del(&r1_bio->retry_list);
2668 			idx = sector_to_idx(r1_bio->sector);
2669 			atomic_dec(&conf->nr_queued[idx]);
2670 			if (mddev->degraded)
2671 				set_bit(R1BIO_Degraded, &r1_bio->state);
2672 			if (test_bit(R1BIO_WriteError, &r1_bio->state))
2673 				close_write(r1_bio);
2674 			raid_end_bio_io(r1_bio);
2675 		}
2676 	}
2677 
2678 	blk_start_plug(&plug);
2679 	for (;;) {
2680 
2681 		flush_pending_writes(conf);
2682 
2683 		spin_lock_irqsave(&conf->device_lock, flags);
2684 		if (list_empty(head)) {
2685 			spin_unlock_irqrestore(&conf->device_lock, flags);
2686 			break;
2687 		}
2688 		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2689 		list_del(head->prev);
2690 		idx = sector_to_idx(r1_bio->sector);
2691 		atomic_dec(&conf->nr_queued[idx]);
2692 		spin_unlock_irqrestore(&conf->device_lock, flags);
2693 
2694 		mddev = r1_bio->mddev;
2695 		conf = mddev->private;
2696 		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2697 			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2698 			    test_bit(R1BIO_WriteError, &r1_bio->state))
2699 				handle_sync_write_finished(conf, r1_bio);
2700 			else
2701 				sync_request_write(mddev, r1_bio);
2702 		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2703 			   test_bit(R1BIO_WriteError, &r1_bio->state))
2704 			handle_write_finished(conf, r1_bio);
2705 		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2706 			handle_read_error(conf, r1_bio);
2707 		else
2708 			WARN_ON_ONCE(1);
2709 
2710 		cond_resched();
2711 		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2712 			md_check_recovery(mddev);
2713 	}
2714 	blk_finish_plug(&plug);
2715 }
2716 
2717 static int init_resync(struct r1conf *conf)
2718 {
2719 	int buffs;
2720 
2721 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2722 	BUG_ON(mempool_initialized(&conf->r1buf_pool));
2723 
2724 	return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2725 			    r1buf_pool_free, conf->poolinfo);
2726 }
2727 
2728 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2729 {
2730 	struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2731 	struct resync_pages *rps;
2732 	struct bio *bio;
2733 	int i;
2734 
2735 	for (i = conf->poolinfo->raid_disks; i--; ) {
2736 		bio = r1bio->bios[i];
2737 		rps = bio->bi_private;
2738 		bio_reset(bio, NULL, 0);
2739 		bio->bi_private = rps;
2740 	}
2741 	r1bio->master_bio = NULL;
2742 	return r1bio;
2743 }
2744 
2745 /*
2746  * perform a "sync" on one "block"
2747  *
2748  * We need to make sure that no normal I/O request - particularly write
2749  * requests - conflict with active sync requests.
2750  *
2751  * This is achieved by tracking pending requests and a 'barrier' concept
2752  * that can be installed to exclude normal IO requests.
2753  */
2754 
2755 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2756 				   sector_t max_sector, int *skipped)
2757 {
2758 	struct r1conf *conf = mddev->private;
2759 	struct r1bio *r1_bio;
2760 	struct bio *bio;
2761 	sector_t nr_sectors;
2762 	int disk = -1;
2763 	int i;
2764 	int wonly = -1;
2765 	int write_targets = 0, read_targets = 0;
2766 	sector_t sync_blocks;
2767 	int still_degraded = 0;
2768 	int good_sectors = RESYNC_SECTORS;
2769 	int min_bad = 0; /* number of sectors that are bad in all devices */
2770 	int idx = sector_to_idx(sector_nr);
2771 	int page_idx = 0;
2772 
2773 	if (!mempool_initialized(&conf->r1buf_pool))
2774 		if (init_resync(conf))
2775 			return 0;
2776 
2777 	if (sector_nr >= max_sector) {
2778 		/* If we aborted, we need to abort the
2779 		 * sync on the 'current' bitmap chunk (there will
2780 		 * only be one in raid1 resync.
2781 		 * We can find the current addess in mddev->curr_resync
2782 		 */
2783 		if (mddev->curr_resync < max_sector) /* aborted */
2784 			md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2785 					   &sync_blocks, 1);
2786 		else /* completed sync */
2787 			conf->fullsync = 0;
2788 
2789 		md_bitmap_close_sync(mddev->bitmap);
2790 		close_sync(conf);
2791 
2792 		if (mddev_is_clustered(mddev)) {
2793 			conf->cluster_sync_low = 0;
2794 			conf->cluster_sync_high = 0;
2795 		}
2796 		return 0;
2797 	}
2798 
2799 	if (mddev->bitmap == NULL &&
2800 	    mddev->recovery_cp == MaxSector &&
2801 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2802 	    conf->fullsync == 0) {
2803 		*skipped = 1;
2804 		return max_sector - sector_nr;
2805 	}
2806 	/* before building a request, check if we can skip these blocks..
2807 	 * This call the bitmap_start_sync doesn't actually record anything
2808 	 */
2809 	if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2810 	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2811 		/* We can skip this block, and probably several more */
2812 		*skipped = 1;
2813 		return sync_blocks;
2814 	}
2815 
2816 	/*
2817 	 * If there is non-resync activity waiting for a turn, then let it
2818 	 * though before starting on this new sync request.
2819 	 */
2820 	if (atomic_read(&conf->nr_waiting[idx]))
2821 		schedule_timeout_uninterruptible(1);
2822 
2823 	/* we are incrementing sector_nr below. To be safe, we check against
2824 	 * sector_nr + two times RESYNC_SECTORS
2825 	 */
2826 
2827 	md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2828 		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2829 
2830 
2831 	if (raise_barrier(conf, sector_nr))
2832 		return 0;
2833 
2834 	r1_bio = raid1_alloc_init_r1buf(conf);
2835 
2836 	/*
2837 	 * If we get a correctably read error during resync or recovery,
2838 	 * we might want to read from a different device.  So we
2839 	 * flag all drives that could conceivably be read from for READ,
2840 	 * and any others (which will be non-In_sync devices) for WRITE.
2841 	 * If a read fails, we try reading from something else for which READ
2842 	 * is OK.
2843 	 */
2844 
2845 	r1_bio->mddev = mddev;
2846 	r1_bio->sector = sector_nr;
2847 	r1_bio->state = 0;
2848 	set_bit(R1BIO_IsSync, &r1_bio->state);
2849 	/* make sure good_sectors won't go across barrier unit boundary */
2850 	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2851 
2852 	for (i = 0; i < conf->raid_disks * 2; i++) {
2853 		struct md_rdev *rdev;
2854 		bio = r1_bio->bios[i];
2855 
2856 		rdev = conf->mirrors[i].rdev;
2857 		if (rdev == NULL ||
2858 		    test_bit(Faulty, &rdev->flags)) {
2859 			if (i < conf->raid_disks)
2860 				still_degraded = 1;
2861 		} else if (!test_bit(In_sync, &rdev->flags)) {
2862 			bio->bi_opf = REQ_OP_WRITE;
2863 			bio->bi_end_io = end_sync_write;
2864 			write_targets ++;
2865 		} else {
2866 			/* may need to read from here */
2867 			sector_t first_bad = MaxSector;
2868 			int bad_sectors;
2869 
2870 			if (is_badblock(rdev, sector_nr, good_sectors,
2871 					&first_bad, &bad_sectors)) {
2872 				if (first_bad > sector_nr)
2873 					good_sectors = first_bad - sector_nr;
2874 				else {
2875 					bad_sectors -= (sector_nr - first_bad);
2876 					if (min_bad == 0 ||
2877 					    min_bad > bad_sectors)
2878 						min_bad = bad_sectors;
2879 				}
2880 			}
2881 			if (sector_nr < first_bad) {
2882 				if (test_bit(WriteMostly, &rdev->flags)) {
2883 					if (wonly < 0)
2884 						wonly = i;
2885 				} else {
2886 					if (disk < 0)
2887 						disk = i;
2888 				}
2889 				bio->bi_opf = REQ_OP_READ;
2890 				bio->bi_end_io = end_sync_read;
2891 				read_targets++;
2892 			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2893 				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2894 				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2895 				/*
2896 				 * The device is suitable for reading (InSync),
2897 				 * but has bad block(s) here. Let's try to correct them,
2898 				 * if we are doing resync or repair. Otherwise, leave
2899 				 * this device alone for this sync request.
2900 				 */
2901 				bio->bi_opf = REQ_OP_WRITE;
2902 				bio->bi_end_io = end_sync_write;
2903 				write_targets++;
2904 			}
2905 		}
2906 		if (rdev && bio->bi_end_io) {
2907 			atomic_inc(&rdev->nr_pending);
2908 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2909 			bio_set_dev(bio, rdev->bdev);
2910 			if (test_bit(FailFast, &rdev->flags))
2911 				bio->bi_opf |= MD_FAILFAST;
2912 		}
2913 	}
2914 	if (disk < 0)
2915 		disk = wonly;
2916 	r1_bio->read_disk = disk;
2917 
2918 	if (read_targets == 0 && min_bad > 0) {
2919 		/* These sectors are bad on all InSync devices, so we
2920 		 * need to mark them bad on all write targets
2921 		 */
2922 		int ok = 1;
2923 		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2924 			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2925 				struct md_rdev *rdev = conf->mirrors[i].rdev;
2926 				ok = rdev_set_badblocks(rdev, sector_nr,
2927 							min_bad, 0
2928 					) && ok;
2929 			}
2930 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2931 		*skipped = 1;
2932 		put_buf(r1_bio);
2933 
2934 		if (!ok) {
2935 			/* Cannot record the badblocks, so need to
2936 			 * abort the resync.
2937 			 * If there are multiple read targets, could just
2938 			 * fail the really bad ones ???
2939 			 */
2940 			conf->recovery_disabled = mddev->recovery_disabled;
2941 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2942 			return 0;
2943 		} else
2944 			return min_bad;
2945 
2946 	}
2947 	if (min_bad > 0 && min_bad < good_sectors) {
2948 		/* only resync enough to reach the next bad->good
2949 		 * transition */
2950 		good_sectors = min_bad;
2951 	}
2952 
2953 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2954 		/* extra read targets are also write targets */
2955 		write_targets += read_targets-1;
2956 
2957 	if (write_targets == 0 || read_targets == 0) {
2958 		/* There is nowhere to write, so all non-sync
2959 		 * drives must be failed - so we are finished
2960 		 */
2961 		sector_t rv;
2962 		if (min_bad > 0)
2963 			max_sector = sector_nr + min_bad;
2964 		rv = max_sector - sector_nr;
2965 		*skipped = 1;
2966 		put_buf(r1_bio);
2967 		return rv;
2968 	}
2969 
2970 	if (max_sector > mddev->resync_max)
2971 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2972 	if (max_sector > sector_nr + good_sectors)
2973 		max_sector = sector_nr + good_sectors;
2974 	nr_sectors = 0;
2975 	sync_blocks = 0;
2976 	do {
2977 		struct page *page;
2978 		int len = PAGE_SIZE;
2979 		if (sector_nr + (len>>9) > max_sector)
2980 			len = (max_sector - sector_nr) << 9;
2981 		if (len == 0)
2982 			break;
2983 		if (sync_blocks == 0) {
2984 			if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2985 						  &sync_blocks, still_degraded) &&
2986 			    !conf->fullsync &&
2987 			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2988 				break;
2989 			if ((len >> 9) > sync_blocks)
2990 				len = sync_blocks<<9;
2991 		}
2992 
2993 		for (i = 0 ; i < conf->raid_disks * 2; i++) {
2994 			struct resync_pages *rp;
2995 
2996 			bio = r1_bio->bios[i];
2997 			rp = get_resync_pages(bio);
2998 			if (bio->bi_end_io) {
2999 				page = resync_fetch_page(rp, page_idx);
3000 
3001 				/*
3002 				 * won't fail because the vec table is big
3003 				 * enough to hold all these pages
3004 				 */
3005 				__bio_add_page(bio, page, len, 0);
3006 			}
3007 		}
3008 		nr_sectors += len>>9;
3009 		sector_nr += len>>9;
3010 		sync_blocks -= (len>>9);
3011 	} while (++page_idx < RESYNC_PAGES);
3012 
3013 	r1_bio->sectors = nr_sectors;
3014 
3015 	if (mddev_is_clustered(mddev) &&
3016 			conf->cluster_sync_high < sector_nr + nr_sectors) {
3017 		conf->cluster_sync_low = mddev->curr_resync_completed;
3018 		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
3019 		/* Send resync message */
3020 		md_cluster_ops->resync_info_update(mddev,
3021 				conf->cluster_sync_low,
3022 				conf->cluster_sync_high);
3023 	}
3024 
3025 	/* For a user-requested sync, we read all readable devices and do a
3026 	 * compare
3027 	 */
3028 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
3029 		atomic_set(&r1_bio->remaining, read_targets);
3030 		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
3031 			bio = r1_bio->bios[i];
3032 			if (bio->bi_end_io == end_sync_read) {
3033 				read_targets--;
3034 				md_sync_acct_bio(bio, nr_sectors);
3035 				if (read_targets == 1)
3036 					bio->bi_opf &= ~MD_FAILFAST;
3037 				submit_bio_noacct(bio);
3038 			}
3039 		}
3040 	} else {
3041 		atomic_set(&r1_bio->remaining, 1);
3042 		bio = r1_bio->bios[r1_bio->read_disk];
3043 		md_sync_acct_bio(bio, nr_sectors);
3044 		if (read_targets == 1)
3045 			bio->bi_opf &= ~MD_FAILFAST;
3046 		submit_bio_noacct(bio);
3047 	}
3048 	return nr_sectors;
3049 }
3050 
3051 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3052 {
3053 	if (sectors)
3054 		return sectors;
3055 
3056 	return mddev->dev_sectors;
3057 }
3058 
3059 static struct r1conf *setup_conf(struct mddev *mddev)
3060 {
3061 	struct r1conf *conf;
3062 	int i;
3063 	struct raid1_info *disk;
3064 	struct md_rdev *rdev;
3065 	int err = -ENOMEM;
3066 
3067 	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
3068 	if (!conf)
3069 		goto abort;
3070 
3071 	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
3072 				   sizeof(atomic_t), GFP_KERNEL);
3073 	if (!conf->nr_pending)
3074 		goto abort;
3075 
3076 	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
3077 				   sizeof(atomic_t), GFP_KERNEL);
3078 	if (!conf->nr_waiting)
3079 		goto abort;
3080 
3081 	conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
3082 				  sizeof(atomic_t), GFP_KERNEL);
3083 	if (!conf->nr_queued)
3084 		goto abort;
3085 
3086 	conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
3087 				sizeof(atomic_t), GFP_KERNEL);
3088 	if (!conf->barrier)
3089 		goto abort;
3090 
3091 	conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3092 					    mddev->raid_disks, 2),
3093 				GFP_KERNEL);
3094 	if (!conf->mirrors)
3095 		goto abort;
3096 
3097 	conf->tmppage = alloc_page(GFP_KERNEL);
3098 	if (!conf->tmppage)
3099 		goto abort;
3100 
3101 	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
3102 	if (!conf->poolinfo)
3103 		goto abort;
3104 	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
3105 	err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
3106 			   rbio_pool_free, conf->poolinfo);
3107 	if (err)
3108 		goto abort;
3109 
3110 	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3111 	if (err)
3112 		goto abort;
3113 
3114 	conf->poolinfo->mddev = mddev;
3115 
3116 	err = -EINVAL;
3117 	spin_lock_init(&conf->device_lock);
3118 	conf->raid_disks = mddev->raid_disks;
3119 	rdev_for_each(rdev, mddev) {
3120 		int disk_idx = rdev->raid_disk;
3121 
3122 		if (disk_idx >= conf->raid_disks || disk_idx < 0)
3123 			continue;
3124 
3125 		if (!raid1_add_conf(conf, rdev, disk_idx,
3126 				    test_bit(Replacement, &rdev->flags)))
3127 			goto abort;
3128 	}
3129 	conf->mddev = mddev;
3130 	INIT_LIST_HEAD(&conf->retry_list);
3131 	INIT_LIST_HEAD(&conf->bio_end_io_list);
3132 
3133 	spin_lock_init(&conf->resync_lock);
3134 	init_waitqueue_head(&conf->wait_barrier);
3135 
3136 	bio_list_init(&conf->pending_bio_list);
3137 	conf->recovery_disabled = mddev->recovery_disabled - 1;
3138 
3139 	err = -EIO;
3140 	for (i = 0; i < conf->raid_disks * 2; i++) {
3141 
3142 		disk = conf->mirrors + i;
3143 
3144 		if (i < conf->raid_disks &&
3145 		    disk[conf->raid_disks].rdev) {
3146 			/* This slot has a replacement. */
3147 			if (!disk->rdev) {
3148 				/* No original, just make the replacement
3149 				 * a recovering spare
3150 				 */
3151 				disk->rdev =
3152 					disk[conf->raid_disks].rdev;
3153 				disk[conf->raid_disks].rdev = NULL;
3154 			} else if (!test_bit(In_sync, &disk->rdev->flags))
3155 				/* Original is not in_sync - bad */
3156 				goto abort;
3157 		}
3158 
3159 		if (!disk->rdev ||
3160 		    !test_bit(In_sync, &disk->rdev->flags)) {
3161 			disk->head_position = 0;
3162 			if (disk->rdev &&
3163 			    (disk->rdev->saved_raid_disk < 0))
3164 				conf->fullsync = 1;
3165 		}
3166 	}
3167 
3168 	err = -ENOMEM;
3169 	rcu_assign_pointer(conf->thread,
3170 			   md_register_thread(raid1d, mddev, "raid1"));
3171 	if (!conf->thread)
3172 		goto abort;
3173 
3174 	return conf;
3175 
3176  abort:
3177 	if (conf) {
3178 		mempool_exit(&conf->r1bio_pool);
3179 		kfree(conf->mirrors);
3180 		safe_put_page(conf->tmppage);
3181 		kfree(conf->poolinfo);
3182 		kfree(conf->nr_pending);
3183 		kfree(conf->nr_waiting);
3184 		kfree(conf->nr_queued);
3185 		kfree(conf->barrier);
3186 		bioset_exit(&conf->bio_split);
3187 		kfree(conf);
3188 	}
3189 	return ERR_PTR(err);
3190 }
3191 
3192 static int raid1_set_limits(struct mddev *mddev)
3193 {
3194 	struct queue_limits lim;
3195 	int err;
3196 
3197 	md_init_stacking_limits(&lim);
3198 	lim.max_write_zeroes_sectors = 0;
3199 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
3200 	if (err) {
3201 		queue_limits_cancel_update(mddev->gendisk->queue);
3202 		return err;
3203 	}
3204 	return queue_limits_set(mddev->gendisk->queue, &lim);
3205 }
3206 
3207 static int raid1_run(struct mddev *mddev)
3208 {
3209 	struct r1conf *conf;
3210 	int i;
3211 	int ret;
3212 
3213 	if (mddev->level != 1) {
3214 		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3215 			mdname(mddev), mddev->level);
3216 		return -EIO;
3217 	}
3218 	if (mddev->reshape_position != MaxSector) {
3219 		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3220 			mdname(mddev));
3221 		return -EIO;
3222 	}
3223 
3224 	/*
3225 	 * copy the already verified devices into our private RAID1
3226 	 * bookkeeping area. [whatever we allocate in run(),
3227 	 * should be freed in raid1_free()]
3228 	 */
3229 	if (mddev->private == NULL)
3230 		conf = setup_conf(mddev);
3231 	else
3232 		conf = mddev->private;
3233 
3234 	if (IS_ERR(conf))
3235 		return PTR_ERR(conf);
3236 
3237 	if (!mddev_is_dm(mddev)) {
3238 		ret = raid1_set_limits(mddev);
3239 		if (ret)
3240 			return ret;
3241 	}
3242 
3243 	mddev->degraded = 0;
3244 	for (i = 0; i < conf->raid_disks; i++)
3245 		if (conf->mirrors[i].rdev == NULL ||
3246 		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3247 		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3248 			mddev->degraded++;
3249 	/*
3250 	 * RAID1 needs at least one disk in active
3251 	 */
3252 	if (conf->raid_disks - mddev->degraded < 1) {
3253 		md_unregister_thread(mddev, &conf->thread);
3254 		return -EINVAL;
3255 	}
3256 
3257 	if (conf->raid_disks - mddev->degraded == 1)
3258 		mddev->recovery_cp = MaxSector;
3259 
3260 	if (mddev->recovery_cp != MaxSector)
3261 		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3262 			mdname(mddev));
3263 	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3264 		mdname(mddev), mddev->raid_disks - mddev->degraded,
3265 		mddev->raid_disks);
3266 
3267 	/*
3268 	 * Ok, everything is just fine now
3269 	 */
3270 	rcu_assign_pointer(mddev->thread, conf->thread);
3271 	rcu_assign_pointer(conf->thread, NULL);
3272 	mddev->private = conf;
3273 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3274 
3275 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3276 
3277 	ret = md_integrity_register(mddev);
3278 	if (ret)
3279 		md_unregister_thread(mddev, &mddev->thread);
3280 	return ret;
3281 }
3282 
3283 static void raid1_free(struct mddev *mddev, void *priv)
3284 {
3285 	struct r1conf *conf = priv;
3286 
3287 	mempool_exit(&conf->r1bio_pool);
3288 	kfree(conf->mirrors);
3289 	safe_put_page(conf->tmppage);
3290 	kfree(conf->poolinfo);
3291 	kfree(conf->nr_pending);
3292 	kfree(conf->nr_waiting);
3293 	kfree(conf->nr_queued);
3294 	kfree(conf->barrier);
3295 	bioset_exit(&conf->bio_split);
3296 	kfree(conf);
3297 }
3298 
3299 static int raid1_resize(struct mddev *mddev, sector_t sectors)
3300 {
3301 	/* no resync is happening, and there is enough space
3302 	 * on all devices, so we can resize.
3303 	 * We need to make sure resync covers any new space.
3304 	 * If the array is shrinking we should possibly wait until
3305 	 * any io in the removed space completes, but it hardly seems
3306 	 * worth it.
3307 	 */
3308 	sector_t newsize = raid1_size(mddev, sectors, 0);
3309 	if (mddev->external_size &&
3310 	    mddev->array_sectors > newsize)
3311 		return -EINVAL;
3312 	if (mddev->bitmap) {
3313 		int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3314 		if (ret)
3315 			return ret;
3316 	}
3317 	md_set_array_sectors(mddev, newsize);
3318 	if (sectors > mddev->dev_sectors &&
3319 	    mddev->recovery_cp > mddev->dev_sectors) {
3320 		mddev->recovery_cp = mddev->dev_sectors;
3321 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3322 	}
3323 	mddev->dev_sectors = sectors;
3324 	mddev->resync_max_sectors = sectors;
3325 	return 0;
3326 }
3327 
3328 static int raid1_reshape(struct mddev *mddev)
3329 {
3330 	/* We need to:
3331 	 * 1/ resize the r1bio_pool
3332 	 * 2/ resize conf->mirrors
3333 	 *
3334 	 * We allocate a new r1bio_pool if we can.
3335 	 * Then raise a device barrier and wait until all IO stops.
3336 	 * Then resize conf->mirrors and swap in the new r1bio pool.
3337 	 *
3338 	 * At the same time, we "pack" the devices so that all the missing
3339 	 * devices have the higher raid_disk numbers.
3340 	 */
3341 	mempool_t newpool, oldpool;
3342 	struct pool_info *newpoolinfo;
3343 	struct raid1_info *newmirrors;
3344 	struct r1conf *conf = mddev->private;
3345 	int cnt, raid_disks;
3346 	unsigned long flags;
3347 	int d, d2;
3348 	int ret;
3349 
3350 	memset(&newpool, 0, sizeof(newpool));
3351 	memset(&oldpool, 0, sizeof(oldpool));
3352 
3353 	/* Cannot change chunk_size, layout, or level */
3354 	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3355 	    mddev->layout != mddev->new_layout ||
3356 	    mddev->level != mddev->new_level) {
3357 		mddev->new_chunk_sectors = mddev->chunk_sectors;
3358 		mddev->new_layout = mddev->layout;
3359 		mddev->new_level = mddev->level;
3360 		return -EINVAL;
3361 	}
3362 
3363 	if (!mddev_is_clustered(mddev))
3364 		md_allow_write(mddev);
3365 
3366 	raid_disks = mddev->raid_disks + mddev->delta_disks;
3367 
3368 	if (raid_disks < conf->raid_disks) {
3369 		cnt=0;
3370 		for (d= 0; d < conf->raid_disks; d++)
3371 			if (conf->mirrors[d].rdev)
3372 				cnt++;
3373 		if (cnt > raid_disks)
3374 			return -EBUSY;
3375 	}
3376 
3377 	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3378 	if (!newpoolinfo)
3379 		return -ENOMEM;
3380 	newpoolinfo->mddev = mddev;
3381 	newpoolinfo->raid_disks = raid_disks * 2;
3382 
3383 	ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
3384 			   rbio_pool_free, newpoolinfo);
3385 	if (ret) {
3386 		kfree(newpoolinfo);
3387 		return ret;
3388 	}
3389 	newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3390 					 raid_disks, 2),
3391 			     GFP_KERNEL);
3392 	if (!newmirrors) {
3393 		kfree(newpoolinfo);
3394 		mempool_exit(&newpool);
3395 		return -ENOMEM;
3396 	}
3397 
3398 	freeze_array(conf, 0);
3399 
3400 	/* ok, everything is stopped */
3401 	oldpool = conf->r1bio_pool;
3402 	conf->r1bio_pool = newpool;
3403 
3404 	for (d = d2 = 0; d < conf->raid_disks; d++) {
3405 		struct md_rdev *rdev = conf->mirrors[d].rdev;
3406 		if (rdev && rdev->raid_disk != d2) {
3407 			sysfs_unlink_rdev(mddev, rdev);
3408 			rdev->raid_disk = d2;
3409 			sysfs_unlink_rdev(mddev, rdev);
3410 			if (sysfs_link_rdev(mddev, rdev))
3411 				pr_warn("md/raid1:%s: cannot register rd%d\n",
3412 					mdname(mddev), rdev->raid_disk);
3413 		}
3414 		if (rdev)
3415 			newmirrors[d2++].rdev = rdev;
3416 	}
3417 	kfree(conf->mirrors);
3418 	conf->mirrors = newmirrors;
3419 	kfree(conf->poolinfo);
3420 	conf->poolinfo = newpoolinfo;
3421 
3422 	spin_lock_irqsave(&conf->device_lock, flags);
3423 	mddev->degraded += (raid_disks - conf->raid_disks);
3424 	spin_unlock_irqrestore(&conf->device_lock, flags);
3425 	conf->raid_disks = mddev->raid_disks = raid_disks;
3426 	mddev->delta_disks = 0;
3427 
3428 	unfreeze_array(conf);
3429 
3430 	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3431 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3432 	md_wakeup_thread(mddev->thread);
3433 
3434 	mempool_exit(&oldpool);
3435 	return 0;
3436 }
3437 
3438 static void raid1_quiesce(struct mddev *mddev, int quiesce)
3439 {
3440 	struct r1conf *conf = mddev->private;
3441 
3442 	if (quiesce)
3443 		freeze_array(conf, 0);
3444 	else
3445 		unfreeze_array(conf);
3446 }
3447 
3448 static void *raid1_takeover(struct mddev *mddev)
3449 {
3450 	/* raid1 can take over:
3451 	 *  raid5 with 2 devices, any layout or chunk size
3452 	 */
3453 	if (mddev->level == 5 && mddev->raid_disks == 2) {
3454 		struct r1conf *conf;
3455 		mddev->new_level = 1;
3456 		mddev->new_layout = 0;
3457 		mddev->new_chunk_sectors = 0;
3458 		conf = setup_conf(mddev);
3459 		if (!IS_ERR(conf)) {
3460 			/* Array must appear to be quiesced */
3461 			conf->array_frozen = 1;
3462 			mddev_clear_unsupported_flags(mddev,
3463 				UNSUPPORTED_MDDEV_FLAGS);
3464 		}
3465 		return conf;
3466 	}
3467 	return ERR_PTR(-EINVAL);
3468 }
3469 
3470 static struct md_personality raid1_personality =
3471 {
3472 	.name		= "raid1",
3473 	.level		= 1,
3474 	.owner		= THIS_MODULE,
3475 	.make_request	= raid1_make_request,
3476 	.run		= raid1_run,
3477 	.free		= raid1_free,
3478 	.status		= raid1_status,
3479 	.error_handler	= raid1_error,
3480 	.hot_add_disk	= raid1_add_disk,
3481 	.hot_remove_disk= raid1_remove_disk,
3482 	.spare_active	= raid1_spare_active,
3483 	.sync_request	= raid1_sync_request,
3484 	.resize		= raid1_resize,
3485 	.size		= raid1_size,
3486 	.check_reshape	= raid1_reshape,
3487 	.quiesce	= raid1_quiesce,
3488 	.takeover	= raid1_takeover,
3489 };
3490 
3491 static int __init raid_init(void)
3492 {
3493 	return register_md_personality(&raid1_personality);
3494 }
3495 
3496 static void raid_exit(void)
3497 {
3498 	unregister_md_personality(&raid1_personality);
3499 }
3500 
3501 module_init(raid_init);
3502 module_exit(raid_exit);
3503 MODULE_LICENSE("GPL");
3504 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3505 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3506 MODULE_ALIAS("md-raid1");
3507 MODULE_ALIAS("md-level-1");
3508