// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *      - bitmap marked during normal i/o
 *      - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"
#include "md-cluster.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))
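
/*
 * raid1 supports neither a write journal nor a partial parity log (PPL),
 * so these flags are dropped when e.g. a raid5 array is taken over to
 * raid1.
 */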

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
static void raid1_free(struct mddev *mddev, void *priv);

#define RAID_1_10_NAME "raid1"
#include "raid1-10.c"

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
		     START, LAST, static inline, raid1_rb);

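/*
 * INTERVAL_TREE_DEFINE() above generates the raid1_rb_insert(),
 * raid1_rb_remove(), raid1_rb_iter_first() and raid1_rb_iter_next()
 * helpers used below to track the sector ranges of in-flight writes.
 */
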
static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
				struct serial_info *si)
{
	unsigned long flags;
	int ret = 0;
	sector_t lo = r1_bio->sector;
	sector_t hi = lo + r1_bio->sectors - 1;
	int idx = sector_to_idx(r1_bio->sector);
	struct serial_in_rdev *serial = &rdev->serial[idx];
	struct serial_info *head_si;

	spin_lock_irqsave(&serial->serial_lock, flags);
	/* collision happened */
	head_si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
	if (head_si && head_si != si) {
		si->start = lo;
		si->last = hi;
		si->wnode_start = head_si->wnode_start;
		list_add_tail(&si->list_node, &head_si->waiters);
		ret = -EBUSY;
	} else if (!head_si) {
		si->start = lo;
		si->last = hi;
		si->wnode_start = si->start;
		raid1_rb_insert(si, &serial->serial_rb);
	}
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	return ret;
}

static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	struct mddev *mddev = rdev->mddev;
	struct serial_info *si;

	if (WARN_ON(!mddev->serial_info_pool))
		return;
	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
	INIT_LIST_HEAD(&si->waiters);
	INIT_LIST_HEAD(&si->list_node);
	init_completion(&si->ready);
	while (check_and_add_serial(rdev, r1_bio, si)) {
		wait_for_completion(&si->ready);
		reinit_completion(&si->ready);
	}
}

static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct serial_info *si, *iter_si;
	unsigned long flags;
	int found = 0;
	struct mddev *mddev = rdev->mddev;
	int idx = sector_to_idx(lo);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
	     si; si = raid1_rb_iter_next(si, lo, hi)) {
		if (si->start == lo && si->last == hi) {
			found = 1;
			break;
		}
	}
	if (found) {
		raid1_rb_remove(si, &serial->serial_rb);
		if (!list_empty(&si->waiters)) {
			list_for_each_entry(iter_si, &si->waiters, list_node) {
				if (iter_si->wnode_start == si->wnode_start) {
					list_del_init(&iter_si->list_node);
					list_splice_init(&si->waiters, &iter_si->waiters);
					raid1_rb_insert(iter_si, &serial->serial_rb);
					complete(&iter_si->ready);
					break;
				}
			}
		}
		mempool_free(si, mddev->serial_info_pool);
	} else {
		WARN(1, "The write IO is not recorded for serialization\n");
	}
	spin_unlock_irqrestore(&serial->serial_lock, flags);
}
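
/*
 * The serialization pairing is, roughly (sketch):
 *
 *	wait_for_serialization(rdev, r1_bio);	// before issuing the write
 *	...submit the write and let it complete...
 *	remove_serial(rdev, lo, hi);		// from the end_io path
 *
 * where [lo, hi] is the sector range covered by the write.
 */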

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf)
{
	int size = offsetof(struct r1bio, bios[conf->raid_disks * 2]);

	/* allocate a r1bio with room for raid_disks * 2 entries in the bios array */
	return kzalloc(size, gfp_flags);
}
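
/*
 * offsetof(struct r1bio, bios[n]) is the size of an r1bio whose trailing
 * bios[] array holds n pointers; n is raid_disks * 2 so that every disk
 * also has a slot for its replacement.
 */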

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
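
/*
 * With the 64KiB RESYNC_BLOCK_SIZE from raid1-10.c this works out to a
 * 2MiB (4096-sector) resync window and a 32MiB cluster resync window.
 */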

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r1conf *conf = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, conf);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_objs(struct resync_pages, conf->raid_disks * 2, gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = conf->raid_disks * 2; j-- ; ) {
		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
		if (!bio)
			goto out_free_bio;
		bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery))
		need_pages = conf->raid_disks * 2;
	else
		need_pages = 1;
	for (j = 0; j < conf->raid_disks * 2; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < conf->raid_disks * 2) {
		bio_uninit(r1_bio->bios[j]);
		kfree(r1_bio->bios[j]);
	}
	kfree(rps);

out_free_r1bio:
	rbio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct r1conf *conf = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = conf->raid_disks * 2; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_uninit(r1bio->bios[i]);
		kfree(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sector = r1_bio->sector;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}

	free_r1bio(r1_bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.  All I/Os, even write-behind writes, are done.
	 */
	allow_barrier(conf, sector);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}
static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate) {
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	} else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state)) {
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	} else if (!raid1_should_handle_error(bio)) {
		uptodate = 1;
	} else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		pr_err_ratelimited("md/raid1:%s: %pg: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   rdev->bdev,
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	struct mddev *mddev = r1_bio->mddev;

	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}

	if (test_bit(R1BIO_BehindIO, &r1_bio->state))
		mddev->bitmap_ops->end_behind_write(mddev);
	md_write_end(mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	sector_t lo = r1_bio->sector;
	sector_t hi = r1_bio->sector + r1_bio->sectors - 1;
	bool ignore_error = !raid1_should_handle_error(bio) ||
		(bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !ignore_error) {
		set_bit(WriteErrorSeen,	&rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
		}

		/*
		 * When the device is faulty, it is not necessary to
		 * handle write error.
		 */
		if (!test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_WriteError, &r1_bio->state);
		else {
			/* Finished with this branch */
			r1_bio->bios[mirror] = NULL;
			to_put = bio;
		}
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.)
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
		    !ignore_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(CollisionCheck, &rdev->flags))
			remove_serial(rdev, lo, hi);
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	} else if (test_bit(MD_SERIALIZE_POLICY, &rdev->mddev->flags))
		remove_serial(rdev, lo, hi);
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}
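
/*
 * E.g. with the 64MiB barrier unit from raid1.h (BARRIER_UNIT_SECTOR_BITS
 * == 17), a request starting 16 sectors before a unit boundary is clamped
 * to at most 16 sectors, so no request ever spans two barrier buckets.
 */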

static void update_read_sectors(struct r1conf *conf, int disk,
				sector_t this_sector, int len)
{
	struct raid1_info *info = &conf->mirrors[disk];

	atomic_inc(&info->rdev->nr_pending);
	if (info->next_seq_sect != this_sector)
		info->seq_start = this_sector;
	info->next_seq_sect = this_sector + len;
}
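
/*
 * Note: the nr_pending reference taken here is dropped again via
 * rdev_dec_pending() when the read completes; seq_start/next_seq_sect
 * feed the sequential-read heuristics in is_sequential() and
 * should_choose_next() below.
 */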

static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
			     int *max_sectors)
{
	sector_t this_sector = r1_bio->sector;
	int len = r1_bio->sectors;
	int disk;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		struct md_rdev *rdev;
		int read_len;

		if (r1_bio->bios[disk] == IO_BLOCKED)
			continue;

		rdev = conf->mirrors[disk].rdev;
		if (!rdev || test_bit(Faulty, &rdev->flags))
			continue;

		/* choose the first disk even if it has some bad blocks. */
		read_len = raid1_check_read_range(rdev, this_sector, &len);
		if (read_len > 0) {
			update_read_sectors(conf, disk, this_sector, read_len);
			*max_sectors = read_len;
			return disk;
		}
	}

	return -1;
}

static bool rdev_in_recovery(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	return !test_bit(In_sync, &rdev->flags) &&
	       rdev->recovery_offset < r1_bio->sector + r1_bio->sectors;
}

static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
			  int *max_sectors)
{
	sector_t this_sector = r1_bio->sector;
	int best_disk = -1;
	int best_len = 0;
	int disk;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		struct md_rdev *rdev;
		int len;
		int read_len;

		if (r1_bio->bios[disk] == IO_BLOCKED)
			continue;

		rdev = conf->mirrors[disk].rdev;
		if (!rdev || test_bit(Faulty, &rdev->flags) ||
		    rdev_in_recovery(rdev, r1_bio) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		/* keep track of the disk with the most readable sectors. */
		len = r1_bio->sectors;
		read_len = raid1_check_read_range(rdev, this_sector, &len);
		if (read_len > best_len) {
			best_disk = disk;
			best_len = read_len;
		}
	}

	if (best_disk != -1) {
		*max_sectors = best_len;
		update_read_sectors(conf, best_disk, this_sector, best_len);
	}

	return best_disk;
}

static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
			    int *max_sectors)
{
	sector_t this_sector = r1_bio->sector;
	int bb_disk = -1;
	int bb_read_len = 0;
	int disk;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		struct md_rdev *rdev;
		int len;
		int read_len;

		if (r1_bio->bios[disk] == IO_BLOCKED)
			continue;

		rdev = conf->mirrors[disk].rdev;
		if (!rdev || test_bit(Faulty, &rdev->flags) ||
		    !test_bit(WriteMostly, &rdev->flags) ||
		    rdev_in_recovery(rdev, r1_bio))
			continue;

		/* there are no bad blocks, we can use this disk */
		len = r1_bio->sectors;
		read_len = raid1_check_read_range(rdev, this_sector, &len);
		if (read_len == r1_bio->sectors) {
			*max_sectors = read_len;
			update_read_sectors(conf, disk, this_sector, read_len);
			return disk;
		}

		/*
		 * there are partial bad blocks, choose the rdev with largest
		 * read length.
		 */
		if (read_len > bb_read_len) {
			bb_disk = disk;
			bb_read_len = read_len;
		}
	}

	if (bb_disk != -1) {
		*max_sectors = bb_read_len;
		update_read_sectors(conf, bb_disk, this_sector, bb_read_len);
	}

	return bb_disk;
}

static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio)
{
	/* TODO: address issues with this check and concurrency. */
	return conf->mirrors[disk].next_seq_sect == r1_bio->sector ||
	       conf->mirrors[disk].head_position == r1_bio->sector;
}

/*
 * If buffered sequential IO size exceeds optimal iosize, check if there is an
 * idle disk. If yes, choose the idle disk.
 */
static bool should_choose_next(struct r1conf *conf, int disk)
{
	struct raid1_info *mirror = &conf->mirrors[disk];
	int opt_iosize;

	if (!test_bit(Nonrot, &mirror->rdev->flags))
		return false;

	opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9;
	return opt_iosize > 0 && mirror->seq_start != MaxSector &&
	       mirror->next_seq_sect > opt_iosize &&
	       mirror->next_seq_sect - opt_iosize >= mirror->seq_start;
}
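
/*
 * E.g. with a reported optimal I/O size of 256 sectors, a sequential
 * stream becomes eligible for hand-off once it has covered 256 sectors
 * past seq_start; choose_best_rdev() then prefers an idle disk if one
 * exists.
 */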

static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	if (!rdev || test_bit(Faulty, &rdev->flags))
		return false;

	if (rdev_in_recovery(rdev, r1_bio))
		return false;

	/* don't read from slow disk unless have to */
	if (test_bit(WriteMostly, &rdev->flags))
		return false;

	/* don't split IO for bad blocks unless have to */
	if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors))
		return false;

	return true;
}

struct read_balance_ctl {
	sector_t closest_dist;
	int closest_dist_disk;
	int min_pending;
	int min_pending_disk;
	int sequential_disk;
	int readable_disks;
};

static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio)
{
	int disk;
	struct read_balance_ctl ctl = {
		.closest_dist_disk      = -1,
		.closest_dist           = MaxSector,
		.min_pending_disk       = -1,
		.min_pending            = UINT_MAX,
		.sequential_disk	= -1,
	};

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		struct md_rdev *rdev;
		sector_t dist;
		unsigned int pending;

		if (r1_bio->bios[disk] == IO_BLOCKED)
			continue;

		rdev = conf->mirrors[disk].rdev;
		if (!rdev_readable(rdev, r1_bio))
			continue;

		/* At least two disks to choose from so failfast is OK */
		if (ctl.readable_disks++ == 1)
			set_bit(R1BIO_FailFast, &r1_bio->state);

		pending = atomic_read(&rdev->nr_pending);
		dist = abs(r1_bio->sector - conf->mirrors[disk].head_position);

		/* Don't change to another disk for sequential reads */
		if (is_sequential(conf, disk, r1_bio)) {
			if (!should_choose_next(conf, disk))
				return disk;

			/*
			 * Add 'pending' to avoid choosing this disk if
			 * there is other idle disk.
			 */
			pending++;
			/*
			 * If there is no other idle disk, this disk
			 * will be chosen.
			 */
			ctl.sequential_disk = disk;
		}

		if (ctl.min_pending > pending) {
			ctl.min_pending = pending;
			ctl.min_pending_disk = disk;
		}

		if (ctl.closest_dist > dist) {
			ctl.closest_dist = dist;
			ctl.closest_dist_disk = disk;
		}
	}

	/*
	 * sequential IO size exceeds optimal iosize, however, there is no other
	 * idle disk, so choose the sequential disk.
	 */
	if (ctl.sequential_disk != -1 && ctl.min_pending != 0)
		return ctl.sequential_disk;

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with the fewest pending requests
	 * even if that disk is rotational, which may or may not be optimal
	 * for arrays with mixed rotational/non-rotational disks depending on
	 * the workload.
	 */
	if (ctl.min_pending_disk != -1 &&
	    (READ_ONCE(conf->nonrot_disks) || ctl.min_pending == 0))
		return ctl.min_pending_disk;
	else
		return ctl.closest_dist_disk;
}

/*
 * This routine returns the disk from which the requested read should be done.
 *
 * 1) If resync is in progress, find the first usable disk and use it even if it
 * has some bad blocks.
 *
 * 2) If no resync is in progress, loop through all disks, skipping slow disks
 * and disks with bad blocks for now, and pick the best disk by the
 * load-balancing heuristics alone.
 *
 * 3) If we've made it this far, now look for disks with bad blocks and choose
 * the one with the largest number of readable sectors.
 *
 * 4) If we are all the way at the end, we have no choice but to use a disk even
 * if it is write-mostly.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio,
			int *max_sectors)
{
	int disk;

	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if (raid1_should_read_first(conf->mddev, r1_bio->sector,
				    r1_bio->sectors))
		return choose_first_rdev(conf, r1_bio, max_sectors);

	disk = choose_best_rdev(conf, r1_bio);
	if (disk >= 0) {
		*max_sectors = r1_bio->sectors;
		update_read_sectors(conf, disk, r1_bio->sector,
				    r1_bio->sectors);
		return disk;
	}

	/*
	 * If we are here it means we didn't find a perfectly good disk so
	 * now spend a bit more time trying to find one with the most good
	 * sectors.
	 */
	disk = choose_bb_rdev(conf, r1_bio, max_sectors);
	if (disk >= 0)
		return disk;

	return choose_slow_rdev(conf, r1_bio, max_sectors);
}

static void wake_up_barrier(struct r1conf *conf)
{
	if (wq_has_sleeper(&conf->wait_barrier))
		wake_up(&conf->wait_barrier);
}

static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	raid1_prepare_flush_writes(conf->mddev);
	wake_up_barrier(conf);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;

		raid1_submit_write(bio);
		bio = next;
		cond_resched();
	}
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perfectly safe to force
		 * us to go around the wait_event() loop again, so the warning
		 * is a false-positive.  Silence the warning by resetting
		 * thread state
		 */
		__set_current_state(TASK_RUNNING);
		blk_start_plug(&plug);
		flush_bio_list(conf, bio);
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 *
 * If resync/recovery is interrupted, returns -EINTR;
 * Otherwise, returns 0.
 */
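
/*
 * Typical pairings, roughly (sketch):
 *
 *	regular I/O:	wait_barrier(conf, sector, false);
 *			...submit the I/O...
 *			allow_barrier(conf, sector);	// on completion
 *
 *	resync I/O:	if (raise_barrier(conf, sector_nr))
 *				return;			// interrupted
 *			...submit the resync I/O...
 *			lower_barrier(conf, sector_nr);	// e.g. via put_buf()
 */
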
static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx] then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
	 * A memory barrier here to make sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    existing in corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the max
	 *    resync count allowed on the current I/O barrier bucket has
	 *    been reached.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    (!conf->array_frozen &&
			     !atomic_read(&conf->nr_pending[idx]) &&
			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
				test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
			    conf->resync_lock);

	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		atomic_dec(&conf->barrier[idx]);
		spin_unlock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		return -EINTR;
	}

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);

	return 0;
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}

static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
{
	bool ret = true;

	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
	 * conf->resync_lock when there is no barrier raised in same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier is necessary here to make sure conf->barrier[idx] won't be
	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
	 * will be a race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();

	/*
	 * Don't worry about checking two atomic_t variables at same time
	 * here. If during we check conf->barrier[idx], the array is
	 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is
	 * 0, it is safe to return and make the I/O continue. Because the
	 * array is frozen, all I/O returned here will eventually complete
	 * or be queued, no race will happen. See code comment in
	 * freeze_array().
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return ret;

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to be 0 at same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up_barrier(conf);
	/* Wait for the barrier in same barrier unit bucket to drop. */

	/* Return false when nowait flag is set */
	if (nowait) {
		ret = false;
	} else {
		wait_event_lock_irq(conf->wait_barrier,
				!conf->array_frozen &&
				!atomic_read(&conf->barrier[idx]),
				conf->resync_lock);
		atomic_inc(&conf->nr_pending[idx]);
	}

	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
	return ret;
}

static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
	int idx = sector_to_idx(sector_nr);
	bool ret = true;

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * conf->barrier[idx] here, memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return ret;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up_barrier(conf);
	/* Wait for array to be unfrozen */

	/* Return false when nowait flag is set */
	if (nowait) {
		ret = false;
	} else {
		wait_event_lock_irq(conf->wait_barrier,
				!conf->array_frozen,
				conf->resync_lock);
		atomic_inc(&conf->nr_pending[idx]);
	}

	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
	return ret;
}

static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
	int idx = sector_to_idx(sector_nr);

	return _wait_barrier(conf, idx, nowait);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up_barrier(conf);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}

static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() or wait_read_barrier(). The flying I/Os will
	 * either complete or be queued. When everything goes quiet, there
	 * are only queued I/Os left.
	 *
	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
	 * barrier bucket index which this I/O request hits. When all sync and
	 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
	 * of all conf->nr_queued[]. But normal I/O failure is an exception,
	 * in handle_read_error(), we may call freeze_array() before trying to
	 * fix the read error. In this case, the error read I/O is not queued,
	 * so get_unqueued_pending() == 1.
	 *
	 * Therefore before this function returns, we need to wait until
	 * get_unqueued_pending(conf) gets equal to extra. For
	 * normal I/O context, extra is 1, in all other situations extra is 0.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	mddev_add_trace_msg(conf->mddev, "raid1 wait freeze");
	wait_event_lock_irq_cmd(
		conf->wait_barrier,
		get_unqueued_pending(conf) == extra,
		conf->resync_lock,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}
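
/*
 * Typical use when fixing up a failed normal I/O, roughly (sketch):
 *
 *	freeze_array(conf, 1);	// the failed request itself is unqueued
 *	...repair, e.g. rewrite the range from a good mirror...
 *	unfreeze_array(conf);
 */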

static void alloc_behind_master_bio(struct r1bio *r1_bio,
				    struct bio *bio)
{
	int size = bio->bi_iter.bi_size;
	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i = 0;
	struct bio *behind_bio = NULL;

	behind_bio = bio_alloc_bioset(NULL, vcnt, bio->bi_opf, GFP_NOIO,
				      &r1_bio->mddev->bio_set);

	/* discard op, we don't support writezero/writesame yet */
	if (!bio_has_data(bio)) {
		behind_bio->bi_iter.bi_size = size;
		goto skip_copy;
	}

	while (i < vcnt && size) {
		struct page *page;
		int len = min_t(int, PAGE_SIZE, size);

		page = alloc_page(GFP_NOIO);
		if (unlikely(!page))
			goto free_pages;

		if (!bio_add_page(behind_bio, page, len, 0)) {
			put_page(page);
			goto free_pages;
		}

		size -= len;
		i++;
	}

	bio_copy_data(behind_bio, bio);
skip_copy:
	r1_bio->behind_master_bio = behind_bio;
	set_bit(R1BIO_BehindIO, &r1_bio->state);

	return;

free_pages:
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
	bio_free_pages(behind_bio);
	bio_put(behind_bio);
}

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		spin_unlock_irq(&conf->device_lock);
		wake_up_barrier(conf);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	flush_bio_list(conf, bio);
	kfree(plug);
}

static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
{
	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;
}

static inline struct r1bio *
alloc_r1bio(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
	memset(r1_bio, 0, offsetof(struct r1bio, bios[conf->raid_disks * 2]));
	init_r1bio(r1_bio, mddev, bio);
	return r1_bio;
}

static void raid1_read_request(struct mddev *mddev, struct bio *bio,
			       int max_read_sectors, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct bio *read_bio;
	int max_sectors;
	int rdisk;
	bool r1bio_existed = !!r1_bio;

	/*
	 * If r1_bio is set, we are blocking the raid1d thread
	 * so there is a tiny risk of deadlock.  So ask for
	 * emergency memory if needed.
	 */
	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;

	/*
	 * Still need barrier for READ in case that whole
	 * array is frozen.
	 */
	if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
				bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);
		return;
	}

	if (!r1_bio)
		r1_bio = alloc_r1bio(mddev, bio);
	else
		init_r1bio(r1_bio, mddev, bio);
	r1_bio->sectors = max_read_sectors;

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 */
	rdisk = read_balance(conf, r1_bio, &max_sectors);
	if (rdisk < 0) {
		/* couldn't find anywhere to read from */
		if (r1bio_existed)
			pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev),
					    conf->mirrors[r1_bio->read_disk].rdev->bdev,
					    r1_bio->sector);
		raid_end_bio_io(r1_bio);
		return;
	}
	mirror = conf->mirrors + rdisk;

	if (r1bio_existed)
		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %pg\n",
				    mdname(mddev),
				    (unsigned long long)r1_bio->sector,
				    mirror->rdev->bdev);

	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
	    md_bitmap_enabled(mddev, false)) {
		/*
		 * Reading from a write-mostly device must take care not to
		 * over-take any writes that are 'behind'
		 */
		mddev_add_trace_msg(mddev, "raid1 wait behind writes");
		mddev->bitmap_ops->wait_behind_writes(mddev);
	}

	if (max_sectors < bio_sectors(bio)) {
		bio = bio_submit_split_bioset(bio, max_sectors,
					      &conf->bio_split);
		if (!bio) {
			set_bit(R1BIO_Returned, &r1_bio->state);
			goto err_handle;
		}

		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	r1_bio->read_disk = rdisk;
	if (!r1bio_existed) {
		md_account_bio(mddev, &bio);
		r1_bio->master_bio = bio;
	}
	read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
				   &mddev->bio_set);
	read_bio->bi_opf &= ~REQ_NOWAIT;
	r1_bio->bios[rdisk] = read_bio;

	read_bio->bi_iter.bi_sector = r1_bio->sector +
		mirror->rdev->data_offset;
	read_bio->bi_end_io = raid1_end_read_request;
	if (test_bit(FailFast, &mirror->rdev->flags) &&
	    test_bit(R1BIO_FailFast, &r1_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r1_bio;
	mddev_trace_remap(mddev, read_bio, r1_bio->sector);
	submit_bio_noacct(read_bio);
	return;

err_handle:
	atomic_dec(&mirror->rdev->nr_pending);
	raid_end_bio_io(r1_bio);
}

static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	int disks = conf->raid_disks * 2;
	int i;

retry:
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;

		if (!rdev)
			continue;

		/* don't write here until the bad block is acknowledged */
		if (test_bit(WriteErrorSeen, &rdev->flags) &&
		    rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
				      bio_sectors(bio)) < 0)
			set_bit(BlockedBadBlocks, &rdev->flags);

		if (rdev_blocked(rdev)) {
			if (bio->bi_opf & REQ_NOWAIT)
				return false;

			mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
					    rdev->raid_disk);
			atomic_inc(&rdev->nr_pending);
			md_wait_for_blocked_rdev(rdev, rdev->mddev);
			goto retry;
		}
	}

	return true;
}

static void raid1_start_write_behind(struct mddev *mddev, struct r1bio *r1_bio,
				     struct bio *bio)
{
	unsigned long max_write_behind = mddev->bitmap_info.max_write_behind;
	struct md_bitmap_stats stats;
	int err;

	/* behind writes rely on the bitmap, see bitmap_operations */
	if (!md_bitmap_enabled(mddev, false))
		return;

	err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
	if (err)
		return;

	/* Don't do behind IO if reader is waiting, or there are too many. */
	if (!stats.behind_wait && stats.behind_writes < max_write_behind)
		alloc_behind_master_bio(r1_bio, bio);

	if (test_bit(R1BIO_BehindIO, &r1_bio->state))
		mddev->bitmap_ops->start_behind_write(mddev);
}

static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				int max_write_sectors)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	int i, disks, k;
	unsigned long flags;
	int first_clone;
	int max_sectors;
	bool write_behind = false;
	bool is_discard = (bio_op(bio) == REQ_OP_DISCARD);

	if (mddev_is_clustered(mddev) &&
	    mddev->cluster_ops->area_resyncing(mddev, WRITE,
		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {

		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return;
		}
		wait_event_idle(conf->wait_barrier,
				!mddev->cluster_ops->area_resyncing(mddev, WRITE,
								    bio->bi_iter.bi_sector,
								    bio_end_sector(bio)));
	}

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	if (!wait_barrier(conf, bio->bi_iter.bi_sector,
				bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);
		return;
	}

	if (!wait_blocked_rdev(mddev, bio)) {
		bio_wouldblock_error(bio);
		return;
	}

	r1_bio = alloc_r1bio(mddev, bio);
	r1_bio->sectors = max_write_sectors;

	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks.  Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */

	disks = conf->raid_disks * 2;
	max_sectors = r1_bio->sectors;
	for (i = 0;  i < disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;

		/*
		 * The write-behind io is only attempted on drives marked as
		 * write-mostly, which means we could allocate write behind
		 * bio later.
		 */
		if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags))
			write_behind = true;

		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags))
			continue;

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			sector_t bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				continue;
			}
			if (is_bad) {
				int good_sectors;

				/*
				 * We cannot atomically write this, so just
				 * error in that case. It could be possible to
				 * atomically write other mirrors, but the
				 * complexity of supporting that is not worth
				 * the benefit.
				 */
				if (bio->bi_opf & REQ_ATOMIC)
					goto err_handle;

				good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}

	/*
	 * When using a bitmap, we may call alloc_behind_master_bio below.
	 * alloc_behind_master_bio allocates a copy of the data payload a page
	 * at a time and thus needs a new bio that can fit the whole payload
	 * of this bio in page-sized chunks.
	 */
	if (write_behind && mddev->bitmap)
		max_sectors = min_t(int, max_sectors,
				    BIO_MAX_VECS * (PAGE_SIZE >> 9));
	if (max_sectors < bio_sectors(bio)) {
		bio = bio_submit_split_bioset(bio, max_sectors,
					      &conf->bio_split);
		if (!bio) {
			set_bit(R1BIO_Returned, &r1_bio->state);
			goto err_handle;
		}

		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	md_account_bio(mddev, &bio);
	r1_bio->master_bio = bio;
	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;

	for (i = 0; i < disks; i++) {
		struct bio *mbio = NULL;
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		if (!r1_bio->bios[i])
			continue;

		if (first_clone) {
			if (write_behind)
				raid1_start_write_behind(mddev, r1_bio, bio);
			first_clone = 0;
		}

		if (r1_bio->behind_master_bio) {
			mbio = bio_alloc_clone(rdev->bdev,
					       r1_bio->behind_master_bio,
					       GFP_NOIO, &mddev->bio_set);
			if (test_bit(CollisionCheck, &rdev->flags))
				wait_for_serialization(rdev, r1_bio);
			if (test_bit(WriteMostly, &rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		} else {
			mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
					       &mddev->bio_set);

			if (test_bit(MD_SERIALIZE_POLICY, &mddev->flags))
				wait_for_serialization(rdev, r1_bio);
		}

		mbio->bi_opf &= ~REQ_NOWAIT;
		r1_bio->bios[i] = mbio;

		mbio->bi_iter.bi_sector	= (r1_bio->sector + rdev->data_offset);
		mbio->bi_end_io	= raid1_end_write_request;
		if (test_bit(FailFast, &rdev->flags) &&
		    !test_bit(WriteMostly, &rdev->flags) &&
		    conf->raid_disks - mddev->degraded > 1)
			mbio->bi_opf |= MD_FAILFAST;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);
		mddev_trace_remap(mddev, mbio, r1_bio->sector);
		/* flush_pending_writes() needs access to the rdev so...*/
		mbio->bi_bdev = (void *)rdev;
		if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) {
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			spin_unlock_irqrestore(&conf->device_lock, flags);
			md_wakeup_thread(mddev->thread);
		}
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up_barrier(conf);
	return;
err_handle:
	for (k = 0; k < i; k++) {
		if (r1_bio->bios[k]) {
			rdev_dec_pending(conf->mirrors[k].rdev, mddev);
			r1_bio->bios[k] = NULL;
		}
	}

	raid_end_bio_io(r1_bio);
}
1704 
raid1_make_request(struct mddev * mddev,struct bio * bio)1705 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1706 {
1707 	sector_t sectors;
1708 
1709 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1710 	    && md_flush_request(mddev, bio))
1711 		return true;
1712 
1713 	/*
1714 	 * There is a limit to the maximum size, but
1715 	 * the read/write handler might find a lower limit
1716 	 * due to bad blocks.  To avoid multiple splits,
1717 	 * we pass the maximum number of sectors down
1718 	 * and let the lower level perform the split.
1719 	 */
1720 	sectors = align_to_barrier_unit_end(
1721 		bio->bi_iter.bi_sector, bio_sectors(bio));
1722 
1723 	if (bio_data_dir(bio) == READ)
1724 		raid1_read_request(mddev, bio, sectors, NULL);
1725 	else {
1726 		md_write_start(mddev, bio);
1727 		raid1_write_request(mddev, bio, sectors);
1728 	}
1729 	return true;
1730 }
1731 
raid1_status(struct seq_file * seq,struct mddev * mddev)1732 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1733 {
1734 	struct r1conf *conf = mddev->private;
1735 	int i;
1736 
1737 	lockdep_assert_held(&mddev->lock);
1738 
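	/* Emits, e.g., " [2/1] [U_]" for a two-disk mirror with one device
	 * in sync ('U' = in sync, '_' = missing or failed).
	 */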
1739 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1740 		   conf->raid_disks - mddev->degraded);
1741 	for (i = 0; i < conf->raid_disks; i++) {
1742 		struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
1743 
1744 		seq_printf(seq, "%s",
1745 			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1746 	}
1747 	seq_printf(seq, "]");
1748 }
1749 
1750 /**
1751  * raid1_error() - RAID1 error handler.
1752  * @mddev: affected md device.
1753  * @rdev: member device to fail.
1754  *
1755  * The routine acknowledges &rdev failure and determines new @mddev state.
1756  * If the array has failed, then:
1757  *	- &MD_BROKEN flag is set in &mddev->flags.
1758  *	- recovery is disabled.
1759  * Otherwise, it must be degraded:
1760  *	- recovery is interrupted.
1761  *	- &mddev->degraded is bumped.
1762  *
1763  * @rdev is marked as &Faulty, except when the array is failed and
1764  * MD_FAILLAST_DEV is not set.
1765  */
raid1_error(struct mddev * mddev,struct md_rdev * rdev)1766 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1767 {
1768 	struct r1conf *conf = mddev->private;
1769 	unsigned long flags;
1770 
1771 	spin_lock_irqsave(&conf->device_lock, flags);
1772 
1773 	if (test_bit(In_sync, &rdev->flags) &&
1774 	    (conf->raid_disks - mddev->degraded) == 1) {
1775 		set_bit(MD_BROKEN, &mddev->flags);
1776 
1777 		if (!test_bit(MD_FAILLAST_DEV, &mddev->flags)) {
1778 			spin_unlock_irqrestore(&conf->device_lock, flags);
1779 			return;
1780 		}
1781 	}
1782 	set_bit(Blocked, &rdev->flags);
1783 	if (test_and_clear_bit(In_sync, &rdev->flags))
1784 		mddev->degraded++;
1785 	set_bit(Faulty, &rdev->flags);
1786 	spin_unlock_irqrestore(&conf->device_lock, flags);
1787 	/*
1788 	 * if recovery is running, make sure it aborts.
1789 	 */
1790 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1791 	set_mask_bits(&mddev->sb_flags, 0,
1792 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1793 	pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
1794 		"md/raid1:%s: Operation continuing on %d devices.\n",
1795 		mdname(mddev), rdev->bdev,
1796 		mdname(mddev), conf->raid_disks - mddev->degraded);
1797 }
1798 
print_conf(struct r1conf * conf)1799 static void print_conf(struct r1conf *conf)
1800 {
1801 	int i;
1802 
1803 	pr_debug("RAID1 conf printout:\n");
1804 	if (!conf) {
1805 		pr_debug("(!conf)\n");
1806 		return;
1807 	}
1808 	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1809 		 conf->raid_disks);
1810 
1811 	lockdep_assert_held(&conf->mddev->reconfig_mutex);
1812 	for (i = 0; i < conf->raid_disks; i++) {
1813 		struct md_rdev *rdev = conf->mirrors[i].rdev;
1814 		if (rdev)
1815 			pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
1816 				 i, !test_bit(In_sync, &rdev->flags),
1817 				 !test_bit(Faulty, &rdev->flags),
1818 				 rdev->bdev);
1819 	}
1820 }
1821 
close_sync(struct r1conf * conf)1822 static void close_sync(struct r1conf *conf)
1823 {
1824 	int idx;
1825 
1826 	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1827 		_wait_barrier(conf, idx, false);
1828 		_allow_barrier(conf, idx);
1829 	}
1830 
1831 	mempool_exit(&conf->r1buf_pool);
1832 }
1833 
raid1_spare_active(struct mddev * mddev)1834 static int raid1_spare_active(struct mddev *mddev)
1835 {
1836 	int i;
1837 	struct r1conf *conf = mddev->private;
1838 	int count = 0;
1839 	unsigned long flags;
1840 
1841 	/*
1842 	 * Find all failed disks within the RAID1 configuration
1843 	 * and mark them readable.
1844 	 * Called under mddev lock, so rcu protection not needed.
1845 	 * device_lock used to avoid races with raid1_end_read_request
1846 	 * which expects 'In_sync' flags and ->degraded to be consistent.
1847 	 */
1848 	spin_lock_irqsave(&conf->device_lock, flags);
1849 	for (i = 0; i < conf->raid_disks; i++) {
1850 		struct md_rdev *rdev = conf->mirrors[i].rdev;
1851 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1852 		if (repl
1853 		    && !test_bit(Candidate, &repl->flags)
1854 		    && repl->recovery_offset == MaxSector
1855 		    && !test_bit(Faulty, &repl->flags)
1856 		    && !test_and_set_bit(In_sync, &repl->flags)) {
1857 			/* replacement has just become active */
1858 			if (!rdev ||
1859 			    !test_and_clear_bit(In_sync, &rdev->flags))
1860 				count++;
1861 			if (rdev) {
1862 				/* Replaced device not technically
1863 				 * faulty, but we need to be sure
1864 				 * it gets removed and never re-added
1865 				 */
1866 				set_bit(Faulty, &rdev->flags);
1867 				sysfs_notify_dirent_safe(
1868 					rdev->sysfs_state);
1869 			}
1870 		}
1871 		if (rdev
1872 		    && rdev->recovery_offset == MaxSector
1873 		    && !test_bit(Faulty, &rdev->flags)
1874 		    && !test_and_set_bit(In_sync, &rdev->flags)) {
1875 			count++;
1876 			sysfs_notify_dirent_safe(rdev->sysfs_state);
1877 		}
1878 	}
1879 	mddev->degraded -= count;
1880 	spin_unlock_irqrestore(&conf->device_lock, flags);
1881 
1882 	print_conf(conf);
1883 	return count;
1884 }
1885 
raid1_add_conf(struct r1conf * conf,struct md_rdev * rdev,int disk,bool replacement)1886 static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk,
1887 			   bool replacement)
1888 {
1889 	struct raid1_info *info = conf->mirrors + disk;
1890 
1891 	if (replacement)
1892 		info += conf->raid_disks;
1893 
1894 	if (info->rdev)
1895 		return false;
1896 
1897 	if (!bdev_rot(rdev->bdev)) {
1898 		set_bit(Nonrot, &rdev->flags);
1899 		WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1);
1900 	}
1901 
1902 	rdev->raid_disk = disk;
1903 	info->head_position = 0;
1904 	info->seq_start = MaxSector;
1905 	WRITE_ONCE(info->rdev, rdev);
1906 
1907 	return true;
1908 }
1909 
raid1_remove_conf(struct r1conf * conf,int disk)1910 static bool raid1_remove_conf(struct r1conf *conf, int disk)
1911 {
1912 	struct raid1_info *info = conf->mirrors + disk;
1913 	struct md_rdev *rdev = info->rdev;
1914 
1915 	if (!rdev || test_bit(In_sync, &rdev->flags) ||
1916 	    atomic_read(&rdev->nr_pending))
1917 		return false;
1918 
1919 	/* Only remove non-faulty devices if recovery is not possible. */
1920 	if (!test_bit(Faulty, &rdev->flags) &&
1921 	    rdev->mddev->degraded < conf->raid_disks)
1922 		return false;
1923 
1924 	if (test_and_clear_bit(Nonrot, &rdev->flags))
1925 		WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1);
1926 
1927 	WRITE_ONCE(info->rdev, NULL);
1928 	return true;
1929 }
1930 
raid1_add_disk(struct mddev * mddev,struct md_rdev * rdev)1931 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1932 {
1933 	struct r1conf *conf = mddev->private;
1934 	int err = -EEXIST;
1935 	int mirror = 0, repl_slot = -1;
1936 	struct raid1_info *p;
1937 	int first = 0;
1938 	int last = conf->raid_disks - 1;
1939 
1940 	if (rdev->raid_disk >= 0)
1941 		first = last = rdev->raid_disk;
1942 
1943 	/*
1944 	 * find the disk ... but prefer rdev->saved_raid_disk
1945 	 * if possible.
1946 	 */
1947 	if (rdev->saved_raid_disk >= 0 &&
1948 	    rdev->saved_raid_disk >= first &&
1949 	    rdev->saved_raid_disk < conf->raid_disks &&
1950 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1951 		first = last = rdev->saved_raid_disk;
1952 
1953 	for (mirror = first; mirror <= last; mirror++) {
1954 		p = conf->mirrors + mirror;
1955 		if (!p->rdev) {
1956 			err = mddev_stack_new_rdev(mddev, rdev);
1957 			if (err)
1958 				return err;
1959 
1960 			raid1_add_conf(conf, rdev, mirror, false);
1961 			/* As all devices are equivalent, we don't need a full recovery
1962 			 * if this device was recently a member of the array.
1963 			 */
1964 			if (rdev->saved_raid_disk < 0)
1965 				conf->fullsync = 1;
1966 			break;
1967 		}
1968 		if (test_bit(WantReplacement, &p->rdev->flags) &&
1969 		    p[conf->raid_disks].rdev == NULL && repl_slot < 0)
1970 			repl_slot = mirror;
1971 	}
1972 
1973 	if (err && repl_slot >= 0) {
1974 		/* Add this device as a replacement */
1975 		clear_bit(In_sync, &rdev->flags);
1976 		set_bit(Replacement, &rdev->flags);
1977 		raid1_add_conf(conf, rdev, repl_slot, true);
1978 		err = 0;
1979 		conf->fullsync = 1;
1980 	}
1981 
1982 	print_conf(conf);
1983 	return err;
1984 }
1985 
raid1_remove_disk(struct mddev * mddev,struct md_rdev * rdev)1986 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1987 {
1988 	struct r1conf *conf = mddev->private;
1989 	int err = 0;
1990 	int number = rdev->raid_disk;
1991 	struct raid1_info *p = conf->mirrors + number;
1992 
1993 	if (unlikely(number >= conf->raid_disks))
1994 		goto abort;
1995 
1996 	if (rdev != p->rdev) {
1997 		number += conf->raid_disks;
1998 		p = conf->mirrors + number;
1999 	}
2000 
2001 	print_conf(conf);
2002 	if (rdev == p->rdev) {
2003 		if (!raid1_remove_conf(conf, number)) {
2004 			err = -EBUSY;
2005 			goto abort;
2006 		}
2007 
2008 		if (number < conf->raid_disks &&
2009 		    conf->mirrors[conf->raid_disks + number].rdev) {
2010 			/* We just removed a device that is being replaced.
2011 			 * Move down the replacement.  We drain all IO before
2012 			 * doing this to avoid confusion.
2013 			 */
2014 			struct md_rdev *repl =
2015 				conf->mirrors[conf->raid_disks + number].rdev;
2016 			freeze_array(conf, 0);
2017 			if (atomic_read(&repl->nr_pending)) {
2018 				/* Some queued IO on the retry_list still holds
2019 				 * a reference to repl, so we cannot set the
2020 				 * replacement to NULL without risking a NULL
2021 				 * pointer dereference in sync_request_write
2022 				 * and handle_write_finished.
2023 				 */
2024 				err = -EBUSY;
2025 				unfreeze_array(conf);
2026 				goto abort;
2027 			}
2028 			clear_bit(Replacement, &repl->flags);
2029 			WRITE_ONCE(p->rdev, repl);
2030 			conf->mirrors[conf->raid_disks + number].rdev = NULL;
2031 			unfreeze_array(conf);
2032 		}
2033 
2034 		clear_bit(WantReplacement, &rdev->flags);
2035 		err = md_integrity_register(mddev);
2036 	}
2037 abort:
2038 
2039 	print_conf(conf);
2040 	return err;
2041 }
2042 
end_sync_read(struct bio * bio)2043 static void end_sync_read(struct bio *bio)
2044 {
2045 	struct r1bio *r1_bio = get_resync_r1bio(bio);
2046 
2047 	update_head_pos(r1_bio->read_disk, r1_bio);
2048 
2049 	/*
2050 	 * we have read a block, now it needs to be re-written,
2051 	 * or re-read if the read failed.
2052 	 * We don't do much here, just schedule handling by raid1d
2053 	 */
2054 	if (!bio->bi_status)
2055 		set_bit(R1BIO_Uptodate, &r1_bio->state);
2056 
2057 	if (atomic_dec_and_test(&r1_bio->remaining))
2058 		reschedule_retry(r1_bio);
2059 }
2060 
abort_sync_write(struct mddev * mddev,struct r1bio * r1_bio)2061 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
2062 {
2063 	sector_t sync_blocks = 0;
2064 	sector_t s = r1_bio->sector;
2065 	long sectors_to_go = r1_bio->sectors;
2066 
2067 	/* make sure these bits don't get cleared. */
2068 	do {
2069 		md_bitmap_end_sync(mddev, s, &sync_blocks);
2070 		s += sync_blocks;
2071 		sectors_to_go -= sync_blocks;
2072 	} while (sectors_to_go > 0);
2073 }
2074 
put_sync_write_buf(struct r1bio * r1_bio)2075 static void put_sync_write_buf(struct r1bio *r1_bio)
2076 {
2077 	if (atomic_dec_and_test(&r1_bio->remaining)) {
2078 		struct mddev *mddev = r1_bio->mddev;
2079 		int s = r1_bio->sectors;
2080 
2081 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2082 		    test_bit(R1BIO_WriteError, &r1_bio->state))
2083 			reschedule_retry(r1_bio);
2084 		else {
2085 			put_buf(r1_bio);
2086 			md_done_sync(mddev, s);
2087 		}
2088 	}
2089 }
2090 
end_sync_write(struct bio * bio)2091 static void end_sync_write(struct bio *bio)
2092 {
2093 	struct r1bio *r1_bio = get_resync_r1bio(bio);
2094 	struct mddev *mddev = r1_bio->mddev;
2095 	struct r1conf *conf = mddev->private;
2096 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
2097 
2098 	if (bio->bi_status) {
2099 		abort_sync_write(mddev, r1_bio);
2100 		set_bit(WriteErrorSeen, &rdev->flags);
2101 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2102 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2104 		set_bit(R1BIO_WriteError, &r1_bio->state);
2105 	} else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
2106 		   !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev,
2107 				      r1_bio->sector, r1_bio->sectors)) {
2108 		set_bit(R1BIO_MadeGood, &r1_bio->state);
2109 	}
2110 
2111 	put_sync_write_buf(r1_bio);
2112 }
2113 
r1_sync_page_io(struct md_rdev * rdev,sector_t sector,int sectors,struct page * page,blk_opf_t rw)2114 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
2115 			   int sectors, struct page *page, blk_opf_t rw)
2116 {
2117 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2118 		/* success */
2119 		return 1;
2120 	if (rw == REQ_OP_WRITE) {
2121 		set_bit(WriteErrorSeen, &rdev->flags);
2122 		if (!test_and_set_bit(WantReplacement,
2123 				      &rdev->flags))
2124 			set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2126 	}
2127 	/* need to record an error - either for the block or the device */
2128 	rdev_set_badblocks(rdev, sector, sectors, 0);
2129 	return 0;
2130 }
2131 
fix_sync_read_error(struct r1bio * r1_bio)2132 static int fix_sync_read_error(struct r1bio *r1_bio)
2133 {
2134 	/* Try some synchronous reads of other devices to get
2135 	 * good data, much like with normal read errors.  Only
2136 	 * read into the pages we already have so we don't
2137 	 * need to re-issue the read request.
2138 	 * We don't need to freeze the array, because being in an
2139 	 * active sync request, there is no normal IO, and
2140 	 * no overlapping syncs.
2141 	 * We don't need to check is_badblock() again as we
2142 	 * made sure that anything with a bad block in range
2143 	 * will have bi_end_io clear.
2144 	 */
2145 	struct mddev *mddev = r1_bio->mddev;
2146 	struct r1conf *conf = mddev->private;
2147 	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
2148 	struct page **pages = get_resync_pages(bio)->pages;
2149 	sector_t sect = r1_bio->sector;
2150 	int sectors = r1_bio->sectors;
2151 	int idx = 0;
2152 	struct md_rdev *rdev;
2153 
2154 	rdev = conf->mirrors[r1_bio->read_disk].rdev;
2155 	if (test_bit(FailFast, &rdev->flags)) {
2156 		/* Don't try recovering from here - just fail it
2157 		 * ... unless it is the last working device of course */
2158 		md_error(mddev, rdev);
2159 		if (test_bit(Faulty, &rdev->flags))
2160 			/* Don't try to read from here, but make sure
2161 			 * put_buf does it's thing
2162 			 * put_buf does its thing
2163 			bio->bi_end_io = end_sync_write;
2164 	}
2165 
2166 	while (sectors) {
2167 		int s = sectors;
2168 		int d = r1_bio->read_disk;
2169 		int success = 0;
2170 		int start;
2171 
2172 		if (s > (PAGE_SIZE>>9))
2173 			s = PAGE_SIZE >> 9;
2174 		do {
2175 			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2176 				/* No rcu protection needed here: devices
2177 				 * can only be removed when no resync is
2178 				 * active, and resync is currently active
2179 				 */
2180 				rdev = conf->mirrors[d].rdev;
2181 				if (sync_page_io(rdev, sect, s<<9,
2182 						 pages[idx],
2183 						 REQ_OP_READ, false)) {
2184 					success = 1;
2185 					break;
2186 				}
2187 			}
2188 			d++;
2189 			if (d == conf->raid_disks * 2)
2190 				d = 0;
2191 		} while (!success && d != r1_bio->read_disk);
2192 
2193 		if (!success) {
2194 			int abort = 0;
2195 			/* Cannot read from anywhere, this block is lost.
2196 			 * Record a bad block on each device.  If that doesn't
2197 			 * work just disable and interrupt the recovery.
2198 			 * Don't fail devices as that won't really help.
2199 			 */
2200 			pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
2201 					    mdname(mddev), bio->bi_bdev,
2202 					    (unsigned long long)r1_bio->sector);
2203 			for (d = 0; d < conf->raid_disks * 2; d++) {
2204 				rdev = conf->mirrors[d].rdev;
2205 				if (!rdev || test_bit(Faulty, &rdev->flags))
2206 					continue;
2207 				if (!rdev_set_badblocks(rdev, sect, s, 0))
2208 					abort = 1;
2209 			}
2210 			if (abort)
2211 				return 0;
2212 
2213 			/* Try next page */
2214 			sectors -= s;
2215 			sect += s;
2216 			idx++;
2217 			continue;
2218 		}
2219 
2220 		start = d;
2221 		/* write it back and re-read */
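		/* Walk backwards from 'start' (the disk we read successfully),
		 * wrapping from slot 0 around to raid_disks * 2 - 1, ending at
		 * read_disk; the successful disk itself is skipped.
		 */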
2222 		while (d != r1_bio->read_disk) {
2223 			if (d == 0)
2224 				d = conf->raid_disks * 2;
2225 			d--;
2226 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2227 				continue;
2228 			rdev = conf->mirrors[d].rdev;
2229 			if (r1_sync_page_io(rdev, sect, s,
2230 					    pages[idx],
2231 					    REQ_OP_WRITE) == 0) {
2232 				r1_bio->bios[d]->bi_end_io = NULL;
2233 				rdev_dec_pending(rdev, mddev);
2234 			}
2235 		}
2236 		d = start;
2237 		while (d != r1_bio->read_disk) {
2238 			if (d == 0)
2239 				d = conf->raid_disks * 2;
2240 			d--;
2241 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2242 				continue;
2243 			rdev = conf->mirrors[d].rdev;
2244 			if (r1_sync_page_io(rdev, sect, s,
2245 					    pages[idx],
2246 					    REQ_OP_READ) != 0)
2247 				atomic_add(s, &rdev->corrected_errors);
2248 		}
2249 		sectors -= s;
2250 		sect += s;
2251 		idx++;
2252 	}
2253 	set_bit(R1BIO_Uptodate, &r1_bio->state);
2254 	bio->bi_status = 0;
2255 	return 1;
2256 }
2257 
process_checks(struct r1bio * r1_bio)2258 static void process_checks(struct r1bio *r1_bio)
2259 {
2260 	/* We have read all readable devices.  If we haven't
2261 	 * got the block, then there is no hope left.
2262 	 * If we have, then we want to do a comparison
2263 	 * and skip the write if everything is the same.
2264 	 * If any blocks failed to read, then we need to
2265 	 * attempt an over-write
2266 	 */
2267 	struct mddev *mddev = r1_bio->mddev;
2268 	struct r1conf *conf = mddev->private;
2269 	int primary;
2270 	int i;
2271 	int vcnt;
2272 
2273 	/* Fix variable parts of all bios */
2274 	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
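	/* vcnt is the number of pages spanned, i.e. DIV_ROUND_UP(sectors,
	 * PAGE_SIZE >> 9); e.g. 16 sectors with 4 KiB pages gives vcnt = 2.
	 */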
2275 	for (i = 0; i < conf->raid_disks * 2; i++) {
2276 		blk_status_t status;
2277 		struct bio *b = r1_bio->bios[i];
2278 		struct resync_pages *rp = get_resync_pages(b);
2279 		if (b->bi_end_io != end_sync_read)
2280 			continue;
2281 		/* fixup the bio for reuse, but preserve errno */
2282 		status = b->bi_status;
2283 		bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
2284 		b->bi_status = status;
2285 		b->bi_iter.bi_sector = r1_bio->sector +
2286 			conf->mirrors[i].rdev->data_offset;
2287 		b->bi_end_io = end_sync_read;
2288 		rp->raid_bio = r1_bio;
2289 		b->bi_private = rp;
2290 
2291 		/* initialize bvec table again */
2292 		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2293 	}
2294 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
2295 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2296 		    !r1_bio->bios[primary]->bi_status) {
2297 			r1_bio->bios[primary]->bi_end_io = NULL;
2298 			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2299 			break;
2300 		}
2301 	r1_bio->read_disk = primary;
2302 	for (i = 0; i < conf->raid_disks * 2; i++) {
2303 		int j = 0;
2304 		struct bio *pbio = r1_bio->bios[primary];
2305 		struct bio *sbio = r1_bio->bios[i];
2306 		blk_status_t status = sbio->bi_status;
2307 		struct page **ppages = get_resync_pages(pbio)->pages;
2308 		struct page **spages = get_resync_pages(sbio)->pages;
2309 		struct bio_vec *bi;
2310 		int page_len[RESYNC_PAGES] = { 0 };
2311 		struct bvec_iter_all iter_all;
2312 
2313 		if (sbio->bi_end_io != end_sync_read)
2314 			continue;
2315 		/* Now we can 'fixup' the error value */
2316 		sbio->bi_status = 0;
2317 
2318 		bio_for_each_segment_all(bi, sbio, iter_all)
2319 			page_len[j++] = bi->bv_len;
2320 
2321 		if (!status) {
2322 			for (j = vcnt; j-- ; ) {
2323 				if (memcmp(page_address(ppages[j]),
2324 					   page_address(spages[j]),
2325 					   page_len[j]))
2326 					break;
2327 			}
2328 		} else
2329 			j = 0;
2330 		if (j >= 0)
2331 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2332 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2333 			      && !status)) {
2334 			/* No need to write to this device. */
2335 			sbio->bi_end_io = NULL;
2336 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2337 			continue;
2338 		}
2339 
2340 		bio_copy_data(sbio, pbio);
2341 	}
2342 }
2343 
sync_request_write(struct mddev * mddev,struct r1bio * r1_bio)2344 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2345 {
2346 	struct r1conf *conf = mddev->private;
2347 	int i;
2348 	int disks = conf->raid_disks * 2;
2349 	struct bio *wbio;
2350 
2351 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
2352 		/*
2353 		 * ouch - failed to read all of that.
2354 		 * No need to fix read error for check/repair
2355 		 * because all member disks are read.
2356 		 */
2357 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
2358 		    !fix_sync_read_error(r1_bio)) {
2359 			md_done_sync(mddev, r1_bio->sectors);
2360 			md_sync_error(mddev);
2361 			put_buf(r1_bio);
2362 			return;
2363 		}
2364 	}
2365 
2366 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2367 		process_checks(r1_bio);
2368 
2369 	/*
2370 	 * schedule writes
2371 	 */
2372 	atomic_set(&r1_bio->remaining, 1);
2373 	for (i = 0; i < disks ; i++) {
2374 		wbio = r1_bio->bios[i];
2375 		if (wbio->bi_end_io == NULL ||
2376 		    (wbio->bi_end_io == end_sync_read &&
2377 		     (i == r1_bio->read_disk ||
2378 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2379 			continue;
2380 		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2381 			abort_sync_write(mddev, r1_bio);
2382 			continue;
2383 		}
2384 
2385 		wbio->bi_opf = REQ_OP_WRITE;
2386 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2387 			wbio->bi_opf |= MD_FAILFAST;
2388 
2389 		wbio->bi_end_io = end_sync_write;
2390 		atomic_inc(&r1_bio->remaining);
2391 
2392 		submit_bio_noacct(wbio);
2393 	}
2394 
2395 	put_sync_write_buf(r1_bio);
2396 }
2397 
2398 /*
2399  * This is a kernel thread which:
2400  *
2401  *	1.	Retries failed read operations on working mirrors.
2402  *	2.	Updates the raid superblock when problems are encountered.
2403  *	3.	Performs writes following reads for array synchronising.
2404  */
2405 
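/*
 * Try to correct a read error on r1_bio->read_disk one page-sized chunk at
 * a time: read the data from some other working device, write it back over
 * the failing region, then re-read to verify. The caller has frozen the
 * array, so no other IO can interfere.
 */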
fix_read_error(struct r1conf * conf,struct r1bio * r1_bio)2406 static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2407 {
2408 	sector_t sect = r1_bio->sector;
2409 	int sectors = r1_bio->sectors;
2410 	int read_disk = r1_bio->read_disk;
2411 	struct mddev *mddev = conf->mddev;
2412 	struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2413 
2414 	if (exceed_read_errors(mddev, rdev)) {
2415 		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2416 		return;
2417 	}
2418 
2419 	while (sectors) {
2420 		int s = sectors;
2421 		int d = read_disk;
2422 		int success = 0;
2423 		int start;
2424 
2425 		if (s > (PAGE_SIZE>>9))
2426 			s = PAGE_SIZE >> 9;
2427 
2428 		do {
2429 			rdev = conf->mirrors[d].rdev;
2430 			if (rdev &&
2431 			    (test_bit(In_sync, &rdev->flags) ||
2432 			     (!test_bit(Faulty, &rdev->flags) &&
2433 			      rdev->recovery_offset >= sect + s)) &&
2434 			    rdev_has_badblock(rdev, sect, s) == 0) {
2435 				atomic_inc(&rdev->nr_pending);
2436 				if (sync_page_io(rdev, sect, s<<9,
2437 					 conf->tmppage, REQ_OP_READ, false))
2438 					success = 1;
2439 				rdev_dec_pending(rdev, mddev);
2440 				if (success)
2441 					break;
2442 			}
2443 
2444 			d++;
2445 			if (d == conf->raid_disks * 2)
2446 				d = 0;
2447 		} while (d != read_disk);
2448 
2449 		if (!success) {
2450 			/* Cannot read from anywhere - mark it bad */
2451 			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2452 			rdev_set_badblocks(rdev, sect, s, 0);
2453 			break;
2454 		}
2455 		/* write it back and re-read */
2456 		start = d;
2457 		while (d != read_disk) {
2458 			if (d == 0)
2459 				d = conf->raid_disks * 2;
2460 			d--;
2461 			rdev = conf->mirrors[d].rdev;
2462 			if (rdev &&
2463 			    !test_bit(Faulty, &rdev->flags)) {
2464 				atomic_inc(&rdev->nr_pending);
2465 				r1_sync_page_io(rdev, sect, s,
2466 						conf->tmppage, REQ_OP_WRITE);
2467 				rdev_dec_pending(rdev, mddev);
2468 			}
2469 		}
2470 		d = start;
2471 		while (d != read_disk) {
2472 			if (d == 0)
2473 				d = conf->raid_disks * 2;
2474 			d--;
2475 			rdev = conf->mirrors[d].rdev;
2476 			if (rdev &&
2477 			    !test_bit(Faulty, &rdev->flags)) {
2478 				atomic_inc(&rdev->nr_pending);
2479 				if (r1_sync_page_io(rdev, sect, s,
2480 						conf->tmppage, REQ_OP_READ)) {
2481 					atomic_add(s, &rdev->corrected_errors);
2482 					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
2483 						mdname(mddev), s,
2484 						(unsigned long long)(sect +
2485 								     rdev->data_offset),
2486 						rdev->bdev);
2487 				}
2488 				rdev_dec_pending(rdev, mddev);
2489 			}
2490 		}
2491 		sectors -= s;
2492 		sect += s;
2493 	}
2494 }
2495 
narrow_write_error(struct r1bio * r1_bio,int i)2496 static void narrow_write_error(struct r1bio *r1_bio, int i)
2497 {
2498 	struct mddev *mddev = r1_bio->mddev;
2499 	struct r1conf *conf = mddev->private;
2500 	struct md_rdev *rdev = conf->mirrors[i].rdev;
2501 
2502 	/* bio has the data to be written to device 'i' where
2503 	 * we just recently had a write error.
2504 	 * We repeatedly clone the bio and trim down to one block,
2505 	 * then try the write.  Where the write fails we record
2506 	 * a bad block.
2507 	 * It is conceivable that the bio doesn't exactly align with
2508 	 * blocks.  We must handle this somehow.
2509 	 *
2510 	 * We currently own a reference on the rdev.
2511 	 */
2512 
2513 	int block_sectors, lbs = bdev_logical_block_size(rdev->bdev) >> 9;
2514 	sector_t sector;
2515 	int sectors;
2516 	int sect_to_write = r1_bio->sectors;
2517 
2518 	if (rdev->badblocks.shift < 0)
2519 		block_sectors = lbs;
2520 	else
2521 		block_sectors = roundup(1 << rdev->badblocks.shift, lbs);
2522 
2523 	sector = r1_bio->sector;
2524 	sectors = ((sector + block_sectors)
2525 		   & ~(sector_t)(block_sectors - 1))
2526 		- sector;
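	/* e.g. with block_sectors = 8 and sector = 21:
	 * ((21 + 8) & ~7) - 21 = 24 - 21 = 3, so the first write runs up to
	 * the next badblocks boundary and later writes are block-aligned.
	 */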
2527 
2528 	while (sect_to_write) {
2529 		struct bio *wbio;
2530 		if (sectors > sect_to_write)
2531 			sectors = sect_to_write;
2532 		/* Write at 'sector' for 'sectors' */
2533 
2534 		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2535 			wbio = bio_alloc_clone(rdev->bdev,
2536 					       r1_bio->behind_master_bio,
2537 					       GFP_NOIO, &mddev->bio_set);
2538 		} else {
2539 			wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
2540 					       GFP_NOIO, &mddev->bio_set);
2541 		}
2542 
2543 		wbio->bi_opf = REQ_OP_WRITE;
2544 		wbio->bi_iter.bi_sector = r1_bio->sector;
2545 		wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2546 
2547 		bio_trim(wbio, sector - r1_bio->sector, sectors);
2548 		wbio->bi_iter.bi_sector += rdev->data_offset;
2549 
2550 		if (submit_bio_wait(wbio) &&
2551 		    !rdev_set_badblocks(rdev, sector, sectors, 0)) {
2552 			/*
2553 			 * Badblocks set failed, disk marked Faulty.
2554 			 * No further operations needed.
2555 			 */
2556 			bio_put(wbio);
2557 			break;
2558 		}
2559 
2560 		bio_put(wbio);
2561 		sect_to_write -= sectors;
2562 		sector += sectors;
2563 		sectors = block_sectors;
2564 	}
2565 }
2566 
handle_sync_write_finished(struct r1conf * conf,struct r1bio * r1_bio)2567 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2568 {
2569 	int m;
2570 	int s = r1_bio->sectors;
2571 	for (m = 0; m < conf->raid_disks * 2 ; m++) {
2572 		struct md_rdev *rdev = conf->mirrors[m].rdev;
2573 		struct bio *bio = r1_bio->bios[m];
2574 		if (bio->bi_end_io == NULL)
2575 			continue;
2576 		if (!bio->bi_status &&
2577 		    test_bit(R1BIO_MadeGood, &r1_bio->state))
2578 			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2579 		if (bio->bi_status &&
2580 		    test_bit(R1BIO_WriteError, &r1_bio->state))
2581 			rdev_set_badblocks(rdev, r1_bio->sector, s, 0);
2582 	}
2583 	put_buf(r1_bio);
2584 	md_done_sync(conf->mddev, s);
2585 }
2586 
handle_write_finished(struct r1conf * conf,struct r1bio * r1_bio)2587 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2588 {
2589 	int m, idx;
2590 	bool fail = false;
2591 
2592 	for (m = 0; m < conf->raid_disks * 2 ; m++)
2593 		if (r1_bio->bios[m] == IO_MADE_GOOD) {
2594 			struct md_rdev *rdev = conf->mirrors[m].rdev;
2595 			rdev_clear_badblocks(rdev,
2596 					     r1_bio->sector,
2597 					     r1_bio->sectors, 0);
2598 			rdev_dec_pending(rdev, conf->mddev);
2599 		} else if (r1_bio->bios[m] != NULL) {
2600 			/* This drive got a write error.  We need to
2601 			 * narrow down and record precise write
2602 			 * errors.
2603 			 */
2604 			fail = true;
2605 			narrow_write_error(r1_bio, m);
2606 			rdev_dec_pending(conf->mirrors[m].rdev,
2607 					 conf->mddev);
2608 		}
2609 	if (fail) {
2610 		spin_lock_irq(&conf->device_lock);
2611 		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2612 		idx = sector_to_idx(r1_bio->sector);
2613 		atomic_inc(&conf->nr_queued[idx]);
2614 		spin_unlock_irq(&conf->device_lock);
2615 		/*
2616 		 * In case freeze_array() is waiting for condition
2617 		 * get_unqueued_pending() == extra to be true.
2618 		 */
2619 		wake_up(&conf->wait_barrier);
2620 		md_wakeup_thread(conf->mddev->thread);
2621 	} else {
2622 		if (test_bit(R1BIO_WriteError, &r1_bio->state))
2623 			close_write(r1_bio);
2624 		raid_end_bio_io(r1_bio);
2625 	}
2626 }
2627 
handle_read_error(struct r1conf * conf,struct r1bio * r1_bio)2628 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2629 {
2630 	struct mddev *mddev = conf->mddev;
2631 	struct bio *bio;
2632 	struct md_rdev *rdev;
2633 	sector_t sector;
2634 
2635 	clear_bit(R1BIO_ReadError, &r1_bio->state);
2636 	/* we got a read error. Maybe the drive is bad.  Maybe just
2637 	 * the block is bad, in which case we can fix it.
2638 	 * We freeze all other IO, and try reading the block from
2639 	 * other devices.  When we find one, we re-write
2640 	 * and check whether that fixes the read error.
2641 	 * This is all done synchronously while the array is
2642 	 * frozen
2643 	 */
2644 
2645 	bio = r1_bio->bios[r1_bio->read_disk];
2646 	bio_put(bio);
2647 	r1_bio->bios[r1_bio->read_disk] = NULL;
2648 
2649 	rdev = conf->mirrors[r1_bio->read_disk].rdev;
2650 	if (mddev->ro == 0
2651 	    && !test_bit(FailFast, &rdev->flags)) {
2652 		freeze_array(conf, 1);
2653 		fix_read_error(conf, r1_bio);
2654 		unfreeze_array(conf);
2655 	} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2656 		md_error(mddev, rdev);
2657 	} else {
2658 		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2659 	}
2660 
2661 	rdev_dec_pending(rdev, conf->mddev);
2662 	sector = r1_bio->sector;
2663 	bio = r1_bio->master_bio;
2664 
2665 	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2666 	r1_bio->state = 0;
2667 	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2668 	allow_barrier(conf, sector);
2669 }
2670 
raid1d(struct md_thread * thread)2671 static void raid1d(struct md_thread *thread)
2672 {
2673 	struct mddev *mddev = thread->mddev;
2674 	struct r1bio *r1_bio;
2675 	unsigned long flags;
2676 	struct r1conf *conf = mddev->private;
2677 	struct list_head *head = &conf->retry_list;
2678 	struct blk_plug plug;
2679 	int idx;
2680 
2681 	md_check_recovery(mddev);
2682 
2683 	if (!list_empty_careful(&conf->bio_end_io_list) &&
2684 	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2685 		LIST_HEAD(tmp);
2686 		spin_lock_irqsave(&conf->device_lock, flags);
2687 		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2688 			list_splice_init(&conf->bio_end_io_list, &tmp);
2689 		spin_unlock_irqrestore(&conf->device_lock, flags);
2690 		while (!list_empty(&tmp)) {
2691 			r1_bio = list_first_entry(&tmp, struct r1bio,
2692 						  retry_list);
2693 			list_del(&r1_bio->retry_list);
2694 			idx = sector_to_idx(r1_bio->sector);
2695 			atomic_dec(&conf->nr_queued[idx]);
2696 			if (test_bit(R1BIO_WriteError, &r1_bio->state))
2697 				close_write(r1_bio);
2698 			raid_end_bio_io(r1_bio);
2699 		}
2700 	}
2701 
2702 	blk_start_plug(&plug);
2703 	for (;;) {
2704 
2705 		flush_pending_writes(conf);
2706 
2707 		spin_lock_irqsave(&conf->device_lock, flags);
2708 		if (list_empty(head)) {
2709 			spin_unlock_irqrestore(&conf->device_lock, flags);
2710 			break;
2711 		}
2712 		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2713 		list_del(head->prev);
2714 		idx = sector_to_idx(r1_bio->sector);
2715 		atomic_dec(&conf->nr_queued[idx]);
2716 		spin_unlock_irqrestore(&conf->device_lock, flags);
2717 
2718 		mddev = r1_bio->mddev;
2719 		conf = mddev->private;
2720 		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2721 			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2722 			    test_bit(R1BIO_WriteError, &r1_bio->state))
2723 				handle_sync_write_finished(conf, r1_bio);
2724 			else
2725 				sync_request_write(mddev, r1_bio);
2726 		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2727 			   test_bit(R1BIO_WriteError, &r1_bio->state))
2728 			handle_write_finished(conf, r1_bio);
2729 		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2730 			handle_read_error(conf, r1_bio);
2731 		else
2732 			WARN_ON_ONCE(1);
2733 
2734 		cond_resched();
2735 		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2736 			md_check_recovery(mddev);
2737 	}
2738 	blk_finish_plug(&plug);
2739 }
2740 
init_resync(struct r1conf * conf)2741 static int init_resync(struct r1conf *conf)
2742 {
2743 	int buffs;
2744 
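	/* With the definitions in raid1-10.c (64 KiB RESYNC_BLOCK_SIZE,
	 * 2 MiB RESYNC_WINDOW) this evaluates to 32 buffers.
	 */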
2745 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2746 	BUG_ON(mempool_initialized(&conf->r1buf_pool));
2747 
2748 	return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2749 			    r1buf_pool_free, conf);
2750 }
2751 
raid1_alloc_init_r1buf(struct r1conf * conf)2752 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2753 {
2754 	struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2755 	struct resync_pages *rps;
2756 	struct bio *bio;
2757 	int i;
2758 
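	/* bio_reset() clears bi_private, so preserve the resync_pages
	 * pointer across the reset.
	 */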
2759 	for (i = conf->raid_disks * 2; i--; ) {
2760 		bio = r1bio->bios[i];
2761 		rps = bio->bi_private;
2762 		bio_reset(bio, NULL, 0);
2763 		bio->bi_private = rps;
2764 	}
2765 	r1bio->master_bio = NULL;
2766 	return r1bio;
2767 }
2768 
2769 /*
2770  * perform a "sync" on one "block"
2771  *
2772  * We need to make sure that no normal I/O request - particularly write
2773  * requests - conflict with active sync requests.
2774  *
2775  * This is achieved by tracking pending requests and a 'barrier' concept
2776  * that can be installed to exclude normal IO requests.
2777  */
2778 
raid1_sync_request(struct mddev * mddev,sector_t sector_nr,sector_t max_sector,int * skipped)2779 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2780 				   sector_t max_sector, int *skipped)
2781 {
2782 	struct r1conf *conf = mddev->private;
2783 	struct r1bio *r1_bio;
2784 	struct bio *bio;
2785 	sector_t nr_sectors;
2786 	int disk = -1;
2787 	int i;
2788 	int wonly = -1;
2789 	int write_targets = 0, read_targets = 0;
2790 	sector_t sync_blocks;
2791 	bool still_degraded = false;
2792 	int good_sectors = RESYNC_SECTORS;
2793 	int min_bad = 0; /* number of sectors that are bad in all devices */
2794 	int idx = sector_to_idx(sector_nr);
2795 	int page_idx = 0;
2796 
2797 	if (!mempool_initialized(&conf->r1buf_pool))
2798 		if (init_resync(conf))
2799 			return 0;
2800 
2801 	if (sector_nr >= max_sector) {
2802 		/* If we aborted, we need to abort the
2803 		 * sync on the 'current' bitmap chunk (there will
2804 		 * only be one in raid1 resync).
2805 		 * We can find the current address in mddev->curr_resync.
2806 		 */
2807 		if (mddev->curr_resync < max_sector) /* aborted */
2808 			md_bitmap_end_sync(mddev, mddev->curr_resync,
2809 					   &sync_blocks);
2810 		else /* completed sync */
2811 			conf->fullsync = 0;
2812 
2813 		if (md_bitmap_enabled(mddev, false))
2814 			mddev->bitmap_ops->close_sync(mddev);
2815 		close_sync(conf);
2816 
2817 		if (mddev_is_clustered(mddev)) {
2818 			conf->cluster_sync_low = 0;
2819 			conf->cluster_sync_high = 0;
2820 		}
2821 		return 0;
2822 	}
2823 
2824 	if (mddev->bitmap == NULL &&
2825 	    mddev->resync_offset == MaxSector &&
2826 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2827 	    conf->fullsync == 0) {
2828 		*skipped = 1;
2829 		return max_sector - sector_nr;
2830 	}
2831 	/* before building a request, check if we can skip these blocks.
2832 	 * This call to md_bitmap_start_sync doesn't actually record anything.
2833 	 */
2834 	if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
2835 	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2836 		/* We can skip this block, and probably several more */
2837 		*skipped = 1;
2838 		return sync_blocks;
2839 	}
2840 
2841 	/*
2842 	 * If there is non-resync activity waiting for a turn, then let it
2843 	 * through before starting on this new sync request.
2844 	 */
2845 	if (atomic_read(&conf->nr_waiting[idx]))
2846 		schedule_timeout_uninterruptible(1);
2847 
2848 	/* we are incrementing sector_nr below. To be safe, we check against
2849 	 * sector_nr + two times RESYNC_SECTORS
2850 	 */
2851 	if (md_bitmap_enabled(mddev, false))
2852 		mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
2853 			mddev_is_clustered(mddev) &&
2854 			(sector_nr + 2 * RESYNC_SECTORS >
2855 			 conf->cluster_sync_high));
2856 
2857 	if (raise_barrier(conf, sector_nr))
2858 		return 0;
2859 
2860 	r1_bio = raid1_alloc_init_r1buf(conf);
2861 
2862 	/*
2863 	 * If we get a correctably read error during resync or recovery,
2864 	 * we might want to read from a different device.  So we
2865 	 * flag all drives that could conceivably be read from for READ,
2866 	 * and any others (which will be non-In_sync devices) for WRITE.
2867 	 * If a read fails, we try reading from something else for which READ
2868 	 * is OK.
2869 	 */
2870 
2871 	r1_bio->mddev = mddev;
2872 	r1_bio->sector = sector_nr;
2873 	r1_bio->state = 0;
2874 	set_bit(R1BIO_IsSync, &r1_bio->state);
2875 	/* make sure good_sectors won't go across barrier unit boundary */
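	/* (a barrier unit is 1 << BARRIER_UNIT_SECTOR_BITS sectors, i.e.
	 * 64 MiB with the current value of 17)
	 */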
2876 	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2877 
2878 	for (i = 0; i < conf->raid_disks * 2; i++) {
2879 		struct md_rdev *rdev;
2880 		bio = r1_bio->bios[i];
2881 
2882 		rdev = conf->mirrors[i].rdev;
2883 		if (rdev == NULL ||
2884 		    test_bit(Faulty, &rdev->flags)) {
2885 			if (i < conf->raid_disks)
2886 				still_degraded = true;
2887 		} else if (!test_bit(In_sync, &rdev->flags)) {
2888 			bio->bi_opf = REQ_OP_WRITE;
2889 			bio->bi_end_io = end_sync_write;
2890 			write_targets++;
2891 		} else {
2892 			/* may need to read from here */
2893 			sector_t first_bad = MaxSector;
2894 			sector_t bad_sectors;
2895 
2896 			if (is_badblock(rdev, sector_nr, good_sectors,
2897 					&first_bad, &bad_sectors)) {
2898 				if (first_bad > sector_nr)
2899 					good_sectors = first_bad - sector_nr;
2900 				else {
2901 					bad_sectors -= (sector_nr - first_bad);
2902 					if (min_bad == 0 ||
2903 					    min_bad > bad_sectors)
2904 						min_bad = bad_sectors;
2905 				}
2906 			}
2907 			if (sector_nr < first_bad) {
2908 				if (test_bit(WriteMostly, &rdev->flags)) {
2909 					if (wonly < 0)
2910 						wonly = i;
2911 				} else {
2912 					if (disk < 0)
2913 						disk = i;
2914 				}
2915 				bio->bi_opf = REQ_OP_READ;
2916 				bio->bi_end_io = end_sync_read;
2917 				read_targets++;
2918 			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2919 				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2920 				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2921 				/*
2922 				 * The device is suitable for reading (InSync),
2923 				 * but has bad block(s) here. Let's try to correct them,
2924 				 * if we are doing resync or repair. Otherwise, leave
2925 				 * this device alone for this sync request.
2926 				 */
2927 				bio->bi_opf = REQ_OP_WRITE;
2928 				bio->bi_end_io = end_sync_write;
2929 				write_targets++;
2930 			}
2931 		}
2932 		if (rdev && bio->bi_end_io) {
2933 			atomic_inc(&rdev->nr_pending);
2934 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2935 			bio_set_dev(bio, rdev->bdev);
2936 			if (test_bit(FailFast, &rdev->flags))
2937 				bio->bi_opf |= MD_FAILFAST;
2938 		}
2939 	}
2940 	if (disk < 0)
2941 		disk = wonly;
2942 	r1_bio->read_disk = disk;
2943 
2944 	if (read_targets == 0 && min_bad > 0) {
2945 		/* These sectors are bad on all InSync devices, so we
2946 		 * need to mark them bad on all write targets
2947 		 */
2948 		int ok = 1;
2949 		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2950 			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2951 				struct md_rdev *rdev = conf->mirrors[i].rdev;
2952 				ok = rdev_set_badblocks(rdev, sector_nr,
2953 							min_bad, 0
2954 					) && ok;
2955 			}
2956 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2957 		*skipped = 1;
2958 		put_buf(r1_bio);
2959 
2960 		if (!ok)
2961 			/* Cannot record the badblocks, md_error has set INTR,
2962 			 * abort the resync.
2963 			 */
2964 			return 0;
2965 		else
2966 			return min_bad;
2967 
2968 	}
2969 	if (min_bad > 0 && min_bad < good_sectors) {
2970 		/* only resync enough to reach the next bad->good
2971 		 * transition */
2972 		good_sectors = min_bad;
2973 	}
2974 
2975 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2976 		/* extra read targets are also write targets */
2977 		write_targets += read_targets-1;
2978 
2979 	if (write_targets == 0 || read_targets == 0) {
2980 		/* There is nowhere to write, so all non-sync
2981 		 * drives must be failed - so we are finished
2982 		 */
2983 		sector_t rv;
2984 		if (min_bad > 0)
2985 			max_sector = sector_nr + min_bad;
2986 		rv = max_sector - sector_nr;
2987 		*skipped = 1;
2988 		put_buf(r1_bio);
2989 		return rv;
2990 	}
2991 
2992 	if (max_sector > mddev->resync_max)
2993 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2994 	if (max_sector > sector_nr + good_sectors)
2995 		max_sector = sector_nr + good_sectors;
2996 	nr_sectors = 0;
2997 	sync_blocks = 0;
2998 	do {
2999 		struct page *page;
3000 		int len = PAGE_SIZE;
3001 		if (sector_nr + (len>>9) > max_sector)
3002 			len = (max_sector - sector_nr) << 9;
3003 		if (len == 0)
3004 			break;
3005 		if (sync_blocks == 0) {
3006 			if (!md_bitmap_start_sync(mddev, sector_nr,
3007 						  &sync_blocks, still_degraded) &&
3008 			    !conf->fullsync &&
3009 			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3010 				break;
3011 			if ((len >> 9) > sync_blocks)
3012 				len = sync_blocks<<9;
3013 		}
3014 
3015 		for (i = 0 ; i < conf->raid_disks * 2; i++) {
3016 			struct resync_pages *rp;
3017 
3018 			bio = r1_bio->bios[i];
3019 			rp = get_resync_pages(bio);
3020 			if (bio->bi_end_io) {
3021 				page = resync_fetch_page(rp, page_idx);
3022 
3023 				/*
3024 				 * won't fail because the vec table is big
3025 				 * enough to hold all these pages
3026 				 */
3027 				__bio_add_page(bio, page, len, 0);
3028 			}
3029 		}
3030 		nr_sectors += len>>9;
3031 		sector_nr += len>>9;
3032 		sync_blocks -= (len>>9);
3033 	} while (++page_idx < RESYNC_PAGES);
3034 
3035 	r1_bio->sectors = nr_sectors;
3036 
3037 	if (mddev_is_clustered(mddev) &&
3038 			conf->cluster_sync_high < sector_nr + nr_sectors) {
3039 		conf->cluster_sync_low = mddev->curr_resync_completed;
3040 		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
3041 		/* Send resync message */
3042 		mddev->cluster_ops->resync_info_update(mddev,
3043 						       conf->cluster_sync_low,
3044 						       conf->cluster_sync_high);
3045 	}
3046 
3047 	/* For a user-requested sync, we read all readable devices and do a
3048 	 * compare
3049 	 */
3050 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
3051 		atomic_set(&r1_bio->remaining, read_targets);
3052 		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
3053 			bio = r1_bio->bios[i];
3054 			if (bio->bi_end_io == end_sync_read) {
3055 				read_targets--;
3056 				if (read_targets == 1)
3057 					bio->bi_opf &= ~MD_FAILFAST;
3058 				submit_bio_noacct(bio);
3059 			}
3060 		}
3061 	} else {
3062 		atomic_set(&r1_bio->remaining, 1);
3063 		bio = r1_bio->bios[r1_bio->read_disk];
3064 		if (read_targets == 1)
3065 			bio->bi_opf &= ~MD_FAILFAST;
3066 		submit_bio_noacct(bio);
3067 	}
3068 	return nr_sectors;
3069 }
3070 
raid1_size(struct mddev * mddev,sector_t sectors,int raid_disks)3071 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3072 {
3073 	if (sectors)
3074 		return sectors;
3075 
3076 	return mddev->dev_sectors;
3077 }
3078 
setup_conf(struct mddev * mddev)3079 static struct r1conf *setup_conf(struct mddev *mddev)
3080 {
3081 	struct r1conf *conf;
3082 	int i;
3083 	struct raid1_info *disk;
3084 	struct md_rdev *rdev;
3085 	size_t r1bio_size;
3086 	int err = -ENOMEM;
3087 
3088 	conf = kzalloc_obj(struct r1conf);
3089 	if (!conf)
3090 		goto abort;
3091 
3092 	conf->nr_pending = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR);
3093 	if (!conf->nr_pending)
3094 		goto abort;
3095 
3096 	conf->nr_waiting = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR);
3097 	if (!conf->nr_waiting)
3098 		goto abort;
3099 
3100 	conf->nr_queued = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR);
3101 	if (!conf->nr_queued)
3102 		goto abort;
3103 
3104 	conf->barrier = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR);
3105 	if (!conf->barrier)
3106 		goto abort;
3107 
3108 	conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3109 					    mddev->raid_disks, 2),
3110 				GFP_KERNEL);
3111 	if (!conf->mirrors)
3112 		goto abort;
3113 
3114 	conf->tmppage = alloc_page(GFP_KERNEL);
3115 	if (!conf->tmppage)
3116 		goto abort;
3117 
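	/* struct r1bio ends in a flexible bios[] array: one bio slot per
	 * disk plus one per potential replacement, hence raid_disks * 2.
	 */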
3118 	r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
3119 	conf->r1bio_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS, r1bio_size);
3120 	if (!conf->r1bio_pool)
3121 		goto abort;
3122 
3123 	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3124 	if (err)
3125 		goto abort;
3126 
3127 	err = -EINVAL;
3128 	spin_lock_init(&conf->device_lock);
3129 	conf->raid_disks = mddev->raid_disks;
3130 	rdev_for_each(rdev, mddev) {
3131 		int disk_idx = rdev->raid_disk;
3132 
3133 		if (disk_idx >= conf->raid_disks || disk_idx < 0)
3134 			continue;
3135 
3136 		if (!raid1_add_conf(conf, rdev, disk_idx,
3137 				    test_bit(Replacement, &rdev->flags)))
3138 			goto abort;
3139 	}
3140 	conf->mddev = mddev;
3141 	INIT_LIST_HEAD(&conf->retry_list);
3142 	INIT_LIST_HEAD(&conf->bio_end_io_list);
3143 
3144 	spin_lock_init(&conf->resync_lock);
3145 	init_waitqueue_head(&conf->wait_barrier);
3146 
3147 	bio_list_init(&conf->pending_bio_list);
3148 
3149 	err = -EIO;
3150 	for (i = 0; i < conf->raid_disks * 2; i++) {
3151 
3152 		disk = conf->mirrors + i;
3153 
3154 		if (i < conf->raid_disks &&
3155 		    disk[conf->raid_disks].rdev) {
3156 			/* This slot has a replacement. */
3157 			if (!disk->rdev) {
3158 				/* No original, just make the replacement
3159 				 * a recovering spare
3160 				 */
3161 				disk->rdev =
3162 					disk[conf->raid_disks].rdev;
3163 				disk[conf->raid_disks].rdev = NULL;
3164 			} else if (!test_bit(In_sync, &disk->rdev->flags))
3165 				/* Original is not in_sync - bad */
3166 				goto abort;
3167 		}
3168 
3169 		if (!disk->rdev ||
3170 		    !test_bit(In_sync, &disk->rdev->flags)) {
3171 			disk->head_position = 0;
3172 			if (disk->rdev &&
3173 			    (disk->rdev->saved_raid_disk < 0))
3174 				conf->fullsync = 1;
3175 		}
3176 	}
3177 
3178 	err = -ENOMEM;
3179 	rcu_assign_pointer(conf->thread,
3180 			   md_register_thread(raid1d, mddev, "raid1"));
3181 	if (!conf->thread)
3182 		goto abort;
3183 
3184 	return conf;
3185 
3186  abort:
3187 	if (conf) {
3188 		mempool_destroy(conf->r1bio_pool);
3189 		kfree(conf->mirrors);
3190 		safe_put_page(conf->tmppage);
3191 		kfree(conf->nr_pending);
3192 		kfree(conf->nr_waiting);
3193 		kfree(conf->nr_queued);
3194 		kfree(conf->barrier);
3195 		bioset_exit(&conf->bio_split);
3196 		kfree(conf);
3197 	}
3198 	return ERR_PTR(err);
3199 }
3200 
raid1_set_limits(struct mddev * mddev)3201 static int raid1_set_limits(struct mddev *mddev)
3202 {
3203 	struct queue_limits lim;
3204 	int err;
3205 
3206 	md_init_stacking_limits(&lim);
3207 	lim.max_write_zeroes_sectors = 0;
3208 	lim.max_hw_wzeroes_unmap_sectors = 0;
3209 	lim.logical_block_size = mddev->logical_block_size;
3210 	lim.features |= BLK_FEAT_ATOMIC_WRITES;
3211 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
3212 	if (err)
3213 		return err;
3214 	return queue_limits_set(mddev->gendisk->queue, &lim);
3215 }
3216 
raid1_run(struct mddev * mddev)3217 static int raid1_run(struct mddev *mddev)
3218 {
3219 	struct r1conf *conf;
3220 	int i;
3221 	int ret;
3222 
3223 	if (mddev->level != 1) {
3224 		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3225 			mdname(mddev), mddev->level);
3226 		return -EIO;
3227 	}
3228 	if (mddev->reshape_position != MaxSector) {
3229 		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3230 			mdname(mddev));
3231 		return -EIO;
3232 	}
3233 
3234 	/*
3235 	 * copy the already verified devices into our private RAID1
3236 	 * bookkeeping area. [whatever we allocate in run(),
3237 	 * should be freed in raid1_free()]
3238 	 */
3239 	if (mddev->private == NULL)
3240 		conf = setup_conf(mddev);
3241 	else
3242 		conf = mddev->private;
3243 
3244 	if (IS_ERR(conf))
3245 		return PTR_ERR(conf);
3246 
3247 	if (!mddev_is_dm(mddev)) {
3248 		ret = raid1_set_limits(mddev);
3249 		if (ret) {
3250 			md_unregister_thread(mddev, &conf->thread);
3251 			if (!mddev->private)
3252 				raid1_free(mddev, conf);
3253 			return ret;
3254 		}
3255 	}
3256 
3257 	mddev->degraded = 0;
3258 	for (i = 0; i < conf->raid_disks; i++)
3259 		if (conf->mirrors[i].rdev == NULL ||
3260 		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3261 		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3262 			mddev->degraded++;
3263 	/*
3264 	 * RAID1 needs at least one active disk.
3265 	 */
3266 	if (conf->raid_disks - mddev->degraded < 1) {
3267 		md_unregister_thread(mddev, &conf->thread);
3268 		if (!mddev->private)
3269 			raid1_free(mddev, conf);
3270 		return -EINVAL;
3271 	}
3272 
3273 	if (conf->raid_disks - mddev->degraded == 1)
3274 		mddev->resync_offset = MaxSector;
3275 
3276 	if (mddev->resync_offset != MaxSector)
3277 		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3278 			mdname(mddev));
3279 	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3280 		mdname(mddev), mddev->raid_disks - mddev->degraded,
3281 		mddev->raid_disks);
3282 
3283 	/*
3284 	 * Ok, everything is just fine now
3285 	 */
3286 	rcu_assign_pointer(mddev->thread, conf->thread);
3287 	rcu_assign_pointer(conf->thread, NULL);
3288 	mddev->private = conf;
3289 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3290 
3291 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3292 
3293 	ret = md_integrity_register(mddev);
3294 	if (ret)
3295 		md_unregister_thread(mddev, &mddev->thread);
3296 	return ret;
3297 }
3298 
raid1_free(struct mddev * mddev,void * priv)3299 static void raid1_free(struct mddev *mddev, void *priv)
3300 {
3301 	struct r1conf *conf = priv;
3302 
3303 	mempool_destroy(conf->r1bio_pool);
3304 	kfree(conf->mirrors);
3305 	safe_put_page(conf->tmppage);
3306 	kfree(conf->nr_pending);
3307 	kfree(conf->nr_waiting);
3308 	kfree(conf->nr_queued);
3309 	kfree(conf->barrier);
3310 	bioset_exit(&conf->bio_split);
3311 	kfree(conf);
3312 }
3313 
raid1_resize(struct mddev * mddev,sector_t sectors)3314 static int raid1_resize(struct mddev *mddev, sector_t sectors)
3315 {
3316 	/* no resync is happening, and there is enough space
3317 	 * on all devices, so we can resize.
3318 	 * We need to make sure resync covers any new space.
3319 	 * If the array is shrinking we should possibly wait until
3320 	 * any io in the removed space completes, but it hardly seems
3321 	 * worth it.
3322 	 */
3323 	sector_t newsize = raid1_size(mddev, sectors, 0);
3324 
3325 	if (mddev->external_size &&
3326 	    mddev->array_sectors > newsize)
3327 		return -EINVAL;
3328 
3329 	if (md_bitmap_enabled(mddev, false)) {
3330 		int ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
3331 
3332 		if (ret)
3333 			return ret;
3334 	}
3335 
3336 	md_set_array_sectors(mddev, newsize);
3337 	if (sectors > mddev->dev_sectors &&
3338 	    mddev->resync_offset > mddev->dev_sectors) {
3339 		mddev->resync_offset = mddev->dev_sectors;
3340 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3341 	}
3342 	mddev->dev_sectors = sectors;
3343 	mddev->resync_max_sectors = sectors;
3344 	return 0;
3345 }
3346 
raid1_reshape(struct mddev * mddev)3347 static int raid1_reshape(struct mddev *mddev)
3348 {
3349 	/* We need to:
3350 	 * 1/ resize the r1bio_pool
3351 	 * 2/ resize conf->mirrors
3352 	 *
3353 	 * We allocate a new r1bio_pool if we can.
3354 	 * Then raise a device barrier and wait until all IO stops.
3355 	 * Then resize conf->mirrors and swap in the new r1bio pool.
3356 	 *
3357 	 * At the same time, we "pack" the devices so that all the missing
3358 	 * devices have the higher raid_disk numbers.
3359 	 */
3360 	mempool_t *newpool, *oldpool;
3361 	size_t new_r1bio_size;
3362 	struct raid1_info *newmirrors;
3363 	struct r1conf *conf = mddev->private;
3364 	int cnt, raid_disks;
3365 	unsigned long flags;
3366 	int d, d2;
3367 
3368 	/* Cannot change chunk_size, layout, or level */
3369 	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3370 	    mddev->layout != mddev->new_layout ||
3371 	    mddev->level != mddev->new_level) {
3372 		mddev->new_chunk_sectors = mddev->chunk_sectors;
3373 		mddev->new_layout = mddev->layout;
3374 		mddev->new_level = mddev->level;
3375 		return -EINVAL;
3376 	}
3377 
3378 	if (!mddev_is_clustered(mddev))
3379 		md_allow_write(mddev);
3380 
3381 	raid_disks = mddev->raid_disks + mddev->delta_disks;
3382 
3383 	if (raid_disks < conf->raid_disks) {
3384 		cnt = 0;
3385 		for (d = 0; d < conf->raid_disks; d++)
3386 			if (conf->mirrors[d].rdev)
3387 				cnt++;
3388 		if (cnt > raid_disks)
3389 			return -EBUSY;
3390 	}
3391 
3392 	new_r1bio_size = offsetof(struct r1bio, bios[raid_disks * 2]);
3393 	newpool = mempool_create_kmalloc_pool(NR_RAID_BIOS, new_r1bio_size);
3394 	if (!newpool) {
3395 		return -ENOMEM;
3396 	}
3397 	newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3398 					 raid_disks, 2),
3399 			     GFP_KERNEL);
3400 	if (!newmirrors) {
3401 		mempool_destroy(newpool);
3402 		return -ENOMEM;
3403 	}
3404 
3405 	freeze_array(conf, 0);
3406 
3407 	/* ok, everything is stopped */
3408 	oldpool = conf->r1bio_pool;
3409 	conf->r1bio_pool = newpool;
3410 
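	/* Pack surviving rdevs into the lowest slots: d scans the old
	 * mirrors array while d2 hands out consecutive new slots, so any
	 * holes end up at the highest raid_disk numbers.
	 */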
3411 	for (d = d2 = 0; d < conf->raid_disks; d++) {
3412 		struct md_rdev *rdev = conf->mirrors[d].rdev;
3413 		if (rdev && rdev->raid_disk != d2) {
3414 			sysfs_unlink_rdev(mddev, rdev);
3415 			rdev->raid_disk = d2;
3416 			sysfs_unlink_rdev(mddev, rdev);
3417 			if (sysfs_link_rdev(mddev, rdev))
3418 				pr_warn("md/raid1:%s: cannot register rd%d\n",
3419 					mdname(mddev), rdev->raid_disk);
3420 		}
3421 		if (rdev)
3422 			newmirrors[d2++].rdev = rdev;
3423 	}
3424 	kfree(conf->mirrors);
3425 	conf->mirrors = newmirrors;
3426 
3427 	spin_lock_irqsave(&conf->device_lock, flags);
3428 	mddev->degraded += (raid_disks - conf->raid_disks);
3429 	spin_unlock_irqrestore(&conf->device_lock, flags);
3430 	conf->raid_disks = mddev->raid_disks = raid_disks;
3431 	mddev->delta_disks = 0;
3432 
3433 	unfreeze_array(conf);
3434 
3435 	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3436 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3437 	md_wakeup_thread(mddev->thread);
3438 
3439 	mempool_destroy(oldpool);
3440 	return 0;
3441 }
3442 
raid1_quiesce(struct mddev * mddev,int quiesce)3443 static void raid1_quiesce(struct mddev *mddev, int quiesce)
3444 {
3445 	struct r1conf *conf = mddev->private;
3446 
3447 	if (quiesce)
3448 		freeze_array(conf, 0);
3449 	else
3450 		unfreeze_array(conf);
3451 }
3452 
raid1_takeover(struct mddev * mddev)3453 static void *raid1_takeover(struct mddev *mddev)
3454 {
3455 	/* raid1 can take over:
3456 	 *  raid5 with 2 devices, any layout or chunk size
3457 	 */
3458 	if (mddev->level == 5 && mddev->raid_disks == 2) {
3459 		struct r1conf *conf;
3460 		mddev->new_level = 1;
3461 		mddev->new_layout = 0;
3462 		mddev->new_chunk_sectors = 0;
3463 		conf = setup_conf(mddev);
3464 		if (!IS_ERR(conf)) {
3465 			/* Array must appear to be quiesced */
3466 			conf->array_frozen = 1;
3467 			mddev_clear_unsupported_flags(mddev,
3468 				UNSUPPORTED_MDDEV_FLAGS);
3469 		}
3470 		return conf;
3471 	}
3472 	return ERR_PTR(-EINVAL);
3473 }
3474 
3475 static struct md_personality raid1_personality =
3476 {
3477 	.head = {
3478 		.type	= MD_PERSONALITY,
3479 		.id	= ID_RAID1,
3480 		.name	= "raid1",
3481 		.owner	= THIS_MODULE,
3482 	},
3483 
3484 	.make_request	= raid1_make_request,
3485 	.run		= raid1_run,
3486 	.free		= raid1_free,
3487 	.status		= raid1_status,
3488 	.error_handler	= raid1_error,
3489 	.hot_add_disk	= raid1_add_disk,
3490 	.hot_remove_disk= raid1_remove_disk,
3491 	.spare_active	= raid1_spare_active,
3492 	.sync_request	= raid1_sync_request,
3493 	.resize		= raid1_resize,
3494 	.size		= raid1_size,
3495 	.check_reshape	= raid1_reshape,
3496 	.quiesce	= raid1_quiesce,
3497 	.takeover	= raid1_takeover,
3498 };
3499 
raid1_init(void)3500 static int __init raid1_init(void)
3501 {
3502 	return register_md_submodule(&raid1_personality.head);
3503 }
3504 
raid1_exit(void)3505 static void __exit raid1_exit(void)
3506 {
3507 	unregister_md_submodule(&raid1_personality.head);
3508 }
3509 
3510 module_init(raid1_init);
3511 module_exit(raid1_exit);
3512 MODULE_LICENSE("GPL");
3513 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3514 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3515 MODULE_ALIAS("md-raid1");
3516 MODULE_ALIAS("md-level-1");
3517