1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * raid1.c : Multiple Devices driver for Linux
4 *
5 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
6 *
7 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
8 *
9 * RAID-1 management functions.
10 *
11 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
12 *
13 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
14 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
15 *
16 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
17 * bitmapped intelligence in resync:
18 *
19 * - bitmap marked during normal i/o
20 * - bitmap used to skip nondirty blocks during sync
21 *
22 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
23 * - persistent bitmap code
24 */
25
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/blkdev.h>
29 #include <linux/module.h>
30 #include <linux/seq_file.h>
31 #include <linux/ratelimit.h>
32 #include <linux/interval_tree_generic.h>
33
34 #include <trace/events/block.h>
35
36 #include "md.h"
37 #include "raid1.h"
38 #include "md-bitmap.h"
39 #include "md-cluster.h"
40
41 #define UNSUPPORTED_MDDEV_FLAGS \
42 ((1L << MD_HAS_JOURNAL) | \
43 (1L << MD_JOURNAL_CLEAN) | \
44 (1L << MD_HAS_PPL) | \
45 (1L << MD_HAS_MULTIPLE_PPLS))
46
47 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
48 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
49 static void raid1_free(struct mddev *mddev, void *priv);
50
51 #define RAID_1_10_NAME "raid1"
52 #include "raid1-10.c"
53
54 #define START(node) ((node)->start)
55 #define LAST(node) ((node)->last)
56 INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
57 START, LAST, static inline, raid1_rb);
58
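/*
 * Try to record the write range of @r1_bio in the rdev's per-bucket
 * serialization interval tree. Returns 0 and inserts @si on success, or
 * -EBUSY if an overlapping write is already in flight.
 */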
59 static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
60 struct serial_info *si, int idx)
61 {
62 unsigned long flags;
63 int ret = 0;
64 sector_t lo = r1_bio->sector;
65 sector_t hi = lo + r1_bio->sectors;
66 struct serial_in_rdev *serial = &rdev->serial[idx];
67
68 spin_lock_irqsave(&serial->serial_lock, flags);
69 /* collision happened */
70 if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
71 ret = -EBUSY;
72 else {
73 si->start = lo;
74 si->last = hi;
75 raid1_rb_insert(si, &serial->serial_rb);
76 }
77 spin_unlock_irqrestore(&serial->serial_lock, flags);
78
79 return ret;
80 }
81
82 static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
83 {
84 struct mddev *mddev = rdev->mddev;
85 struct serial_info *si;
86 int idx = sector_to_idx(r1_bio->sector);
87 struct serial_in_rdev *serial = &rdev->serial[idx];
88
89 if (WARN_ON(!mddev->serial_info_pool))
90 return;
91 si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
92 wait_event(serial->serial_io_wait,
93 check_and_add_serial(rdev, r1_bio, si, idx) == 0);
94 }
95
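/*
 * Remove a previously recorded write range from the rdev's serialization
 * tree and wake up any writer waiting for an overlapping range.
 */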
96 static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
97 {
98 struct serial_info *si;
99 unsigned long flags;
100 int found = 0;
101 struct mddev *mddev = rdev->mddev;
102 int idx = sector_to_idx(lo);
103 struct serial_in_rdev *serial = &rdev->serial[idx];
104
105 spin_lock_irqsave(&serial->serial_lock, flags);
106 for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
107 si; si = raid1_rb_iter_next(si, lo, hi)) {
108 if (si->start == lo && si->last == hi) {
109 raid1_rb_remove(si, &serial->serial_rb);
110 mempool_free(si, mddev->serial_info_pool);
111 found = 1;
112 break;
113 }
114 }
115 if (!found)
116 WARN(1, "The write IO is not recorded for serialization\n");
117 spin_unlock_irqrestore(&serial->serial_lock, flags);
118 wake_up(&serial->serial_io_wait);
119 }
120
121 /*
122 * for resync bio, r1bio pointer can be retrieved from the per-bio
123 * 'struct resync_pages'.
124 */
125 static inline struct r1bio *get_resync_r1bio(struct bio *bio)
126 {
127 return get_resync_pages(bio)->raid_bio;
128 }
129
130 static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf)
131 {
132 int size = offsetof(struct r1bio, bios[conf->raid_disks * 2]);
133
134 /* allocate an r1bio with room for raid_disks * 2 entries in the bios array */
135 return kzalloc(size, gfp_flags);
136 }
137
138 #define RESYNC_DEPTH 32
139 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
140 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
141 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
142 #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
143 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
144
145 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
146 {
147 struct r1conf *conf = data;
148 struct r1bio *r1_bio;
149 struct bio *bio;
150 int need_pages;
151 int j;
152 struct resync_pages *rps;
153
154 r1_bio = r1bio_pool_alloc(gfp_flags, conf);
155 if (!r1_bio)
156 return NULL;
157
158 rps = kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_pages),
159 gfp_flags);
160 if (!rps)
161 goto out_free_r1bio;
162
163 /*
164 * Allocate bios : 1 for reading, n-1 for writing
165 */
166 for (j = conf->raid_disks * 2; j-- ; ) {
167 bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
168 if (!bio)
169 goto out_free_bio;
170 bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
171 r1_bio->bios[j] = bio;
172 }
173 /*
174 * Allocate RESYNC_PAGES data pages and attach them to
175 * the first bio.
176 * If this is a user-requested check/repair, allocate
177 * RESYNC_PAGES for each bio.
178 */
179 if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery))
180 need_pages = conf->raid_disks * 2;
181 else
182 need_pages = 1;
183 for (j = 0; j < conf->raid_disks * 2; j++) {
184 struct resync_pages *rp = &rps[j];
185
186 bio = r1_bio->bios[j];
187
188 if (j < need_pages) {
189 if (resync_alloc_pages(rp, gfp_flags))
190 goto out_free_pages;
191 } else {
192 memcpy(rp, &rps[0], sizeof(*rp));
193 resync_get_all_pages(rp);
194 }
195
196 rp->raid_bio = r1_bio;
197 bio->bi_private = rp;
198 }
199
200 r1_bio->master_bio = NULL;
201
202 return r1_bio;
203
204 out_free_pages:
205 while (--j >= 0)
206 resync_free_pages(&rps[j]);
207
208 out_free_bio:
209 while (++j < conf->raid_disks * 2) {
210 bio_uninit(r1_bio->bios[j]);
211 kfree(r1_bio->bios[j]);
212 }
213 kfree(rps);
214
215 out_free_r1bio:
216 rbio_pool_free(r1_bio, data);
217 return NULL;
218 }
219
220 static void r1buf_pool_free(void *__r1_bio, void *data)
221 {
222 struct r1conf *conf = data;
223 int i;
224 struct r1bio *r1bio = __r1_bio;
225 struct resync_pages *rp = NULL;
226
227 for (i = conf->raid_disks * 2; i--; ) {
228 rp = get_resync_pages(r1bio->bios[i]);
229 resync_free_pages(rp);
230 bio_uninit(r1bio->bios[i]);
231 kfree(r1bio->bios[i]);
232 }
233
234 /* resync pages array stored in the 1st bio's .bi_private */
235 kfree(rp);
236
237 rbio_pool_free(r1bio, data);
238 }
239
240 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
241 {
242 int i;
243
244 for (i = 0; i < conf->raid_disks * 2; i++) {
245 struct bio **bio = r1_bio->bios + i;
246 if (!BIO_SPECIAL(*bio))
247 bio_put(*bio);
248 *bio = NULL;
249 }
250 }
251
252 static void free_r1bio(struct r1bio *r1_bio)
253 {
254 struct r1conf *conf = r1_bio->mddev->private;
255
256 put_all_bios(conf, r1_bio);
257 mempool_free(r1_bio, conf->r1bio_pool);
258 }
259
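/*
 * Release an r1bio allocated from r1buf_pool (resync/recovery buffers):
 * drop the rdev references taken for each submitted bio and lower the
 * resync barrier for the corresponding bucket.
 */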
260 static void put_buf(struct r1bio *r1_bio)
261 {
262 struct r1conf *conf = r1_bio->mddev->private;
263 sector_t sect = r1_bio->sector;
264 int i;
265
266 for (i = 0; i < conf->raid_disks * 2; i++) {
267 struct bio *bio = r1_bio->bios[i];
268 if (bio->bi_end_io)
269 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
270 }
271
272 mempool_free(r1_bio, &conf->r1buf_pool);
273
274 lower_barrier(conf, sect);
275 }
276
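/*
 * Hand an r1bio that needs further attention (read error, write error or
 * bad-block bookkeeping) over to raid1d via conf->retry_list.
 */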
277 static void reschedule_retry(struct r1bio *r1_bio)
278 {
279 unsigned long flags;
280 struct mddev *mddev = r1_bio->mddev;
281 struct r1conf *conf = mddev->private;
282 int idx;
283
284 idx = sector_to_idx(r1_bio->sector);
285 spin_lock_irqsave(&conf->device_lock, flags);
286 list_add(&r1_bio->retry_list, &conf->retry_list);
287 atomic_inc(&conf->nr_queued[idx]);
288 spin_unlock_irqrestore(&conf->device_lock, flags);
289
290 wake_up(&conf->wait_barrier);
291 md_wakeup_thread(mddev->thread);
292 }
293
294 /*
295 * raid_end_bio_io() is called when we have finished servicing a mirrored
296 * operation and are ready to return a success/failure code to the buffer
297 * cache layer.
298 */
299 static void call_bio_endio(struct r1bio *r1_bio)
300 {
301 struct bio *bio = r1_bio->master_bio;
302
303 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
304 bio->bi_status = BLK_STS_IOERR;
305
306 bio_endio(bio);
307 }
308
309 static void raid_end_bio_io(struct r1bio *r1_bio)
310 {
311 struct bio *bio = r1_bio->master_bio;
312 struct r1conf *conf = r1_bio->mddev->private;
313 sector_t sector = r1_bio->sector;
314
315 /* if nobody has done the final endio yet, do it now */
316 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
317 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
318 (bio_data_dir(bio) == WRITE) ? "write" : "read",
319 (unsigned long long) bio->bi_iter.bi_sector,
320 (unsigned long long) bio_end_sector(bio) - 1);
321
322 call_bio_endio(r1_bio);
323 }
324
325 free_r1bio(r1_bio);
326 /*
327 * Wake up any possible resync thread that waits for the device
328 * to go idle. All I/Os, even write-behind writes, are done.
329 */
330 allow_barrier(conf, sector);
331 }
332
333 /*
334 * Update disk head position estimator based on IRQ completion info.
335 */
336 static inline void update_head_pos(int disk, struct r1bio *r1_bio)
337 {
338 struct r1conf *conf = r1_bio->mddev->private;
339
340 conf->mirrors[disk].head_position =
341 r1_bio->sector + (r1_bio->sectors);
342 }
343
344 /*
345 * Find the disk number which triggered given bio
346 */
347 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
348 {
349 int mirror;
350 struct r1conf *conf = r1_bio->mddev->private;
351 int raid_disks = conf->raid_disks;
352
353 for (mirror = 0; mirror < raid_disks * 2; mirror++)
354 if (r1_bio->bios[mirror] == bio)
355 break;
356
357 BUG_ON(mirror == raid_disks * 2);
358 update_head_pos(mirror, r1_bio);
359
360 return mirror;
361 }
362
363 static void raid1_end_read_request(struct bio *bio)
364 {
365 int uptodate = !bio->bi_status;
366 struct r1bio *r1_bio = bio->bi_private;
367 struct r1conf *conf = r1_bio->mddev->private;
368 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
369
370 /*
371 * this branch is our 'one mirror IO has finished' event handler:
372 */
373 update_head_pos(r1_bio->read_disk, r1_bio);
374
375 if (uptodate) {
376 set_bit(R1BIO_Uptodate, &r1_bio->state);
377 } else if (test_bit(FailFast, &rdev->flags) &&
378 test_bit(R1BIO_FailFast, &r1_bio->state)) {
379 /* This was a fail-fast read so we definitely
380 * want to retry */
381 ;
382 } else if (!raid1_should_handle_error(bio)) {
383 uptodate = 1;
384 } else {
385 /* If all other devices have failed, we want to return
386 * the error upwards rather than fail the last device.
387 * Here we redefine "uptodate" to mean "Don't want to retry"
388 */
389 unsigned long flags;
390 spin_lock_irqsave(&conf->device_lock, flags);
391 if (r1_bio->mddev->degraded == conf->raid_disks ||
392 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
393 test_bit(In_sync, &rdev->flags)))
394 uptodate = 1;
395 spin_unlock_irqrestore(&conf->device_lock, flags);
396 }
397
398 if (uptodate) {
399 raid_end_bio_io(r1_bio);
400 rdev_dec_pending(rdev, conf->mddev);
401 } else {
402 /*
403 * oops, read error:
404 */
405 pr_err_ratelimited("md/raid1:%s: %pg: rescheduling sector %llu\n",
406 mdname(conf->mddev),
407 rdev->bdev,
408 (unsigned long long)r1_bio->sector);
409 set_bit(R1BIO_ReadError, &r1_bio->state);
410 reschedule_retry(r1_bio);
411 /* don't drop the reference on read_disk yet */
412 }
413 }
414
415 static void close_write(struct r1bio *r1_bio)
416 {
417 struct mddev *mddev = r1_bio->mddev;
418
419 /* it really is the end of this request */
420 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
421 bio_free_pages(r1_bio->behind_master_bio);
422 bio_put(r1_bio->behind_master_bio);
423 r1_bio->behind_master_bio = NULL;
424 }
425
426 if (test_bit(R1BIO_BehindIO, &r1_bio->state))
427 mddev->bitmap_ops->end_behind_write(mddev);
428 md_write_end(mddev);
429 }
430
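/*
 * Drop one reference on a write r1bio. When the last reference goes away,
 * either finish the master bio or pass the r1bio to raid1d if a write
 * error occurred or bad blocks may be cleared.
 */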
431 static void r1_bio_write_done(struct r1bio *r1_bio)
432 {
433 if (!atomic_dec_and_test(&r1_bio->remaining))
434 return;
435
436 if (test_bit(R1BIO_WriteError, &r1_bio->state))
437 reschedule_retry(r1_bio);
438 else {
439 close_write(r1_bio);
440 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
441 reschedule_retry(r1_bio);
442 else
443 raid_end_bio_io(r1_bio);
444 }
445 }
446
447 static void raid1_end_write_request(struct bio *bio)
448 {
449 struct r1bio *r1_bio = bio->bi_private;
450 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
451 struct r1conf *conf = r1_bio->mddev->private;
452 struct bio *to_put = NULL;
453 int mirror = find_bio_disk(r1_bio, bio);
454 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
455 sector_t lo = r1_bio->sector;
456 sector_t hi = r1_bio->sector + r1_bio->sectors;
457 bool ignore_error = !raid1_should_handle_error(bio) ||
458 (bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
459
460 /*
461 * 'one mirror IO has finished' event handler:
462 */
463 if (bio->bi_status && !ignore_error) {
464 set_bit(WriteErrorSeen, &rdev->flags);
465 if (!test_and_set_bit(WantReplacement, &rdev->flags))
466 set_bit(MD_RECOVERY_NEEDED, &
467 conf->mddev->recovery);
468
469 if (test_bit(FailFast, &rdev->flags) &&
470 (bio->bi_opf & MD_FAILFAST) &&
471 /* We never try FailFast to WriteMostly devices */
472 !test_bit(WriteMostly, &rdev->flags)) {
473 md_error(r1_bio->mddev, rdev);
474 }
475
476 /*
477 * When the device is faulty, it is not necessary to
478 * handle write error.
479 */
480 if (!test_bit(Faulty, &rdev->flags))
481 set_bit(R1BIO_WriteError, &r1_bio->state);
482 else {
483 /* Finished with this branch */
484 r1_bio->bios[mirror] = NULL;
485 to_put = bio;
486 }
487 } else {
488 /*
489 * Set R1BIO_Uptodate in our master bio, so that we
490 * will return a good error code for to the higher
491 * levels even if IO on some other mirrored buffer
492 * fails.
493 *
494 * The 'master' represents the composite IO operation
495 * to user-side. So if something waits for IO, then it
496 * will wait for the 'master' bio.
497 */
498 r1_bio->bios[mirror] = NULL;
499 to_put = bio;
500 /*
501 * Do not set R1BIO_Uptodate if the current device is
502 * rebuilding or Faulty. This is because we cannot use
503 * such a device for properly reading the data back (we could
504 * potentially use it if the current write falls entirely
505 * before rdev->recovery_offset, but for simplicity we don't
506 * check that here).
507 */
508 if (test_bit(In_sync, &rdev->flags) &&
509 !test_bit(Faulty, &rdev->flags))
510 set_bit(R1BIO_Uptodate, &r1_bio->state);
511
512 /* Maybe we can clear some bad blocks. */
513 if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
514 !ignore_error) {
515 r1_bio->bios[mirror] = IO_MADE_GOOD;
516 set_bit(R1BIO_MadeGood, &r1_bio->state);
517 }
518 }
519
520 if (behind) {
521 if (test_bit(CollisionCheck, &rdev->flags))
522 remove_serial(rdev, lo, hi);
523 if (test_bit(WriteMostly, &rdev->flags))
524 atomic_dec(&r1_bio->behind_remaining);
525
526 /*
527 * In behind mode, we ACK the master bio once the I/O
528 * has safely reached all non-writemostly
529 * disks. Setting the Returned bit ensures that this
530 * gets done only once -- we don't ever want to return
531 * -EIO here, instead we'll wait
532 */
533 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
534 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
535 /* Maybe we can return now */
536 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
537 struct bio *mbio = r1_bio->master_bio;
538 pr_debug("raid1: behind end write sectors"
539 " %llu-%llu\n",
540 (unsigned long long) mbio->bi_iter.bi_sector,
541 (unsigned long long) bio_end_sector(mbio) - 1);
542 call_bio_endio(r1_bio);
543 }
544 }
545 } else if (rdev->mddev->serialize_policy)
546 remove_serial(rdev, lo, hi);
547 if (r1_bio->bios[mirror] == NULL)
548 rdev_dec_pending(rdev, conf->mddev);
549
550 /*
551 * Let's see if all mirrored write operations have finished
552 * already.
553 */
554 r1_bio_write_done(r1_bio);
555
556 if (to_put)
557 bio_put(to_put);
558 }
559
560 static sector_t align_to_barrier_unit_end(sector_t start_sector,
561 sector_t sectors)
562 {
563 sector_t len;
564
565 WARN_ON(sectors == 0);
566 /*
567 * len is the number of sectors from start_sector to end of the
568 * barrier unit which start_sector belongs to.
569 */
570 len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
571 start_sector;
572
573 if (len > sectors)
574 len = sectors;
575
576 return len;
577 }
578
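/*
 * Take an nr_pending reference on the chosen disk and update its
 * sequential-read tracking for subsequent read balancing.
 */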
579 static void update_read_sectors(struct r1conf *conf, int disk,
580 sector_t this_sector, int len)
581 {
582 struct raid1_info *info = &conf->mirrors[disk];
583
584 atomic_inc(&info->rdev->nr_pending);
585 if (info->next_seq_sect != this_sector)
586 info->seq_start = this_sector;
587 info->next_seq_sect = this_sector + len;
588 }
589
590 static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
591 int *max_sectors)
592 {
593 sector_t this_sector = r1_bio->sector;
594 int len = r1_bio->sectors;
595 int disk;
596
597 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
598 struct md_rdev *rdev;
599 int read_len;
600
601 if (r1_bio->bios[disk] == IO_BLOCKED)
602 continue;
603
604 rdev = conf->mirrors[disk].rdev;
605 if (!rdev || test_bit(Faulty, &rdev->flags))
606 continue;
607
608 /* choose the first disk even if it has some bad blocks. */
609 read_len = raid1_check_read_range(rdev, this_sector, &len);
610 if (read_len > 0) {
611 update_read_sectors(conf, disk, this_sector, read_len);
612 *max_sectors = read_len;
613 return disk;
614 }
615 }
616
617 return -1;
618 }
619
620 static bool rdev_in_recovery(struct md_rdev *rdev, struct r1bio *r1_bio)
621 {
622 return !test_bit(In_sync, &rdev->flags) &&
623 rdev->recovery_offset < r1_bio->sector + r1_bio->sectors;
624 }
625
626 static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
627 int *max_sectors)
628 {
629 sector_t this_sector = r1_bio->sector;
630 int best_disk = -1;
631 int best_len = 0;
632 int disk;
633
634 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
635 struct md_rdev *rdev;
636 int len;
637 int read_len;
638
639 if (r1_bio->bios[disk] == IO_BLOCKED)
640 continue;
641
642 rdev = conf->mirrors[disk].rdev;
643 if (!rdev || test_bit(Faulty, &rdev->flags) ||
644 rdev_in_recovery(rdev, r1_bio) ||
645 test_bit(WriteMostly, &rdev->flags))
646 continue;
647
648 /* keep track of the disk with the most readable sectors. */
649 len = r1_bio->sectors;
650 read_len = raid1_check_read_range(rdev, this_sector, &len);
651 if (read_len > best_len) {
652 best_disk = disk;
653 best_len = read_len;
654 }
655 }
656
657 if (best_disk != -1) {
658 *max_sectors = best_len;
659 update_read_sectors(conf, best_disk, this_sector, best_len);
660 }
661
662 return best_disk;
663 }
664
665 static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
666 int *max_sectors)
667 {
668 sector_t this_sector = r1_bio->sector;
669 int bb_disk = -1;
670 int bb_read_len = 0;
671 int disk;
672
673 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
674 struct md_rdev *rdev;
675 int len;
676 int read_len;
677
678 if (r1_bio->bios[disk] == IO_BLOCKED)
679 continue;
680
681 rdev = conf->mirrors[disk].rdev;
682 if (!rdev || test_bit(Faulty, &rdev->flags) ||
683 !test_bit(WriteMostly, &rdev->flags) ||
684 rdev_in_recovery(rdev, r1_bio))
685 continue;
686
687 /* there are no bad blocks, we can use this disk */
688 len = r1_bio->sectors;
689 read_len = raid1_check_read_range(rdev, this_sector, &len);
690 if (read_len == r1_bio->sectors) {
691 *max_sectors = read_len;
692 update_read_sectors(conf, disk, this_sector, read_len);
693 return disk;
694 }
695
696 /*
697 * there are partial bad blocks, choose the rdev with largest
698 * read length.
699 */
700 if (read_len > bb_read_len) {
701 bb_disk = disk;
702 bb_read_len = read_len;
703 }
704 }
705
706 if (bb_disk != -1) {
707 *max_sectors = bb_read_len;
708 update_read_sectors(conf, bb_disk, this_sector, bb_read_len);
709 }
710
711 return bb_disk;
712 }
713
714 static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio)
715 {
716 /* TODO: address issues with this check and concurrency. */
717 return conf->mirrors[disk].next_seq_sect == r1_bio->sector ||
718 conf->mirrors[disk].head_position == r1_bio->sector;
719 }
720
721 /*
722 * If buffered sequential IO size exceeds the optimal iosize, check if there is
723 * an idle disk. If yes, choose the idle disk.
724 */
725 static bool should_choose_next(struct r1conf *conf, int disk)
726 {
727 struct raid1_info *mirror = &conf->mirrors[disk];
728 int opt_iosize;
729
730 if (!test_bit(Nonrot, &mirror->rdev->flags))
731 return false;
732
733 opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9;
734 return opt_iosize > 0 && mirror->seq_start != MaxSector &&
735 mirror->next_seq_sect > opt_iosize &&
736 mirror->next_seq_sect - opt_iosize >= mirror->seq_start;
737 }
738
739 static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio)
740 {
741 if (!rdev || test_bit(Faulty, &rdev->flags))
742 return false;
743
744 if (rdev_in_recovery(rdev, r1_bio))
745 return false;
746
747 /* don't read from slow disk unless have to */
748 if (test_bit(WriteMostly, &rdev->flags))
749 return false;
750
751 /* don't split IO for bad blocks unless have to */
752 if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors))
753 return false;
754
755 return true;
756 }
757
758 struct read_balance_ctl {
759 sector_t closest_dist;
760 int closest_dist_disk;
761 int min_pending;
762 int min_pending_disk;
763 int sequential_disk;
764 int readable_disks;
765 };
766
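/*
 * Pick a read target among fully readable disks: keep sequential reads on
 * the same disk where that makes sense, otherwise prefer the least loaded
 * disk when non-rotational disks are present, or the disk whose head is
 * closest to the requested sector.
 */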
767 static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio)
768 {
769 int disk;
770 struct read_balance_ctl ctl = {
771 .closest_dist_disk = -1,
772 .closest_dist = MaxSector,
773 .min_pending_disk = -1,
774 .min_pending = UINT_MAX,
775 .sequential_disk = -1,
776 };
777
778 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
779 struct md_rdev *rdev;
780 sector_t dist;
781 unsigned int pending;
782
783 if (r1_bio->bios[disk] == IO_BLOCKED)
784 continue;
785
786 rdev = conf->mirrors[disk].rdev;
787 if (!rdev_readable(rdev, r1_bio))
788 continue;
789
790 /* At least two disks to choose from so failfast is OK */
791 if (ctl.readable_disks++ == 1)
792 set_bit(R1BIO_FailFast, &r1_bio->state);
793
794 pending = atomic_read(&rdev->nr_pending);
795 dist = abs(r1_bio->sector - conf->mirrors[disk].head_position);
796
797 /* Don't change to another disk for sequential reads */
798 if (is_sequential(conf, disk, r1_bio)) {
799 if (!should_choose_next(conf, disk))
800 return disk;
801
802 /*
803 * Add 'pending' to avoid choosing this disk if
804 * there is another idle disk.
805 */
806 pending++;
807 /*
808 * If there is no other idle disk, this disk
809 * will be chosen.
810 */
811 ctl.sequential_disk = disk;
812 }
813
814 if (ctl.min_pending > pending) {
815 ctl.min_pending = pending;
816 ctl.min_pending_disk = disk;
817 }
818
819 if (ctl.closest_dist > dist) {
820 ctl.closest_dist = dist;
821 ctl.closest_dist_disk = disk;
822 }
823 }
824
825 /*
826 * sequential IO size exceeds optimal iosize, however, there is no other
827 * idle disk, so choose the sequential disk.
828 */
829 if (ctl.sequential_disk != -1 && ctl.min_pending != 0)
830 return ctl.sequential_disk;
831
832 /*
833 * If all disks are rotational, choose the closest disk. If any disk is
834 * non-rotational, choose the disk with the fewest pending requests even if
835 * that disk is rotational, which may or may not be optimal for arrays with
836 * mixed rotational/non-rotational disks depending on the workload.
837 */
838 if (ctl.min_pending_disk != -1 &&
839 (READ_ONCE(conf->nonrot_disks) || ctl.min_pending == 0))
840 return ctl.min_pending_disk;
841 else
842 return ctl.closest_dist_disk;
843 }
844
845 /*
846 * This routine returns the disk from which the requested read should be done.
847 *
848 * 1) If resync is in progress, find the first usable disk and use it even if it
849 * has some bad blocks.
850 *
851 * 2) Now that there is no resync, loop through all disks, skipping slow
852 * disks and disks with bad blocks for now. Only pay attention to the key
853 * disk choice.
854 *
855 * 3) If we've made it this far, now look for disks with bad blocks and choose
856 * the one with the largest number of readable sectors.
857 *
858 * 4) If we are all the way at the end, we have no choice but to use a disk even
859 * if it is write mostly.
860 *
861 * The rdev for the device selected will have nr_pending incremented.
862 */
863 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio,
864 int *max_sectors)
865 {
866 int disk;
867
868 clear_bit(R1BIO_FailFast, &r1_bio->state);
869
870 if (raid1_should_read_first(conf->mddev, r1_bio->sector,
871 r1_bio->sectors))
872 return choose_first_rdev(conf, r1_bio, max_sectors);
873
874 disk = choose_best_rdev(conf, r1_bio);
875 if (disk >= 0) {
876 *max_sectors = r1_bio->sectors;
877 update_read_sectors(conf, disk, r1_bio->sector,
878 r1_bio->sectors);
879 return disk;
880 }
881
882 /*
883 * If we are here it means we didn't find a perfectly good disk so
884 * now spend a bit more time trying to find one with the most good
885 * sectors.
886 */
887 disk = choose_bb_rdev(conf, r1_bio, max_sectors);
888 if (disk >= 0)
889 return disk;
890
891 return choose_slow_rdev(conf, r1_bio, max_sectors);
892 }
893
894 static void wake_up_barrier(struct r1conf *conf)
895 {
896 if (wq_has_sleeper(&conf->wait_barrier))
897 wake_up(&conf->wait_barrier);
898 }
899
900 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
901 {
902 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
903 raid1_prepare_flush_writes(conf->mddev);
904 wake_up_barrier(conf);
905
906 while (bio) { /* submit pending writes */
907 struct bio *next = bio->bi_next;
908
909 raid1_submit_write(bio);
910 bio = next;
911 cond_resched();
912 }
913 }
914
915 static void flush_pending_writes(struct r1conf *conf)
916 {
917 /* Any writes that have been queued but are awaiting
918 * bitmap updates get flushed here.
919 */
920 spin_lock_irq(&conf->device_lock);
921
922 if (conf->pending_bio_list.head) {
923 struct blk_plug plug;
924 struct bio *bio;
925
926 bio = bio_list_get(&conf->pending_bio_list);
927 spin_unlock_irq(&conf->device_lock);
928
929 /*
930 * As this is called in a wait_event() loop (see freeze_array),
931 * current->state might be TASK_UNINTERRUPTIBLE which will
932 * cause a warning when we prepare to wait again. As it is
933 * rare that this path is taken, it is perfectly safe to force
934 * us to go around the wait_event() loop again, so the warning
935 * is a false-positive. Silence the warning by resetting
936 * thread state
937 */
938 __set_current_state(TASK_RUNNING);
939 blk_start_plug(&plug);
940 flush_bio_list(conf, bio);
941 blk_finish_plug(&plug);
942 } else
943 spin_unlock_irq(&conf->device_lock);
944 }
945
946 /* Barriers....
947 * Sometimes we need to suspend IO while we do something else,
948 * either some resync/recovery, or reconfigure the array.
949 * To do this we raise a 'barrier'.
950 * The 'barrier' is a counter that can be raised multiple times
951 * to count how many activities are happening which preclude
952 * normal IO.
953 * We can only raise the barrier if there is no pending IO.
954 * i.e. if nr_pending == 0.
955 * We choose only to raise the barrier if no-one is waiting for the
956 * barrier to go down. This means that as soon as an IO request
957 * is ready, no other operations which require a barrier will start
958 * until the IO request has had a chance.
959 *
960 * So: regular IO calls 'wait_barrier'. When that returns there
961 * is no background IO happening. It must arrange to call
962 * allow_barrier when it has finished its IO.
963 * background IO calls must call raise_barrier. Once that returns
964 * there is no normal IO happening. It must arrange to call
965 * lower_barrier when the particular background IO completes.
966 *
967 * If resync/recovery is interrupted, returns -EINTR;
968 * Otherwise, returns 0.
969 */
970 static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
971 {
972 int idx = sector_to_idx(sector_nr);
973
974 spin_lock_irq(&conf->resync_lock);
975
976 /* Wait until no block IO is waiting */
977 wait_event_lock_irq(conf->wait_barrier,
978 !atomic_read(&conf->nr_waiting[idx]),
979 conf->resync_lock);
980
981 /* block any new IO from starting */
982 atomic_inc(&conf->barrier[idx]);
983 /*
984 * In raise_barrier() we firstly increase conf->barrier[idx] then
985 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
986 * increase conf->nr_pending[idx] then check conf->barrier[idx].
987 * A memory barrier here to make sure conf->nr_pending[idx] won't
988 * be fetched before conf->barrier[idx] is increased. Otherwise
989 * there will be a race between raise_barrier() and _wait_barrier().
990 */
991 smp_mb__after_atomic();
992
993 /* For these conditions we must wait:
994 * A: while the array is in frozen state
995 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
996 * existing in corresponding I/O barrier bucket.
997 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning it reaches the
998 * max resync count allowed on the current I/O barrier bucket.
999 */
1000 wait_event_lock_irq(conf->wait_barrier,
1001 (!conf->array_frozen &&
1002 !atomic_read(&conf->nr_pending[idx]) &&
1003 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
1004 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
1005 conf->resync_lock);
1006
1007 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
1008 atomic_dec(&conf->barrier[idx]);
1009 spin_unlock_irq(&conf->resync_lock);
1010 wake_up(&conf->wait_barrier);
1011 return -EINTR;
1012 }
1013
1014 atomic_inc(&conf->nr_sync_pending);
1015 spin_unlock_irq(&conf->resync_lock);
1016
1017 return 0;
1018 }
1019
1020 static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
1021 {
1022 int idx = sector_to_idx(sector_nr);
1023
1024 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
1025
1026 atomic_dec(&conf->barrier[idx]);
1027 atomic_dec(&conf->nr_sync_pending);
1028 wake_up(&conf->wait_barrier);
1029 }
1030
1031 static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
1032 {
1033 bool ret = true;
1034
1035 /*
1036 * We need to increase conf->nr_pending[idx] very early here,
1037 * then raise_barrier() can be blocked when it waits for
1038 * conf->nr_pending[idx] to be 0. Then we can avoid holding
1039 * conf->resync_lock when there is no barrier raised in same
1040 * barrier unit bucket. Also if the array is frozen, I/O
1041 * should be blocked until array is unfrozen.
1042 */
1043 atomic_inc(&conf->nr_pending[idx]);
1044 /*
1045 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
1046 * check conf->barrier[idx]. In raise_barrier() we firstly increase
1047 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
1048 * barrier is necessary here to make sure conf->barrier[idx] won't be
1049 * fetched before conf->nr_pending[idx] is increased. Otherwise there
1050 * will be a race between _wait_barrier() and raise_barrier().
1051 */
1052 smp_mb__after_atomic();
1053
1054 /*
1055 * Don't worry about checking two atomic_t variables at the same time
1056 * here. If, while we check conf->barrier[idx], the array is
1057 * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
1058 * 0, it is safe to return and let the I/O continue. Because the
1059 * array is frozen, all I/O returned here will eventually complete
1060 * or be queued, no race will happen. See the code comment in
1061 * freeze_array().
1062 */
1063 if (!READ_ONCE(conf->array_frozen) &&
1064 !atomic_read(&conf->barrier[idx]))
1065 return ret;
1066
1067 /*
1068 * After holding conf->resync_lock, conf->nr_pending[idx]
1069 * should be decreased before waiting for barrier to drop.
1070 * Otherwise, we may encounter a race condition because
1071 * raise_barrier() might be waiting for conf->nr_pending[idx]
1072 * to be 0 at the same time.
1073 */
1074 spin_lock_irq(&conf->resync_lock);
1075 atomic_inc(&conf->nr_waiting[idx]);
1076 atomic_dec(&conf->nr_pending[idx]);
1077 /*
1078 * In case freeze_array() is waiting for
1079 * get_unqueued_pending() == extra
1080 */
1081 wake_up_barrier(conf);
1082 /* Wait for the barrier in same barrier unit bucket to drop. */
1083
1084 /* Return false when nowait flag is set */
1085 if (nowait) {
1086 ret = false;
1087 } else {
1088 wait_event_lock_irq(conf->wait_barrier,
1089 !conf->array_frozen &&
1090 !atomic_read(&conf->barrier[idx]),
1091 conf->resync_lock);
1092 atomic_inc(&conf->nr_pending[idx]);
1093 }
1094
1095 atomic_dec(&conf->nr_waiting[idx]);
1096 spin_unlock_irq(&conf->resync_lock);
1097 return ret;
1098 }
1099
1100 static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
1101 {
1102 int idx = sector_to_idx(sector_nr);
1103 bool ret = true;
1104
1105 /*
1106 * Very similar to _wait_barrier(). The difference is, for read
1107 * I/O we don't need to wait for sync I/O, but if the whole array
1108 * is frozen, the read I/O still has to wait until the array is
1109 * unfrozen. Since there is no ordering requirement with
1110 * conf->barrier[idx] here, memory barrier is unnecessary as well.
1111 */
1112 atomic_inc(&conf->nr_pending[idx]);
1113
1114 if (!READ_ONCE(conf->array_frozen))
1115 return ret;
1116
1117 spin_lock_irq(&conf->resync_lock);
1118 atomic_inc(&conf->nr_waiting[idx]);
1119 atomic_dec(&conf->nr_pending[idx]);
1120 /*
1121 * In case freeze_array() is waiting for
1122 * get_unqueued_pending() == extra
1123 */
1124 wake_up_barrier(conf);
1125 /* Wait for array to be unfrozen */
1126
1127 /* Return false when nowait flag is set */
1128 if (nowait) {
1130 ret = false;
1131 } else {
1132 wait_event_lock_irq(conf->wait_barrier,
1133 !conf->array_frozen,
1134 conf->resync_lock);
1135 atomic_inc(&conf->nr_pending[idx]);
1136 }
1137
1138 atomic_dec(&conf->nr_waiting[idx]);
1139 spin_unlock_irq(&conf->resync_lock);
1140 return ret;
1141 }
1142
1143 static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
1144 {
1145 int idx = sector_to_idx(sector_nr);
1146
1147 return _wait_barrier(conf, idx, nowait);
1148 }
1149
1150 static void _allow_barrier(struct r1conf *conf, int idx)
1151 {
1152 atomic_dec(&conf->nr_pending[idx]);
1153 wake_up_barrier(conf);
1154 }
1155
1156 static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1157 {
1158 int idx = sector_to_idx(sector_nr);
1159
1160 _allow_barrier(conf, idx);
1161 }
1162
1163 /* conf->resync_lock should be held */
1164 static int get_unqueued_pending(struct r1conf *conf)
1165 {
1166 int idx, ret;
1167
1168 ret = atomic_read(&conf->nr_sync_pending);
1169 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1170 ret += atomic_read(&conf->nr_pending[idx]) -
1171 atomic_read(&conf->nr_queued[idx]);
1172
1173 return ret;
1174 }
1175
1176 static void freeze_array(struct r1conf *conf, int extra)
1177 {
1178 /* Stop sync I/O and normal I/O and wait for everything to
1179 * go quiet.
1180 * This is called in two situations:
1181 * 1) management command handlers (reshape, remove disk, quiesce).
1182 * 2) one normal I/O request failed.
1183
1184 * After array_frozen is set to 1, new sync IO will be blocked at
1185 * raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
1186 * or wait_read_barrier(). The flying I/Os will either complete or be
1187 * queued. When everything goes quiet, there are only queued I/Os left.
1188
1189 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
1190 * barrier bucket index which this I/O request hits. When all sync and
1191 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
1192 * of all conf->nr_queued[]. But normal I/O failure is an exception,
1193 * in handle_read_error(), we may call freeze_array() before trying to
1194 * fix the read error. In this case, the error read I/O is not queued,
1195 * so get_unqueued_pending() == 1.
1196 *
1197 * Therefore before this function returns, we need to wait until
1198 * get_unqueued_pending(conf) becomes equal to extra. For the
1199 * normal I/O error context extra is 1; in all other situations extra is 0.
1200 */
1201 spin_lock_irq(&conf->resync_lock);
1202 conf->array_frozen = 1;
1203 mddev_add_trace_msg(conf->mddev, "raid1 wait freeze");
1204 wait_event_lock_irq_cmd(
1205 conf->wait_barrier,
1206 get_unqueued_pending(conf) == extra,
1207 conf->resync_lock,
1208 flush_pending_writes(conf));
1209 spin_unlock_irq(&conf->resync_lock);
1210 }
1211 static void unfreeze_array(struct r1conf *conf)
1212 {
1213 /* reverse the effect of the freeze */
1214 spin_lock_irq(&conf->resync_lock);
1215 conf->array_frozen = 0;
1216 spin_unlock_irq(&conf->resync_lock);
1217 wake_up(&conf->wait_barrier);
1218 }
1219
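/*
 * Copy the write payload into freshly allocated pages so the master bio
 * can be completed before writes to write-mostly (write-behind) devices
 * have finished. On allocation failure the write falls back to normal,
 * non-behind I/O.
 */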
1220 static void alloc_behind_master_bio(struct r1bio *r1_bio,
1221 struct bio *bio)
1222 {
1223 int size = bio->bi_iter.bi_size;
1224 unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1225 int i = 0;
1226 struct bio *behind_bio = NULL;
1227
1228 behind_bio = bio_alloc_bioset(NULL, vcnt, bio->bi_opf, GFP_NOIO,
1229 &r1_bio->mddev->bio_set);
1230
1231 /* discard op, we don't support writezero/writesame yet */
1232 if (!bio_has_data(bio)) {
1233 behind_bio->bi_iter.bi_size = size;
1234 goto skip_copy;
1235 }
1236
1237 while (i < vcnt && size) {
1238 struct page *page;
1239 int len = min_t(int, PAGE_SIZE, size);
1240
1241 page = alloc_page(GFP_NOIO);
1242 if (unlikely(!page))
1243 goto free_pages;
1244
1245 if (!bio_add_page(behind_bio, page, len, 0)) {
1246 put_page(page);
1247 goto free_pages;
1248 }
1249
1250 size -= len;
1251 i++;
1252 }
1253
1254 bio_copy_data(behind_bio, bio);
1255 skip_copy:
1256 r1_bio->behind_master_bio = behind_bio;
1257 set_bit(R1BIO_BehindIO, &r1_bio->state);
1258
1259 return;
1260
1261 free_pages:
1262 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1263 bio->bi_iter.bi_size);
1264 bio_free_pages(behind_bio);
1265 bio_put(behind_bio);
1266 }
1267
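/*
 * blk-plug flush callback: when unplugged from scheduler context, defer
 * the queued writes to raid1d; otherwise submit them directly.
 */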
1268 static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1269 {
1270 struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1271 cb);
1272 struct mddev *mddev = plug->cb.data;
1273 struct r1conf *conf = mddev->private;
1274 struct bio *bio;
1275
1276 if (from_schedule) {
1277 spin_lock_irq(&conf->device_lock);
1278 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1279 spin_unlock_irq(&conf->device_lock);
1280 wake_up_barrier(conf);
1281 md_wakeup_thread(mddev->thread);
1282 kfree(plug);
1283 return;
1284 }
1285
1286 /* we aren't scheduling, so we can do the write-out directly. */
1287 bio = bio_list_get(&plug->pending);
1288 flush_bio_list(conf, bio);
1289 kfree(plug);
1290 }
1291
1292 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1293 {
1294 r1_bio->master_bio = bio;
1295 r1_bio->sectors = bio_sectors(bio);
1296 r1_bio->state = 0;
1297 r1_bio->mddev = mddev;
1298 r1_bio->sector = bio->bi_iter.bi_sector;
1299 }
1300
1301 static inline struct r1bio *
1302 alloc_r1bio(struct mddev *mddev, struct bio *bio)
1303 {
1304 struct r1conf *conf = mddev->private;
1305 struct r1bio *r1_bio;
1306
1307 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1308 memset(r1_bio, 0, offsetof(struct r1bio, bios[conf->raid_disks * 2]));
1309 init_r1bio(r1_bio, mddev, bio);
1310 return r1_bio;
1311 }
1312
1313 static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1314 int max_read_sectors, struct r1bio *r1_bio)
1315 {
1316 struct r1conf *conf = mddev->private;
1317 struct raid1_info *mirror;
1318 struct bio *read_bio;
1319 int max_sectors;
1320 int rdisk;
1321 bool r1bio_existed = !!r1_bio;
1322
1323 /*
1324 * If r1_bio is set, we are blocking the raid1d thread
1325 * so there is a tiny risk of deadlock. So ask for
1326 * emergency memory if needed.
1327 */
1328 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1329
1330 /*
1331 * Still need barrier for READ in case that whole
1332 * array is frozen.
1333 */
1334 if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
1335 bio->bi_opf & REQ_NOWAIT)) {
1336 bio_wouldblock_error(bio);
1337 return;
1338 }
1339
1340 if (!r1_bio)
1341 r1_bio = alloc_r1bio(mddev, bio);
1342 else
1343 init_r1bio(r1_bio, mddev, bio);
1344 r1_bio->sectors = max_read_sectors;
1345
1346 /*
1347 * make_request() can abort the operation when read-ahead is being
1348 * used and no empty request is available.
1349 */
1350 rdisk = read_balance(conf, r1_bio, &max_sectors);
1351 if (rdisk < 0) {
1352 /* couldn't find anywhere to read from */
1353 if (r1bio_existed)
1354 pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
1355 mdname(mddev),
1356 conf->mirrors[r1_bio->read_disk].rdev->bdev,
1357 r1_bio->sector);
1358 raid_end_bio_io(r1_bio);
1359 return;
1360 }
1361 mirror = conf->mirrors + rdisk;
1362
1363 if (r1bio_existed)
1364 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %pg\n",
1365 mdname(mddev),
1366 (unsigned long long)r1_bio->sector,
1367 mirror->rdev->bdev);
1368
1369 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1370 md_bitmap_enabled(mddev, false)) {
1371 /*
1372 * Reading from a write-mostly device must take care not to
1373 * over-take any writes that are 'behind'
1374 */
1375 mddev_add_trace_msg(mddev, "raid1 wait behind writes");
1376 mddev->bitmap_ops->wait_behind_writes(mddev);
1377 }
1378
1379 if (max_sectors < bio_sectors(bio)) {
1380 bio = bio_submit_split_bioset(bio, max_sectors,
1381 &conf->bio_split);
1382 if (!bio) {
1383 set_bit(R1BIO_Returned, &r1_bio->state);
1384 goto err_handle;
1385 }
1386
1387 r1_bio->master_bio = bio;
1388 r1_bio->sectors = max_sectors;
1389 }
1390
1391 r1_bio->read_disk = rdisk;
1392 if (!r1bio_existed) {
1393 md_account_bio(mddev, &bio);
1394 r1_bio->master_bio = bio;
1395 }
1396 read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
1397 &mddev->bio_set);
1398 read_bio->bi_opf &= ~REQ_NOWAIT;
1399 r1_bio->bios[rdisk] = read_bio;
1400
1401 read_bio->bi_iter.bi_sector = r1_bio->sector +
1402 mirror->rdev->data_offset;
1403 read_bio->bi_end_io = raid1_end_read_request;
1404 if (test_bit(FailFast, &mirror->rdev->flags) &&
1405 test_bit(R1BIO_FailFast, &r1_bio->state))
1406 read_bio->bi_opf |= MD_FAILFAST;
1407 read_bio->bi_private = r1_bio;
1408 mddev_trace_remap(mddev, read_bio, r1_bio->sector);
1409 submit_bio_noacct(read_bio);
1410 return;
1411
1412 err_handle:
1413 atomic_dec(&mirror->rdev->nr_pending);
1414 raid_end_bio_io(r1_bio);
1415 }
1416
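/*
 * Before issuing a write, wait for any rdev that is blocked (for example
 * on an unacknowledged bad block). Returns false instead of waiting when
 * REQ_NOWAIT is set.
 */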
1417 static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
1418 {
1419 struct r1conf *conf = mddev->private;
1420 int disks = conf->raid_disks * 2;
1421 int i;
1422
1423 retry:
1424 for (i = 0; i < disks; i++) {
1425 struct md_rdev *rdev = conf->mirrors[i].rdev;
1426
1427 if (!rdev)
1428 continue;
1429
1430 /* don't write here until the bad block is acknowledged */
1431 if (test_bit(WriteErrorSeen, &rdev->flags) &&
1432 rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
1433 bio_sectors(bio)) < 0)
1434 set_bit(BlockedBadBlocks, &rdev->flags);
1435
1436 if (rdev_blocked(rdev)) {
1437 if (bio->bi_opf & REQ_NOWAIT)
1438 return false;
1439
1440 mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
1441 rdev->raid_disk);
1442 atomic_inc(&rdev->nr_pending);
1443 md_wait_for_blocked_rdev(rdev, rdev->mddev);
1444 goto retry;
1445 }
1446 }
1447
1448 return true;
1449 }
1450
1451 static void raid1_start_write_behind(struct mddev *mddev, struct r1bio *r1_bio,
1452 struct bio *bio)
1453 {
1454 unsigned long max_write_behind = mddev->bitmap_info.max_write_behind;
1455 struct md_bitmap_stats stats;
1456 int err;
1457
1458 /* behind write rely on bitmap, see bitmap_operations */
1459 if (!md_bitmap_enabled(mddev, false))
1460 return;
1461
1462 err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
1463 if (err)
1464 return;
1465
1466 /* Don't do behind IO if reader is waiting, or there are too many. */
1467 if (!stats.behind_wait && stats.behind_writes < max_write_behind)
1468 alloc_behind_master_bio(r1_bio, bio);
1469
1470 if (test_bit(R1BIO_BehindIO, &r1_bio->state))
1471 mddev->bitmap_ops->start_behind_write(mddev);
1472
1473 }
1474
1475 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1476 int max_write_sectors)
1477 {
1478 struct r1conf *conf = mddev->private;
1479 struct r1bio *r1_bio;
1480 int i, disks, k;
1481 unsigned long flags;
1482 int first_clone;
1483 int max_sectors;
1484 bool write_behind = false;
1485 bool is_discard = (bio_op(bio) == REQ_OP_DISCARD);
1486
1487 if (mddev_is_clustered(mddev) &&
1488 mddev->cluster_ops->area_resyncing(mddev, WRITE,
1489 bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1490
1491 DEFINE_WAIT(w);
1492 if (bio->bi_opf & REQ_NOWAIT) {
1493 bio_wouldblock_error(bio);
1494 return;
1495 }
1496 for (;;) {
1497 prepare_to_wait(&conf->wait_barrier,
1498 &w, TASK_IDLE);
1499 if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
1500 bio->bi_iter.bi_sector,
1501 bio_end_sector(bio)))
1502 break;
1503 schedule();
1504 }
1505 finish_wait(&conf->wait_barrier, &w);
1506 }
1507
1508 /*
1509 * Register the new request and wait if the reconstruction
1510 * thread has put up a bar for new requests.
1511 * Continue immediately if no resync is active currently.
1512 */
1513 if (!wait_barrier(conf, bio->bi_iter.bi_sector,
1514 bio->bi_opf & REQ_NOWAIT)) {
1515 bio_wouldblock_error(bio);
1516 return;
1517 }
1518
1519 if (!wait_blocked_rdev(mddev, bio)) {
1520 bio_wouldblock_error(bio);
1521 return;
1522 }
1523
1524 r1_bio = alloc_r1bio(mddev, bio);
1525 r1_bio->sectors = max_write_sectors;
1526
1527 /* first select target devices under rcu_lock and
1528 * inc refcount on their rdev. Record them by setting
1529 * bios[x] to bio
1530 * If there are known/acknowledged bad blocks on any device on
1531 * which we have seen a write error, we want to avoid writing those
1532 * blocks.
1533 * This potentially requires several writes to write around
1534 * the bad blocks. Each set of writes gets its own r1bio
1535 * with a set of bios attached.
1536 */
1537
1538 disks = conf->raid_disks * 2;
1539 max_sectors = r1_bio->sectors;
1540 for (i = 0; i < disks; i++) {
1541 struct md_rdev *rdev = conf->mirrors[i].rdev;
1542
1543 /*
1544 * The write-behind io is only attempted on drives marked as
1545 * write-mostly, which means we could allocate write behind
1546 * bio later.
1547 */
1548 if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags))
1549 write_behind = true;
1550
1551 r1_bio->bios[i] = NULL;
1552 if (!rdev || test_bit(Faulty, &rdev->flags))
1553 continue;
1554
1555 atomic_inc(&rdev->nr_pending);
1556 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1557 sector_t first_bad;
1558 sector_t bad_sectors;
1559 int is_bad;
1560
1561 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1562 &first_bad, &bad_sectors);
1563 if (is_bad && first_bad <= r1_bio->sector) {
1564 /* Cannot write here at all */
1565 bad_sectors -= (r1_bio->sector - first_bad);
1566 if (bad_sectors < max_sectors)
1567 /* mustn't write more than bad_sectors
1568 * to other devices yet
1569 */
1570 max_sectors = bad_sectors;
1571 rdev_dec_pending(rdev, mddev);
1572 continue;
1573 }
1574 if (is_bad) {
1575 int good_sectors;
1576
1577 /*
1578 * We cannot atomically write this, so just
1579 * error in that case. It could be possible to
1580 * atomically write other mirrors, but the
1581 * complexity of supporting that is not worth
1582 * the benefit.
1583 */
1584 if (bio->bi_opf & REQ_ATOMIC)
1585 goto err_handle;
1586
1587 good_sectors = first_bad - r1_bio->sector;
1588 if (good_sectors < max_sectors)
1589 max_sectors = good_sectors;
1590 }
1591 }
1592 r1_bio->bios[i] = bio;
1593 }
1594
1595 /*
1596 * When using a bitmap, we may call alloc_behind_master_bio below.
1597 * alloc_behind_master_bio allocates a copy of the data payload a page
1598 * at a time and thus needs a new bio that can fit the whole payload
1599 * of this bio in page sized chunks.
1600 */
1601 if (write_behind && mddev->bitmap)
1602 max_sectors = min_t(int, max_sectors,
1603 BIO_MAX_VECS * (PAGE_SIZE >> 9));
1604 if (max_sectors < bio_sectors(bio)) {
1605 bio = bio_submit_split_bioset(bio, max_sectors,
1606 &conf->bio_split);
1607 if (!bio) {
1608 set_bit(R1BIO_Returned, &r1_bio->state);
1609 goto err_handle;
1610 }
1611
1612 r1_bio->master_bio = bio;
1613 r1_bio->sectors = max_sectors;
1614 }
1615
1616 md_account_bio(mddev, &bio);
1617 r1_bio->master_bio = bio;
1618 atomic_set(&r1_bio->remaining, 1);
1619 atomic_set(&r1_bio->behind_remaining, 0);
1620
1621 first_clone = 1;
1622
1623 for (i = 0; i < disks; i++) {
1624 struct bio *mbio = NULL;
1625 struct md_rdev *rdev = conf->mirrors[i].rdev;
1626 if (!r1_bio->bios[i])
1627 continue;
1628
1629 if (first_clone) {
1630 if (write_behind)
1631 raid1_start_write_behind(mddev, r1_bio, bio);
1632 first_clone = 0;
1633 }
1634
1635 if (r1_bio->behind_master_bio) {
1636 mbio = bio_alloc_clone(rdev->bdev,
1637 r1_bio->behind_master_bio,
1638 GFP_NOIO, &mddev->bio_set);
1639 if (test_bit(CollisionCheck, &rdev->flags))
1640 wait_for_serialization(rdev, r1_bio);
1641 if (test_bit(WriteMostly, &rdev->flags))
1642 atomic_inc(&r1_bio->behind_remaining);
1643 } else {
1644 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
1645 &mddev->bio_set);
1646
1647 if (mddev->serialize_policy)
1648 wait_for_serialization(rdev, r1_bio);
1649 }
1650
1651 mbio->bi_opf &= ~REQ_NOWAIT;
1652 r1_bio->bios[i] = mbio;
1653
1654 mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
1655 mbio->bi_end_io = raid1_end_write_request;
1656 if (test_bit(FailFast, &rdev->flags) &&
1657 !test_bit(WriteMostly, &rdev->flags) &&
1658 conf->raid_disks - mddev->degraded > 1)
1659 mbio->bi_opf |= MD_FAILFAST;
1660 mbio->bi_private = r1_bio;
1661
1662 atomic_inc(&r1_bio->remaining);
1663 mddev_trace_remap(mddev, mbio, r1_bio->sector);
1664 /* flush_pending_writes() needs access to the rdev so...*/
1665 mbio->bi_bdev = (void *)rdev;
1666 if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) {
1667 spin_lock_irqsave(&conf->device_lock, flags);
1668 bio_list_add(&conf->pending_bio_list, mbio);
1669 spin_unlock_irqrestore(&conf->device_lock, flags);
1670 md_wakeup_thread(mddev->thread);
1671 }
1672 }
1673
1674 r1_bio_write_done(r1_bio);
1675
1676 /* In case raid1d snuck in to freeze_array */
1677 wake_up_barrier(conf);
1678 return;
1679 err_handle:
1680 for (k = 0; k < i; k++) {
1681 if (r1_bio->bios[k]) {
1682 rdev_dec_pending(conf->mirrors[k].rdev, mddev);
1683 r1_bio->bios[k] = NULL;
1684 }
1685 }
1686
1687 raid_end_bio_io(r1_bio);
1688 }
1689
1690 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1691 {
1692 sector_t sectors;
1693
1694 if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1695 && md_flush_request(mddev, bio))
1696 return true;
1697
1698 /*
1699 * There is a limit to the maximum size, but
1700 * the read/write handler might find a lower limit
1701 * due to bad blocks. To avoid multiple splits,
1702 * we pass the maximum number of sectors down
1703 * and let the lower level perform the split.
1704 */
1705 sectors = align_to_barrier_unit_end(
1706 bio->bi_iter.bi_sector, bio_sectors(bio));
1707
1708 if (bio_data_dir(bio) == READ)
1709 raid1_read_request(mddev, bio, sectors, NULL);
1710 else {
1711 md_write_start(mddev,bio);
1712 raid1_write_request(mddev, bio, sectors);
1713 }
1714 return true;
1715 }
1716
1717 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1718 {
1719 struct r1conf *conf = mddev->private;
1720 int i;
1721
1722 lockdep_assert_held(&mddev->lock);
1723
1724 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1725 conf->raid_disks - mddev->degraded);
1726 for (i = 0; i < conf->raid_disks; i++) {
1727 struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
1728
1729 seq_printf(seq, "%s",
1730 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1731 }
1732 seq_printf(seq, "]");
1733 }
1734
1735 /**
1736 * raid1_error() - RAID1 error handler.
1737 * @mddev: affected md device.
1738 * @rdev: member device to fail.
1739 *
1740 * The routine acknowledges &rdev failure and determines new @mddev state.
1741 * If it failed, then:
1742 * - &MD_BROKEN flag is set in &mddev->flags.
1743 * - recovery is disabled.
1744 * Otherwise, it must be degraded:
1745 * - recovery is interrupted.
1746 * - &mddev->degraded is bumped.
1747 *
1748 * @rdev is marked as &Faulty excluding case when array is failed and
1749 * &mddev->fail_last_dev is off.
1750 */
1751 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1752 {
1753 struct r1conf *conf = mddev->private;
1754 unsigned long flags;
1755
1756 spin_lock_irqsave(&conf->device_lock, flags);
1757
1758 if (test_bit(In_sync, &rdev->flags) &&
1759 (conf->raid_disks - mddev->degraded) == 1) {
1760 set_bit(MD_BROKEN, &mddev->flags);
1761
1762 if (!mddev->fail_last_dev) {
1763 conf->recovery_disabled = mddev->recovery_disabled;
1764 spin_unlock_irqrestore(&conf->device_lock, flags);
1765 return;
1766 }
1767 }
1768 set_bit(Blocked, &rdev->flags);
1769 if (test_and_clear_bit(In_sync, &rdev->flags))
1770 mddev->degraded++;
1771 set_bit(Faulty, &rdev->flags);
1772 spin_unlock_irqrestore(&conf->device_lock, flags);
1773 /*
1774 * if recovery is running, make sure it aborts.
1775 */
1776 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1777 set_mask_bits(&mddev->sb_flags, 0,
1778 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1779 pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
1780 "md/raid1:%s: Operation continuing on %d devices.\n",
1781 mdname(mddev), rdev->bdev,
1782 mdname(mddev), conf->raid_disks - mddev->degraded);
1783 }
1784
1785 static void print_conf(struct r1conf *conf)
1786 {
1787 int i;
1788
1789 pr_debug("RAID1 conf printout:\n");
1790 if (!conf) {
1791 pr_debug("(!conf)\n");
1792 return;
1793 }
1794 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1795 conf->raid_disks);
1796
1797 lockdep_assert_held(&conf->mddev->reconfig_mutex);
1798 for (i = 0; i < conf->raid_disks; i++) {
1799 struct md_rdev *rdev = conf->mirrors[i].rdev;
1800 if (rdev)
1801 pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
1802 i, !test_bit(In_sync, &rdev->flags),
1803 !test_bit(Faulty, &rdev->flags),
1804 rdev->bdev);
1805 }
1806 }
1807
1808 static void close_sync(struct r1conf *conf)
1809 {
1810 int idx;
1811
1812 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1813 _wait_barrier(conf, idx, false);
1814 _allow_barrier(conf, idx);
1815 }
1816
1817 mempool_exit(&conf->r1buf_pool);
1818 }
1819
1820 static int raid1_spare_active(struct mddev *mddev)
1821 {
1822 int i;
1823 struct r1conf *conf = mddev->private;
1824 int count = 0;
1825 unsigned long flags;
1826
1827 /*
1828 * Find all failed disks within the RAID1 configuration
1829 * and mark them readable.
1830 * Called under mddev lock, so rcu protection not needed.
1831 * device_lock used to avoid races with raid1_end_read_request
1832 * which expects 'In_sync' flags and ->degraded to be consistent.
1833 */
1834 spin_lock_irqsave(&conf->device_lock, flags);
1835 for (i = 0; i < conf->raid_disks; i++) {
1836 struct md_rdev *rdev = conf->mirrors[i].rdev;
1837 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1838 if (repl
1839 && !test_bit(Candidate, &repl->flags)
1840 && repl->recovery_offset == MaxSector
1841 && !test_bit(Faulty, &repl->flags)
1842 && !test_and_set_bit(In_sync, &repl->flags)) {
1843 /* replacement has just become active */
1844 if (!rdev ||
1845 !test_and_clear_bit(In_sync, &rdev->flags))
1846 count++;
1847 if (rdev) {
1848 /* Replaced device not technically
1849 * faulty, but we need to be sure
1850 * it gets removed and never re-added
1851 */
1852 set_bit(Faulty, &rdev->flags);
1853 sysfs_notify_dirent_safe(
1854 rdev->sysfs_state);
1855 }
1856 }
1857 if (rdev
1858 && rdev->recovery_offset == MaxSector
1859 && !test_bit(Faulty, &rdev->flags)
1860 && !test_and_set_bit(In_sync, &rdev->flags)) {
1861 count++;
1862 sysfs_notify_dirent_safe(rdev->sysfs_state);
1863 }
1864 }
1865 mddev->degraded -= count;
1866 spin_unlock_irqrestore(&conf->device_lock, flags);
1867
1868 print_conf(conf);
1869 return count;
1870 }
1871
1872 static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk,
1873 bool replacement)
1874 {
1875 struct raid1_info *info = conf->mirrors + disk;
1876
1877 if (replacement)
1878 info += conf->raid_disks;
1879
1880 if (info->rdev)
1881 return false;
1882
1883 if (bdev_nonrot(rdev->bdev)) {
1884 set_bit(Nonrot, &rdev->flags);
1885 WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1);
1886 }
1887
1888 rdev->raid_disk = disk;
1889 info->head_position = 0;
1890 info->seq_start = MaxSector;
1891 WRITE_ONCE(info->rdev, rdev);
1892
1893 return true;
1894 }
1895
1896 static bool raid1_remove_conf(struct r1conf *conf, int disk)
1897 {
1898 struct raid1_info *info = conf->mirrors + disk;
1899 struct md_rdev *rdev = info->rdev;
1900
1901 if (!rdev || test_bit(In_sync, &rdev->flags) ||
1902 atomic_read(&rdev->nr_pending))
1903 return false;
1904
1905 /* Only remove non-faulty devices if recovery is not possible. */
1906 if (!test_bit(Faulty, &rdev->flags) &&
1907 rdev->mddev->recovery_disabled != conf->recovery_disabled &&
1908 rdev->mddev->degraded < conf->raid_disks)
1909 return false;
1910
1911 if (test_and_clear_bit(Nonrot, &rdev->flags))
1912 WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1);
1913
1914 WRITE_ONCE(info->rdev, NULL);
1915 return true;
1916 }
1917
1918 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1919 {
1920 struct r1conf *conf = mddev->private;
1921 int err = -EEXIST;
1922 int mirror = 0, repl_slot = -1;
1923 struct raid1_info *p;
1924 int first = 0;
1925 int last = conf->raid_disks - 1;
1926
1927 if (mddev->recovery_disabled == conf->recovery_disabled)
1928 return -EBUSY;
1929
1930 if (rdev->raid_disk >= 0)
1931 first = last = rdev->raid_disk;
1932
1933 /*
1934 * find the disk ... but prefer rdev->saved_raid_disk
1935 * if possible.
1936 */
1937 if (rdev->saved_raid_disk >= 0 &&
1938 rdev->saved_raid_disk >= first &&
1939 rdev->saved_raid_disk < conf->raid_disks &&
1940 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1941 first = last = rdev->saved_raid_disk;
1942
1943 for (mirror = first; mirror <= last; mirror++) {
1944 p = conf->mirrors + mirror;
1945 if (!p->rdev) {
1946 err = mddev_stack_new_rdev(mddev, rdev);
1947 if (err)
1948 return err;
1949
1950 raid1_add_conf(conf, rdev, mirror, false);
1951 /* As all devices are equivalent, we don't need a full recovery
1952 * if this device was recently a member of the array
1953 */
1954 if (rdev->saved_raid_disk < 0)
1955 conf->fullsync = 1;
1956 break;
1957 }
1958 if (test_bit(WantReplacement, &p->rdev->flags) &&
1959 p[conf->raid_disks].rdev == NULL && repl_slot < 0)
1960 repl_slot = mirror;
1961 }
1962
1963 if (err && repl_slot >= 0) {
1964 /* Add this device as a replacement */
1965 clear_bit(In_sync, &rdev->flags);
1966 set_bit(Replacement, &rdev->flags);
1967 raid1_add_conf(conf, rdev, repl_slot, true);
1968 err = 0;
1969 conf->fullsync = 1;
1970 }
1971
1972 print_conf(conf);
1973 return err;
1974 }
1975
1976 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1977 {
1978 struct r1conf *conf = mddev->private;
1979 int err = 0;
1980 int number = rdev->raid_disk;
1981 struct raid1_info *p = conf->mirrors + number;
1982
1983 if (unlikely(number >= conf->raid_disks))
1984 goto abort;
1985
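/*
 * If rdev is not in the primary slot it must be the replacement,
 * which lives conf->raid_disks entries further along in conf->mirrors.
 */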
1986 if (rdev != p->rdev) {
1987 number += conf->raid_disks;
1988 p = conf->mirrors + number;
1989 }
1990
1991 print_conf(conf);
1992 if (rdev == p->rdev) {
1993 if (!raid1_remove_conf(conf, number)) {
1994 err = -EBUSY;
1995 goto abort;
1996 }
1997
1998 if (number < conf->raid_disks &&
1999 conf->mirrors[conf->raid_disks + number].rdev) {
2000 /* We just removed a device that is being replaced.
2001 * Move down the replacement. We drain all IO before
2002 * doing this to avoid confusion.
2003 */
2004 struct md_rdev *repl =
2005 conf->mirrors[conf->raid_disks + number].rdev;
2006 freeze_array(conf, 0);
2007 if (atomic_read(&repl->nr_pending)) {
2008 /* Some queued IO on the retry_list still holds a
2009 * reference to repl, so we cannot clear the
2010 * replacement slot yet; doing so could lead to a
2011 * NULL rdev dereference in sync_request_write and
2012 * handle_write_finished.
2013 */
2014 err = -EBUSY;
2015 unfreeze_array(conf);
2016 goto abort;
2017 }
2018 clear_bit(Replacement, &repl->flags);
2019 WRITE_ONCE(p->rdev, repl);
2020 conf->mirrors[conf->raid_disks + number].rdev = NULL;
2021 unfreeze_array(conf);
2022 }
2023
2024 clear_bit(WantReplacement, &rdev->flags);
2025 err = md_integrity_register(mddev);
2026 }
2027 abort:
2028
2029 print_conf(conf);
2030 return err;
2031 }
2032
2033 static void end_sync_read(struct bio *bio)
2034 {
2035 struct r1bio *r1_bio = get_resync_r1bio(bio);
2036
2037 update_head_pos(r1_bio->read_disk, r1_bio);
2038
2039 /*
2040 * we have read a block, now it needs to be re-written,
2041 * or re-read if the read failed.
2042 * We don't do much here, just schedule handling by raid1d
2043 */
2044 if (!bio->bi_status)
2045 set_bit(R1BIO_Uptodate, &r1_bio->state);
2046
2047 if (atomic_dec_and_test(&r1_bio->remaining))
2048 reschedule_retry(r1_bio);
2049 }
2050
2051 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
2052 {
2053 sector_t sync_blocks = 0;
2054 sector_t s = r1_bio->sector;
2055 long sectors_to_go = r1_bio->sectors;
2056
2057 /* make sure these bits don't get cleared. */
2058 do {
2059 md_bitmap_end_sync(mddev, s, &sync_blocks);
2060 s += sync_blocks;
2061 sectors_to_go -= sync_blocks;
2062 } while (sectors_to_go > 0);
2063 }
2064
2065 static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
2066 {
2067 if (atomic_dec_and_test(&r1_bio->remaining)) {
2068 struct mddev *mddev = r1_bio->mddev;
2069 int s = r1_bio->sectors;
2070
2071 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2072 test_bit(R1BIO_WriteError, &r1_bio->state))
2073 reschedule_retry(r1_bio);
2074 else {
2075 put_buf(r1_bio);
2076 md_done_sync(mddev, s, uptodate);
2077 }
2078 }
2079 }
2080
2081 static void end_sync_write(struct bio *bio)
2082 {
2083 int uptodate = !bio->bi_status;
2084 struct r1bio *r1_bio = get_resync_r1bio(bio);
2085 struct mddev *mddev = r1_bio->mddev;
2086 struct r1conf *conf = mddev->private;
2087 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
2088
2089 if (!uptodate) {
2090 abort_sync_write(mddev, r1_bio);
2091 set_bit(WriteErrorSeen, &rdev->flags);
2092 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2093 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2095 set_bit(R1BIO_WriteError, &r1_bio->state);
2096 } else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
2097 !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev,
2098 r1_bio->sector, r1_bio->sectors)) {
2099 set_bit(R1BIO_MadeGood, &r1_bio->state);
2100 }
2101
2102 put_sync_write_buf(r1_bio, uptodate);
2103 }
2104
2105 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
2106 int sectors, struct page *page, blk_opf_t rw)
2107 {
2108 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2109 /* success */
2110 return 1;
2111 if (rw == REQ_OP_WRITE) {
2112 set_bit(WriteErrorSeen, &rdev->flags);
2113 if (!test_and_set_bit(WantReplacement,
2114 &rdev->flags))
2115 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2117 }
2118 /* need to record an error - either for the block or the device */
2119 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2120 md_error(rdev->mddev, rdev);
2121 return 0;
2122 }
2123
2124 static int fix_sync_read_error(struct r1bio *r1_bio)
2125 {
2126 /* Try some synchronous reads of other devices to get
2127 * good data, much like with normal read errors. Only
2128 * read into the pages we already have so we don't
2129 * need to re-issue the read request.
2130 * We don't need to freeze the array, because being in an
2131 * active sync request, there is no normal IO, and
2132 * no overlapping syncs.
2133 * We don't need to check is_badblock() again as we
2134 * made sure that anything with a bad block in range
2135 * will have bi_end_io clear.
2136 */
2137 struct mddev *mddev = r1_bio->mddev;
2138 struct r1conf *conf = mddev->private;
2139 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
2140 struct page **pages = get_resync_pages(bio)->pages;
2141 sector_t sect = r1_bio->sector;
2142 int sectors = r1_bio->sectors;
2143 int idx = 0;
2144 struct md_rdev *rdev;
2145
2146 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2147 if (test_bit(FailFast, &rdev->flags)) {
2148 /* Don't try recovering from here - just fail it
2149 * ... unless it is the last working device of course */
2150 md_error(mddev, rdev);
2151 if (test_bit(Faulty, &rdev->flags))
2152 /* Don't try to read from here, but make sure
2153 * put_buf does its thing
2154 */
2155 bio->bi_end_io = end_sync_write;
2156 }
2157
2158 while (sectors) {
2159 int s = sectors;
2160 int d = r1_bio->read_disk;
2161 int success = 0;
2162 int start;
2163
2164 if (s > (PAGE_SIZE>>9))
2165 s = PAGE_SIZE >> 9;
2166 do {
2167 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2168 /* No rcu protection needed here: devices
2169 * can only be removed when no resync is
2170 * active, and resync is currently active
2171 */
2172 rdev = conf->mirrors[d].rdev;
2173 if (sync_page_io(rdev, sect, s<<9,
2174 pages[idx],
2175 REQ_OP_READ, false)) {
2176 success = 1;
2177 break;
2178 }
2179 }
2180 d++;
2181 if (d == conf->raid_disks * 2)
2182 d = 0;
2183 } while (!success && d != r1_bio->read_disk);
2184
2185 if (!success) {
2186 int abort = 0;
2187 /* Cannot read from anywhere, this block is lost.
2188 * Record a bad block on each device. If that doesn't
2189 * work just disable and interrupt the recovery.
2190 * Don't fail devices as that won't really help.
2191 */
2192 pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
2193 mdname(mddev), bio->bi_bdev,
2194 (unsigned long long)r1_bio->sector);
2195 for (d = 0; d < conf->raid_disks * 2; d++) {
2196 rdev = conf->mirrors[d].rdev;
2197 if (!rdev || test_bit(Faulty, &rdev->flags))
2198 continue;
2199 if (!rdev_set_badblocks(rdev, sect, s, 0))
2200 abort = 1;
2201 }
2202 if (abort)
2203 return 0;
2204
2205 /* Try next page */
2206 sectors -= s;
2207 sect += s;
2208 idx++;
2209 continue;
2210 }
2211
2212 start = d;
2213 /* write it back and re-read */
2214 while (d != r1_bio->read_disk) {
2215 if (d == 0)
2216 d = conf->raid_disks * 2;
2217 d--;
2218 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2219 continue;
2220 rdev = conf->mirrors[d].rdev;
2221 if (r1_sync_page_io(rdev, sect, s,
2222 pages[idx],
2223 REQ_OP_WRITE) == 0) {
2224 r1_bio->bios[d]->bi_end_io = NULL;
2225 rdev_dec_pending(rdev, mddev);
2226 }
2227 }
2228 d = start;
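/*
 * Second pass: re-read the range we just wrote from each of those
 * devices, so the data is verified and the sectors can be counted
 * as corrected.
 */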
2229 while (d != r1_bio->read_disk) {
2230 if (d == 0)
2231 d = conf->raid_disks * 2;
2232 d--;
2233 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2234 continue;
2235 rdev = conf->mirrors[d].rdev;
2236 if (r1_sync_page_io(rdev, sect, s,
2237 pages[idx],
2238 REQ_OP_READ) != 0)
2239 atomic_add(s, &rdev->corrected_errors);
2240 }
2241 sectors -= s;
2242 sect += s;
2243 idx++;
2244 }
2245 set_bit(R1BIO_Uptodate, &r1_bio->state);
2246 bio->bi_status = 0;
2247 return 1;
2248 }
2249
2250 static void process_checks(struct r1bio *r1_bio)
2251 {
2252 /* We have read all readable devices. If we haven't
2253 * got the block, then there is no hope left.
2254 * If we have, then we want to do a comparison
2255 * and skip the write if everything is the same.
2256 * If any blocks failed to read, then we need to
2257 * attempt an over-write
2258 */
2259 struct mddev *mddev = r1_bio->mddev;
2260 struct r1conf *conf = mddev->private;
2261 int primary;
2262 int i;
2263 int vcnt;
2264
2265 /* Fix variable parts of all bios */
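/*
 * vcnt is the number of resync pages covering r1_bio->sectors,
 * e.g. 128 sectors need 16 pages with 4 KiB pages.
 */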
2266 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2267 for (i = 0; i < conf->raid_disks * 2; i++) {
2268 blk_status_t status;
2269 struct bio *b = r1_bio->bios[i];
2270 struct resync_pages *rp = get_resync_pages(b);
2271 if (b->bi_end_io != end_sync_read)
2272 continue;
2273 /* fixup the bio for reuse, but preserve errno */
2274 status = b->bi_status;
2275 bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
2276 b->bi_status = status;
2277 b->bi_iter.bi_sector = r1_bio->sector +
2278 conf->mirrors[i].rdev->data_offset;
2279 b->bi_end_io = end_sync_read;
2280 rp->raid_bio = r1_bio;
2281 b->bi_private = rp;
2282
2283 /* initialize bvec table again */
2284 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2285 }
2286 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2287 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2288 !r1_bio->bios[primary]->bi_status) {
2289 r1_bio->bios[primary]->bi_end_io = NULL;
2290 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2291 break;
2292 }
2293 r1_bio->read_disk = primary;
2294 for (i = 0; i < conf->raid_disks * 2; i++) {
2295 int j = 0;
2296 struct bio *pbio = r1_bio->bios[primary];
2297 struct bio *sbio = r1_bio->bios[i];
2298 blk_status_t status = sbio->bi_status;
2299 struct page **ppages = get_resync_pages(pbio)->pages;
2300 struct page **spages = get_resync_pages(sbio)->pages;
2301 struct bio_vec *bi;
2302 int page_len[RESYNC_PAGES] = { 0 };
2303 struct bvec_iter_all iter_all;
2304
2305 if (sbio->bi_end_io != end_sync_read)
2306 continue;
2307 /* Now we can 'fixup' the error value */
2308 sbio->bi_status = 0;
2309
2310 bio_for_each_segment_all(bi, sbio, iter_all)
2311 page_len[j++] = bi->bv_len;
2312
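/*
 * Compare this candidate with the primary page by page: j ends at -1
 * if every page matches, otherwise at the first differing page found
 * (scanning from the last page down). A read error (status set) is
 * treated as a mismatch by forcing j to 0.
 */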
2313 if (!status) {
2314 for (j = vcnt; j-- ; ) {
2315 if (memcmp(page_address(ppages[j]),
2316 page_address(spages[j]),
2317 page_len[j]))
2318 break;
2319 }
2320 } else
2321 j = 0;
2322 if (j >= 0)
2323 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2324 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2325 && !status)) {
2326 /* No need to write to this device. */
2327 sbio->bi_end_io = NULL;
2328 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2329 continue;
2330 }
2331
2332 bio_copy_data(sbio, pbio);
2333 }
2334 }
2335
2336 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2337 {
2338 struct r1conf *conf = mddev->private;
2339 int i;
2340 int disks = conf->raid_disks * 2;
2341 struct bio *wbio;
2342
2343 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
2344 /*
2345 * ouch - failed to read all of that.
2346 * No need to fix read error for check/repair
2347 * because all member disks are read.
2348 */
2349 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
2350 !fix_sync_read_error(r1_bio)) {
2351 conf->recovery_disabled = mddev->recovery_disabled;
2352 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2353 md_done_sync(mddev, r1_bio->sectors, 0);
2354 put_buf(r1_bio);
2355 return;
2356 }
2357 }
2358
2359 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2360 process_checks(r1_bio);
2361
2362 /*
2363 * schedule writes
2364 */
2365 atomic_set(&r1_bio->remaining, 1);
2366 for (i = 0; i < disks ; i++) {
2367 wbio = r1_bio->bios[i];
2368 if (wbio->bi_end_io == NULL ||
2369 (wbio->bi_end_io == end_sync_read &&
2370 (i == r1_bio->read_disk ||
2371 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2372 continue;
2373 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2374 abort_sync_write(mddev, r1_bio);
2375 continue;
2376 }
2377
2378 wbio->bi_opf = REQ_OP_WRITE;
2379 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2380 wbio->bi_opf |= MD_FAILFAST;
2381
2382 wbio->bi_end_io = end_sync_write;
2383 atomic_inc(&r1_bio->remaining);
2384
2385 submit_bio_noacct(wbio);
2386 }
2387
2388 put_sync_write_buf(r1_bio, 1);
2389 }
2390
2391 /*
2392 * This is a kernel thread which:
2393 *
2394 * 1. Retries failed read operations on working mirrors.
2395 * 2. Updates the raid superblock when problems are encountered.
2396 * 3. Performs writes following reads for array synchronising.
2397 */
2398
2399 static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2400 {
2401 sector_t sect = r1_bio->sector;
2402 int sectors = r1_bio->sectors;
2403 int read_disk = r1_bio->read_disk;
2404 struct mddev *mddev = conf->mddev;
2405 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2406
2407 if (exceed_read_errors(mddev, rdev)) {
2408 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2409 return;
2410 }
2411
2412 while (sectors) {
2413 int s = sectors;
2414 int d = read_disk;
2415 int success = 0;
2416 int start;
2417
2418 if (s > (PAGE_SIZE>>9))
2419 s = PAGE_SIZE >> 9;
2420
2421 do {
2422 rdev = conf->mirrors[d].rdev;
2423 if (rdev &&
2424 (test_bit(In_sync, &rdev->flags) ||
2425 (!test_bit(Faulty, &rdev->flags) &&
2426 rdev->recovery_offset >= sect + s)) &&
2427 rdev_has_badblock(rdev, sect, s) == 0) {
2428 atomic_inc(&rdev->nr_pending);
2429 if (sync_page_io(rdev, sect, s<<9,
2430 conf->tmppage, REQ_OP_READ, false))
2431 success = 1;
2432 rdev_dec_pending(rdev, mddev);
2433 if (success)
2434 break;
2435 }
2436
2437 d++;
2438 if (d == conf->raid_disks * 2)
2439 d = 0;
2440 } while (d != read_disk);
2441
2442 if (!success) {
2443 /* Cannot read from anywhere - mark it bad */
2444 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2445 if (!rdev_set_badblocks(rdev, sect, s, 0))
2446 md_error(mddev, rdev);
2447 break;
2448 }
2449 /* write it back and re-read */
2450 start = d;
2451 while (d != read_disk) {
2452 if (d == 0)
2453 d = conf->raid_disks * 2;
2454 d--;
2455 rdev = conf->mirrors[d].rdev;
2456 if (rdev &&
2457 !test_bit(Faulty, &rdev->flags)) {
2458 atomic_inc(&rdev->nr_pending);
2459 r1_sync_page_io(rdev, sect, s,
2460 conf->tmppage, REQ_OP_WRITE);
2461 rdev_dec_pending(rdev, mddev);
2462 }
2463 }
2464 d = start;
2465 while (d != read_disk) {
2466 if (d == 0)
2467 d = conf->raid_disks * 2;
2468 d--;
2469 rdev = conf->mirrors[d].rdev;
2470 if (rdev &&
2471 !test_bit(Faulty, &rdev->flags)) {
2472 atomic_inc(&rdev->nr_pending);
2473 if (r1_sync_page_io(rdev, sect, s,
2474 conf->tmppage, REQ_OP_READ)) {
2475 atomic_add(s, &rdev->corrected_errors);
2476 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
2477 mdname(mddev), s,
2478 (unsigned long long)(sect +
2479 rdev->data_offset),
2480 rdev->bdev);
2481 }
2482 rdev_dec_pending(rdev, mddev);
2483 }
2484 }
2485 sectors -= s;
2486 sect += s;
2487 }
2488 }
2489
2490 static bool narrow_write_error(struct r1bio *r1_bio, int i)
2491 {
2492 struct mddev *mddev = r1_bio->mddev;
2493 struct r1conf *conf = mddev->private;
2494 struct md_rdev *rdev = conf->mirrors[i].rdev;
2495
2496 /* bio has the data to be written to device 'i' where
2497 * we just recently had a write error.
2498 * We repeatedly clone the bio and trim down to one block,
2499 * then try the write. Where the write fails we record
2500 * a bad block.
2501 * It is conceivable that the bio doesn't exactly align with
2502 * blocks. We must handle this somehow.
2503 *
2504 * We currently own a reference on the rdev.
2505 */
2506
2507 int block_sectors;
2508 sector_t sector;
2509 int sectors;
2510 int sect_to_write = r1_bio->sectors;
2511 bool ok = true;
2512
2513 if (rdev->badblocks.shift < 0)
2514 return false;
2515
2516 block_sectors = roundup(1 << rdev->badblocks.shift,
2517 bdev_logical_block_size(rdev->bdev) >> 9);
2518 sector = r1_bio->sector;
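/*
 * The first chunk runs only up to the next block_sectors boundary,
 * so every subsequent write starts aligned to the bad-block
 * granularity.
 */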
2519 sectors = ((sector + block_sectors)
2520 & ~(sector_t)(block_sectors - 1))
2521 - sector;
2522
2523 while (sect_to_write) {
2524 struct bio *wbio;
2525 if (sectors > sect_to_write)
2526 sectors = sect_to_write;
2527 /* Write at 'sector' for 'sectors' */
2528
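/*
 * Behind writes keep their data in behind_master_bio (a copy made
 * for write-behind), so clone from that; otherwise clone straight
 * from the original master bio.
 */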
2529 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2530 wbio = bio_alloc_clone(rdev->bdev,
2531 r1_bio->behind_master_bio,
2532 GFP_NOIO, &mddev->bio_set);
2533 } else {
2534 wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
2535 GFP_NOIO, &mddev->bio_set);
2536 }
2537
2538 wbio->bi_opf = REQ_OP_WRITE;
2539 wbio->bi_iter.bi_sector = r1_bio->sector;
2540 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2541
2542 bio_trim(wbio, sector - r1_bio->sector, sectors);
2543 wbio->bi_iter.bi_sector += rdev->data_offset;
2544
2545 if (submit_bio_wait(wbio) < 0)
2546 /* failure! */
2547 ok = rdev_set_badblocks(rdev, sector,
2548 sectors, 0)
2549 && ok;
2550
2551 bio_put(wbio);
2552 sect_to_write -= sectors;
2553 sector += sectors;
2554 sectors = block_sectors;
2555 }
2556 return ok;
2557 }
2558
2559 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2560 {
2561 int m;
2562 int s = r1_bio->sectors;
2563 for (m = 0; m < conf->raid_disks * 2 ; m++) {
2564 struct md_rdev *rdev = conf->mirrors[m].rdev;
2565 struct bio *bio = r1_bio->bios[m];
2566 if (bio->bi_end_io == NULL)
2567 continue;
2568 if (!bio->bi_status &&
2569 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2570 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2571 }
2572 if (bio->bi_status &&
2573 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2574 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2575 md_error(conf->mddev, rdev);
2576 }
2577 }
2578 put_buf(r1_bio);
2579 md_done_sync(conf->mddev, s, 1);
2580 }
2581
2582 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2583 {
2584 int m, idx;
2585 bool fail = false;
2586
2587 for (m = 0; m < conf->raid_disks * 2 ; m++)
2588 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2589 struct md_rdev *rdev = conf->mirrors[m].rdev;
2590 rdev_clear_badblocks(rdev,
2591 r1_bio->sector,
2592 r1_bio->sectors, 0);
2593 rdev_dec_pending(rdev, conf->mddev);
2594 } else if (r1_bio->bios[m] != NULL) {
2595 /* This drive got a write error. We need to
2596 * narrow down and record precise write
2597 * errors.
2598 */
2599 fail = true;
2600 if (!narrow_write_error(r1_bio, m))
2601 md_error(conf->mddev,
2602 conf->mirrors[m].rdev);
2603 /* an I/O failed, we can't clear the bitmap */
2604 rdev_dec_pending(conf->mirrors[m].rdev,
2605 conf->mddev);
2606 }
2607 if (fail) {
2608 spin_lock_irq(&conf->device_lock);
2609 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2610 idx = sector_to_idx(r1_bio->sector);
2611 atomic_inc(&conf->nr_queued[idx]);
2612 spin_unlock_irq(&conf->device_lock);
2613 /*
2614 * In case freeze_array() is waiting for condition
2615 * get_unqueued_pending() == extra to be true.
2616 */
2617 wake_up(&conf->wait_barrier);
2618 md_wakeup_thread(conf->mddev->thread);
2619 } else {
2620 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2621 close_write(r1_bio);
2622 raid_end_bio_io(r1_bio);
2623 }
2624 }
2625
2626 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2627 {
2628 struct mddev *mddev = conf->mddev;
2629 struct bio *bio;
2630 struct md_rdev *rdev;
2631 sector_t sector;
2632
2633 clear_bit(R1BIO_ReadError, &r1_bio->state);
2634 /* we got a read error. Maybe the drive is bad. Maybe just
2635 * the block and we can fix it.
2636 * We freeze all other IO, and try reading the block from
2637 * other devices. When we find one, we re-write the block
2638 * and re-check it, and that fixes the read error.
2639 * This is all done synchronously while the array is
2640 * frozen
2641 */
2642
2643 bio = r1_bio->bios[r1_bio->read_disk];
2644 bio_put(bio);
2645 r1_bio->bios[r1_bio->read_disk] = NULL;
2646
2647 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2648 if (mddev->ro == 0
2649 && !test_bit(FailFast, &rdev->flags)) {
2650 freeze_array(conf, 1);
2651 fix_read_error(conf, r1_bio);
2652 unfreeze_array(conf);
2653 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2654 md_error(mddev, rdev);
2655 } else {
2656 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2657 }
2658
2659 rdev_dec_pending(rdev, conf->mddev);
2660 sector = r1_bio->sector;
2661 bio = r1_bio->master_bio;
2662
2663 /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2664 r1_bio->state = 0;
2665 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2666 allow_barrier(conf, sector);
2667 }
2668
2669 static void raid1d(struct md_thread *thread)
2670 {
2671 struct mddev *mddev = thread->mddev;
2672 struct r1bio *r1_bio;
2673 unsigned long flags;
2674 struct r1conf *conf = mddev->private;
2675 struct list_head *head = &conf->retry_list;
2676 struct blk_plug plug;
2677 int idx;
2678
2679 md_check_recovery(mddev);
2680
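/*
 * Bios that recorded write errors sit on bio_end_io_list until the
 * superblock update carrying the new bad blocks is no longer
 * pending; complete any that are now safe to finish.
 */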
2681 if (!list_empty_careful(&conf->bio_end_io_list) &&
2682 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2683 LIST_HEAD(tmp);
2684 spin_lock_irqsave(&conf->device_lock, flags);
2685 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2686 list_splice_init(&conf->bio_end_io_list, &tmp);
2687 spin_unlock_irqrestore(&conf->device_lock, flags);
2688 while (!list_empty(&tmp)) {
2689 r1_bio = list_first_entry(&tmp, struct r1bio,
2690 retry_list);
2691 list_del(&r1_bio->retry_list);
2692 idx = sector_to_idx(r1_bio->sector);
2693 atomic_dec(&conf->nr_queued[idx]);
2694 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2695 close_write(r1_bio);
2696 raid_end_bio_io(r1_bio);
2697 }
2698 }
2699
2700 blk_start_plug(&plug);
2701 for (;;) {
2702
2703 flush_pending_writes(conf);
2704
2705 spin_lock_irqsave(&conf->device_lock, flags);
2706 if (list_empty(head)) {
2707 spin_unlock_irqrestore(&conf->device_lock, flags);
2708 break;
2709 }
2710 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2711 list_del(head->prev);
2712 idx = sector_to_idx(r1_bio->sector);
2713 atomic_dec(&conf->nr_queued[idx]);
2714 spin_unlock_irqrestore(&conf->device_lock, flags);
2715
2716 mddev = r1_bio->mddev;
2717 conf = mddev->private;
2718 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2719 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2720 test_bit(R1BIO_WriteError, &r1_bio->state))
2721 handle_sync_write_finished(conf, r1_bio);
2722 else
2723 sync_request_write(mddev, r1_bio);
2724 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2725 test_bit(R1BIO_WriteError, &r1_bio->state))
2726 handle_write_finished(conf, r1_bio);
2727 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2728 handle_read_error(conf, r1_bio);
2729 else
2730 WARN_ON_ONCE(1);
2731
2732 cond_resched();
2733 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2734 md_check_recovery(mddev);
2735 }
2736 blk_finish_plug(&plug);
2737 }
2738
2739 static int init_resync(struct r1conf *conf)
2740 {
2741 int buffs;
2742
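/* Pre-allocate one r1buf per RESYNC_BLOCK_SIZE block in the resync window. */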
2743 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2744 BUG_ON(mempool_initialized(&conf->r1buf_pool));
2745
2746 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2747 r1buf_pool_free, conf);
2748 }
2749
2750 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2751 {
2752 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2753 struct resync_pages *rps;
2754 struct bio *bio;
2755 int i;
2756
2757 for (i = conf->raid_disks * 2; i--; ) {
2758 bio = r1bio->bios[i];
2759 rps = bio->bi_private;
2760 bio_reset(bio, NULL, 0);
2761 bio->bi_private = rps;
2762 }
2763 r1bio->master_bio = NULL;
2764 return r1bio;
2765 }
2766
2767 /*
2768 * perform a "sync" on one "block"
2769 *
2770 * We need to make sure that no normal I/O request - particularly write
2771 * requests - conflict with active sync requests.
2772 *
2773 * This is achieved by tracking pending requests and a 'barrier' concept
2774 * that can be installed to exclude normal IO requests.
2775 */
2776
2777 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2778 sector_t max_sector, int *skipped)
2779 {
2780 struct r1conf *conf = mddev->private;
2781 struct r1bio *r1_bio;
2782 struct bio *bio;
2783 sector_t nr_sectors;
2784 int disk = -1;
2785 int i;
2786 int wonly = -1;
2787 int write_targets = 0, read_targets = 0;
2788 sector_t sync_blocks;
2789 bool still_degraded = false;
2790 int good_sectors = RESYNC_SECTORS;
2791 int min_bad = 0; /* number of sectors that are bad in all devices */
2792 int idx = sector_to_idx(sector_nr);
2793 int page_idx = 0;
2794
2795 if (!mempool_initialized(&conf->r1buf_pool))
2796 if (init_resync(conf))
2797 return 0;
2798
2799 if (sector_nr >= max_sector) {
2800 /* If we aborted, we need to abort the
2801 * sync on the 'current' bitmap chunk (there will
2802 * only be one in raid1 resync).
2803 * We can find the current address in mddev->curr_resync
2804 */
2805 if (mddev->curr_resync < max_sector) /* aborted */
2806 md_bitmap_end_sync(mddev, mddev->curr_resync,
2807 &sync_blocks);
2808 else /* completed sync */
2809 conf->fullsync = 0;
2810
2811 if (md_bitmap_enabled(mddev, false))
2812 mddev->bitmap_ops->close_sync(mddev);
2813 close_sync(conf);
2814
2815 if (mddev_is_clustered(mddev)) {
2816 conf->cluster_sync_low = 0;
2817 conf->cluster_sync_high = 0;
2818 }
2819 return 0;
2820 }
2821
2822 if (mddev->bitmap == NULL &&
2823 mddev->resync_offset == MaxSector &&
2824 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2825 conf->fullsync == 0) {
2826 *skipped = 1;
2827 return max_sector - sector_nr;
2828 }
2829 /* Before building a request, check if we can skip these blocks.
2830 * This call to md_bitmap_start_sync doesn't actually record anything
2831 */
2832 if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
2833 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2834 /* We can skip this block, and probably several more */
2835 *skipped = 1;
2836 return sync_blocks;
2837 }
2838
2839 /*
2840 * If there is non-resync activity waiting for a turn, then let it
2841 * through before starting on this new sync request.
2842 */
2843 if (atomic_read(&conf->nr_waiting[idx]))
2844 schedule_timeout_uninterruptible(1);
2845
2846 /* we are incrementing sector_nr below. To be safe, we check against
2847 * sector_nr + two times RESYNC_SECTORS
2848 */
2849 if (md_bitmap_enabled(mddev, false))
2850 mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
2851 mddev_is_clustered(mddev) &&
2852 (sector_nr + 2 * RESYNC_SECTORS >
2853 conf->cluster_sync_high));
2854
2855 if (raise_barrier(conf, sector_nr))
2856 return 0;
2857
2858 r1_bio = raid1_alloc_init_r1buf(conf);
2859
2860 /*
2861 * If we get a correctably read error during resync or recovery,
2862 * we might want to read from a different device. So we
2863 * flag all drives that could conceivably be read from for READ,
2864 * and any others (which will be non-In_sync devices) for WRITE.
2865 * If a read fails, we try reading from something else for which READ
2866 * is OK.
2867 */
2868
2869 r1_bio->mddev = mddev;
2870 r1_bio->sector = sector_nr;
2871 r1_bio->state = 0;
2872 set_bit(R1BIO_IsSync, &r1_bio->state);
2873 /* make sure good_sectors won't go across barrier unit boundary */
2874 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2875
2876 for (i = 0; i < conf->raid_disks * 2; i++) {
2877 struct md_rdev *rdev;
2878 bio = r1_bio->bios[i];
2879
2880 rdev = conf->mirrors[i].rdev;
2881 if (rdev == NULL ||
2882 test_bit(Faulty, &rdev->flags)) {
2883 if (i < conf->raid_disks)
2884 still_degraded = true;
2885 } else if (!test_bit(In_sync, &rdev->flags)) {
2886 bio->bi_opf = REQ_OP_WRITE;
2887 bio->bi_end_io = end_sync_write;
2888 write_targets++;
2889 } else {
2890 /* may need to read from here */
2891 sector_t first_bad = MaxSector;
2892 sector_t bad_sectors;
2893
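/*
 * A known bad block limits this request: clip good_sectors to stop
 * before a bad range that starts later, or, if the bad range already
 * covers sector_nr, remember the shortest such extent in min_bad.
 */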
2894 if (is_badblock(rdev, sector_nr, good_sectors,
2895 &first_bad, &bad_sectors)) {
2896 if (first_bad > sector_nr)
2897 good_sectors = first_bad - sector_nr;
2898 else {
2899 bad_sectors -= (sector_nr - first_bad);
2900 if (min_bad == 0 ||
2901 min_bad > bad_sectors)
2902 min_bad = bad_sectors;
2903 }
2904 }
2905 if (sector_nr < first_bad) {
2906 if (test_bit(WriteMostly, &rdev->flags)) {
2907 if (wonly < 0)
2908 wonly = i;
2909 } else {
2910 if (disk < 0)
2911 disk = i;
2912 }
2913 bio->bi_opf = REQ_OP_READ;
2914 bio->bi_end_io = end_sync_read;
2915 read_targets++;
2916 } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2917 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2918 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2919 /*
2920 * The device is suitable for reading (InSync),
2921 * but has bad block(s) here. Let's try to correct them,
2922 * if we are doing resync or repair. Otherwise, leave
2923 * this device alone for this sync request.
2924 */
2925 bio->bi_opf = REQ_OP_WRITE;
2926 bio->bi_end_io = end_sync_write;
2927 write_targets++;
2928 }
2929 }
2930 if (rdev && bio->bi_end_io) {
2931 atomic_inc(&rdev->nr_pending);
2932 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2933 bio_set_dev(bio, rdev->bdev);
2934 if (test_bit(FailFast, &rdev->flags))
2935 bio->bi_opf |= MD_FAILFAST;
2936 }
2937 }
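/*
 * Prefer a non-WriteMostly device as the read source; fall back to a
 * write-mostly one only when nothing else is readable.
 */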
2938 if (disk < 0)
2939 disk = wonly;
2940 r1_bio->read_disk = disk;
2941
2942 if (read_targets == 0 && min_bad > 0) {
2943 /* These sectors are bad on all InSync devices, so we
2944 * need to mark them bad on all write targets
2945 */
2946 int ok = 1;
2947 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2948 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2949 struct md_rdev *rdev = conf->mirrors[i].rdev;
2950 ok = rdev_set_badblocks(rdev, sector_nr,
2951 min_bad, 0
2952 ) && ok;
2953 }
2954 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2955 *skipped = 1;
2956 put_buf(r1_bio);
2957
2958 if (!ok) {
2959 /* Cannot record the badblocks, so need to
2960 * abort the resync.
2961 * If there are multiple read targets, could just
2962 * fail the really bad ones ???
2963 */
2964 conf->recovery_disabled = mddev->recovery_disabled;
2965 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2966 return 0;
2967 } else
2968 return min_bad;
2969
2970 }
2971 if (min_bad > 0 && min_bad < good_sectors) {
2972 /* only resync enough to reach the next bad->good
2973 * transition */
2974 good_sectors = min_bad;
2975 }
2976
2977 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2978 /* extra read targets are also write targets */
2979 write_targets += read_targets-1;
2980
2981 if (write_targets == 0 || read_targets == 0) {
2982 /* There is nowhere to write, so all non-sync
2983 * drives must be failed - so we are finished
2984 */
2985 sector_t rv;
2986 if (min_bad > 0)
2987 max_sector = sector_nr + min_bad;
2988 rv = max_sector - sector_nr;
2989 *skipped = 1;
2990 put_buf(r1_bio);
2991 return rv;
2992 }
2993
2994 if (max_sector > mddev->resync_max)
2995 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2996 if (max_sector > sector_nr + good_sectors)
2997 max_sector = sector_nr + good_sectors;
2998 nr_sectors = 0;
2999 sync_blocks = 0;
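/*
 * Build the request one page at a time, stopping at max_sector, at a
 * bitmap chunk that may be skipped, or once RESYNC_PAGES pages have
 * been added.
 */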
3000 do {
3001 struct page *page;
3002 int len = PAGE_SIZE;
3003 if (sector_nr + (len>>9) > max_sector)
3004 len = (max_sector - sector_nr) << 9;
3005 if (len == 0)
3006 break;
3007 if (sync_blocks == 0) {
3008 if (!md_bitmap_start_sync(mddev, sector_nr,
3009 &sync_blocks, still_degraded) &&
3010 !conf->fullsync &&
3011 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3012 break;
3013 if ((len >> 9) > sync_blocks)
3014 len = sync_blocks<<9;
3015 }
3016
3017 for (i = 0 ; i < conf->raid_disks * 2; i++) {
3018 struct resync_pages *rp;
3019
3020 bio = r1_bio->bios[i];
3021 rp = get_resync_pages(bio);
3022 if (bio->bi_end_io) {
3023 page = resync_fetch_page(rp, page_idx);
3024
3025 /*
3026 * won't fail because the vec table is big
3027 * enough to hold all these pages
3028 */
3029 __bio_add_page(bio, page, len, 0);
3030 }
3031 }
3032 nr_sectors += len>>9;
3033 sector_nr += len>>9;
3034 sync_blocks -= (len>>9);
3035 } while (++page_idx < RESYNC_PAGES);
3036
3037 r1_bio->sectors = nr_sectors;
3038
3039 if (mddev_is_clustered(mddev) &&
3040 conf->cluster_sync_high < sector_nr + nr_sectors) {
3041 conf->cluster_sync_low = mddev->curr_resync_completed;
3042 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
3043 /* Send resync message */
3044 mddev->cluster_ops->resync_info_update(mddev,
3045 conf->cluster_sync_low,
3046 conf->cluster_sync_high);
3047 }
3048
3049 /* For a user-requested sync, we read all readable devices and do a
3050 * compare
3051 */
3052 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
3053 atomic_set(&r1_bio->remaining, read_targets);
3054 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
3055 bio = r1_bio->bios[i];
3056 if (bio->bi_end_io == end_sync_read) {
3057 read_targets--;
3058 if (read_targets == 1)
3059 bio->bi_opf &= ~MD_FAILFAST;
3060 submit_bio_noacct(bio);
3061 }
3062 }
3063 } else {
3064 atomic_set(&r1_bio->remaining, 1);
3065 bio = r1_bio->bios[r1_bio->read_disk];
3066 if (read_targets == 1)
3067 bio->bi_opf &= ~MD_FAILFAST;
3068 submit_bio_noacct(bio);
3069 }
3070 return nr_sectors;
3071 }
3072
3073 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3074 {
3075 if (sectors)
3076 return sectors;
3077
3078 return mddev->dev_sectors;
3079 }
3080
3081 static struct r1conf *setup_conf(struct mddev *mddev)
3082 {
3083 struct r1conf *conf;
3084 int i;
3085 struct raid1_info *disk;
3086 struct md_rdev *rdev;
3087 size_t r1bio_size;
3088 int err = -ENOMEM;
3089
3090 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
3091 if (!conf)
3092 goto abort;
3093
3094 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
3095 sizeof(atomic_t), GFP_KERNEL);
3096 if (!conf->nr_pending)
3097 goto abort;
3098
3099 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
3100 sizeof(atomic_t), GFP_KERNEL);
3101 if (!conf->nr_waiting)
3102 goto abort;
3103
3104 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
3105 sizeof(atomic_t), GFP_KERNEL);
3106 if (!conf->nr_queued)
3107 goto abort;
3108
3109 conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
3110 sizeof(atomic_t), GFP_KERNEL);
3111 if (!conf->barrier)
3112 goto abort;
3113
3114 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3115 mddev->raid_disks, 2),
3116 GFP_KERNEL);
3117 if (!conf->mirrors)
3118 goto abort;
3119
3120 conf->tmppage = alloc_page(GFP_KERNEL);
3121 if (!conf->tmppage)
3122 goto abort;
3123
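/*
 * Each r1bio ends with one bio pointer per device plus one per
 * replacement slot, hence raid_disks * 2 entries.
 */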
3124 r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
3125 conf->r1bio_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS, r1bio_size);
3126 if (!conf->r1bio_pool)
3127 goto abort;
3128
3129 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3130 if (err)
3131 goto abort;
3132
3133 err = -EINVAL;
3134 spin_lock_init(&conf->device_lock);
3135 conf->raid_disks = mddev->raid_disks;
3136 rdev_for_each(rdev, mddev) {
3137 int disk_idx = rdev->raid_disk;
3138
3139 if (disk_idx >= conf->raid_disks || disk_idx < 0)
3140 continue;
3141
3142 if (!raid1_add_conf(conf, rdev, disk_idx,
3143 test_bit(Replacement, &rdev->flags)))
3144 goto abort;
3145 }
3146 conf->mddev = mddev;
3147 INIT_LIST_HEAD(&conf->retry_list);
3148 INIT_LIST_HEAD(&conf->bio_end_io_list);
3149
3150 spin_lock_init(&conf->resync_lock);
3151 init_waitqueue_head(&conf->wait_barrier);
3152
3153 bio_list_init(&conf->pending_bio_list);
3154 conf->recovery_disabled = mddev->recovery_disabled - 1;
3155
3156 err = -EIO;
3157 for (i = 0; i < conf->raid_disks * 2; i++) {
3158
3159 disk = conf->mirrors + i;
3160
3161 if (i < conf->raid_disks &&
3162 disk[conf->raid_disks].rdev) {
3163 /* This slot has a replacement. */
3164 if (!disk->rdev) {
3165 /* No original, just make the replacement
3166 * a recovering spare
3167 */
3168 disk->rdev =
3169 disk[conf->raid_disks].rdev;
3170 disk[conf->raid_disks].rdev = NULL;
3171 } else if (!test_bit(In_sync, &disk->rdev->flags))
3172 /* Original is not in_sync - bad */
3173 goto abort;
3174 }
3175
3176 if (!disk->rdev ||
3177 !test_bit(In_sync, &disk->rdev->flags)) {
3178 disk->head_position = 0;
3179 if (disk->rdev &&
3180 (disk->rdev->saved_raid_disk < 0))
3181 conf->fullsync = 1;
3182 }
3183 }
3184
3185 err = -ENOMEM;
3186 rcu_assign_pointer(conf->thread,
3187 md_register_thread(raid1d, mddev, "raid1"));
3188 if (!conf->thread)
3189 goto abort;
3190
3191 return conf;
3192
3193 abort:
3194 if (conf) {
3195 mempool_destroy(conf->r1bio_pool);
3196 kfree(conf->mirrors);
3197 safe_put_page(conf->tmppage);
3198 kfree(conf->nr_pending);
3199 kfree(conf->nr_waiting);
3200 kfree(conf->nr_queued);
3201 kfree(conf->barrier);
3202 bioset_exit(&conf->bio_split);
3203 kfree(conf);
3204 }
3205 return ERR_PTR(err);
3206 }
3207
3208 static int raid1_set_limits(struct mddev *mddev)
3209 {
3210 struct queue_limits lim;
3211 int err;
3212
3213 md_init_stacking_limits(&lim);
3214 lim.max_write_zeroes_sectors = 0;
3215 lim.max_hw_wzeroes_unmap_sectors = 0;
3216 lim.features |= BLK_FEAT_ATOMIC_WRITES;
3217 err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
3218 if (err)
3219 return err;
3220 return queue_limits_set(mddev->gendisk->queue, &lim);
3221 }
3222
3223 static int raid1_run(struct mddev *mddev)
3224 {
3225 struct r1conf *conf;
3226 int i;
3227 int ret;
3228
3229 if (mddev->level != 1) {
3230 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3231 mdname(mddev), mddev->level);
3232 return -EIO;
3233 }
3234 if (mddev->reshape_position != MaxSector) {
3235 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3236 mdname(mddev));
3237 return -EIO;
3238 }
3239
3240 /*
3241 * copy the already verified devices into our private RAID1
3242 * bookkeeping area. [whatever we allocate in run(),
3243 * should be freed in raid1_free()]
3244 */
3245 if (mddev->private == NULL)
3246 conf = setup_conf(mddev);
3247 else
3248 conf = mddev->private;
3249
3250 if (IS_ERR(conf))
3251 return PTR_ERR(conf);
3252
3253 if (!mddev_is_dm(mddev)) {
3254 ret = raid1_set_limits(mddev);
3255 if (ret) {
3256 if (!mddev->private)
3257 raid1_free(mddev, conf);
3258 return ret;
3259 }
3260 }
3261
3262 mddev->degraded = 0;
3263 for (i = 0; i < conf->raid_disks; i++)
3264 if (conf->mirrors[i].rdev == NULL ||
3265 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3266 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3267 mddev->degraded++;
3268 /*
3269 * RAID1 needs at least one active disk
3270 */
3271 if (conf->raid_disks - mddev->degraded < 1) {
3272 md_unregister_thread(mddev, &conf->thread);
3273 if (!mddev->private)
3274 raid1_free(mddev, conf);
3275 return -EINVAL;
3276 }
3277
3278 if (conf->raid_disks - mddev->degraded == 1)
3279 mddev->resync_offset = MaxSector;
3280
3281 if (mddev->resync_offset != MaxSector)
3282 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3283 mdname(mddev));
3284 pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3285 mdname(mddev), mddev->raid_disks - mddev->degraded,
3286 mddev->raid_disks);
3287
3288 /*
3289 * Ok, everything is just fine now
3290 */
3291 rcu_assign_pointer(mddev->thread, conf->thread);
3292 rcu_assign_pointer(conf->thread, NULL);
3293 mddev->private = conf;
3294 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3295
3296 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3297
3298 ret = md_integrity_register(mddev);
3299 if (ret)
3300 md_unregister_thread(mddev, &mddev->thread);
3301 return ret;
3302 }
3303
3304 static void raid1_free(struct mddev *mddev, void *priv)
3305 {
3306 struct r1conf *conf = priv;
3307
3308 mempool_destroy(conf->r1bio_pool);
3309 kfree(conf->mirrors);
3310 safe_put_page(conf->tmppage);
3311 kfree(conf->nr_pending);
3312 kfree(conf->nr_waiting);
3313 kfree(conf->nr_queued);
3314 kfree(conf->barrier);
3315 bioset_exit(&conf->bio_split);
3316 kfree(conf);
3317 }
3318
3319 static int raid1_resize(struct mddev *mddev, sector_t sectors)
3320 {
3321 /* no resync is happening, and there is enough space
3322 * on all devices, so we can resize.
3323 * We need to make sure resync covers any new space.
3324 * If the array is shrinking we should possibly wait until
3325 * any io in the removed space completes, but it hardly seems
3326 * worth it.
3327 */
3328 sector_t newsize = raid1_size(mddev, sectors, 0);
3329
3330 if (mddev->external_size &&
3331 mddev->array_sectors > newsize)
3332 return -EINVAL;
3333
3334 if (md_bitmap_enabled(mddev, false)) {
3335 int ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
3336
3337 if (ret)
3338 return ret;
3339 }
3340
3341 md_set_array_sectors(mddev, newsize);
3342 if (sectors > mddev->dev_sectors &&
3343 mddev->resync_offset > mddev->dev_sectors) {
3344 mddev->resync_offset = mddev->dev_sectors;
3345 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3346 }
3347 mddev->dev_sectors = sectors;
3348 mddev->resync_max_sectors = sectors;
3349 return 0;
3350 }
3351
3352 static int raid1_reshape(struct mddev *mddev)
3353 {
3354 /* We need to:
3355 * 1/ resize the r1bio_pool
3356 * 2/ resize conf->mirrors
3357 *
3358 * We allocate a new r1bio_pool if we can.
3359 * Then raise a device barrier and wait until all IO stops.
3360 * Then resize conf->mirrors and swap in the new r1bio pool.
3361 *
3362 * At the same time, we "pack" the devices so that all the missing
3363 * devices have the higher raid_disk numbers.
3364 */
3365 mempool_t *newpool, *oldpool;
3366 size_t new_r1bio_size;
3367 struct raid1_info *newmirrors;
3368 struct r1conf *conf = mddev->private;
3369 int cnt, raid_disks;
3370 unsigned long flags;
3371 int d, d2;
3372
3373 /* Cannot change chunk_size, layout, or level */
3374 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3375 mddev->layout != mddev->new_layout ||
3376 mddev->level != mddev->new_level) {
3377 mddev->new_chunk_sectors = mddev->chunk_sectors;
3378 mddev->new_layout = mddev->layout;
3379 mddev->new_level = mddev->level;
3380 return -EINVAL;
3381 }
3382
3383 if (!mddev_is_clustered(mddev))
3384 md_allow_write(mddev);
3385
3386 raid_disks = mddev->raid_disks + mddev->delta_disks;
3387
3388 if (raid_disks < conf->raid_disks) {
3389 cnt = 0;
3390 for (d = 0; d < conf->raid_disks; d++)
3391 if (conf->mirrors[d].rdev)
3392 cnt++;
3393 if (cnt > raid_disks)
3394 return -EBUSY;
3395 }
3396
3397 new_r1bio_size = offsetof(struct r1bio, bios[raid_disks * 2]);
3398 newpool = mempool_create_kmalloc_pool(NR_RAID_BIOS, new_r1bio_size);
3399 if (!newpool) {
3400 return -ENOMEM;
3401 }
3402 newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3403 raid_disks, 2),
3404 GFP_KERNEL);
3405 if (!newmirrors) {
3406 mempool_destroy(newpool);
3407 return -ENOMEM;
3408 }
3409
3410 freeze_array(conf, 0);
3411
3412 /* ok, everything is stopped */
3413 oldpool = conf->r1bio_pool;
3414 conf->r1bio_pool = newpool;
3415
3416 for (d = d2 = 0; d < conf->raid_disks; d++) {
3417 struct md_rdev *rdev = conf->mirrors[d].rdev;
3418 if (rdev && rdev->raid_disk != d2) {
3419 sysfs_unlink_rdev(mddev, rdev);
3420 rdev->raid_disk = d2;
3421 sysfs_unlink_rdev(mddev, rdev);
3422 if (sysfs_link_rdev(mddev, rdev))
3423 pr_warn("md/raid1:%s: cannot register rd%d\n",
3424 mdname(mddev), rdev->raid_disk);
3425 }
3426 if (rdev)
3427 newmirrors[d2++].rdev = rdev;
3428 }
3429 kfree(conf->mirrors);
3430 conf->mirrors = newmirrors;
3431
3432 spin_lock_irqsave(&conf->device_lock, flags);
3433 mddev->degraded += (raid_disks - conf->raid_disks);
3434 spin_unlock_irqrestore(&conf->device_lock, flags);
3435 conf->raid_disks = mddev->raid_disks = raid_disks;
3436 mddev->delta_disks = 0;
3437
3438 unfreeze_array(conf);
3439
3440 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3441 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3442 md_wakeup_thread(mddev->thread);
3443
3444 mempool_destroy(oldpool);
3445 return 0;
3446 }
3447
3448 static void raid1_quiesce(struct mddev *mddev, int quiesce)
3449 {
3450 struct r1conf *conf = mddev->private;
3451
3452 if (quiesce)
3453 freeze_array(conf, 0);
3454 else
3455 unfreeze_array(conf);
3456 }
3457
3458 static void *raid1_takeover(struct mddev *mddev)
3459 {
3460 /* raid1 can take over:
3461 * raid5 with 2 devices, any layout or chunk size
3462 */
3463 if (mddev->level == 5 && mddev->raid_disks == 2) {
3464 struct r1conf *conf;
3465 mddev->new_level = 1;
3466 mddev->new_layout = 0;
3467 mddev->new_chunk_sectors = 0;
3468 conf = setup_conf(mddev);
3469 if (!IS_ERR(conf)) {
3470 /* Array must appear to be quiesced */
3471 conf->array_frozen = 1;
3472 mddev_clear_unsupported_flags(mddev,
3473 UNSUPPORTED_MDDEV_FLAGS);
3474 }
3475 return conf;
3476 }
3477 return ERR_PTR(-EINVAL);
3478 }
3479
3480 static struct md_personality raid1_personality =
3481 {
3482 .head = {
3483 .type = MD_PERSONALITY,
3484 .id = ID_RAID1,
3485 .name = "raid1",
3486 .owner = THIS_MODULE,
3487 },
3488
3489 .make_request = raid1_make_request,
3490 .run = raid1_run,
3491 .free = raid1_free,
3492 .status = raid1_status,
3493 .error_handler = raid1_error,
3494 .hot_add_disk = raid1_add_disk,
3495 .hot_remove_disk= raid1_remove_disk,
3496 .spare_active = raid1_spare_active,
3497 .sync_request = raid1_sync_request,
3498 .resize = raid1_resize,
3499 .size = raid1_size,
3500 .check_reshape = raid1_reshape,
3501 .quiesce = raid1_quiesce,
3502 .takeover = raid1_takeover,
3503 };
3504
3505 static int __init raid1_init(void)
3506 {
3507 return register_md_submodule(&raid1_personality.head);
3508 }
3509
3510 static void __exit raid1_exit(void)
3511 {
3512 unregister_md_submodule(&raid1_personality.head);
3513 }
3514
3515 module_init(raid1_init);
3516 module_exit(raid1_exit);
3517 MODULE_LICENSE("GPL");
3518 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3519 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3520 MODULE_ALIAS("md-raid1");
3521 MODULE_ALIAS("md-level-1");
3522