// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"

#define RAID_1_10_NAME "raid10"
#include "raid10.h"
#include "raid0.h"
#include "md-bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *    use_far_sets (stored in bit 17 of layout)
 *    use_far_sets_bugfixed (stored in bit 18 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.  Each device
 * is divided into far_copies sections.  In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive).  The starting device for each section is offset
 * near_copies from the starting device of the previous section.  Thus there
 * are (near_copies * far_copies) copies of each chunk, and each is on a
 * different drive.  near_copies and far_copies must be at least one, and
 * their product is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are in adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size.  The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array.  This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */
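
/*
 * Illustrative sketch (editor's example, not part of the driver): how a
 * logical chunk maps onto devices for an assumed geometry of
 * raid_disks = 4, near_copies = 2, far_copies = 1.  The real mapping,
 * including far copies and far sets, is done by __raid10_find_phys()
 * below; this simplified form only shows the near-copy placement:
 *
 *	lchunk = sector >> chunk_shift;	// logical chunk number
 *	slot   = lchunk * near_copies;	// first slot holding this chunk
 *	dev    = slot % raid_disks;	// first device; copy n lives on
 *					// device (dev + n) % raid_disks
 *
 * So logical chunk 5 starts at slot 10, i.e. devices 2 and 3 each hold
 * one of its two near copies, in stripe (row) 10 / 4 = 2.
 */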

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);

#include "raid1-10.c"

#define NULL_CMD
#define cmd_before(conf, cmd) \
	do { \
		write_sequnlock_irq(&(conf)->resync_lock); \
		cmd; \
	} while (0)
#define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)

#define wait_event_barrier_cmd(conf, cond, cmd) \
	wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
		       cmd_after(conf))

#define wait_event_barrier(conf, cond) \
	wait_event_barrier_cmd(conf, cond, NULL_CMD)

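/*
 * Rough expansion sketch (illustrative only): sleeping on the barrier
 * while holding the resync seqlock would deadlock, so these macros drop
 * and retake the lock around the wait.  wait_event_barrier_cmd(conf,
 * cond, cmd) behaves approximately like:
 *
 *	while (!(cond)) {
 *		write_sequnlock_irq(&conf->resync_lock);  // cmd_before
 *		cmd;		// e.g. flush_pending_writes() in freeze_array()
 *		...sleep until woken on conf->wait_barrier...
 *		write_seqlock_irq(&conf->resync_lock);    // cmd_after
 *	}
 */
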
/*
 * For a resync bio, the r10bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r10bio *get_resync_r10bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void *r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);

	/* allocate a r10bio with room for raid_disks entries in the
	 * bios array */
	return kzalloc(size, gfp_flags);
}

#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
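
/*
 * Worked arithmetic (editor's note): assuming the 64KiB RESYNC_BLOCK_SIZE
 * defined in raid1-10.c, RESYNC_SECTORS is 64KiB >> 9 = 128 sectors per
 * request, RESYNC_DEPTH works out to 32MiB / 64KiB = 512 concurrent
 * resync requests, and CLUSTER_RESYNC_WINDOW is 32 * 1MiB = 32MiB.
 */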

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 *
 */
static void *r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	struct r10bio *r10_bio;
	struct bio *bio;
	int j;
	int nalloc, nalloc_rp;
	struct resync_pages *rps;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/* allocate once for all bios */
	if (!conf->have_replacement)
		nalloc_rp = nalloc;
	else
		nalloc_rp = nalloc * 2;
	rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
	if (!rps)
		goto out_free_r10bio;

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
		if (!bio)
			goto out_free_bio;
		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
		r10_bio->devs[j].bio = bio;
		if (!conf->have_replacement)
			continue;
		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
		if (!bio)
			goto out_free_bio;
		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
		r10_bio->devs[j].repl_bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0; j < nalloc; j++) {
		struct bio *rbio = r10_bio->devs[j].repl_bio;
		struct resync_pages *rp, *rp_repl;

		rp = &rps[j];
		if (rbio)
			rp_repl = &rps[nalloc + j];

		bio = r10_bio->devs[j].bio;

		if (!j || test_bit(MD_RECOVERY_SYNC,
				   &conf->mddev->recovery)) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r10_bio;
		bio->bi_private = rp;
		if (rbio) {
			memcpy(rp_repl, rp, sizeof(*rp));
			rbio->bi_private = rp_repl;
		}
	}

	return r10_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

	j = 0;
out_free_bio:
	for ( ; j < nalloc; j++) {
		if (r10_bio->devs[j].bio)
			bio_uninit(r10_bio->devs[j].bio);
		kfree(r10_bio->devs[j].bio);
		if (r10_bio->devs[j].repl_bio)
			bio_uninit(r10_bio->devs[j].repl_bio);
		kfree(r10_bio->devs[j].repl_bio);
	}
	kfree(rps);
out_free_r10bio:
	rbio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	struct r10conf *conf = data;
	struct r10bio *r10bio = __r10_bio;
	int j;
	struct resync_pages *rp = NULL;

	for (j = conf->copies; j--; ) {
		struct bio *bio = r10bio->devs[j].bio;

		if (bio) {
			rp = get_resync_pages(bio);
			resync_free_pages(rp);
			bio_uninit(bio);
			kfree(bio);
		}

		bio = r10bio->devs[j].repl_bio;
		if (bio) {
			bio_uninit(bio);
			kfree(bio);
		}
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
	int i;

	for (i = 0; i < conf->geo.raid_disks; i++) {
		struct bio **bio = &r10_bio->devs[i].bio;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
		bio = &r10_bio->devs[i].repl_bio;
		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, &conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, &conf->r10buf_pool);

	lower_barrier(conf);
}

static void wake_up_barrier(struct r10conf *conf)
{
	if (wq_has_sleeper(&conf->wait_barrier))
		wake_up(&conf->wait_barrier);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;
	struct r10conf *conf = r10_bio->mddev->private;

	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
			 struct bio *bio, int *slotp, int *replp)
{
	int slot;
	int repl = 0;

	for (slot = 0; slot < conf->geo.raid_disks; slot++) {
		if (r10_bio->devs[slot].bio == bio)
			break;
		if (r10_bio->devs[slot].repl_bio == bio) {
			repl = 1;
			break;
		}
	}

	update_head_pos(slot, r10_bio);

	if (slotp)
		*slotp = slot;
	if (replp)
		*replp = repl;
	return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r10bio *r10_bio = bio->bi_private;
	int slot;
	struct md_rdev *rdev;
	struct r10conf *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	rdev = r10_bio->devs[slot].rdev;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	} else {
		/* If all other devices that store this block have
		 * failed, we want to return the error upwards rather
		 * than fail the last device. Here we redefine
		 * "uptodate" to mean "Don't want to retry"
		 */
		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
			     rdev->raid_disk))
			uptodate = 1;
	}
	if (uptodate) {
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - keep the refcount on the rdev
		 */
		pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   rdev->bdev,
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);
	}
}

static void close_write(struct r10bio *r10_bio)
{
	struct mddev *mddev = r10_bio->mddev;

	/* clear the bitmap if all writes complete successfully */
	mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,
				    !test_bit(R10BIO_Degraded, &r10_bio->state),
				    false);
	md_write_end(mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);
		else {
			close_write(r10_bio);
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10_end_write_request(struct bio *bio)
{
	struct r10bio *r10_bio = bio->bi_private;
	int dev;
	int dec_rdev = 1;
	struct r10conf *conf = r10_bio->mddev->private;
	int slot, repl;
	struct md_rdev *rdev = NULL;
	struct bio *to_put = NULL;
	bool discard_error;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
			 */
			md_error(rdev->mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);

			dec_rdev = 0;
			if (test_bit(FailFast, &rdev->flags) &&
			    (bio->bi_opf & MD_FAILFAST)) {
				md_error(rdev->mddev, rdev);
			}

			/*
			 * When the device is faulty, it is not necessary to
			 * handle the write error.
			 */
			if (!test_bit(Faulty, &rdev->flags))
				set_bit(R10BIO_WriteError, &r10_bio->state);
			else {
				/* Fail the request */
				set_bit(R10BIO_Degraded, &r10_bio->state);
				r10_bio->devs[slot].bio = NULL;
				to_put = bio;
				dec_rdev = 1;
			}
		}
	} else {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 *
		 * Do not set R10BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write had finished
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here).
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
				      r10_bio->sectors) &&
		    !discard_error) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
	if (to_put)
		bio_put(to_put);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
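
/*
 * Worked example (editor's sketch, assumed geometry): raid_disks = 4,
 * near_copies = 2, far_copies = 1, chunk size 64KiB (128 sectors, so
 * chunk_shift = 7).  For virtual sector 300:
 *
 *	chunk  = 300 >> 7 = 2;		offset in chunk = 300 & 127 = 44
 *	chunk *= near_copies;		// -> 4
 *	dev    = 4 % raid_disks = 0;	stripe = 4 / raid_disks = 1
 *	addr   = 44 + (1 << 7) = 172
 *
 * so the two copies sit on devices 0 and 1, both at device sector 172,
 * and raid10_find_virt() inverts exactly this mapping.
 */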

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
	int n, f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;
	int last_far_set_start, last_far_set_size;

	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
	last_far_set_start *= geo->far_set_size;

	last_far_set_size = geo->far_set_size;
	last_far_set_size += (geo->raid_disks % geo->far_set_size);

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> geo->chunk_shift;
	sector = r10bio->sector & geo->chunk_mask;

	chunk *= geo->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, geo->raid_disks);
	if (geo->far_offset)
		stripe *= geo->far_copies;

	sector += stripe << geo->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < geo->near_copies; n++) {
		int d = dev;
		int set;
		sector_t s = sector;
		r10bio->devs[slot].devnum = d;
		r10bio->devs[slot].addr = s;
		slot++;

		for (f = 1; f < geo->far_copies; f++) {
			set = d / geo->far_set_size;
			d += geo->near_copies;

			if ((geo->raid_disks % geo->far_set_size) &&
			    (d > last_far_set_start)) {
				d -= last_far_set_start;
				d %= last_far_set_size;
				d += last_far_set_start;
			} else {
				d %= geo->far_set_size;
				d += geo->far_set_size * set;
			}
			s += geo->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= geo->raid_disks) {
			dev = 0;
			sector += (geo->chunk_mask + 1);
		}
	}
}

static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((r10bio->sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards)) {
		set_bit(R10BIO_Previous, &r10bio->state);
		geo = &conf->prev;
	} else
		clear_bit(R10BIO_Previous, &r10bio->state);

	__raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;
	/* Never use conf->prev as this is only called during resync
	 * or recovery, so reshape isn't happening
	 */
	struct geom *geo = &conf->geo;
	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
	int far_set_size = geo->far_set_size;
	int last_far_set_start;

	if (geo->raid_disks % geo->far_set_size) {
		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
		last_far_set_start *= geo->far_set_size;

		if (dev >= last_far_set_start) {
			far_set_size = geo->far_set_size;
			far_set_size += (geo->raid_disks % geo->far_set_size);
			far_set_start = last_far_set_start;
		}
	}

	offset = sector & geo->chunk_mask;
	if (geo->far_offset) {
		int fc;
		chunk = sector >> geo->chunk_shift;
		fc = sector_div(chunk, geo->far_copies);
		dev -= fc * geo->near_copies;
		if (dev < far_set_start)
			dev += far_set_size;
	} else {
		while (sector >= geo->stride) {
			sector -= geo->stride;
			if (dev < (geo->near_copies + far_set_start))
				dev += far_set_size - geo->near_copies;
			else
				dev -= geo->near_copies;
		}
		chunk = sector >> geo->chunk_shift;
	}
	vchunk = chunk * geo->raid_disks + dev;
	sector_div(vchunk, geo->near_copies);
	return (vchunk << geo->chunk_shift) + offset;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device, based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
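
/*
 * Illustrative numbers (editor's example, assumed values): with
 * near_copies = 2, a read of sector 1000 has copies on, say, disks 2
 * and 3.  If disk 2's head_position is 1000 (it just finished the
 * preceding IO) and disk 3's is 52000, the distances below are 0 and
 * 51000, so disk 2 is chosen and a sequential stream stays on one
 * spindle.
 */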
static struct md_rdev *read_balance(struct r10conf *conf,
				    struct r10bio *r10_bio,
				    int *max_sectors)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;
	struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
	int do_balance;
	int best_dist_slot, best_pending_slot;
	bool has_nonrot_disk = false;
	unsigned int min_pending;
	struct geom *geo = &conf->geo;

	raid10_find_phys(conf, r10_bio);
	best_dist_slot = -1;
	min_pending = UINT_MAX;
	best_dist_rdev = NULL;
	best_pending_rdev = NULL;
	best_dist = MaxSector;
	best_good_sectors = 0;
	do_balance = 1;
	clear_bit(R10BIO_FailFast, &r10_bio->state);

	if (raid1_should_read_first(conf->mddev, this_sector, sectors))
		do_balance = 0;

	for (slot = 0; slot < conf->copies ; slot++) {
		sector_t first_bad;
		int bad_sectors;
		sector_t dev_sector;
		unsigned int pending;
		bool nonrot;

		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = conf->mirrors[disk].replacement;
		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
		    r10_bio->devs[slot].addr + sectors >
		    rdev->recovery_offset)
			rdev = conf->mirrors[disk].rdev;
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			continue;

		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
				continue;
			if (first_bad <= dev_sector) {
				/* Cannot read here.  If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device.
				 */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_dist_slot = slot;
					best_dist_rdev = rdev;
				}
				if (!do_balance)
					/* Must read from here */
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (!do_balance)
			break;

		nonrot = bdev_nonrot(rdev->bdev);
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		if (min_pending > pending && nonrot) {
			min_pending = pending;
			best_pending_slot = slot;
			best_pending_rdev = rdev;
		}

		if (best_dist_slot >= 0)
			/* At least 2 disks to choose from so failfast is OK */
			set_bit(R10BIO_FailFast, &r10_bio->state);
		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (geo->near_copies > 1 && !pending)
			new_distance = 0;

		/* for far > 1 always use the lowest address */
		else if (geo->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
		else
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);

		if (new_distance < best_dist) {
			best_dist = new_distance;
			best_dist_slot = slot;
			best_dist_rdev = rdev;
		}
	}
	if (slot >= conf->copies) {
		if (has_nonrot_disk) {
			slot = best_pending_slot;
			rdev = best_pending_rdev;
		} else {
			slot = best_dist_slot;
			rdev = best_dist_rdev;
		}
	}

	if (slot >= 0) {
		atomic_inc(&rdev->nr_pending);
		r10_bio->read_slot = slot;
	} else
		rdev = NULL;
	*max_sectors = best_good_sectors;

	return rdev;
}

static void flush_pending_writes(struct r10conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perfectly safe to force
		 * us to go around the wait_event() loop again, so the warning
		 * is a false-positive.  Silence the warning by resetting
		 * thread state
		 */
		__set_current_state(TASK_RUNNING);

		blk_start_plug(&plug);
		raid1_prepare_flush_writes(conf->mddev);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;

			raid1_submit_write(bio);
			bio = next;
			cond_resched();
		}
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
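
/*
 * Sketch of the pairing protocol (illustrative only, using the
 * functions defined below):
 *
 *	// regular IO path
 *	wait_barrier(conf, false);	// blocks while a barrier is up
 *	...submit and complete the IO...
 *	allow_barrier(conf);		// drops nr_pending, wakes waiters
 *
 *	// resync/recovery path
 *	raise_barrier(conf, 0);		// waits for nr_pending to drain
 *	...do one unit of background IO...
 *	lower_barrier(conf);		// lets regular IO run again
 */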

static void raise_barrier(struct r10conf *conf, int force)
{
	write_seqlock_irq(&conf->resync_lock);

	if (WARN_ON_ONCE(force && !conf->barrier))
		force = false;

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_barrier(conf, force || !conf->nr_waiting);

	/* block any new IO from starting */
	WRITE_ONCE(conf->barrier, conf->barrier + 1);

	/* Now wait for all pending IO to complete */
	wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
				 conf->barrier < RESYNC_DEPTH);

	write_sequnlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;

	write_seqlock_irqsave(&conf->resync_lock, flags);
	WRITE_ONCE(conf->barrier, conf->barrier - 1);
	write_sequnlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static bool stop_waiting_barrier(struct r10conf *conf)
{
	struct bio_list *bio_list = current->bio_list;
	struct md_thread *thread;

	/* barrier is dropped */
	if (!conf->barrier)
		return true;

	/*
	 * If there are already pending requests (preventing the barrier from
	 * rising completely), and the pre-process bio queue isn't empty, then
	 * don't wait, as we need to empty that queue to get the nr_pending
	 * count down.
	 */
	if (atomic_read(&conf->nr_pending) && bio_list &&
	    (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
		return true;

	/* daemon thread must exist while handling io */
	thread = rcu_dereference_protected(conf->mddev->thread, true);
	/*
	 * move on if io is issued from raid10d(), nr_pending is not released
	 * from original io (see handle_read_error()). All raise barrier is
	 * blocked until this io is done.
	 */
	if (thread->tsk == current) {
		WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
		return true;
	}

	return false;
}

static bool wait_barrier_nolock(struct r10conf *conf)
{
	unsigned int seq = read_seqbegin(&conf->resync_lock);

	if (READ_ONCE(conf->barrier))
		return false;

	atomic_inc(&conf->nr_pending);
	if (!read_seqretry(&conf->resync_lock, seq))
		return true;

	if (atomic_dec_and_test(&conf->nr_pending))
		wake_up_barrier(conf);

	return false;
}

static bool wait_barrier(struct r10conf *conf, bool nowait)
{
	bool ret = true;

	if (wait_barrier_nolock(conf))
		return true;

	write_seqlock_irq(&conf->resync_lock);
	if (conf->barrier) {
		/* Return false when nowait flag is set */
		if (nowait) {
			ret = false;
		} else {
			conf->nr_waiting++;
			mddev_add_trace_msg(conf->mddev, "raid10 wait barrier");
			wait_event_barrier(conf, stop_waiting_barrier(conf));
			conf->nr_waiting--;
		}
		if (!conf->nr_waiting)
			wake_up(&conf->wait_barrier);
	}
	/* Only increment nr_pending when we wait */
	if (ret)
		atomic_inc(&conf->nr_pending);
	write_sequnlock_irq(&conf->resync_lock);
	return ret;
}

static void allow_barrier(struct r10conf *conf)
{
	if ((atomic_dec_and_test(&conf->nr_pending)) ||
	    (conf->array_freeze_pending))
		wake_up_barrier(conf);
}

static void freeze_array(struct r10conf *conf, int extra)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+extra.
	 * This is called in the context of one normal IO request
	 * that has failed.  Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	write_seqlock_irq(&conf->resync_lock);
	conf->array_freeze_pending++;
	WRITE_ONCE(conf->barrier, conf->barrier + 1);
	conf->nr_waiting++;
	wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) ==
			conf->nr_queued + extra, flush_pending_writes(conf));
	conf->array_freeze_pending--;
	write_sequnlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r10conf *conf)
{
	/* reverse the effect of the freeze */
	write_seqlock_irq(&conf->resync_lock);
	WRITE_ONCE(conf->barrier, conf->barrier - 1);
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	write_sequnlock_irq(&conf->resync_lock);
}

static sector_t choose_data_offset(struct r10bio *r10_bio,
				   struct md_rdev *rdev)
{
	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
	    test_bit(R10BIO_Previous, &r10_bio->state))
		return rdev->data_offset;
	else
		return rdev->new_data_offset;
}

static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
	struct mddev *mddev = plug->cb.data;
	struct r10conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		spin_unlock_irq(&conf->device_lock);
		wake_up_barrier(conf);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	raid1_prepare_flush_writes(mddev);
	wake_up_barrier(conf);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;

		raid1_submit_write(bio);
		bio = next;
		cond_resched();
	}
	kfree(plug);
}

/*
 * 1. Register the new request and wait if the reconstruction thread has put
 * up a bar for new requests.  Continue immediately if no resync is active
 * currently.
 * 2. If the IO spans the reshape position, we need to wait for the reshape
 * to pass.
 */
static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
				 struct bio *bio, sector_t sectors)
{
	/* Bail out if REQ_NOWAIT is set for the bio */
	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);
		return false;
	}
	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio->bi_iter.bi_sector < conf->reshape_progress &&
	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
		allow_barrier(conf);
		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return false;
		}
		mddev_add_trace_msg(conf->mddev, "raid10 wait reshape");
		wait_event(conf->wait_barrier,
			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
			   conf->reshape_progress >= bio->bi_iter.bi_sector +
			   sectors);
		wait_barrier(conf, false);
	}
	return true;
}

static void raid10_read_request(struct mddev *mddev, struct bio *bio,
				struct r10bio *r10_bio, bool io_accounting)
{
	struct r10conf *conf = mddev->private;
	struct bio *read_bio;
	const enum req_op op = bio_op(bio);
	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
	int max_sectors;
	struct md_rdev *rdev;
	char b[BDEVNAME_SIZE];
	int slot = r10_bio->read_slot;
	struct md_rdev *err_rdev = NULL;
	gfp_t gfp = GFP_NOIO;

	if (slot >= 0 && r10_bio->devs[slot].rdev) {
		/*
		 * This is an error retry, but we cannot
		 * safely dereference the rdev in the r10_bio,
		 * we must use the one in conf.
		 * If it has already been disconnected (unlikely)
		 * we lose the device name in error messages.
		 */
		int disk;
		/*
		 * As we are blocking raid10, it is a little safer to
		 * use __GFP_HIGH.
		 */
		gfp = GFP_NOIO | __GFP_HIGH;

		disk = r10_bio->devs[slot].devnum;
		err_rdev = conf->mirrors[disk].rdev;
		if (err_rdev)
			snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
		else {
			strcpy(b, "???");
			/* This never gets dereferenced */
			err_rdev = r10_bio->devs[slot].rdev;
		}
	}

	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
		return;
	rdev = read_balance(conf, r10_bio, &max_sectors);
	if (!rdev) {
		if (err_rdev) {
			pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev), b,
					    (unsigned long long)r10_bio->sector);
		}
		raid_end_bio_io(r10_bio);
		return;
	}
	if (err_rdev)
		pr_err_ratelimited("md/raid10:%s: %pg: redirecting sector %llu to another mirror\n",
				   mdname(mddev),
				   rdev->bdev,
				   (unsigned long long)r10_bio->sector);
	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      gfp, &conf->bio_split);
		bio_chain(split, bio);
		allow_barrier(conf);
		submit_bio_noacct(bio);
		wait_barrier(conf, false);
		bio = split;
		r10_bio->master_bio = bio;
		r10_bio->sectors = max_sectors;
	}
	slot = r10_bio->read_slot;

	if (io_accounting) {
		md_account_bio(mddev, &bio);
		r10_bio->master_bio = bio;
	}
	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);

	r10_bio->devs[slot].bio = read_bio;
	r10_bio->devs[slot].rdev = rdev;

	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
		choose_data_offset(r10_bio, rdev);
	read_bio->bi_end_io = raid10_end_read_request;
	read_bio->bi_opf = op | do_sync;
	if (test_bit(FailFast, &rdev->flags) &&
	    test_bit(R10BIO_FailFast, &r10_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r10_bio;
	mddev_trace_remap(mddev, read_bio, r10_bio->sector);
	submit_bio_noacct(read_bio);
	return;
}

static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
				  struct bio *bio, bool replacement,
				  int n_copy)
{
	const enum req_op op = bio_op(bio);
	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
	unsigned long flags;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev;
	int devnum = r10_bio->devs[n_copy].devnum;
	struct bio *mbio;

	rdev = replacement ? conf->mirrors[devnum].replacement :
			     conf->mirrors[devnum].rdev;

	mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
	if (replacement)
		r10_bio->devs[n_copy].repl_bio = mbio;
	else
		r10_bio->devs[n_copy].bio = mbio;

	mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
				   choose_data_offset(r10_bio, rdev));
	mbio->bi_end_io = raid10_end_write_request;
	mbio->bi_opf = op | do_sync | do_fua;
	if (!replacement && test_bit(FailFast,
				     &conf->mirrors[devnum].rdev->flags)
			 && enough(conf, devnum))
		mbio->bi_opf |= MD_FAILFAST;
	mbio->bi_private = r10_bio;
	mddev_trace_remap(mddev, mbio, r10_bio->sector);
	/* flush_pending_writes() needs access to the rdev so...*/
	mbio->bi_bdev = (void *)rdev;

	atomic_inc(&r10_bio->remaining);

	if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) {
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		md_wakeup_thread(mddev->thread);
	}
}

static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
{
	int i;
	struct r10conf *conf = mddev->private;
	struct md_rdev *blocked_rdev;

retry_wait:
	blocked_rdev = NULL;
	for (i = 0; i < conf->copies; i++) {
		struct md_rdev *rdev, *rrdev;

		rdev = conf->mirrors[i].rdev;
		rrdev = conf->mirrors[i].replacement;
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
			atomic_inc(&rrdev->nr_pending);
			blocked_rdev = rrdev;
			break;
		}

		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t dev_sector = r10_bio->devs[i].addr;

			/*
			 * A discard request doesn't care about the write
			 * result, so it doesn't need to wait for a blocked
			 * disk here.
			 */
			if (!r10_bio->sectors)
				continue;

			if (rdev_has_badblock(rdev, dev_sector,
					      r10_bio->sectors) < 0) {
				/*
				 * Mustn't write here until the bad block
				 * is acknowledged
				 */
				atomic_inc(&rdev->nr_pending);
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
		}
	}

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */
		allow_barrier(conf);
		mddev_add_trace_msg(conf->mddev,
				    "raid10 %s wait rdev %d blocked",
				    __func__, blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, false);
		goto retry_wait;
	}
}

static void raid10_write_request(struct mddev *mddev, struct bio *bio,
				 struct r10bio *r10_bio)
{
	struct r10conf *conf = mddev->private;
	int i;
	sector_t sectors;
	int max_sectors;

	if ((mddev_is_clustered(mddev) &&
	     md_cluster_ops->area_resyncing(mddev, WRITE,
					    bio->bi_iter.bi_sector,
					    bio_end_sector(bio)))) {
		DEFINE_WAIT(w);
		/* Bail out if REQ_NOWAIT is set for the bio */
		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return;
		}
		for (;;) {
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_IDLE);
			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	sectors = r10_bio->sectors;
	if (!regular_request_wait(mddev, conf, bio, sectors))
		return;
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    (mddev->reshape_backwards
	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
		bio->bi_iter.bi_sector < conf->reshape_progress))) {
		/* Need to update reshape_position in metadata */
		mddev->reshape_position = conf->reshape_progress;
		set_mask_bits(&mddev->sb_flags, 0,
			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
		md_wakeup_thread(mddev->thread);
		if (bio->bi_opf & REQ_NOWAIT) {
			allow_barrier(conf);
			bio_wouldblock_error(bio);
			return;
		}
		mddev_add_trace_msg(conf->mddev,
				    "raid10 wait reshape metadata");
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));

		conf->reshape_safe = mddev->reshape_position;
	}

	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device
	 * on which we have seen a write error, we want to avoid
	 * writing to those blocks.  This potentially requires several
	 * writes to write around the bad blocks.  Each set of writes
	 * gets its own r10_bio with a set of bios attached.
	 */

	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
	raid10_find_phys(conf, r10_bio);

	wait_blocked_dev(mddev, r10_bio);

	max_sectors = r10_bio->sectors;

	for (i = 0;  i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		struct md_rdev *rdev, *rrdev;

		rdev = conf->mirrors[d].rdev;
		rrdev = conf->mirrors[d].replacement;
		if (rdev && (test_bit(Faulty, &rdev->flags)))
			rdev = NULL;
		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
			rrdev = NULL;

		r10_bio->devs[i].bio = NULL;
		r10_bio->devs[i].repl_bio = NULL;

		if (!rdev && !rrdev) {
			set_bit(R10BIO_Degraded, &r10_bio->state);
			continue;
		}
		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			sector_t dev_sector = r10_bio->devs[i].addr;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, dev_sector, max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad && first_bad <= dev_sector) {
				/* Cannot write here at all */
				bad_sectors -= (dev_sector - first_bad);
				if (bad_sectors < max_sectors)
					/* Mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				/* We don't set R10BIO_Degraded as that
				 * only applies if the disk is missing,
				 * so it might be re-added, and we want to
				 * know to recover this chunk.
				 * In this case the device is here, and the
				 * fact that this chunk is not in-sync is
				 * recorded in the bad block log.
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - dev_sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		if (rdev) {
			r10_bio->devs[i].bio = bio;
			atomic_inc(&rdev->nr_pending);
		}
		if (rrdev) {
			r10_bio->devs[i].repl_bio = bio;
			atomic_inc(&rrdev->nr_pending);
		}
	}

	if (max_sectors < r10_bio->sectors)
		r10_bio->sectors = max_sectors;

	if (r10_bio->sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, r10_bio->sectors,
					      GFP_NOIO, &conf->bio_split);
		bio_chain(split, bio);
		allow_barrier(conf);
		submit_bio_noacct(bio);
		wait_barrier(conf, false);
		bio = split;
		r10_bio->master_bio = bio;
	}

	md_account_bio(mddev, &bio);
	r10_bio->master_bio = bio;
	atomic_set(&r10_bio->remaining, 1);
	mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors,
				      false);

	for (i = 0; i < conf->copies; i++) {
		if (r10_bio->devs[i].bio)
			raid10_write_one_disk(mddev, r10_bio, bio, false, i);
		if (r10_bio->devs[i].repl_bio)
			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
	}
	one_write_done(r10_bio);
}

static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;

	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = sectors;

	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_iter.bi_sector;
	r10_bio->state = 0;
	r10_bio->read_slot = -1;
	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
	       conf->geo.raid_disks);

	if (bio_data_dir(bio) == READ)
		raid10_read_request(mddev, bio, r10_bio, true);
	else
		raid10_write_request(mddev, bio, r10_bio);
}

static void raid_end_discard_bio(struct r10bio *r10bio)
{
	struct r10conf *conf = r10bio->mddev->private;
	struct r10bio *first_r10bio;

	while (atomic_dec_and_test(&r10bio->remaining)) {

		allow_barrier(conf);

		if (!test_bit(R10BIO_Discard, &r10bio->state)) {
			first_r10bio = (struct r10bio *)r10bio->master_bio;
			free_r10bio(r10bio);
			r10bio = first_r10bio;
		} else {
			md_write_end(r10bio->mddev);
			bio_endio(r10bio->master_bio);
			free_r10bio(r10bio);
			break;
		}
	}
}

static void raid10_end_discard_request(struct bio *bio)
{
	struct r10bio *r10_bio = bio->bi_private;
	struct r10conf *conf = r10_bio->mddev->private;
	struct md_rdev *rdev = NULL;
	int dev;
	int slot, repl;

	/*
	 * We don't care about the return value of a discard bio
	 */
	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		set_bit(R10BIO_Uptodate, &r10_bio->state);

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
	rdev = repl ? conf->mirrors[dev].replacement :
		      conf->mirrors[dev].rdev;

	raid_end_discard_bio(r10_bio);
	rdev_dec_pending(rdev, conf->mddev);
}

/*
 * There are some limitations to handling a discard bio:
 * 1st, the discard size must be bigger than stripe_size * 2.
 * 2nd, if the discard bio spans the reshape progress, we use the old
 * way to handle the discard bio.
 */
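
/*
 * Worked example (editor's sketch, assumed geometry): raid_disks = 4,
 * near_copies = 2, chunk size 512KiB (1024 sectors, chunk_shift = 10).
 * Then stripe_data_disks = 4/2 + 0 = 2 and stripe_size =
 * 2 << 10 = 2048 sectors, so only discard bios of at least 4096
 * sectors (2MiB) take this path; smaller ones fall back to the regular
 * write path.
 */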
static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r10conf *conf = mddev->private;
	struct geom *geo = &conf->geo;
	int far_copies = geo->far_copies;
	bool first_copy = true;
	struct r10bio *r10_bio, *first_r10bio;
	struct bio *split;
	int disk;
	sector_t chunk;
	unsigned int stripe_size;
	unsigned int stripe_data_disks;
	sector_t split_size;
	sector_t bio_start, bio_end;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int remainder;

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return -EAGAIN;

	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);
		return 0;
	}
	wait_barrier(conf, false);

	/*
	 * Check reshape again to avoid a reshape happening after checking
	 * MD_RECOVERY_RESHAPE and before wait_barrier
	 */
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		goto out;

	if (geo->near_copies)
		stripe_data_disks = geo->raid_disks / geo->near_copies +
				    geo->raid_disks % geo->near_copies;
	else
		stripe_data_disks = geo->raid_disks;

	stripe_size = stripe_data_disks << geo->chunk_shift;

	bio_start = bio->bi_iter.bi_sector;
	bio_end = bio_end_sector(bio);

	/*
	 * One discard bio may be smaller than a stripe, or it may cross a
	 * stripe boundary while the discard region is larger than one
	 * stripe.  For the far offset layout, if the discard region is not
	 * aligned with the stripe size, there is a hole when we submit the
	 * discard bio to a member disk.  For simplicity, we only handle
	 * discard bios whose discard region is bigger than stripe_size * 2.
	 */
	if (bio_sectors(bio) < stripe_size*2)
		goto out;

	/*
	 * Keep the bio aligned with the stripe size.
	 */
	div_u64_rem(bio_start, stripe_size, &remainder);
	if (remainder) {
		split_size = stripe_size - remainder;
		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
		bio_chain(split, bio);
		allow_barrier(conf);
		/* Resend the first split part */
		submit_bio_noacct(split);
		wait_barrier(conf, false);
	}
	div_u64_rem(bio_end, stripe_size, &remainder);
	if (remainder) {
		split_size = bio_sectors(bio) - remainder;
		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
		bio_chain(split, bio);
		allow_barrier(conf);
		/* Resend the second split part */
		submit_bio_noacct(bio);
		bio = split;
		wait_barrier(conf, false);
	}

	bio_start = bio->bi_iter.bi_sector;
	bio_end = bio_end_sector(bio);

	/*
	 * Raid10 uses chunks as the unit to store data.  It's similar to
	 * raid0.  One stripe contains the chunks from all member disks (one
	 * chunk from one disk at the same HBA address).  For layout details,
	 * see 'man md 4'.
	 */
	chunk = bio_start >> geo->chunk_shift;
	chunk *= geo->near_copies;
	first_stripe_index = chunk;
	start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
	if (geo->far_offset)
		first_stripe_index *= geo->far_copies;
	start_disk_offset = (bio_start & geo->chunk_mask) +
			    (first_stripe_index << geo->chunk_shift);

	chunk = bio_end >> geo->chunk_shift;
	chunk *= geo->near_copies;
	last_stripe_index = chunk;
	end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
	if (geo->far_offset)
		last_stripe_index *= geo->far_copies;
	end_disk_offset = (bio_end & geo->chunk_mask) +
			  (last_stripe_index << geo->chunk_shift);

retry_discard:
	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
	r10_bio->mddev = mddev;
	r10_bio->state = 0;
	r10_bio->sectors = 0;
	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
	wait_blocked_dev(mddev, r10_bio);

	/*
	 * For the far layout it needs more than one r10bio to cover all
	 * regions.  Inspired by raid10_sync_request, we can use the first
	 * r10bio->master_bio to record the discard bio.  The other
	 * r10bio->master_bio record the first r10bio.  The first r10bio is
	 * only released after all other r10bios finish.  The discard bio
	 * returns only when the first r10bio finishes.
	 */
	if (first_copy) {
		r10_bio->master_bio = bio;
		set_bit(R10BIO_Discard, &r10_bio->state);
		first_copy = false;
		first_r10bio = r10_bio;
	} else
		r10_bio->master_bio = (struct bio *)first_r10bio;

	/*
	 * First select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 */
	for (disk = 0; disk < geo->raid_disks; disk++) {
		struct md_rdev *rdev, *rrdev;

		rdev = conf->mirrors[disk].rdev;
		rrdev = conf->mirrors[disk].replacement;
		r10_bio->devs[disk].bio = NULL;
		r10_bio->devs[disk].repl_bio = NULL;

		if (rdev && (test_bit(Faulty, &rdev->flags)))
			rdev = NULL;
		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
			rrdev = NULL;
		if (!rdev && !rrdev)
			continue;

		if (rdev) {
			r10_bio->devs[disk].bio = bio;
			atomic_inc(&rdev->nr_pending);
		}
		if (rrdev) {
			r10_bio->devs[disk].repl_bio = bio;
			atomic_inc(&rrdev->nr_pending);
		}
	}

1744 atomic_set(&r10_bio->remaining, 1);
1745 for (disk = 0; disk < geo->raid_disks; disk++) {
1746 sector_t dev_start, dev_end;
1747 struct bio *mbio, *rbio = NULL;
1748
1749 /*
1750 * Now start to calculate the start and end address for each disk.
1751 * The space between dev_start and dev_end is the discard region.
1752 *
1753 * For dev_start, it needs to consider three conditions:
1754 * 1st, the disk is before start_disk, you can imagine the disk in
1755 * the next stripe. So the dev_start is the start address of next
1756 * stripe.
1757 * 2st, the disk is after start_disk, it means the disk is at the
1758 * same stripe of first disk
1759 * 3st, the first disk itself, we can use start_disk_offset directly
1760 */
1761 if (disk < start_disk_index)
1762 dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
1763 else if (disk > start_disk_index)
1764 dev_start = first_stripe_index * mddev->chunk_sectors;
1765 else
1766 dev_start = start_disk_offset;
1767
1768 if (disk < end_disk_index)
1769 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
1770 else if (disk > end_disk_index)
1771 dev_end = last_stripe_index * mddev->chunk_sectors;
1772 else
1773 dev_end = end_disk_offset;
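/*
 * Continuing the illustrative example above (start_disk_index = 2,
 * first_stripe_index = 3, chunk_sectors = 128): disks 0 and 1 get
 * dev_start = (3 + 1) * 128 = 512, disk 3 gets dev_start = 3 * 128 = 384,
 * and disk 2 itself gets start_disk_offset = 488.
 */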
1774
1775 /*
1776 * Only discard bios whose size is >= the stripe size are handled
1777 * here, so dev_end > dev_start always holds.
1778 * No extra locking is needed to get rdev here; we already incremented
1779 * rdev->nr_pending in the first loop.
1780 */
1781 if (r10_bio->devs[disk].bio) {
1782 struct md_rdev *rdev = conf->mirrors[disk].rdev;
1783 mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1784 &mddev->bio_set);
1785 mbio->bi_end_io = raid10_end_discard_request;
1786 mbio->bi_private = r10_bio;
1787 r10_bio->devs[disk].bio = mbio;
1788 r10_bio->devs[disk].devnum = disk;
1789 atomic_inc(&r10_bio->remaining);
1790 md_submit_discard_bio(mddev, rdev, mbio,
1791 dev_start + choose_data_offset(r10_bio, rdev),
1792 dev_end - dev_start);
1793 bio_endio(mbio);
1794 }
1795 if (r10_bio->devs[disk].repl_bio) {
1796 struct md_rdev *rrdev = conf->mirrors[disk].replacement;
1797 rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1798 &mddev->bio_set);
1799 rbio->bi_end_io = raid10_end_discard_request;
1800 rbio->bi_private = r10_bio;
1801 r10_bio->devs[disk].repl_bio = rbio;
1802 r10_bio->devs[disk].devnum = disk;
1803 atomic_inc(&r10_bio->remaining);
1804 md_submit_discard_bio(mddev, rrdev, rbio,
1805 dev_start + choose_data_offset(r10_bio, rrdev),
1806 dev_end - dev_start);
1807 bio_endio(rbio);
1808 }
1809 }
1810
1811 if (!geo->far_offset && --far_copies) {
1812 first_stripe_index += geo->stride >> geo->chunk_shift;
1813 start_disk_offset += geo->stride;
1814 last_stripe_index += geo->stride >> geo->chunk_shift;
1815 end_disk_offset += geo->stride;
1816 atomic_inc(&first_r10bio->remaining);
1817 raid_end_discard_bio(r10_bio);
1818 wait_barrier(conf, false);
1819 goto retry_discard;
1820 }
1821
1822 raid_end_discard_bio(r10_bio);
1823
1824 return 0;
1825 out:
1826 allow_barrier(conf);
1827 return -EAGAIN;
1828 }
1829
1830 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1831 {
1832 struct r10conf *conf = mddev->private;
1833 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1834 int chunk_sects = chunk_mask + 1;
1835 int sectors = bio_sectors(bio);
1836
1837 if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1838 && md_flush_request(mddev, bio))
1839 return true;
1840
1841 md_write_start(mddev, bio);
1842
1843 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1844 if (!raid10_handle_discard(mddev, bio))
1845 return true;
1846
1847 /*
1848 * If this request crosses a chunk boundary, we need to split
1849 * it.
1850 */
1851 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1852 sectors > chunk_sects
1853 && (conf->geo.near_copies < conf->geo.raid_disks
1854 || conf->prev.near_copies <
1855 conf->prev.raid_disks)))
1856 sectors = chunk_sects -
1857 (bio->bi_iter.bi_sector &
1858 (chunk_sects - 1));
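/*
 * For example (assumed numbers): with 128-sector chunks, a 200-sector
 * bio at sector 1000 has (1000 & 127) + 200 > 128, so sectors becomes
 * 128 - 104 = 24 and only the part up to the chunk boundary at sector
 * 1024 is issued now; the remainder is split off and resubmitted by the
 * request machinery below.
 */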
1859 __make_request(mddev, bio, sectors);
1860
1861 /* In case raid10d snuck in to freeze_array */
1862 wake_up_barrier(conf);
1863 return true;
1864 }
1865
1866 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1867 {
1868 struct r10conf *conf = mddev->private;
1869 int i;
1870
1871 lockdep_assert_held(&mddev->lock);
1872
1873 if (conf->geo.near_copies < conf->geo.raid_disks)
1874 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1875 if (conf->geo.near_copies > 1)
1876 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1877 if (conf->geo.far_copies > 1) {
1878 if (conf->geo.far_offset)
1879 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1880 else
1881 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1882 if (conf->geo.far_set_size != conf->geo.raid_disks)
1883 seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1884 }
1885 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1886 conf->geo.raid_disks - mddev->degraded);
1887 for (i = 0; i < conf->geo.raid_disks; i++) {
1888 struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
1889
1890 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1891 }
1892 seq_printf(seq, "]");
1893 }
1894
1895 /* Check if there are enough drives for
1896 * every block to appear on at least one.
1897 * Don't consider the device numbered 'ignore'
1898 * as we might be about to remove it.
1899 */
1900 static int _enough(struct r10conf *conf, int previous, int ignore)
1901 {
1902 int first = 0;
1903 int has_enough = 0;
1904 int disks, ncopies;
1905 if (previous) {
1906 disks = conf->prev.raid_disks;
1907 ncopies = conf->prev.near_copies;
1908 } else {
1909 disks = conf->geo.raid_disks;
1910 ncopies = conf->geo.near_copies;
1911 }
1912
1913 do {
1914 int n = conf->copies;
1915 int cnt = 0;
1916 int this = first;
1917 while (n--) {
1918 struct md_rdev *rdev;
1919 if (this != ignore &&
1920 (rdev = conf->mirrors[this].rdev) &&
1921 test_bit(In_sync, &rdev->flags))
1922 cnt++;
1923 this = (this+1) % disks;
1924 }
1925 if (cnt == 0)
1926 goto out;
1927 first = (first + ncopies) % disks;
1928 } while (first != 0);
1929 has_enough = 1;
1930 out:
1931 return has_enough;
1932 }
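/*
 * Example (assumed geometry): raid_disks = 4, near_copies = 2 and
 * far_copies = 1 give copies = 2, so the loop above checks the pairs
 * {0,1} and {2,3}. Losing disks 0 and 2 still leaves a copy of every
 * block (disks 1 and 3 survive), but losing disks 0 and 1 together does
 * not, and _enough() returns 0.
 */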
1933
1934 static int enough(struct r10conf *conf, int ignore)
1935 {
1936 /* when calling 'enough', both 'prev' and 'geo' must
1937 * be stable.
1938 * This is ensured if ->reconfig_mutex or ->device_lock
1939 * is held.
1940 */
1941 return _enough(conf, 0, ignore) &&
1942 _enough(conf, 1, ignore);
1943 }
1944
1945 /**
1946 * raid10_error() - RAID10 error handler.
1947 * @mddev: affected md device.
1948 * @rdev: member device to fail.
1949 *
1950 * The routine acknowledges &rdev failure and determines new @mddev state.
1951 * If it failed, then:
1952 * - &MD_BROKEN flag is set in &mddev->flags.
1953 * Otherwise, it must be degraded:
1954 * - recovery is interrupted.
1955 * - &mddev->degraded is bumped.
1956 *
1957 * @rdev is marked as &Faulty excluding case when array is failed and
1958 * &mddev->fail_last_dev is off.
1959 */
1960 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1961 {
1962 struct r10conf *conf = mddev->private;
1963 unsigned long flags;
1964
1965 spin_lock_irqsave(&conf->device_lock, flags);
1966
1967 if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
1968 set_bit(MD_BROKEN, &mddev->flags);
1969
1970 if (!mddev->fail_last_dev) {
1971 spin_unlock_irqrestore(&conf->device_lock, flags);
1972 return;
1973 }
1974 }
1975 if (test_and_clear_bit(In_sync, &rdev->flags))
1976 mddev->degraded++;
1977
1978 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1979 set_bit(Blocked, &rdev->flags);
1980 set_bit(Faulty, &rdev->flags);
1981 set_mask_bits(&mddev->sb_flags, 0,
1982 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1983 spin_unlock_irqrestore(&conf->device_lock, flags);
1984 pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
1985 "md/raid10:%s: Operation continuing on %d devices.\n",
1986 mdname(mddev), rdev->bdev,
1987 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1988 }
1989
1990 static void print_conf(struct r10conf *conf)
1991 {
1992 int i;
1993 struct md_rdev *rdev;
1994
1995 pr_debug("RAID10 conf printout:\n");
1996 if (!conf) {
1997 pr_debug("(!conf)\n");
1998 return;
1999 }
2000 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2001 conf->geo.raid_disks);
2002
2003 lockdep_assert_held(&conf->mddev->reconfig_mutex);
2004 for (i = 0; i < conf->geo.raid_disks; i++) {
2005 rdev = conf->mirrors[i].rdev;
2006 if (rdev)
2007 pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
2008 i, !test_bit(In_sync, &rdev->flags),
2009 !test_bit(Faulty, &rdev->flags),
2010 rdev->bdev);
2011 }
2012 }
2013
2014 static void close_sync(struct r10conf *conf)
2015 {
2016 wait_barrier(conf, false);
2017 allow_barrier(conf);
2018
2019 mempool_exit(&conf->r10buf_pool);
2020 }
2021
2022 static int raid10_spare_active(struct mddev *mddev)
2023 {
2024 int i;
2025 struct r10conf *conf = mddev->private;
2026 struct raid10_info *tmp;
2027 int count = 0;
2028 unsigned long flags;
2029
2030 /*
2031 * Find all non-in_sync disks within the RAID10 configuration
2032 * and mark them in_sync
2033 */
2034 for (i = 0; i < conf->geo.raid_disks; i++) {
2035 tmp = conf->mirrors + i;
2036 if (tmp->replacement
2037 && tmp->replacement->recovery_offset == MaxSector
2038 && !test_bit(Faulty, &tmp->replacement->flags)
2039 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
2040 /* Replacement has just become active */
2041 if (!tmp->rdev
2042 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
2043 count++;
2044 if (tmp->rdev) {
2045 /* Replaced device not technically faulty,
2046 * but we need to be sure it gets removed
2047 * and never re-added.
2048 */
2049 set_bit(Faulty, &tmp->rdev->flags);
2050 sysfs_notify_dirent_safe(
2051 tmp->rdev->sysfs_state);
2052 }
2053 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
2054 } else if (tmp->rdev
2055 && tmp->rdev->recovery_offset == MaxSector
2056 && !test_bit(Faulty, &tmp->rdev->flags)
2057 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
2058 count++;
2059 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
2060 }
2061 }
2062 spin_lock_irqsave(&conf->device_lock, flags);
2063 mddev->degraded -= count;
2064 spin_unlock_irqrestore(&conf->device_lock, flags);
2065
2066 print_conf(conf);
2067 return count;
2068 }
2069
2070 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2071 {
2072 struct r10conf *conf = mddev->private;
2073 int err = -EEXIST;
2074 int mirror, repl_slot = -1;
2075 int first = 0;
2076 int last = conf->geo.raid_disks - 1;
2077 struct raid10_info *p;
2078
2079 if (mddev->recovery_cp < MaxSector)
2080 /* only hot-add to in-sync arrays, as recovery is
2081 * very different from resync
2082 */
2083 return -EBUSY;
2084 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2085 return -EINVAL;
2086
2087 if (rdev->raid_disk >= 0)
2088 first = last = rdev->raid_disk;
2089
2090 if (rdev->saved_raid_disk >= first &&
2091 rdev->saved_raid_disk < conf->geo.raid_disks &&
2092 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2093 mirror = rdev->saved_raid_disk;
2094 else
2095 mirror = first;
2096 for ( ; mirror <= last ; mirror++) {
2097 p = &conf->mirrors[mirror];
2098 if (p->recovery_disabled == mddev->recovery_disabled)
2099 continue;
2100 if (p->rdev) {
2101 if (test_bit(WantReplacement, &p->rdev->flags) &&
2102 p->replacement == NULL && repl_slot < 0)
2103 repl_slot = mirror;
2104 continue;
2105 }
2106
2107 err = mddev_stack_new_rdev(mddev, rdev);
2108 if (err)
2109 return err;
2110 p->head_position = 0;
2111 p->recovery_disabled = mddev->recovery_disabled - 1;
2112 rdev->raid_disk = mirror;
2113 err = 0;
2114 if (rdev->saved_raid_disk != mirror)
2115 conf->fullsync = 1;
2116 WRITE_ONCE(p->rdev, rdev);
2117 break;
2118 }
2119
2120 if (err && repl_slot >= 0) {
2121 p = &conf->mirrors[repl_slot];
2122 clear_bit(In_sync, &rdev->flags);
2123 set_bit(Replacement, &rdev->flags);
2124 rdev->raid_disk = repl_slot;
2125 err = mddev_stack_new_rdev(mddev, rdev);
2126 if (err)
2127 return err;
2128 conf->fullsync = 1;
2129 WRITE_ONCE(p->replacement, rdev);
2130 }
2131
2132 print_conf(conf);
2133 return err;
2134 }
2135
2136 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2137 {
2138 struct r10conf *conf = mddev->private;
2139 int err = 0;
2140 int number = rdev->raid_disk;
2141 struct md_rdev **rdevp;
2142 struct raid10_info *p;
2143
2144 print_conf(conf);
2145 if (unlikely(number >= mddev->raid_disks))
2146 return 0;
2147 p = conf->mirrors + number;
2148 if (rdev == p->rdev)
2149 rdevp = &p->rdev;
2150 else if (rdev == p->replacement)
2151 rdevp = &p->replacement;
2152 else
2153 return 0;
2154
2155 if (test_bit(In_sync, &rdev->flags) ||
2156 atomic_read(&rdev->nr_pending)) {
2157 err = -EBUSY;
2158 goto abort;
2159 }
2160 /* Only remove non-faulty devices if recovery
2161 * is not possible.
2162 */
2163 if (!test_bit(Faulty, &rdev->flags) &&
2164 mddev->recovery_disabled != p->recovery_disabled &&
2165 (!p->replacement || p->replacement == rdev) &&
2166 number < conf->geo.raid_disks &&
2167 enough(conf, -1)) {
2168 err = -EBUSY;
2169 goto abort;
2170 }
2171 WRITE_ONCE(*rdevp, NULL);
2172 if (p->replacement) {
2173 /* We must have just cleared 'rdev' */
2174 WRITE_ONCE(p->rdev, p->replacement);
2175 clear_bit(Replacement, &p->replacement->flags);
2176 WRITE_ONCE(p->replacement, NULL);
2177 }
2178
2179 clear_bit(WantReplacement, &rdev->flags);
2180 err = md_integrity_register(mddev);
2181
2182 abort:
2183
2184 print_conf(conf);
2185 return err;
2186 }
2187
2188 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2189 {
2190 struct r10conf *conf = r10_bio->mddev->private;
2191
2192 if (!bio->bi_status)
2193 set_bit(R10BIO_Uptodate, &r10_bio->state);
2194 else
2195 /* The write handler will notice the lack of
2196 * R10BIO_Uptodate and record any errors etc
2197 */
2198 atomic_add(r10_bio->sectors,
2199 &conf->mirrors[d].rdev->corrected_errors);
2200
2201 /* for reconstruct, we always reschedule after a read.
2202 * for resync, only after all reads
2203 */
2204 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2205 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2206 atomic_dec_and_test(&r10_bio->remaining)) {
2207 /* we have read all the blocks,
2208 * do the comparison in process context in raid10d
2209 */
2210 reschedule_retry(r10_bio);
2211 }
2212 }
2213
2214 static void end_sync_read(struct bio *bio)
2215 {
2216 struct r10bio *r10_bio = get_resync_r10bio(bio);
2217 struct r10conf *conf = r10_bio->mddev->private;
2218 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2219
2220 __end_sync_read(r10_bio, bio, d);
2221 }
2222
2223 static void end_reshape_read(struct bio *bio)
2224 {
2225 /* reshape read bio isn't allocated from r10buf_pool */
2226 struct r10bio *r10_bio = bio->bi_private;
2227
2228 __end_sync_read(r10_bio, bio, r10_bio->read_slot);
2229 }
2230
2231 static void end_sync_request(struct r10bio *r10_bio)
2232 {
2233 struct mddev *mddev = r10_bio->mddev;
2234
2235 while (atomic_dec_and_test(&r10_bio->remaining)) {
2236 if (r10_bio->master_bio == NULL) {
2237 /* the primary of several recovery bios */
2238 sector_t s = r10_bio->sectors;
2239 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2240 test_bit(R10BIO_WriteError, &r10_bio->state))
2241 reschedule_retry(r10_bio);
2242 else
2243 put_buf(r10_bio);
2244 md_done_sync(mddev, s, 1);
2245 break;
2246 } else {
2247 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2248 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2249 test_bit(R10BIO_WriteError, &r10_bio->state))
2250 reschedule_retry(r10_bio);
2251 else
2252 put_buf(r10_bio);
2253 r10_bio = r10_bio2;
2254 }
2255 }
2256 }
2257
2258 static void end_sync_write(struct bio *bio)
2259 {
2260 struct r10bio *r10_bio = get_resync_r10bio(bio);
2261 struct mddev *mddev = r10_bio->mddev;
2262 struct r10conf *conf = mddev->private;
2263 int d;
2264 int slot;
2265 int repl;
2266 struct md_rdev *rdev = NULL;
2267
2268 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2269 if (repl)
2270 rdev = conf->mirrors[d].replacement;
2271 else
2272 rdev = conf->mirrors[d].rdev;
2273
2274 if (bio->bi_status) {
2275 if (repl)
2276 md_error(mddev, rdev);
2277 else {
2278 set_bit(WriteErrorSeen, &rdev->flags);
2279 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2280 set_bit(MD_RECOVERY_NEEDED,
2281 &rdev->mddev->recovery);
2282 set_bit(R10BIO_WriteError, &r10_bio->state);
2283 }
2284 } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
2285 r10_bio->sectors)) {
2286 set_bit(R10BIO_MadeGood, &r10_bio->state);
2287 }
2288
2289 rdev_dec_pending(rdev, mddev);
2290
2291 end_sync_request(r10_bio);
2292 }
2293
2294 /*
2295 * Note: sync and recovery are handled very differently for raid10.
2296 * This code is for resync.
2297 * For resync, we read through virtual addresses and read all blocks.
2298 * If there is any error, we schedule a write. The lowest numbered
2299 * drive is authoritative.
2300 * However, requests come in for physical addresses, so we need to map.
2301 * For every physical address there are raid_disks/copies virtual addresses,
2302 * which is always at least one, but is not necessarily an integer.
2303 * This means that a physical address can span multiple chunks, so we may
2304 * have to submit multiple io requests for a single sync request.
2305 */
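/*
 * For example (assumed geometry): raid_disks = 5 with copies = 2 gives
 * 5/2 = 2.5 virtual addresses per physical address, so a physical range
 * need not line up with chunk boundaries in the virtual space and may
 * have to be issued as more than one request.
 */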
2306 /*
2307 * We check if all blocks are in-sync and only write to blocks that
2308 * aren't in sync
2309 */
2310 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2311 {
2312 struct r10conf *conf = mddev->private;
2313 int i, first;
2314 struct bio *tbio, *fbio;
2315 int vcnt;
2316 struct page **tpages, **fpages;
2317
2318 atomic_set(&r10_bio->remaining, 1);
2319
2320 /* find the first device with a block */
2321 for (i=0; i<conf->copies; i++)
2322 if (!r10_bio->devs[i].bio->bi_status)
2323 break;
2324
2325 if (i == conf->copies)
2326 goto done;
2327
2328 first = i;
2329 fbio = r10_bio->devs[i].bio;
2330 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2331 fbio->bi_iter.bi_idx = 0;
2332 fpages = get_resync_pages(fbio)->pages;
2333
2334 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2335 /* now find blocks with errors */
2336 for (i=0 ; i < conf->copies ; i++) {
2337 int j, d;
2338 struct md_rdev *rdev;
2339 struct resync_pages *rp;
2340
2341 tbio = r10_bio->devs[i].bio;
2342
2343 if (tbio->bi_end_io != end_sync_read)
2344 continue;
2345 if (i == first)
2346 continue;
2347
2348 tpages = get_resync_pages(tbio)->pages;
2349 d = r10_bio->devs[i].devnum;
2350 rdev = conf->mirrors[d].rdev;
2351 if (!r10_bio->devs[i].bio->bi_status) {
2352 /* We know that the bi_io_vec layout is the same for
2353 * both 'first' and 'i', so we just compare them.
2354 * All vec entries are PAGE_SIZE;
2355 */
2356 int sectors = r10_bio->sectors;
2357 for (j = 0; j < vcnt; j++) {
2358 int len = PAGE_SIZE;
2359 if (sectors < (len / 512))
2360 len = sectors * 512;
2361 if (memcmp(page_address(fpages[j]),
2362 page_address(tpages[j]),
2363 len))
2364 break;
2365 sectors -= len/512;
2366 }
2367 if (j == vcnt)
2368 continue;
2369 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2370 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2371 /* Don't fix anything. */
2372 continue;
2373 } else if (test_bit(FailFast, &rdev->flags)) {
2374 /* Just give up on this device */
2375 md_error(rdev->mddev, rdev);
2376 continue;
2377 }
2378 /* Ok, we need to write this bio, either to correct an
2379 * inconsistency or to correct an unreadable block.
2380 * First we need to fixup bv_offset, bv_len and
2381 * bi_vecs, as the read request might have corrupted these
2382 */
2383 rp = get_resync_pages(tbio);
2384 bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2385
2386 md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2387
2388 rp->raid_bio = r10_bio;
2389 tbio->bi_private = rp;
2390 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2391 tbio->bi_end_io = end_sync_write;
2392
2393 bio_copy_data(tbio, fbio);
2394
2395 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2396 atomic_inc(&r10_bio->remaining);
2397 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2398
2399 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2400 tbio->bi_opf |= MD_FAILFAST;
2401 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2402 submit_bio_noacct(tbio);
2403 }
2404
2405 /* Now write out to any replacement devices
2406 * that are active
2407 */
2408 for (i = 0; i < conf->copies; i++) {
2409 int d;
2410
2411 tbio = r10_bio->devs[i].repl_bio;
2412 if (!tbio || !tbio->bi_end_io)
2413 continue;
2414 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2415 && r10_bio->devs[i].bio != fbio)
2416 bio_copy_data(tbio, fbio);
2417 d = r10_bio->devs[i].devnum;
2418 atomic_inc(&r10_bio->remaining);
2419 md_sync_acct(conf->mirrors[d].replacement->bdev,
2420 bio_sectors(tbio));
2421 submit_bio_noacct(tbio);
2422 }
2423
2424 done:
2425 if (atomic_dec_and_test(&r10_bio->remaining)) {
2426 md_done_sync(mddev, r10_bio->sectors, 1);
2427 put_buf(r10_bio);
2428 }
2429 }
2430
2431 /*
2432 * Now for the recovery code.
2433 * Recovery happens across physical sectors.
2434 * We recover all non-in_sync drives by finding the virtual address of
2435 * each, and then choose a working drive that also has that virt address.
2436 * There is a separate r10_bio for each non-in_sync drive.
2437 * Only the first two slots are in use. The first for reading,
2438 * The second for writing.
2439 *
2440 */
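/*
 * Concretely, devs[0] holds the devnum/addr read from and devs[1] the
 * devnum/addr of the device being rebuilt; both slots are filled in by
 * raid10_sync_request() while setting up recovery.
 */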
2441 static void fix_recovery_read_error(struct r10bio *r10_bio)
2442 {
2443 /* We got a read error during recovery.
2444 * We repeat the read in smaller page-sized sections.
2445 * If a read succeeds, write it to the new device or record
2446 * a bad block if we cannot.
2447 * If a read fails, record a bad block on both old and
2448 * new devices.
2449 */
2450 struct mddev *mddev = r10_bio->mddev;
2451 struct r10conf *conf = mddev->private;
2452 struct bio *bio = r10_bio->devs[0].bio;
2453 sector_t sect = 0;
2454 int sectors = r10_bio->sectors;
2455 int idx = 0;
2456 int dr = r10_bio->devs[0].devnum;
2457 int dw = r10_bio->devs[1].devnum;
2458 struct page **pages = get_resync_pages(bio)->pages;
2459
2460 while (sectors) {
2461 int s = sectors;
2462 struct md_rdev *rdev;
2463 sector_t addr;
2464 int ok;
2465
2466 if (s > (PAGE_SIZE>>9))
2467 s = PAGE_SIZE >> 9;
2468
2469 rdev = conf->mirrors[dr].rdev;
2470 addr = r10_bio->devs[0].addr + sect;
2471 ok = sync_page_io(rdev,
2472 addr,
2473 s << 9,
2474 pages[idx],
2475 REQ_OP_READ, false);
2476 if (ok) {
2477 rdev = conf->mirrors[dw].rdev;
2478 addr = r10_bio->devs[1].addr + sect;
2479 ok = sync_page_io(rdev,
2480 addr,
2481 s << 9,
2482 pages[idx],
2483 REQ_OP_WRITE, false);
2484 if (!ok) {
2485 set_bit(WriteErrorSeen, &rdev->flags);
2486 if (!test_and_set_bit(WantReplacement,
2487 &rdev->flags))
2488 set_bit(MD_RECOVERY_NEEDED,
2489 &rdev->mddev->recovery);
2490 }
2491 }
2492 if (!ok) {
2493 /* We don't worry if we cannot set a bad block -
2494 * it really is bad so there is no loss in not
2495 * recording it yet
2496 */
2497 rdev_set_badblocks(rdev, addr, s, 0);
2498
2499 if (rdev != conf->mirrors[dw].rdev) {
2500 /* need bad block on destination too */
2501 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2502 addr = r10_bio->devs[1].addr + sect;
2503 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2504 if (!ok) {
2505 /* just abort the recovery */
2506 pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2507 mdname(mddev));
2508
2509 conf->mirrors[dw].recovery_disabled
2510 = mddev->recovery_disabled;
2511 set_bit(MD_RECOVERY_INTR,
2512 &mddev->recovery);
2513 break;
2514 }
2515 }
2516 }
2517
2518 sectors -= s;
2519 sect += s;
2520 idx++;
2521 }
2522 }
2523
2524 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2525 {
2526 struct r10conf *conf = mddev->private;
2527 int d;
2528 struct bio *wbio = r10_bio->devs[1].bio;
2529 struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2530
2531 /* Need to test wbio2->bi_end_io before we call
2532 * submit_bio_noacct, because if the former is NULL,
2533 * the latter is free to free wbio2.
2534 */
2535 if (wbio2 && !wbio2->bi_end_io)
2536 wbio2 = NULL;
2537
2538 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2539 fix_recovery_read_error(r10_bio);
2540 if (wbio->bi_end_io)
2541 end_sync_request(r10_bio);
2542 if (wbio2)
2543 end_sync_request(r10_bio);
2544 return;
2545 }
2546
2547 /*
2548 * share the pages with the first bio
2549 * and submit the write request
2550 */
2551 d = r10_bio->devs[1].devnum;
2552 if (wbio->bi_end_io) {
2553 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2554 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2555 submit_bio_noacct(wbio);
2556 }
2557 if (wbio2) {
2558 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2559 md_sync_acct(conf->mirrors[d].replacement->bdev,
2560 bio_sectors(wbio2));
2561 submit_bio_noacct(wbio2);
2562 }
2563 }
2564
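/*
 * Return convention of r10_sync_page_io() below: 1 if the I/O succeeded;
 * 0 if it failed and a bad block was recorded (or the device was failed);
 * -1 if the range already has a known bad block so the I/O was not
 * attempted (always for reads, and for writes once WriteErrorSeen is set).
 */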
2565 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2566 int sectors, struct page *page, enum req_op op)
2567 {
2568 if (rdev_has_badblock(rdev, sector, sectors) &&
2569 (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
2570 return -1;
2571 if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
2572 /* success */
2573 return 1;
2574 if (op == REQ_OP_WRITE) {
2575 set_bit(WriteErrorSeen, &rdev->flags);
2576 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2577 set_bit(MD_RECOVERY_NEEDED,
2578 &rdev->mddev->recovery);
2579 }
2580 /* need to record an error - either for the block or the device */
2581 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2582 md_error(rdev->mddev, rdev);
2583 return 0;
2584 }
2585
2586 /*
2587 * This is a kernel thread which:
2588 *
2589 * 1. Retries failed read operations on working mirrors.
2590 * 2. Updates the raid superblock when problems are encountered.
2591 * 3. Performs writes following reads for array synchronising.
2592 */
2593
2594 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2595 {
2596 int sect = 0; /* Offset from r10_bio->sector */
2597 int sectors = r10_bio->sectors, slot = r10_bio->read_slot;
2598 struct md_rdev *rdev;
2599 int d = r10_bio->devs[slot].devnum;
2600
2601 /* still own a reference to this rdev, so it cannot
2602 * have been cleared recently.
2603 */
2604 rdev = conf->mirrors[d].rdev;
2605
2606 if (test_bit(Faulty, &rdev->flags))
2607 /* drive has already been failed, just ignore any
2608 * more fix_read_error() attempts */
2609 return;
2610
2611 if (exceed_read_errors(mddev, rdev)) {
2612 r10_bio->devs[slot].bio = IO_BLOCKED;
2613 return;
2614 }
2615
2616 while(sectors) {
2617 int s = sectors;
2618 int sl = slot;
2619 int success = 0;
2620 int start;
2621
2622 if (s > (PAGE_SIZE>>9))
2623 s = PAGE_SIZE >> 9;
2624
2625 do {
2626 d = r10_bio->devs[sl].devnum;
2627 rdev = conf->mirrors[d].rdev;
2628 if (rdev &&
2629 test_bit(In_sync, &rdev->flags) &&
2630 !test_bit(Faulty, &rdev->flags) &&
2631 rdev_has_badblock(rdev,
2632 r10_bio->devs[sl].addr + sect,
2633 s) == 0) {
2634 atomic_inc(&rdev->nr_pending);
2635 success = sync_page_io(rdev,
2636 r10_bio->devs[sl].addr +
2637 sect,
2638 s<<9,
2639 conf->tmppage,
2640 REQ_OP_READ, false);
2641 rdev_dec_pending(rdev, mddev);
2642 if (success)
2643 break;
2644 }
2645 sl++;
2646 if (sl == conf->copies)
2647 sl = 0;
2648 } while (sl != slot);
2649
2650 if (!success) {
2651 /* Cannot read from anywhere, just mark the block
2652 * as bad on the first device to discourage future
2653 * reads.
2654 */
2655 int dn = r10_bio->devs[slot].devnum;
2656 rdev = conf->mirrors[dn].rdev;
2657
2658 if (!rdev_set_badblocks(
2659 rdev,
2660 r10_bio->devs[slot].addr
2661 + sect,
2662 s, 0)) {
2663 md_error(mddev, rdev);
2664 r10_bio->devs[slot].bio
2665 = IO_BLOCKED;
2666 }
2667 break;
2668 }
2669
2670 start = sl;
2671 /* write it back and re-read */
2672 while (sl != slot) {
2673 if (sl==0)
2674 sl = conf->copies;
2675 sl--;
2676 d = r10_bio->devs[sl].devnum;
2677 rdev = conf->mirrors[d].rdev;
2678 if (!rdev ||
2679 test_bit(Faulty, &rdev->flags) ||
2680 !test_bit(In_sync, &rdev->flags))
2681 continue;
2682
2683 atomic_inc(&rdev->nr_pending);
2684 if (r10_sync_page_io(rdev,
2685 r10_bio->devs[sl].addr +
2686 sect,
2687 s, conf->tmppage, REQ_OP_WRITE)
2688 == 0) {
2689 /* Well, this device is dead */
2690 pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
2691 mdname(mddev), s,
2692 (unsigned long long)(
2693 sect +
2694 choose_data_offset(r10_bio,
2695 rdev)),
2696 rdev->bdev);
2697 pr_notice("md/raid10:%s: %pg: failing drive\n",
2698 mdname(mddev),
2699 rdev->bdev);
2700 }
2701 rdev_dec_pending(rdev, mddev);
2702 }
2703 sl = start;
2704 while (sl != slot) {
2705 if (sl==0)
2706 sl = conf->copies;
2707 sl--;
2708 d = r10_bio->devs[sl].devnum;
2709 rdev = conf->mirrors[d].rdev;
2710 if (!rdev ||
2711 test_bit(Faulty, &rdev->flags) ||
2712 !test_bit(In_sync, &rdev->flags))
2713 continue;
2714
2715 atomic_inc(&rdev->nr_pending);
2716 switch (r10_sync_page_io(rdev,
2717 r10_bio->devs[sl].addr +
2718 sect,
2719 s, conf->tmppage, REQ_OP_READ)) {
2720 case 0:
2721 /* Well, this device is dead */
2722 pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
2723 mdname(mddev), s,
2724 (unsigned long long)(
2725 sect +
2726 choose_data_offset(r10_bio, rdev)),
2727 rdev->bdev);
2728 pr_notice("md/raid10:%s: %pg: failing drive\n",
2729 mdname(mddev),
2730 rdev->bdev);
2731 break;
2732 case 1:
2733 pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %pg)\n",
2734 mdname(mddev), s,
2735 (unsigned long long)(
2736 sect +
2737 choose_data_offset(r10_bio, rdev)),
2738 rdev->bdev);
2739 atomic_add(s, &rdev->corrected_errors);
2740 }
2741
2742 rdev_dec_pending(rdev, mddev);
2743 }
2744
2745 sectors -= s;
2746 sect += s;
2747 }
2748 }
2749
2750 static int narrow_write_error(struct r10bio *r10_bio, int i)
2751 {
2752 struct bio *bio = r10_bio->master_bio;
2753 struct mddev *mddev = r10_bio->mddev;
2754 struct r10conf *conf = mddev->private;
2755 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2756 /* bio has the data to be written to slot 'i' where
2757 * we just recently had a write error.
2758 * We repeatedly clone the bio and trim down to one block,
2759 * then try the write. Where the write fails we record
2760 * a bad block.
2761 * It is conceivable that the bio doesn't exactly align with
2762 * blocks. We must handle this.
2763 *
2764 * We currently own a reference to the rdev.
2765 */
2766
2767 int block_sectors;
2768 sector_t sector;
2769 int sectors;
2770 int sect_to_write = r10_bio->sectors;
2771 int ok = 1;
2772
2773 if (rdev->badblocks.shift < 0)
2774 return 0;
2775
2776 block_sectors = roundup(1 << rdev->badblocks.shift,
2777 bdev_logical_block_size(rdev->bdev) >> 9);
2778 sector = r10_bio->sector;
2779 sectors = ((r10_bio->sector + block_sectors)
2780 & ~(sector_t)(block_sectors - 1))
2781 - sector;
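/*
 * Example of the alignment math (assumed values): badblocks.shift = 3
 * (8-sector blocks) and a 512-byte logical block size give
 * block_sectors = 8. For r10_bio->sector = 1003, the first pass writes
 * sectors = ((1003 + 8) & ~7) - 1003 = 5 sectors, reaching the block
 * boundary at 1008; every later pass writes whole 8-sector blocks.
 */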
2782
2783 while (sect_to_write) {
2784 struct bio *wbio;
2785 sector_t wsector;
2786 if (sectors > sect_to_write)
2787 sectors = sect_to_write;
2788 /* Write at 'sector' for 'sectors' */
2789 wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2790 &mddev->bio_set);
2791 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2792 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2793 wbio->bi_iter.bi_sector = wsector +
2794 choose_data_offset(r10_bio, rdev);
2795 wbio->bi_opf = REQ_OP_WRITE;
2796
2797 if (submit_bio_wait(wbio) < 0)
2798 /* Failure! */
2799 ok = rdev_set_badblocks(rdev, wsector,
2800 sectors, 0)
2801 && ok;
2802
2803 bio_put(wbio);
2804 sect_to_write -= sectors;
2805 sector += sectors;
2806 sectors = block_sectors;
2807 }
2808 return ok;
2809 }
2810
2811 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2812 {
2813 int slot = r10_bio->read_slot;
2814 struct bio *bio;
2815 struct r10conf *conf = mddev->private;
2816 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2817
2818 /* We got a read error. Maybe the drive is bad, or maybe just
2819 * the block, in which case we can fix it.
2820 * We freeze all other IO, and try reading the block from
2821 * other devices. When we find one, we re-write the failed
2822 * block in the hope that this fixes the read error.
2823 * This is all done synchronously while the array is
2824 * frozen.
2825 */
2826 bio = r10_bio->devs[slot].bio;
2827 bio_put(bio);
2828 r10_bio->devs[slot].bio = NULL;
2829
2830 if (mddev->ro)
2831 r10_bio->devs[slot].bio = IO_BLOCKED;
2832 else if (!test_bit(FailFast, &rdev->flags)) {
2833 freeze_array(conf, 1);
2834 fix_read_error(conf, mddev, r10_bio);
2835 unfreeze_array(conf);
2836 } else
2837 md_error(mddev, rdev);
2838
2839 rdev_dec_pending(rdev, mddev);
2840 r10_bio->state = 0;
2841 raid10_read_request(mddev, r10_bio->master_bio, r10_bio, false);
2842 /*
2843 * allow_barrier after re-submit to ensure no sync io
2844 * can be issued while regular io pending.
2845 */
2846 allow_barrier(conf);
2847 }
2848
2849 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2850 {
2851 /* Some sort of write request has finished and it
2852 * succeeded in writing where we thought there was a
2853 * bad block. So forget the bad block.
2854 * Or possibly it failed, and we need to record
2855 * a bad block.
2856 */
2857 int m;
2858 struct md_rdev *rdev;
2859
2860 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2861 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2862 for (m = 0; m < conf->copies; m++) {
2863 int dev = r10_bio->devs[m].devnum;
2864 rdev = conf->mirrors[dev].rdev;
2865 if (r10_bio->devs[m].bio == NULL ||
2866 r10_bio->devs[m].bio->bi_end_io == NULL)
2867 continue;
2868 if (!r10_bio->devs[m].bio->bi_status) {
2869 rdev_clear_badblocks(
2870 rdev,
2871 r10_bio->devs[m].addr,
2872 r10_bio->sectors, 0);
2873 } else {
2874 if (!rdev_set_badblocks(
2875 rdev,
2876 r10_bio->devs[m].addr,
2877 r10_bio->sectors, 0))
2878 md_error(conf->mddev, rdev);
2879 }
2880 rdev = conf->mirrors[dev].replacement;
2881 if (r10_bio->devs[m].repl_bio == NULL ||
2882 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2883 continue;
2884
2885 if (!r10_bio->devs[m].repl_bio->bi_status) {
2886 rdev_clear_badblocks(
2887 rdev,
2888 r10_bio->devs[m].addr,
2889 r10_bio->sectors, 0);
2890 } else {
2891 if (!rdev_set_badblocks(
2892 rdev,
2893 r10_bio->devs[m].addr,
2894 r10_bio->sectors, 0))
2895 md_error(conf->mddev, rdev);
2896 }
2897 }
2898 put_buf(r10_bio);
2899 } else {
2900 bool fail = false;
2901 for (m = 0; m < conf->copies; m++) {
2902 int dev = r10_bio->devs[m].devnum;
2903 struct bio *bio = r10_bio->devs[m].bio;
2904 rdev = conf->mirrors[dev].rdev;
2905 if (bio == IO_MADE_GOOD) {
2906 rdev_clear_badblocks(
2907 rdev,
2908 r10_bio->devs[m].addr,
2909 r10_bio->sectors, 0);
2910 rdev_dec_pending(rdev, conf->mddev);
2911 } else if (bio != NULL && bio->bi_status) {
2912 fail = true;
2913 if (!narrow_write_error(r10_bio, m)) {
2914 md_error(conf->mddev, rdev);
2915 set_bit(R10BIO_Degraded,
2916 &r10_bio->state);
2917 }
2918 rdev_dec_pending(rdev, conf->mddev);
2919 }
2920 bio = r10_bio->devs[m].repl_bio;
2921 rdev = conf->mirrors[dev].replacement;
2922 if (rdev && bio == IO_MADE_GOOD) {
2923 rdev_clear_badblocks(
2924 rdev,
2925 r10_bio->devs[m].addr,
2926 r10_bio->sectors, 0);
2927 rdev_dec_pending(rdev, conf->mddev);
2928 }
2929 }
2930 if (fail) {
2931 spin_lock_irq(&conf->device_lock);
2932 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2933 conf->nr_queued++;
2934 spin_unlock_irq(&conf->device_lock);
2935 /*
2936 * In case freeze_array() is waiting for condition
2937 * nr_pending == nr_queued + extra to be true.
2938 */
2939 wake_up(&conf->wait_barrier);
2940 md_wakeup_thread(conf->mddev->thread);
2941 } else {
2942 if (test_bit(R10BIO_WriteError,
2943 &r10_bio->state))
2944 close_write(r10_bio);
2945 raid_end_bio_io(r10_bio);
2946 }
2947 }
2948 }
2949
2950 static void raid10d(struct md_thread *thread)
2951 {
2952 struct mddev *mddev = thread->mddev;
2953 struct r10bio *r10_bio;
2954 unsigned long flags;
2955 struct r10conf *conf = mddev->private;
2956 struct list_head *head = &conf->retry_list;
2957 struct blk_plug plug;
2958
2959 md_check_recovery(mddev);
2960
2961 if (!list_empty_careful(&conf->bio_end_io_list) &&
2962 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2963 LIST_HEAD(tmp);
2964 spin_lock_irqsave(&conf->device_lock, flags);
2965 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2966 while (!list_empty(&conf->bio_end_io_list)) {
2967 list_move(conf->bio_end_io_list.prev, &tmp);
2968 conf->nr_queued--;
2969 }
2970 }
2971 spin_unlock_irqrestore(&conf->device_lock, flags);
2972 while (!list_empty(&tmp)) {
2973 r10_bio = list_first_entry(&tmp, struct r10bio,
2974 retry_list);
2975 list_del(&r10_bio->retry_list);
2976 if (mddev->degraded)
2977 set_bit(R10BIO_Degraded, &r10_bio->state);
2978
2979 if (test_bit(R10BIO_WriteError,
2980 &r10_bio->state))
2981 close_write(r10_bio);
2982 raid_end_bio_io(r10_bio);
2983 }
2984 }
2985
2986 blk_start_plug(&plug);
2987 for (;;) {
2988
2989 flush_pending_writes(conf);
2990
2991 spin_lock_irqsave(&conf->device_lock, flags);
2992 if (list_empty(head)) {
2993 spin_unlock_irqrestore(&conf->device_lock, flags);
2994 break;
2995 }
2996 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2997 list_del(head->prev);
2998 conf->nr_queued--;
2999 spin_unlock_irqrestore(&conf->device_lock, flags);
3000
3001 mddev = r10_bio->mddev;
3002 conf = mddev->private;
3003 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3004 test_bit(R10BIO_WriteError, &r10_bio->state))
3005 handle_write_completed(conf, r10_bio);
3006 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3007 reshape_request_write(mddev, r10_bio);
3008 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3009 sync_request_write(mddev, r10_bio);
3010 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3011 recovery_request_write(mddev, r10_bio);
3012 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3013 handle_read_error(mddev, r10_bio);
3014 else
3015 WARN_ON_ONCE(1);
3016
3017 cond_resched();
3018 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
3019 md_check_recovery(mddev);
3020 }
3021 blk_finish_plug(&plug);
3022 }
3023
3024 static int init_resync(struct r10conf *conf)
3025 {
3026 int ret, buffs, i;
3027
3028 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
3029 BUG_ON(mempool_initialized(&conf->r10buf_pool));
3030 conf->have_replacement = 0;
3031 for (i = 0; i < conf->geo.raid_disks; i++)
3032 if (conf->mirrors[i].replacement)
3033 conf->have_replacement = 1;
3034 ret = mempool_init(&conf->r10buf_pool, buffs,
3035 r10buf_pool_alloc, r10buf_pool_free, conf);
3036 if (ret)
3037 return ret;
3038 conf->next_resync = 0;
3039 return 0;
3040 }
3041
3042 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
3043 {
3044 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
3045 struct resync_pages *rp;
3046 struct bio *bio;
3047 int nalloc;
3048 int i;
3049
3050 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3051 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3052 nalloc = conf->copies; /* resync */
3053 else
3054 nalloc = 2; /* recovery */
3055
3056 for (i = 0; i < nalloc; i++) {
3057 bio = r10bio->devs[i].bio;
3058 rp = bio->bi_private;
3059 bio_reset(bio, NULL, 0);
3060 bio->bi_private = rp;
3061 bio = r10bio->devs[i].repl_bio;
3062 if (bio) {
3063 rp = bio->bi_private;
3064 bio_reset(bio, NULL, 0);
3065 bio->bi_private = rp;
3066 }
3067 }
3068 return r10bio;
3069 }
3070
3071 /*
3072 * Set cluster_sync_high since we need other nodes to add the
3073 * range [cluster_sync_low, cluster_sync_high] to suspend list.
3074 */
3075 static void raid10_set_cluster_sync_high(struct r10conf *conf)
3076 {
3077 sector_t window_size;
3078 int extra_chunk, chunks;
3079
3080 /*
3081 * First, we define a "stripe" as a unit that spans all member
3082 * devices exactly once, so we get the number of chunks via
3083 * raid_disks / near_copies. Otherwise, if near_copies is
3084 * close to raid_disks, the resync window could increase
3085 * linearly with the increase of raid_disks, which means
3086 * we would suspend a really large IO window while it is not
3087 * necessary. If raid_disks is not divisible by near_copies,
3088 * an extra chunk is needed to ensure the whole "stripe" is
3089 * covered.
3090 */
3091
3092 chunks = conf->geo.raid_disks / conf->geo.near_copies;
3093 if (conf->geo.raid_disks % conf->geo.near_copies == 0)
3094 extra_chunk = 0;
3095 else
3096 extra_chunk = 1;
3097 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
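/*
 * For example (assumed geometry): raid_disks = 10 and near_copies = 3
 * give chunks = 3 plus an extra chunk, so window_size = 4 * chunk_sectors.
 * With 64K chunks that is only 512 sectors, so the 32M floor below wins.
 */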
3098
3099 /*
3100 * At least use a 32M window to align with raid1's resync window
3101 */
3102 window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
3103 CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
3104
3105 conf->cluster_sync_high = conf->cluster_sync_low + window_size;
3106 }
3107
3108 /*
3109 * perform a "sync" on one "block"
3110 *
3111 * We need to make sure that no normal I/O request - particularly write
3112 * requests - conflict with active sync requests.
3113 *
3114 * This is achieved by tracking pending requests and a 'barrier' concept
3115 * that can be installed to exclude normal IO requests.
3116 *
3117 * Resync and recovery are handled very differently.
3118 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
3119 *
3120 * For resync, we iterate over virtual addresses, read all copies,
3121 * and update if there are differences. If only one copy is live,
3122 * skip it.
3123 * For recovery, we iterate over physical addresses, read a good
3124 * value for each non-in_sync drive, and over-write.
3125 *
3126 * So, for recovery we may have several outstanding complex requests for a
3127 * given address, one for each out-of-sync device. We model this by allocating
3128 * a number of r10_bio structures, one for each out-of-sync device.
3129 * As we set up these structures, we collect all bios together into a list
3130 * which we then process collectively to add pages, and then process again
3131 * to pass to submit_bio_noacct.
3132 *
3133 * The r10_bio structures are linked using a borrowed master_bio pointer.
3134 * This link is counted in ->remaining. When the r10_bio that points to NULL
3135 * has its remaining count decremented to 0, the whole complex operation
3136 * is complete.
3137 *
3138 */
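/*
 * Illustration of that chain: with two out-of-sync devices, r10_bio A
 * (master_bio == NULL) is allocated first and r10_bio B stores A in its
 * master_bio; A->remaining counts B's reference, and end_sync_request()
 * walks the chain so that md_done_sync() runs only once A drops to zero.
 */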
3139
3140 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3141 sector_t max_sector, int *skipped)
3142 {
3143 struct r10conf *conf = mddev->private;
3144 struct r10bio *r10_bio;
3145 struct bio *biolist = NULL, *bio;
3146 sector_t nr_sectors;
3147 int i;
3148 int max_sync;
3149 sector_t sync_blocks;
3150 sector_t sectors_skipped = 0;
3151 int chunks_skipped = 0;
3152 sector_t chunk_mask = conf->geo.chunk_mask;
3153 int page_idx = 0;
3154 int error_disk = -1;
3155
3156 /*
3157 * Allow skipping a full rebuild for incremental assembly
3158 * of a clean array, like RAID1 does.
3159 */
3160 if (mddev->bitmap == NULL &&
3161 mddev->recovery_cp == MaxSector &&
3162 mddev->reshape_position == MaxSector &&
3163 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
3164 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3165 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
3166 conf->fullsync == 0) {
3167 *skipped = 1;
3168 return mddev->dev_sectors - sector_nr;
3169 }
3170
3171 if (!mempool_initialized(&conf->r10buf_pool))
3172 if (init_resync(conf))
3173 return 0;
3174
3175 skipped:
3176 if (sector_nr >= max_sector) {
3177 conf->cluster_sync_low = 0;
3178 conf->cluster_sync_high = 0;
3179
3180 /* If we aborted, we need to abort the
3181 * sync on the 'current' bitmap chunks (there can
3182 * be several when recovering multiple devices),
3183 * as we may have started syncing them but not finished.
3184 * We can find the current address in
3185 * mddev->curr_resync, but for recovery,
3186 * we need to convert that to several
3187 * virtual addresses.
3188 */
3189 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3190 end_reshape(conf);
3191 close_sync(conf);
3192 return 0;
3193 }
3194
3195 if (mddev->curr_resync < max_sector) { /* aborted */
3196 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3197 mddev->bitmap_ops->end_sync(mddev,
3198 mddev->curr_resync,
3199 &sync_blocks);
3200 else for (i = 0; i < conf->geo.raid_disks; i++) {
3201 sector_t sect =
3202 raid10_find_virt(conf, mddev->curr_resync, i);
3203
3204 mddev->bitmap_ops->end_sync(mddev, sect,
3205 &sync_blocks);
3206 }
3207 } else {
3208 /* completed sync */
3209 if ((!mddev->bitmap || conf->fullsync)
3210 && conf->have_replacement
3211 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3212 /* Completed a full sync so the replacements
3213 * are now fully recovered.
3214 */
3215 for (i = 0; i < conf->geo.raid_disks; i++) {
3216 struct md_rdev *rdev =
3217 conf->mirrors[i].replacement;
3218
3219 if (rdev)
3220 rdev->recovery_offset = MaxSector;
3221 }
3222 }
3223 conf->fullsync = 0;
3224 }
3225 mddev->bitmap_ops->close_sync(mddev);
3226 close_sync(conf);
3227 *skipped = 1;
3228 return sectors_skipped;
3229 }
3230
3231 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3232 return reshape_request(mddev, sector_nr, skipped);
3233
3234 if (chunks_skipped >= conf->geo.raid_disks) {
3235 pr_err("md/raid10:%s: %s fails\n", mdname(mddev),
3236 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? "resync" : "recovery");
3237 if (error_disk >= 0 &&
3238 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3239 /*
3240 * Recovery failed: set mirrors.recovery_disabled so that
3241 * the device won't be re-added there.
3242 */
3243 conf->mirrors[error_disk].recovery_disabled =
3244 mddev->recovery_disabled;
3245 return 0;
3246 }
3247 /*
3248 * if there has been nothing to do on any drive,
3249 * then there is nothing to do at all.
3250 */
3251 *skipped = 1;
3252 return (max_sector - sector_nr) + sectors_skipped;
3253 }
3254
3255 if (max_sector > mddev->resync_max)
3256 max_sector = mddev->resync_max; /* Don't do IO beyond here */
3257
3258 /* make sure whole request will fit in a chunk - if chunks
3259 * are meaningful
3260 */
3261 if (conf->geo.near_copies < conf->geo.raid_disks &&
3262 max_sector > (sector_nr | chunk_mask))
3263 max_sector = (sector_nr | chunk_mask) + 1;
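/*
 * E.g. (assumed numbers) with chunk_mask = 127 and sector_nr = 1000,
 * max_sector is clamped to (1000 | 127) + 1 = 1024, the end of the
 * current chunk.
 */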
3264
3265 /*
3266 * If there is non-resync activity waiting for a turn, then let it
3267 * through before starting on this new sync request.
3268 */
3269 if (conf->nr_waiting)
3270 schedule_timeout_uninterruptible(1);
3271
3272 /* Again, very different code for resync and recovery.
3273 * Both must result in an r10bio with a list of bios that
3274 * have bi_end_io, bi_sector, bi_bdev set,
3275 * and bi_private set to the r10bio.
3276 * For recovery, we may actually create several r10bios
3277 * with 2 bios in each, that correspond to the bios in the main one.
3278 * In this case, the subordinate r10bios link back through a
3279 * borrowed master_bio pointer, and the counter in the master
3280 * includes a ref from each subordinate.
3281 */
3282 /* First, we decide what to do and set ->bi_end_io
3283 * to end_sync_read if we want to read, and to
3284 * end_sync_write if we will want to write.
3285 */
3286
3287 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3288 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3289 /* recovery... the complicated one */
3290 int j;
3291 r10_bio = NULL;
3292
3293 for (i = 0 ; i < conf->geo.raid_disks; i++) {
3294 bool still_degraded;
3295 struct r10bio *rb2;
3296 sector_t sect;
3297 bool must_sync;
3298 int any_working;
3299 struct raid10_info *mirror = &conf->mirrors[i];
3300 struct md_rdev *mrdev, *mreplace;
3301
3302 mrdev = mirror->rdev;
3303 mreplace = mirror->replacement;
3304
3305 if (mrdev && (test_bit(Faulty, &mrdev->flags) ||
3306 test_bit(In_sync, &mrdev->flags)))
3307 mrdev = NULL;
3308 if (mreplace && test_bit(Faulty, &mreplace->flags))
3309 mreplace = NULL;
3310
3311 if (!mrdev && !mreplace)
3312 continue;
3313
3314 still_degraded = false;
3315 /* want to reconstruct this device */
3316 rb2 = r10_bio;
3317 sect = raid10_find_virt(conf, sector_nr, i);
3318 if (sect >= mddev->resync_max_sectors)
3319 /* last stripe is not complete - don't
3320 * try to recover this sector.
3321 */
3322 continue;
3323 /* Unless we are doing a full sync, or a replacement
3324 * we only need to recover the block if it is set in
3325 * the bitmap
3326 */
3327 must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
3328 &sync_blocks,
3329 true);
3330 if (sync_blocks < max_sync)
3331 max_sync = sync_blocks;
3332 if (!must_sync &&
3333 mreplace == NULL &&
3334 !conf->fullsync) {
3335 /* yep, skip the sync_blocks here, but don't assume
3336 * that there will never be anything to do here
3337 */
3338 chunks_skipped = -1;
3339 continue;
3340 }
3341 if (mrdev)
3342 atomic_inc(&mrdev->nr_pending);
3343 if (mreplace)
3344 atomic_inc(&mreplace->nr_pending);
3345
3346 r10_bio = raid10_alloc_init_r10buf(conf);
3347 r10_bio->state = 0;
3348 raise_barrier(conf, rb2 != NULL);
3349 atomic_set(&r10_bio->remaining, 0);
3350
3351 r10_bio->master_bio = (struct bio*)rb2;
3352 if (rb2)
3353 atomic_inc(&rb2->remaining);
3354 r10_bio->mddev = mddev;
3355 set_bit(R10BIO_IsRecover, &r10_bio->state);
3356 r10_bio->sector = sect;
3357
3358 raid10_find_phys(conf, r10_bio);
3359
3360 /* Need to check if the array will still be
3361 * degraded
3362 */
3363 for (j = 0; j < conf->geo.raid_disks; j++) {
3364 struct md_rdev *rdev = conf->mirrors[j].rdev;
3365
3366 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3367 still_degraded = true;
3368 break;
3369 }
3370 }
3371
3372 must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
3373 &sync_blocks, still_degraded);
3374
3375 any_working = 0;
3376 for (j=0; j<conf->copies;j++) {
3377 int k;
3378 int d = r10_bio->devs[j].devnum;
3379 sector_t from_addr, to_addr;
3380 struct md_rdev *rdev = conf->mirrors[d].rdev;
3381 sector_t sector, first_bad;
3382 int bad_sectors;
3383 if (!rdev ||
3384 !test_bit(In_sync, &rdev->flags))
3385 continue;
3386 /* This is where we read from */
3387 any_working = 1;
3388 sector = r10_bio->devs[j].addr;
3389
3390 if (is_badblock(rdev, sector, max_sync,
3391 &first_bad, &bad_sectors)) {
3392 if (first_bad > sector)
3393 max_sync = first_bad - sector;
3394 else {
3395 bad_sectors -= (sector
3396 - first_bad);
3397 if (max_sync > bad_sectors)
3398 max_sync = bad_sectors;
3399 continue;
3400 }
3401 }
3402 bio = r10_bio->devs[0].bio;
3403 bio->bi_next = biolist;
3404 biolist = bio;
3405 bio->bi_end_io = end_sync_read;
3406 bio->bi_opf = REQ_OP_READ;
3407 if (test_bit(FailFast, &rdev->flags))
3408 bio->bi_opf |= MD_FAILFAST;
3409 from_addr = r10_bio->devs[j].addr;
3410 bio->bi_iter.bi_sector = from_addr +
3411 rdev->data_offset;
3412 bio_set_dev(bio, rdev->bdev);
3413 atomic_inc(&rdev->nr_pending);
3414 /* and we write to 'i' (if not in_sync) */
3415
3416 for (k=0; k<conf->copies; k++)
3417 if (r10_bio->devs[k].devnum == i)
3418 break;
3419 BUG_ON(k == conf->copies);
3420 to_addr = r10_bio->devs[k].addr;
3421 r10_bio->devs[0].devnum = d;
3422 r10_bio->devs[0].addr = from_addr;
3423 r10_bio->devs[1].devnum = i;
3424 r10_bio->devs[1].addr = to_addr;
3425
3426 if (mrdev) {
3427 bio = r10_bio->devs[1].bio;
3428 bio->bi_next = biolist;
3429 biolist = bio;
3430 bio->bi_end_io = end_sync_write;
3431 bio->bi_opf = REQ_OP_WRITE;
3432 bio->bi_iter.bi_sector = to_addr
3433 + mrdev->data_offset;
3434 bio_set_dev(bio, mrdev->bdev);
3435 atomic_inc(&r10_bio->remaining);
3436 } else
3437 r10_bio->devs[1].bio->bi_end_io = NULL;
3438
3439 /* and maybe write to replacement */
3440 bio = r10_bio->devs[1].repl_bio;
3441 if (bio)
3442 bio->bi_end_io = NULL;
3443 /* Note: if replace is not NULL, then bio
3444 * cannot be NULL as r10buf_pool_alloc will
3445 * have allocated it.
3446 */
3447 if (!mreplace)
3448 break;
3449 bio->bi_next = biolist;
3450 biolist = bio;
3451 bio->bi_end_io = end_sync_write;
3452 bio->bi_opf = REQ_OP_WRITE;
3453 bio->bi_iter.bi_sector = to_addr +
3454 mreplace->data_offset;
3455 bio_set_dev(bio, mreplace->bdev);
3456 atomic_inc(&r10_bio->remaining);
3457 break;
3458 }
3459 if (j == conf->copies) {
3460 /* Cannot recover, so abort the recovery or
3461 * record a bad block */
3462 if (any_working) {
3463 /* problem is that there are bad blocks
3464 * on other device(s)
3465 */
3466 int k;
3467 for (k = 0; k < conf->copies; k++)
3468 if (r10_bio->devs[k].devnum == i)
3469 break;
3470 if (mrdev && !test_bit(In_sync,
3471 &mrdev->flags)
3472 && !rdev_set_badblocks(
3473 mrdev,
3474 r10_bio->devs[k].addr,
3475 max_sync, 0))
3476 any_working = 0;
3477 if (mreplace &&
3478 !rdev_set_badblocks(
3479 mreplace,
3480 r10_bio->devs[k].addr,
3481 max_sync, 0))
3482 any_working = 0;
3483 }
3484 if (!any_working) {
3485 if (!test_and_set_bit(MD_RECOVERY_INTR,
3486 &mddev->recovery))
3487 pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3488 mdname(mddev));
3489 mirror->recovery_disabled
3490 = mddev->recovery_disabled;
3491 } else {
3492 error_disk = i;
3493 }
3494 put_buf(r10_bio);
3495 if (rb2)
3496 atomic_dec(&rb2->remaining);
3497 r10_bio = rb2;
3498 if (mrdev)
3499 rdev_dec_pending(mrdev, mddev);
3500 if (mreplace)
3501 rdev_dec_pending(mreplace, mddev);
3502 break;
3503 }
3504 if (mrdev)
3505 rdev_dec_pending(mrdev, mddev);
3506 if (mreplace)
3507 rdev_dec_pending(mreplace, mddev);
3508 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3509 /* Only want this if there is elsewhere to
3510 * read from. 'j' is currently the first
3511 * readable copy.
3512 */
3513 int targets = 1;
3514 for (; j < conf->copies; j++) {
3515 int d = r10_bio->devs[j].devnum;
3516 if (conf->mirrors[d].rdev &&
3517 test_bit(In_sync,
3518 &conf->mirrors[d].rdev->flags))
3519 targets++;
3520 }
3521 if (targets == 1)
3522 r10_bio->devs[0].bio->bi_opf
3523 &= ~MD_FAILFAST;
3524 }
3525 }
3526 if (biolist == NULL) {
3527 while (r10_bio) {
3528 struct r10bio *rb2 = r10_bio;
3529 r10_bio = (struct r10bio*) rb2->master_bio;
3530 rb2->master_bio = NULL;
3531 put_buf(rb2);
3532 }
3533 goto giveup;
3534 }
3535 } else {
3536 /* resync. Schedule a read for every block at this virt offset */
3537 int count = 0;
3538
3539 /*
3540 * curr_resync_completed may not be updated in time, and
3541 * we set cluster_sync_low based on it. To be safe, check
3542 * against "sector_nr + 2 * RESYNC_SECTORS", which ensures
3543 * curr_resync_completed is updated in
3544 * bitmap_cond_end_sync.
3545 */
3546 mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
3547 mddev_is_clustered(mddev) &&
3548 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3549
3550 if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
3551 &sync_blocks,
3552 mddev->degraded) &&
3553 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3554 &mddev->recovery)) {
3555 /* We can skip this block */
3556 *skipped = 1;
3557 return sync_blocks + sectors_skipped;
3558 }
3559 if (sync_blocks < max_sync)
3560 max_sync = sync_blocks;
3561 r10_bio = raid10_alloc_init_r10buf(conf);
3562 r10_bio->state = 0;
3563
3564 r10_bio->mddev = mddev;
3565 atomic_set(&r10_bio->remaining, 0);
3566 raise_barrier(conf, 0);
3567 conf->next_resync = sector_nr;
3568
3569 r10_bio->master_bio = NULL;
3570 r10_bio->sector = sector_nr;
3571 set_bit(R10BIO_IsSync, &r10_bio->state);
3572 raid10_find_phys(conf, r10_bio);
3573 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3574
3575 for (i = 0; i < conf->copies; i++) {
3576 int d = r10_bio->devs[i].devnum;
3577 sector_t first_bad, sector;
3578 int bad_sectors;
3579 struct md_rdev *rdev;
3580
3581 if (r10_bio->devs[i].repl_bio)
3582 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3583
3584 bio = r10_bio->devs[i].bio;
3585 bio->bi_status = BLK_STS_IOERR;
3586 rdev = conf->mirrors[d].rdev;
3587 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3588 continue;
3589
3590 sector = r10_bio->devs[i].addr;
3591 if (is_badblock(rdev, sector, max_sync,
3592 &first_bad, &bad_sectors)) {
3593 if (first_bad > sector)
3594 max_sync = first_bad - sector;
3595 else {
3596 bad_sectors -= (sector - first_bad);
3597 if (max_sync > bad_sectors)
3598 max_sync = bad_sectors;
3599 continue;
3600 }
3601 }
3602 atomic_inc(&rdev->nr_pending);
3603 atomic_inc(&r10_bio->remaining);
3604 bio->bi_next = biolist;
3605 biolist = bio;
3606 bio->bi_end_io = end_sync_read;
3607 bio->bi_opf = REQ_OP_READ;
3608 if (test_bit(FailFast, &rdev->flags))
3609 bio->bi_opf |= MD_FAILFAST;
3610 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3611 bio_set_dev(bio, rdev->bdev);
3612 count++;
3613
3614 rdev = conf->mirrors[d].replacement;
3615 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3616 continue;
3617
3618 atomic_inc(&rdev->nr_pending);
3619
3620 /* Need to set up for writing to the replacement */
3621 bio = r10_bio->devs[i].repl_bio;
3622 bio->bi_status = BLK_STS_IOERR;
3623
3624 sector = r10_bio->devs[i].addr;
3625 bio->bi_next = biolist;
3626 biolist = bio;
3627 bio->bi_end_io = end_sync_write;
3628 bio->bi_opf = REQ_OP_WRITE;
3629 if (test_bit(FailFast, &rdev->flags))
3630 bio->bi_opf |= MD_FAILFAST;
3631 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3632 bio_set_dev(bio, rdev->bdev);
3633 count++;
3634 }
3635
3636 if (count < 2) {
3637 for (i = 0; i < conf->copies; i++) {
3638 int d = r10_bio->devs[i].devnum;
3639 if (r10_bio->devs[i].bio->bi_end_io)
3640 rdev_dec_pending(conf->mirrors[d].rdev,
3641 mddev);
3642 if (r10_bio->devs[i].repl_bio &&
3643 r10_bio->devs[i].repl_bio->bi_end_io)
3644 rdev_dec_pending(
3645 conf->mirrors[d].replacement,
3646 mddev);
3647 }
3648 put_buf(r10_bio);
3649 biolist = NULL;
3650 goto giveup;
3651 }
3652 }
3653
3654 nr_sectors = 0;
3655 if (sector_nr + max_sync < max_sector)
3656 max_sector = sector_nr + max_sync;
3657 do {
3658 struct page *page;
3659 int len = PAGE_SIZE;
3660 if (sector_nr + (len>>9) > max_sector)
3661 len = (max_sector - sector_nr) << 9;
3662 if (len == 0)
3663 break;
3664 for (bio = biolist; bio; bio = bio->bi_next) {
3665 struct resync_pages *rp = get_resync_pages(bio);
3666 page = resync_fetch_page(rp, page_idx);
3667 if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
3668 bio->bi_status = BLK_STS_RESOURCE;
3669 bio_endio(bio);
3670 goto giveup;
3671 }
3672 }
3673 nr_sectors += len>>9;
3674 sector_nr += len>>9;
3675 } while (++page_idx < RESYNC_PAGES);
3676 r10_bio->sectors = nr_sectors;
3677
3678 if (mddev_is_clustered(mddev) &&
3679 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3680 /* It is resync not recovery */
3681 if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3682 conf->cluster_sync_low = mddev->curr_resync_completed;
3683 raid10_set_cluster_sync_high(conf);
3684 /* Send resync message */
3685 md_cluster_ops->resync_info_update(mddev,
3686 conf->cluster_sync_low,
3687 conf->cluster_sync_high);
3688 }
3689 } else if (mddev_is_clustered(mddev)) {
3690 /* This is recovery not resync */
3691 sector_t sect_va1, sect_va2;
3692 bool broadcast_msg = false;
3693
3694 for (i = 0; i < conf->geo.raid_disks; i++) {
3695 /*
3696 * sector_nr is a device address for recovery, so we
3697 * need to translate it to an array address before
3698 * comparing with cluster_sync_high.
3699 */
3700 sect_va1 = raid10_find_virt(conf, sector_nr, i);
3701
3702 if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3703 broadcast_msg = true;
3704 /*
3705 * curr_resync_completed is a device address,
3706 * just like sector_nr, so translate it too.
3707 */
3708 sect_va2 = raid10_find_virt(conf,
3709 mddev->curr_resync_completed, i);
3710
3711 if (conf->cluster_sync_low == 0 ||
3712 conf->cluster_sync_low > sect_va2)
3713 conf->cluster_sync_low = sect_va2;
3714 }
3715 }
3716 if (broadcast_msg) {
3717 raid10_set_cluster_sync_high(conf);
3718 md_cluster_ops->resync_info_update(mddev,
3719 conf->cluster_sync_low,
3720 conf->cluster_sync_high);
3721 }
3722 }
3723
3724 while (biolist) {
3725 bio = biolist;
3726 biolist = biolist->bi_next;
3727
3728 bio->bi_next = NULL;
3729 r10_bio = get_resync_r10bio(bio);
3730 r10_bio->sectors = nr_sectors;
3731
3732 if (bio->bi_end_io == end_sync_read) {
3733 md_sync_acct_bio(bio, nr_sectors);
3734 bio->bi_status = 0;
3735 submit_bio_noacct(bio);
3736 }
3737 }
3738
3739 if (sectors_skipped)
3740 /* pretend they weren't skipped; it makes
3741 * no important difference in this case
3742 */
3743 md_done_sync(mddev, sectors_skipped, 1);
3744
3745 return sectors_skipped + nr_sectors;
3746 giveup:
3747 /* There is nowhere to write, so all non-sync
3748 * drives must be failed or in resync; all drives
3749 * have a bad block, so try the next chunk...
3750 */
3751 if (sector_nr + max_sync < max_sector)
3752 max_sector = sector_nr + max_sync;
3753
3754 sectors_skipped += (max_sector - sector_nr);
3755 chunks_skipped++;
3756 sector_nr = max_sector;
3757 goto skipped;
3758 }
3759
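/*
 * Worked example for raid10_size() (illustrative geometry): 4 disks,
 * near_copies = 2, far_copies = 1, 512 KiB chunks (chunk_shift = 10)
 * and 1048576 sectors (512 MiB) per device:
 *   1048576 >> 10 = 1024 chunks per device; / 1 far copy = 1024;
 *   * 4 disks = 4096; / 2 near copies = 2048 array chunks;
 *   2048 << 10 = 2097152 sectors, i.e. a 1 GiB array.
 */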
3760 static sector_t
3761 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3762 {
3763 sector_t size;
3764 struct r10conf *conf = mddev->private;
3765
3766 if (!raid_disks)
3767 raid_disks = min(conf->geo.raid_disks,
3768 conf->prev.raid_disks);
3769 if (!sectors)
3770 sectors = conf->dev_sectors;
3771
3772 size = sectors >> conf->geo.chunk_shift;
3773 sector_div(size, conf->geo.far_copies);
3774 size = size * raid_disks;
3775 sector_div(size, conf->geo.near_copies);
3776
3777 return size << conf->geo.chunk_shift;
3778 }
3779
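/*
 * Continuing the example above (4 disks, near_copies = 2, far_copies = 1,
 * chunk_shift = 10, 1048576 sectors per device): the first half again
 * yields 2048 array chunks; * copies (2) = 4096 used chunks in total;
 * DIV_ROUND_UP(4096, 4 disks) = 1024 chunks per device, so
 * dev_sectors = 1024 << 10 = 1048576 and, with far_offset clear,
 * stride = (1024 / 1 far copy) << 10 = 1048576 sectors.
 */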
3780 static void calc_sectors(struct r10conf *conf, sector_t size)
3781 {
3782 /* Calculate the number of sectors-per-device that will
3783 * actually be used, and set conf->dev_sectors and
3784 * conf->geo.stride.
3785 */
3786
3787 size = size >> conf->geo.chunk_shift;
3788 sector_div(size, conf->geo.far_copies);
3789 size = size * conf->geo.raid_disks;
3790 sector_div(size, conf->geo.near_copies);
3791 /* 'size' is now the number of chunks in the array */
3792 /* calculate "used chunks per device" */
3793 size = size * conf->copies;
3794
3795 /* We need to round up when dividing by raid_disks to
3796 * get the stride size.
3797 */
3798 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3799
3800 conf->dev_sectors = size << conf->geo.chunk_shift;
3801
3802 if (conf->geo.far_offset)
3803 conf->geo.stride = 1 << conf->geo.chunk_shift;
3804 else {
3805 sector_div(size, conf->geo.far_copies);
3806 conf->geo.stride = size << conf->geo.chunk_shift;
3807 }
3808 }
3809
3810 enum geo_type {geo_new, geo_old, geo_start};
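/*
 * Example of the layout decoding below, for the common "n2" layout
 * (layout = 0x102):
 *   nc = 0x102 & 255        = 2  (near_copies)
 *   fc = (0x102 >> 8) & 255 = 1  (far_copies)
 *   fo = 0x102 & (1 << 16)  = 0  (far_offset clear)
 *   0x102 >> 17             = 0  (original far-set handling)
 * so setup_geo() returns nc * fc = 2 copies of each chunk.
 */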
3811 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3812 {
3813 int nc, fc, fo;
3814 int layout, chunk, disks;
3815 switch (new) {
3816 case geo_old:
3817 layout = mddev->layout;
3818 chunk = mddev->chunk_sectors;
3819 disks = mddev->raid_disks - mddev->delta_disks;
3820 break;
3821 case geo_new:
3822 layout = mddev->new_layout;
3823 chunk = mddev->new_chunk_sectors;
3824 disks = mddev->raid_disks;
3825 break;
3826 default: /* avoid 'may be unused' warnings */
3827 case geo_start: /* new when starting reshape - raid_disks not
3828 * updated yet. */
3829 layout = mddev->new_layout;
3830 chunk = mddev->new_chunk_sectors;
3831 disks = mddev->raid_disks + mddev->delta_disks;
3832 break;
3833 }
3834 if (layout >> 19)
3835 return -1;
3836 if (chunk < (PAGE_SIZE >> 9) ||
3837 !is_power_of_2(chunk))
3838 return -2;
3839 nc = layout & 255;
3840 fc = (layout >> 8) & 255;
3841 fo = layout & (1<<16);
3842 geo->raid_disks = disks;
3843 geo->near_copies = nc;
3844 geo->far_copies = fc;
3845 geo->far_offset = fo;
3846 switch (layout >> 17) {
3847 case 0: /* original layout. simple but not always optimal */
3848 geo->far_set_size = disks;
3849 break;
3850 case 1: /* "improved" layout which was buggy. Hopefully no-one is
3851 * actually using this, but leave code here just in case. */
3852 geo->far_set_size = disks/fc;
3853 WARN(geo->far_set_size < fc,
3854 "This RAID10 layout does not provide data safety - please backup and create new array\n");
3855 break;
3856 case 2: /* "improved" layout fixed to match documentation */
3857 geo->far_set_size = fc * nc;
3858 break;
3859 default: /* Not a valid layout */
3860 return -1;
3861 }
3862 geo->chunk_mask = chunk - 1;
3863 geo->chunk_shift = ffz(~chunk);
3864 return nc*fc;
3865 }
3866
3867 static void raid10_free_conf(struct r10conf *conf)
3868 {
3869 if (!conf)
3870 return;
3871
3872 mempool_exit(&conf->r10bio_pool);
3873 kfree(conf->mirrors);
3874 kfree(conf->mirrors_old);
3875 kfree(conf->mirrors_new);
3876 safe_put_page(conf->tmppage);
3877 bioset_exit(&conf->bio_split);
3878 kfree(conf);
3879 }
3880
3881 static struct r10conf *setup_conf(struct mddev *mddev)
3882 {
3883 struct r10conf *conf = NULL;
3884 int err = -EINVAL;
3885 struct geom geo;
3886 int copies;
3887
3888 copies = setup_geo(&geo, mddev, geo_new);
3889
3890 if (copies == -2) {
3891 pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3892 mdname(mddev), PAGE_SIZE);
3893 goto out;
3894 }
3895
3896 if (copies < 2 || copies > mddev->raid_disks) {
3897 pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3898 mdname(mddev), mddev->new_layout);
3899 goto out;
3900 }
3901
3902 err = -ENOMEM;
3903 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3904 if (!conf)
3905 goto out;
3906
3907 /* FIXME calc properly */
3908 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
3909 sizeof(struct raid10_info),
3910 GFP_KERNEL);
3911 if (!conf->mirrors)
3912 goto out;
3913
3914 conf->tmppage = alloc_page(GFP_KERNEL);
3915 if (!conf->tmppage)
3916 goto out;
3917
3918 conf->geo = geo;
3919 conf->copies = copies;
3920 err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
3921 rbio_pool_free, conf);
3922 if (err)
3923 goto out;
3924
3925 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3926 if (err)
3927 goto out;
3928
3929 calc_sectors(conf, mddev->dev_sectors);
3930 if (mddev->reshape_position == MaxSector) {
3931 conf->prev = conf->geo;
3932 conf->reshape_progress = MaxSector;
3933 } else {
3934 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3935 err = -EINVAL;
3936 goto out;
3937 }
3938 conf->reshape_progress = mddev->reshape_position;
3939 if (conf->prev.far_offset)
3940 conf->prev.stride = 1 << conf->prev.chunk_shift;
3941 else
3942 /* far_copies must be 1 */
3943 conf->prev.stride = conf->dev_sectors;
3944 }
3945 conf->reshape_safe = conf->reshape_progress;
3946 spin_lock_init(&conf->device_lock);
3947 INIT_LIST_HEAD(&conf->retry_list);
3948 INIT_LIST_HEAD(&conf->bio_end_io_list);
3949
3950 seqlock_init(&conf->resync_lock);
3951 init_waitqueue_head(&conf->wait_barrier);
3952 atomic_set(&conf->nr_pending, 0);
3953
3954 err = -ENOMEM;
3955 rcu_assign_pointer(conf->thread,
3956 md_register_thread(raid10d, mddev, "raid10"));
3957 if (!conf->thread)
3958 goto out;
3959
3960 conf->mddev = mddev;
3961 return conf;
3962
3963 out:
3964 raid10_free_conf(conf);
3965 return ERR_PTR(err);
3966 }
3967
3968 static unsigned int raid10_nr_stripes(struct r10conf *conf)
3969 {
3970 unsigned int raid_disks = conf->geo.raid_disks;
3971
3972 if (conf->geo.raid_disks % conf->geo.near_copies)
3973 return raid_disks;
3974 return raid_disks / conf->geo.near_copies;
3975 }
3976
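/*
 * io_min is the chunk size in bytes and io_opt one full stripe.
 * For example, 4 disks in a near-2 layout with 512 KiB chunks give
 * raid10_nr_stripes() = 4 / 2 = 2, so io_opt = 1 MiB.
 */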
3977 static int raid10_set_queue_limits(struct mddev *mddev)
3978 {
3979 struct r10conf *conf = mddev->private;
3980 struct queue_limits lim;
3981 int err;
3982
3983 md_init_stacking_limits(&lim);
3984 lim.max_write_zeroes_sectors = 0;
3985 lim.io_min = mddev->chunk_sectors << 9;
3986 lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
3987 err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
3988 if (err) {
3989 queue_limits_cancel_update(mddev->gendisk->queue);
3990 return err;
3991 }
3992 return queue_limits_set(mddev->gendisk->queue, &lim);
3993 }
3994
3995 static int raid10_run(struct mddev *mddev)
3996 {
3997 struct r10conf *conf;
3998 int i, disk_idx;
3999 struct raid10_info *disk;
4000 struct md_rdev *rdev;
4001 sector_t size;
4002 sector_t min_offset_diff = 0;
4003 int first = 1;
4004 int ret = -EIO;
4005
4006 if (mddev->private == NULL) {
4007 conf = setup_conf(mddev);
4008 if (IS_ERR(conf))
4009 return PTR_ERR(conf);
4010 mddev->private = conf;
4011 }
4012 conf = mddev->private;
4013 if (!conf)
4014 goto out;
4015
4016 rcu_assign_pointer(mddev->thread, conf->thread);
4017 rcu_assign_pointer(conf->thread, NULL);
4018
4019 if (mddev_is_clustered(conf->mddev)) {
4020 int fc, fo;
4021
4022 fc = (mddev->layout >> 8) & 255;
4023 fo = mddev->layout & (1<<16);
4024 if (fc > 1 || fo > 0) {
4025 pr_err("only near layout is supported by clustered"
4026 " raid10\n");
4027 goto out_free_conf;
4028 }
4029 }
4030
4031 rdev_for_each(rdev, mddev) {
4032 long long diff;
4033
4034 disk_idx = rdev->raid_disk;
4035 if (disk_idx < 0)
4036 continue;
4037 if (disk_idx >= conf->geo.raid_disks &&
4038 disk_idx >= conf->prev.raid_disks)
4039 continue;
4040 disk = conf->mirrors + disk_idx;
4041
4042 if (test_bit(Replacement, &rdev->flags)) {
4043 if (disk->replacement)
4044 goto out_free_conf;
4045 disk->replacement = rdev;
4046 } else {
4047 if (disk->rdev)
4048 goto out_free_conf;
4049 disk->rdev = rdev;
4050 }
4051 diff = (rdev->new_data_offset - rdev->data_offset);
4052 if (!mddev->reshape_backwards)
4053 diff = -diff;
4054 if (diff < 0)
4055 diff = 0;
4056 if (first || diff < min_offset_diff)
4057 min_offset_diff = diff;
4058
4059 disk->head_position = 0;
4060 first = 0;
4061 }
4062
4063 if (!mddev_is_dm(conf->mddev)) {
4064 int err = raid10_set_queue_limits(mddev);
4065
4066 if (err) {
4067 ret = err;
4068 goto out_free_conf;
4069 }
4070 }
4071
4072 /* need to check that every block has at least one working mirror */
4073 if (!enough(conf, -1)) {
4074 pr_err("md/raid10:%s: not enough operational mirrors.\n",
4075 mdname(mddev));
4076 goto out_free_conf;
4077 }
4078
4079 if (conf->reshape_progress != MaxSector) {
4080 /* must ensure that shape change is supported */
4081 if (conf->geo.far_copies != 1 &&
4082 conf->geo.far_offset == 0)
4083 goto out_free_conf;
4084 if (conf->prev.far_copies != 1 &&
4085 conf->prev.far_offset == 0)
4086 goto out_free_conf;
4087 }
4088
4089 mddev->degraded = 0;
4090 for (i = 0;
4091 i < conf->geo.raid_disks
4092 || i < conf->prev.raid_disks;
4093 i++) {
4094
4095 disk = conf->mirrors + i;
4096
4097 if (!disk->rdev && disk->replacement) {
4098 /* The replacement is all we have - use it */
4099 disk->rdev = disk->replacement;
4100 disk->replacement = NULL;
4101 clear_bit(Replacement, &disk->rdev->flags);
4102 }
4103
4104 if (!disk->rdev ||
4105 !test_bit(In_sync, &disk->rdev->flags)) {
4106 disk->head_position = 0;
4107 mddev->degraded++;
4108 if (disk->rdev &&
4109 disk->rdev->saved_raid_disk < 0)
4110 conf->fullsync = 1;
4111 }
4112
4113 if (disk->replacement &&
4114 !test_bit(In_sync, &disk->replacement->flags) &&
4115 disk->replacement->saved_raid_disk < 0) {
4116 conf->fullsync = 1;
4117 }
4118
4119 disk->recovery_disabled = mddev->recovery_disabled - 1;
4120 }
4121
4122 if (mddev->recovery_cp != MaxSector)
4123 pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
4124 mdname(mddev));
4125 pr_info("md/raid10:%s: active with %d out of %d devices\n",
4126 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
4127 conf->geo.raid_disks);
4128 /*
4129 * Ok, everything is just fine now
4130 */
4131 mddev->dev_sectors = conf->dev_sectors;
4132 size = raid10_size(mddev, 0, 0);
4133 md_set_array_sectors(mddev, size);
4134 mddev->resync_max_sectors = size;
4135 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
4136
4137 if (md_integrity_register(mddev))
4138 goto out_free_conf;
4139
4140 if (conf->reshape_progress != MaxSector) {
4141 unsigned long before_length, after_length;
4142
4143 before_length = ((1 << conf->prev.chunk_shift) *
4144 conf->prev.far_copies);
4145 after_length = ((1 << conf->geo.chunk_shift) *
4146 conf->geo.far_copies);
4147
4148 if (max(before_length, after_length) > min_offset_diff) {
4149 /* This cannot work */
4150 pr_warn("md/raid10: offset difference not enough to continue reshape\n");
4151 goto out_free_conf;
4152 }
4153 conf->offset_diff = min_offset_diff;
4154
4155 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4156 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4157 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4158 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4159 }
4160
4161 return 0;
4162
4163 out_free_conf:
4164 md_unregister_thread(mddev, &mddev->thread);
4165 raid10_free_conf(conf);
4166 mddev->private = NULL;
4167 out:
4168 return ret;
4169 }
4170
4171 static void raid10_free(struct mddev *mddev, void *priv)
4172 {
4173 raid10_free_conf(priv);
4174 }
4175
4176 static void raid10_quiesce(struct mddev *mddev, int quiesce)
4177 {
4178 struct r10conf *conf = mddev->private;
4179
4180 if (quiesce)
4181 raise_barrier(conf, 0);
4182 else
4183 lower_barrier(conf);
4184 }
4185
4186 static int raid10_resize(struct mddev *mddev, sector_t sectors)
4187 {
4188 /* Resize of 'far' arrays is not supported.
4189 * For 'near' and 'offset' arrays we can set the
4190 * number of sectors used to be an appropriate multiple
4191 * of the chunk size.
4192 * For 'offset', this is far_copies*chunksize.
4193 * For 'near' the multiplier is the LCM of
4194 * near_copies and raid_disks.
4195 * So if far_copies > 1 && !far_offset, fail.
4196 * Else find LCM(raid_disks, near_copies)*far_copies and
4197 * multiply by chunk_size. Then round to this number.
4198 * This is mostly done by raid10_size()
4199 */
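/*
 * For example, a near-2 array of 4 disks with 512 KiB chunks and
 * far_copies == 1 rounds to LCM(4, 2) * 1 * 512 KiB = 2 MiB of
 * array space.
 */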
4200 struct r10conf *conf = mddev->private;
4201 sector_t oldsize, size;
4202 int ret;
4203
4204 if (mddev->reshape_position != MaxSector)
4205 return -EBUSY;
4206
4207 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
4208 return -EINVAL;
4209
4210 oldsize = raid10_size(mddev, 0, 0);
4211 size = raid10_size(mddev, sectors, 0);
4212 if (mddev->external_size &&
4213 mddev->array_sectors > size)
4214 return -EINVAL;
4215
4216 ret = mddev->bitmap_ops->resize(mddev, size, 0, false);
4217 if (ret)
4218 return ret;
4219
4220 md_set_array_sectors(mddev, size);
4221 if (sectors > mddev->dev_sectors &&
4222 mddev->recovery_cp > oldsize) {
4223 mddev->recovery_cp = oldsize;
4224 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4225 }
4226 calc_sectors(conf, sectors);
4227 mddev->dev_sectors = conf->dev_sectors;
4228 mddev->resync_max_sectors = size;
4229 return 0;
4230 }
4231
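/*
 * Takeover turns an N-drive raid0 into a 2N-drive near-2 raid10:
 * e.g. a 2-drive raid0 becomes a 4-drive raid10 with layout 0x102
 * ((1 << 8) + 2, i.e. far_copies = 1, near_copies = 2), the original
 * drives occupying the even-numbered slots.
 */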
4232 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4233 {
4234 struct md_rdev *rdev;
4235 struct r10conf *conf;
4236
4237 if (mddev->degraded > 0) {
4238 pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4239 mdname(mddev));
4240 return ERR_PTR(-EINVAL);
4241 }
4242 sector_div(size, devs);
4243
4244 /* Set new parameters */
4245 mddev->new_level = 10;
4246 /* new layout: far_copies = 1, near_copies = 2 */
4247 mddev->new_layout = (1<<8) + 2;
4248 mddev->new_chunk_sectors = mddev->chunk_sectors;
4249 mddev->delta_disks = mddev->raid_disks;
4250 mddev->raid_disks *= 2;
4251 /* make sure it will be not marked as dirty */
4252 mddev->recovery_cp = MaxSector;
4253 mddev->dev_sectors = size;
4254
4255 conf = setup_conf(mddev);
4256 if (!IS_ERR(conf)) {
4257 rdev_for_each(rdev, mddev)
4258 if (rdev->raid_disk >= 0) {
4259 rdev->new_raid_disk = rdev->raid_disk * 2;
4260 rdev->sectors = size;
4261 }
4262 }
4263
4264 return conf;
4265 }
4266
4267 static void *raid10_takeover(struct mddev *mddev)
4268 {
4269 struct r0conf *raid0_conf;
4270
4271 /* raid10 can take over:
4272 * raid0 - providing it has only a single zone
4273 */
4274 if (mddev->level == 0) {
4275 /* for raid0 takeover only one zone is supported */
4276 raid0_conf = mddev->private;
4277 if (raid0_conf->nr_strip_zones > 1) {
4278 pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4279 mdname(mddev));
4280 return ERR_PTR(-EINVAL);
4281 }
4282 return raid10_takeover_raid0(mddev,
4283 raid0_conf->strip_zone->zone_end,
4284 raid0_conf->strip_zone->nb_dev);
4285 }
4286 return ERR_PTR(-EINVAL);
4287 }
4288
4289 static int raid10_check_reshape(struct mddev *mddev)
4290 {
4291 /* Called when there is a request to change
4292 * - layout (to ->new_layout)
4293 * - chunk size (to ->new_chunk_sectors)
4294 * - raid_disks (by delta_disks)
4295 * or when trying to restart a reshape that was ongoing.
4296 *
4297 * We need to validate the request and possibly allocate
4298 * space if that might be an issue later.
4299 *
4300 * Currently we reject any reshape of a 'far' mode array,
4301 * allow chunk size to change if new is generally acceptable,
4302 * allow raid_disks to increase, and allow
4303 * a switch between 'near' mode and 'offset' mode.
4304 */
4305 struct r10conf *conf = mddev->private;
4306 struct geom geo;
4307
4308 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4309 return -EINVAL;
4310
4311 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4312 /* mustn't change number of copies */
4313 return -EINVAL;
4314 if (geo.far_copies > 1 && !geo.far_offset)
4315 /* Cannot switch to 'far' mode */
4316 return -EINVAL;
4317
4318 if (mddev->array_sectors & geo.chunk_mask)
4319 /* not factor of array size */
4320 return -EINVAL;
4321
4322 if (!enough(conf, -1))
4323 return -EINVAL;
4324
4325 kfree(conf->mirrors_new);
4326 conf->mirrors_new = NULL;
4327 if (mddev->delta_disks > 0) {
4328 /* allocate new 'mirrors' list */
4329 conf->mirrors_new =
4330 kcalloc(mddev->raid_disks + mddev->delta_disks,
4331 sizeof(struct raid10_info),
4332 GFP_KERNEL);
4333 if (!conf->mirrors_new)
4334 return -ENOMEM;
4335 }
4336 return 0;
4337 }
4338
4339 /*
4340 * Need to check if array has failed when deciding whether to:
4341 * - start an array
4342 * - remove non-faulty devices
4343 * - add a spare
4344 * - allow a reshape
4345 * This determination is simple when no reshape is happening.
4346 * However if there is a reshape, we need to carefully check
4347 * both the before and after sections.
4348 * This is because some failed devices may only affect one
4349 * of the two sections, and some non-in_sync devices may
4350 * be insync in the section most affected by failed devices.
4351 */
4352 static int calc_degraded(struct r10conf *conf)
4353 {
4354 int degraded, degraded2;
4355 int i;
4356
4357 degraded = 0;
4358 /* 'prev' section first */
4359 for (i = 0; i < conf->prev.raid_disks; i++) {
4360 struct md_rdev *rdev = conf->mirrors[i].rdev;
4361
4362 if (!rdev || test_bit(Faulty, &rdev->flags))
4363 degraded++;
4364 else if (!test_bit(In_sync, &rdev->flags))
4365 /* When we can reduce the number of devices in
4366 * an array, this might not contribute to
4367 * 'degraded'. It does now.
4368 */
4369 degraded++;
4370 }
4371 if (conf->geo.raid_disks == conf->prev.raid_disks)
4372 return degraded;
4373 degraded2 = 0;
4374 for (i = 0; i < conf->geo.raid_disks; i++) {
4375 struct md_rdev *rdev = conf->mirrors[i].rdev;
4376
4377 if (!rdev || test_bit(Faulty, &rdev->flags))
4378 degraded2++;
4379 else if (!test_bit(In_sync, &rdev->flags)) {
4380 /* If reshape is increasing the number of devices,
4381 * this section has already been recovered, so
4382 * it doesn't contribute to degraded.
4383 * else it does.
4384 */
4385 if (conf->geo.raid_disks <= conf->prev.raid_disks)
4386 degraded2++;
4387 }
4388 }
4389 if (degraded2 > degraded)
4390 return degraded2;
4391 return degraded;
4392 }
4393
4394 static int raid10_start_reshape(struct mddev *mddev)
4395 {
4396 /* A 'reshape' has been requested. This commits
4397 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
4398 * This also checks if there are enough spares and adds them
4399 * to the array.
4400 * We currently require enough spares to make the final
4401 * array non-degraded. We also require that the difference
4402 * between old and new data_offset - on each device - is
4403 * enough that we never risk over-writing.
4404 */
4405
4406 unsigned long before_length, after_length;
4407 sector_t min_offset_diff = 0;
4408 int first = 1;
4409 struct geom new;
4410 struct r10conf *conf = mddev->private;
4411 struct md_rdev *rdev;
4412 int spares = 0;
4413 int ret;
4414
4415 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4416 return -EBUSY;
4417
4418 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4419 return -EINVAL;
4420
4421 before_length = ((1 << conf->prev.chunk_shift) *
4422 conf->prev.far_copies);
4423 after_length = ((1 << conf->geo.chunk_shift) *
4424 conf->geo.far_copies);
4425
4426 rdev_for_each(rdev, mddev) {
4427 if (!test_bit(In_sync, &rdev->flags)
4428 && !test_bit(Faulty, &rdev->flags))
4429 spares++;
4430 if (rdev->raid_disk >= 0) {
4431 long long diff = (rdev->new_data_offset
4432 - rdev->data_offset);
4433 if (!mddev->reshape_backwards)
4434 diff = -diff;
4435 if (diff < 0)
4436 diff = 0;
4437 if (first || diff < min_offset_diff)
4438 min_offset_diff = diff;
4439 first = 0;
4440 }
4441 }
4442
4443 if (max(before_length, after_length) > min_offset_diff)
4444 return -EINVAL;
4445
4446 if (spares < mddev->delta_disks)
4447 return -EINVAL;
4448
4449 conf->offset_diff = min_offset_diff;
4450 spin_lock_irq(&conf->device_lock);
4451 if (conf->mirrors_new) {
4452 memcpy(conf->mirrors_new, conf->mirrors,
4453 sizeof(struct raid10_info)*conf->prev.raid_disks);
4454 smp_mb();
4455 kfree(conf->mirrors_old);
4456 conf->mirrors_old = conf->mirrors;
4457 conf->mirrors = conf->mirrors_new;
4458 conf->mirrors_new = NULL;
4459 }
4460 setup_geo(&conf->geo, mddev, geo_start);
4461 smp_mb();
4462 if (mddev->reshape_backwards) {
4463 sector_t size = raid10_size(mddev, 0, 0);
4464 if (size < mddev->array_sectors) {
4465 spin_unlock_irq(&conf->device_lock);
4466 pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
4467 mdname(mddev));
4468 return -EINVAL;
4469 }
4470 mddev->resync_max_sectors = size;
4471 conf->reshape_progress = size;
4472 } else
4473 conf->reshape_progress = 0;
4474 conf->reshape_safe = conf->reshape_progress;
4475 spin_unlock_irq(&conf->device_lock);
4476
4477 if (mddev->delta_disks && mddev->bitmap) {
4478 struct mdp_superblock_1 *sb = NULL;
4479 sector_t oldsize, newsize;
4480
4481 oldsize = raid10_size(mddev, 0, 0);
4482 newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4483
4484 if (!mddev_is_clustered(mddev)) {
4485 ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
4486 if (ret)
4487 goto abort;
4488 else
4489 goto out;
4490 }
4491
4492 rdev_for_each(rdev, mddev) {
4493 if (rdev->raid_disk > -1 &&
4494 !test_bit(Faulty, &rdev->flags))
4495 sb = page_address(rdev->sb_page);
4496 }
4497
4498 /*
4499 * Some node is already performing a reshape; there is no need
4500 * to call bitmap_ops->resize again, since it will be called on
4501 * receipt of a BITMAP_RESIZE message.
4502 */
4503 if ((sb && (le32_to_cpu(sb->feature_map) &
4504 MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4505 goto out;
4506
4507 ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
4508 if (ret)
4509 goto abort;
4510
4511 ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4512 if (ret) {
4513 mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
4514 goto abort;
4515 }
4516 }
4517 out:
4518 if (mddev->delta_disks > 0) {
4519 rdev_for_each(rdev, mddev)
4520 if (rdev->raid_disk < 0 &&
4521 !test_bit(Faulty, &rdev->flags)) {
4522 if (raid10_add_disk(mddev, rdev) == 0) {
4523 if (rdev->raid_disk >=
4524 conf->prev.raid_disks)
4525 set_bit(In_sync, &rdev->flags);
4526 else
4527 rdev->recovery_offset = 0;
4528
4529 /* Failure here is OK */
4530 sysfs_link_rdev(mddev, rdev);
4531 }
4532 } else if (rdev->raid_disk >= conf->prev.raid_disks
4533 && !test_bit(Faulty, &rdev->flags)) {
4534 /* This is a spare that was manually added */
4535 set_bit(In_sync, &rdev->flags);
4536 }
4537 }
4538 /* When a reshape changes the number of devices,
4539 * ->degraded is measured against the larger of the
4540 * pre and post numbers.
4541 */
4542 spin_lock_irq(&conf->device_lock);
4543 mddev->degraded = calc_degraded(conf);
4544 spin_unlock_irq(&conf->device_lock);
4545 mddev->raid_disks = conf->geo.raid_disks;
4546 mddev->reshape_position = conf->reshape_progress;
4547 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4548
4549 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4550 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4551 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4552 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4553 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4554 conf->reshape_checkpoint = jiffies;
4555 md_new_event();
4556 return 0;
4557
4558 abort:
4559 mddev->recovery = 0;
4560 spin_lock_irq(&conf->device_lock);
4561 conf->geo = conf->prev;
4562 mddev->raid_disks = conf->geo.raid_disks;
4563 rdev_for_each(rdev, mddev)
4564 rdev->new_data_offset = rdev->data_offset;
4565 smp_wmb();
4566 conf->reshape_progress = MaxSector;
4567 conf->reshape_safe = MaxSector;
4568 mddev->reshape_position = MaxSector;
4569 spin_unlock_irq(&conf->device_lock);
4570 return ret;
4571 }
4572
4573 /* Calculate the last device-address that could contain
4574 * any block from the chunk that includes the array-address 's'
4575 * and report the next address.
4576 * i.e. the address returned will be chunk-aligned and after
4577 * any data that is in the chunk containing 's'.
4578 */
4579 static sector_t last_dev_address(sector_t s, struct geom *geo)
4580 {
4581 s = (s | geo->chunk_mask) + 1;
4582 s >>= geo->chunk_shift;
4583 s *= geo->near_copies;
4584 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4585 s *= geo->far_copies;
4586 s <<= geo->chunk_shift;
4587 return s;
4588 }
4589
4590 /* Calculate the first device-address that could contain
4591 * any block from the chunk that includes the array-address 's'.
4592 * This too will be the start of a chunk
4593 */
4594 static sector_t first_dev_address(sector_t s, struct geom *geo)
4595 {
4596 s >>= geo->chunk_shift;
4597 s *= geo->near_copies;
4598 sector_div(s, geo->raid_disks);
4599 s *= geo->far_copies;
4600 s <<= geo->chunk_shift;
4601 return s;
4602 }
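/*
 * Example with 4 disks, near_copies = 2, far_copies = 1 and
 * 1024-sector chunks: array address s = 5000 lies in array chunk 4,
 * which is stored on devices 0 and 1 at device chunk 2.
 *   last_dev_address():  ((5000 | 1023) + 1) >> 10 = 5; * 2 = 10;
 *     DIV_ROUND_UP(10, 4) = 3; * 1; << 10 = 3072
 *   first_dev_address(): 5000 >> 10 = 4; * 2 = 8; / 4 = 2;
 *     * 1; << 10 = 2048
 * so that chunk's data lies within device sectors [2048, 3072).
 */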
4603
4604 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4605 int *skipped)
4606 {
4607 /* We simply copy at most one chunk (smallest of old and new)
4608 * at a time, possibly less if that exceeds RESYNC_PAGES,
4609 * or we hit a bad block or something.
4610 * This might mean we pause for normal IO in the middle of
4611 * a chunk, but that is not a problem as mddev->reshape_position
4612 * can record any location.
4613 *
4614 * If we will want to write to a location that isn't
4615 * yet recorded as 'safe' (i.e. in metadata on disk) then
4616 * we need to flush all reshape requests and update the metadata.
4617 *
4618 * When reshaping forwards (e.g. to more devices), we interpret
4619 * 'safe' as the earliest block which might not have been copied
4620 * down yet. We divide this by previous stripe size and multiply
4621 * by previous stripe length to get lowest device offset that we
4622 * cannot write to yet.
4623 * We interpret 'sector_nr' as an address that we want to write to.
4624 * From this we use last_dev_address() to find where we might
4625 * write to, and first_dev_address() on the 'safe' position.
4626 * If this 'next' write position is after the 'safe' position,
4627 * we must update the metadata to increase the 'safe' position.
4628 *
4629 * When reshaping backwards, we round in the opposite direction
4630 * and perform the reverse test: next write position must not be
4631 * less than current safe position.
4632 *
4633 * In all this the minimum difference in data offsets
4634 * (conf->offset_diff - always positive) allows a bit of slack,
4635 * so next can be after 'safe', but not by more than offset_diff
4636 *
4637 * We need to prepare all the bios here before we start any IO
4638 * to ensure the size we choose is acceptable to all devices.
4639 * That means one for each copy for write-out and an extra one for
4640 * read-in.
4641 * We store the read-in bio in ->master_bio and the others in
4642 * ->devs[x].bio and ->devs[x].repl_bio.
4643 */
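/*
 * e.g. when reshaping forwards with offset_diff = 512: if 'next' (just
 * past the last device address this chunk might write) is 3072 and
 * 'safe' (the first device address still needed for old-layout reads)
 * is 2048, then 3072 > 2048 + 512, so the metadata must be flushed
 * before the copy proceeds.
 */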
4644 struct r10conf *conf = mddev->private;
4645 struct r10bio *r10_bio;
4646 sector_t next, safe, last;
4647 int max_sectors;
4648 int nr_sectors;
4649 int s;
4650 struct md_rdev *rdev;
4651 int need_flush = 0;
4652 struct bio *blist;
4653 struct bio *bio, *read_bio;
4654 int sectors_done = 0;
4655 struct page **pages;
4656
4657 if (sector_nr == 0) {
4658 /* If restarting in the middle, skip the initial sectors */
4659 if (mddev->reshape_backwards &&
4660 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4661 sector_nr = (raid10_size(mddev, 0, 0)
4662 - conf->reshape_progress);
4663 } else if (!mddev->reshape_backwards &&
4664 conf->reshape_progress > 0)
4665 sector_nr = conf->reshape_progress;
4666 if (sector_nr) {
4667 mddev->curr_resync_completed = sector_nr;
4668 sysfs_notify_dirent_safe(mddev->sysfs_completed);
4669 *skipped = 1;
4670 return sector_nr;
4671 }
4672 }
4673
4674 /* We don't use sector_nr to track where we are up to
4675 * as that doesn't work well for ->reshape_backwards.
4676 * So just use ->reshape_progress.
4677 */
4678 if (mddev->reshape_backwards) {
4679 /* 'next' is the earliest device address that we might
4680 * write to for this chunk in the new layout
4681 */
4682 next = first_dev_address(conf->reshape_progress - 1,
4683 &conf->geo);
4684
4685 /* 'safe' is the last device address that we might read from
4686 * in the old layout after a restart
4687 */
4688 safe = last_dev_address(conf->reshape_safe - 1,
4689 &conf->prev);
4690
4691 if (next + conf->offset_diff < safe)
4692 need_flush = 1;
4693
4694 last = conf->reshape_progress - 1;
4695 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4696 & conf->prev.chunk_mask);
4697 if (sector_nr + RESYNC_SECTORS < last)
4698 sector_nr = last + 1 - RESYNC_SECTORS;
4699 } else {
4700 /* 'next' is after the last device address that we
4701 * might write to for this chunk in the new layout
4702 */
4703 next = last_dev_address(conf->reshape_progress, &conf->geo);
4704
4705 /* 'safe' is the earliest device address that we might
4706 * read from in the old layout after a restart
4707 */
4708 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4709
4710 /* Need to update metadata if 'next' might be beyond 'safe'
4711 * as that would possibly corrupt data
4712 */
4713 if (next > safe + conf->offset_diff)
4714 need_flush = 1;
4715
4716 sector_nr = conf->reshape_progress;
4717 last = sector_nr | (conf->geo.chunk_mask
4718 & conf->prev.chunk_mask);
4719
4720 if (sector_nr + RESYNC_SECTORS <= last)
4721 last = sector_nr + RESYNC_SECTORS - 1;
4722 }
4723
4724 if (need_flush ||
4725 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4726 /* Need to update reshape_position in metadata */
4727 wait_barrier(conf, false);
4728 mddev->reshape_position = conf->reshape_progress;
4729 if (mddev->reshape_backwards)
4730 mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4731 - conf->reshape_progress;
4732 else
4733 mddev->curr_resync_completed = conf->reshape_progress;
4734 conf->reshape_checkpoint = jiffies;
4735 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4736 md_wakeup_thread(mddev->thread);
4737 wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4738 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4739 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4740 allow_barrier(conf);
4741 return sectors_done;
4742 }
4743 conf->reshape_safe = mddev->reshape_position;
4744 allow_barrier(conf);
4745 }
4746
4747 raise_barrier(conf, 0);
4748 read_more:
4749 /* Now schedule reads for blocks from sector_nr to last */
4750 r10_bio = raid10_alloc_init_r10buf(conf);
4751 r10_bio->state = 0;
4752 raise_barrier(conf, 1);
4753 atomic_set(&r10_bio->remaining, 0);
4754 r10_bio->mddev = mddev;
4755 r10_bio->sector = sector_nr;
4756 set_bit(R10BIO_IsReshape, &r10_bio->state);
4757 r10_bio->sectors = last - sector_nr + 1;
4758 rdev = read_balance(conf, r10_bio, &max_sectors);
4759 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4760
4761 if (!rdev) {
4762 /* Cannot read from here, so need to record bad blocks
4763 * on all the target devices.
4764 */
4765 // FIXME
4766 mempool_free(r10_bio, &conf->r10buf_pool);
4767 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4768 return sectors_done;
4769 }
4770
4771 read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
4772 GFP_KERNEL, &mddev->bio_set);
4773 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4774 + rdev->data_offset);
4775 read_bio->bi_private = r10_bio;
4776 read_bio->bi_end_io = end_reshape_read;
4777 r10_bio->master_bio = read_bio;
4778 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4779
4780 /*
4781 * Broadcast a RESYNC message to the other nodes, so that they do
4782 * not write to this region and we avoid conflicts.
4783 */
4784 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4785 struct mdp_superblock_1 *sb = NULL;
4786 int sb_reshape_pos = 0;
4787
4788 conf->cluster_sync_low = sector_nr;
4789 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4790 sb = page_address(rdev->sb_page);
4791 if (sb) {
4792 sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4793 /*
4794 * Set cluster_sync_low again if the next address for the array
4795 * reshape is less than cluster_sync_low, since we can't advance
4796 * cluster_sync_low until that region has finished reshaping.
4797 */
4798 if (sb_reshape_pos < conf->cluster_sync_low)
4799 conf->cluster_sync_low = sb_reshape_pos;
4800 }
4801
4802 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4803 conf->cluster_sync_high);
4804 }
4805
4806 /* Now find the locations in the new layout */
4807 __raid10_find_phys(&conf->geo, r10_bio);
4808
4809 blist = read_bio;
4810 read_bio->bi_next = NULL;
4811
4812 for (s = 0; s < conf->copies*2; s++) {
4813 struct bio *b;
4814 int d = r10_bio->devs[s/2].devnum;
4815 struct md_rdev *rdev2;
4816 if (s&1) {
4817 rdev2 = conf->mirrors[d].replacement;
4818 b = r10_bio->devs[s/2].repl_bio;
4819 } else {
4820 rdev2 = conf->mirrors[d].rdev;
4821 b = r10_bio->devs[s/2].bio;
4822 }
4823 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4824 continue;
4825
4826 bio_set_dev(b, rdev2->bdev);
4827 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4828 rdev2->new_data_offset;
4829 b->bi_end_io = end_reshape_write;
4830 b->bi_opf = REQ_OP_WRITE;
4831 b->bi_next = blist;
4832 blist = b;
4833 }
4834
4835 /* Now add as many pages as possible to all of these bios. */
4836
4837 nr_sectors = 0;
4838 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4839 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4840 struct page *page = pages[s / (PAGE_SIZE >> 9)];
4841 int len = (max_sectors - s) << 9;
4842 if (len > PAGE_SIZE)
4843 len = PAGE_SIZE;
4844 for (bio = blist; bio ; bio = bio->bi_next) {
4845 if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
4846 bio->bi_status = BLK_STS_RESOURCE;
4847 bio_endio(bio);
4848 return sectors_done;
4849 }
4850 }
4851 sector_nr += len >> 9;
4852 nr_sectors += len >> 9;
4853 }
4854 r10_bio->sectors = nr_sectors;
4855
4856 /* Now submit the read */
4857 md_sync_acct_bio(read_bio, r10_bio->sectors);
4858 atomic_inc(&r10_bio->remaining);
4859 read_bio->bi_next = NULL;
4860 submit_bio_noacct(read_bio);
4861 sectors_done += nr_sectors;
4862 if (sector_nr <= last)
4863 goto read_more;
4864
4865 lower_barrier(conf);
4866
4867 /* Now that we have done the whole section we can
4868 * update reshape_progress
4869 */
4870 if (mddev->reshape_backwards)
4871 conf->reshape_progress -= sectors_done;
4872 else
4873 conf->reshape_progress += sectors_done;
4874
4875 return sectors_done;
4876 }
4877
4878 static void end_reshape_request(struct r10bio *r10_bio);
4879 static int handle_reshape_read_error(struct mddev *mddev,
4880 struct r10bio *r10_bio);
4881 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4882 {
4883 /* Reshape read completed. Hopefully we have a block
4884 * to write out.
4885 * If we got a read error then we do sync 1-page reads from
4886 * elsewhere until we find the data - or give up.
4887 */
4888 struct r10conf *conf = mddev->private;
4889 int s;
4890
4891 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4892 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4893 /* Reshape has been aborted */
4894 md_done_sync(mddev, r10_bio->sectors, 0);
4895 return;
4896 }
4897
4898 /* We definitely have the data in the pages, schedule the
4899 * writes.
4900 */
4901 atomic_set(&r10_bio->remaining, 1);
4902 for (s = 0; s < conf->copies*2; s++) {
4903 struct bio *b;
4904 int d = r10_bio->devs[s/2].devnum;
4905 struct md_rdev *rdev;
4906 if (s&1) {
4907 rdev = conf->mirrors[d].replacement;
4908 b = r10_bio->devs[s/2].repl_bio;
4909 } else {
4910 rdev = conf->mirrors[d].rdev;
4911 b = r10_bio->devs[s/2].bio;
4912 }
4913 if (!rdev || test_bit(Faulty, &rdev->flags))
4914 continue;
4915
4916 atomic_inc(&rdev->nr_pending);
4917 md_sync_acct_bio(b, r10_bio->sectors);
4918 atomic_inc(&r10_bio->remaining);
4919 b->bi_next = NULL;
4920 submit_bio_noacct(b);
4921 }
4922 end_reshape_request(r10_bio);
4923 }
4924
4925 static void end_reshape(struct r10conf *conf)
4926 {
4927 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4928 return;
4929
4930 spin_lock_irq(&conf->device_lock);
4931 conf->prev = conf->geo;
4932 md_finish_reshape(conf->mddev);
4933 smp_wmb();
4934 conf->reshape_progress = MaxSector;
4935 conf->reshape_safe = MaxSector;
4936 spin_unlock_irq(&conf->device_lock);
4937
4938 mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf));
4939 conf->fullsync = 0;
4940 }
4941
4942 static void raid10_update_reshape_pos(struct mddev *mddev)
4943 {
4944 struct r10conf *conf = mddev->private;
4945 sector_t lo, hi;
4946
4947 md_cluster_ops->resync_info_get(mddev, &lo, &hi);
4948 if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
4949 || mddev->reshape_position == MaxSector)
4950 conf->reshape_progress = mddev->reshape_position;
4951 else
4952 WARN_ON_ONCE(1);
4953 }
4954
4955 static int handle_reshape_read_error(struct mddev *mddev,
4956 struct r10bio *r10_bio)
4957 {
4958 /* Use sync reads to get the blocks from somewhere else */
4959 int sectors = r10_bio->sectors;
4960 struct r10conf *conf = mddev->private;
4961 struct r10bio *r10b;
4962 int slot = 0;
4963 int idx = 0;
4964 struct page **pages;
4965
4966 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
4967 if (!r10b) {
4968 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4969 return -ENOMEM;
4970 }
4971
4972 /* reshape IOs share pages from .devs[0].bio */
4973 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4974
4975 r10b->sector = r10_bio->sector;
4976 __raid10_find_phys(&conf->prev, r10b);
4977
4978 while (sectors) {
4979 int s = sectors;
4980 int success = 0;
4981 int first_slot = slot;
4982
4983 if (s > (PAGE_SIZE >> 9))
4984 s = PAGE_SIZE >> 9;
4985
4986 while (!success) {
4987 int d = r10b->devs[slot].devnum;
4988 struct md_rdev *rdev = conf->mirrors[d].rdev;
4989 sector_t addr;
4990 if (rdev == NULL ||
4991 test_bit(Faulty, &rdev->flags) ||
4992 !test_bit(In_sync, &rdev->flags))
4993 goto failed;
4994
4995 addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4996 atomic_inc(&rdev->nr_pending);
4997 success = sync_page_io(rdev,
4998 addr,
4999 s << 9,
5000 pages[idx],
5001 REQ_OP_READ, false);
5002 rdev_dec_pending(rdev, mddev);
5003 if (success)
5004 break;
5005 failed:
5006 slot++;
5007 if (slot >= conf->copies)
5008 slot = 0;
5009 if (slot == first_slot)
5010 break;
5011 }
5012 if (!success) {
5013 /* couldn't read this block, must give up */
5014 set_bit(MD_RECOVERY_INTR,
5015 &mddev->recovery);
5016 kfree(r10b);
5017 return -EIO;
5018 }
5019 sectors -= s;
5020 idx++;
5021 }
5022 kfree(r10b);
5023 return 0;
5024 }
5025
5026 static void end_reshape_write(struct bio *bio)
5027 {
5028 struct r10bio *r10_bio = get_resync_r10bio(bio);
5029 struct mddev *mddev = r10_bio->mddev;
5030 struct r10conf *conf = mddev->private;
5031 int d;
5032 int slot;
5033 int repl;
5034 struct md_rdev *rdev = NULL;
5035
5036 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
5037 rdev = repl ? conf->mirrors[d].replacement :
5038 conf->mirrors[d].rdev;
5039
5040 if (bio->bi_status) {
5041 /* FIXME should record badblock */
5042 md_error(mddev, rdev);
5043 }
5044
5045 rdev_dec_pending(rdev, mddev);
5046 end_reshape_request(r10_bio);
5047 }
5048
5049 static void end_reshape_request(struct r10bio *r10_bio)
5050 {
5051 if (!atomic_dec_and_test(&r10_bio->remaining))
5052 return;
5053 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
5054 bio_put(r10_bio->master_bio);
5055 put_buf(r10_bio);
5056 }
5057
5058 static void raid10_finish_reshape(struct mddev *mddev)
5059 {
5060 struct r10conf *conf = mddev->private;
5061
5062 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5063 return;
5064
5065 if (mddev->delta_disks > 0) {
5066 if (mddev->recovery_cp > mddev->resync_max_sectors) {
5067 mddev->recovery_cp = mddev->resync_max_sectors;
5068 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5069 }
5070 mddev->resync_max_sectors = mddev->array_sectors;
5071 } else {
5072 int d;
5073 for (d = conf->geo.raid_disks ;
5074 d < conf->geo.raid_disks - mddev->delta_disks;
5075 d++) {
5076 struct md_rdev *rdev = conf->mirrors[d].rdev;
5077 if (rdev)
5078 clear_bit(In_sync, &rdev->flags);
5079 rdev = conf->mirrors[d].replacement;
5080 if (rdev)
5081 clear_bit(In_sync, &rdev->flags);
5082 }
5083 }
5084 mddev->layout = mddev->new_layout;
5085 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
5086 mddev->reshape_position = MaxSector;
5087 mddev->delta_disks = 0;
5088 mddev->reshape_backwards = 0;
5089 }
5090
5091 static struct md_personality raid10_personality =
5092 {
5093 .name = "raid10",
5094 .level = 10,
5095 .owner = THIS_MODULE,
5096 .make_request = raid10_make_request,
5097 .run = raid10_run,
5098 .free = raid10_free,
5099 .status = raid10_status,
5100 .error_handler = raid10_error,
5101 .hot_add_disk = raid10_add_disk,
5102 .hot_remove_disk= raid10_remove_disk,
5103 .spare_active = raid10_spare_active,
5104 .sync_request = raid10_sync_request,
5105 .quiesce = raid10_quiesce,
5106 .size = raid10_size,
5107 .resize = raid10_resize,
5108 .takeover = raid10_takeover,
5109 .check_reshape = raid10_check_reshape,
5110 .start_reshape = raid10_start_reshape,
5111 .finish_reshape = raid10_finish_reshape,
5112 .update_reshape_pos = raid10_update_reshape_pos,
5113 };
5114
5115 static int __init raid_init(void)
5116 {
5117 return register_md_personality(&raid10_personality);
5118 }
5119
5120 static void raid_exit(void)
5121 {
5122 unregister_md_personality(&raid10_personality);
5123 }
5124
5125 module_init(raid_init);
5126 module_exit(raid_exit);
5127 MODULE_LICENSE("GPL");
5128 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
5129 MODULE_ALIAS("md-personality-9"); /* RAID10 */
5130 MODULE_ALIAS("md-raid10");
5131 MODULE_ALIAS("md-level-10");
5132