// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c. See raid1.c for further copyright information.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"

#define RAID_1_10_NAME "raid10"
#include "raid10.h"
#include "raid0.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *    use_far_sets (stored in bit 17 of layout)
 *    use_far_sets_bugfixed (stored in bit 18 of layout)
 *
 * The data to be stored is divided into chunks using chunksize. Each device
 * is divided into far_copies sections. In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive). The starting device for each section is offset
 * near_copies from the starting device of the previous section. Thus there
 * are (near_copies * far_copies) of each chunk, and each is on a different
 * drive. near_copies and far_copies must be at least one, and their product
 * is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true. In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size. The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array. This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */
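
/*
 * Editor's note: the following is an illustrative sketch, not driver code
 * (hence compiled out). It decodes the geometry fields from the 'layout'
 * word exactly as documented above; the function and variable names are
 * local to this example.
 */
#if 0
static void example_decode_raid10_layout(int layout)
{
        int near_copies = layout & 0xff;                 /* low byte */
        int far_copies = (layout >> 8) & 0xff;           /* second byte */
        bool far_offset = layout & (1 << 16);            /* bit 16 */
        bool use_far_sets = layout & (1 << 17);          /* bit 17 */
        bool use_far_sets_bugfixed = layout & (1 << 18); /* bit 18 */

        /* e.g. layout 0x102 is the common 'n2' geometry: near=2, far=1 */
        pr_info("near=%d far=%d%s\n", near_copies, far_copies,
                far_offset ? " (offset)" : "");
}
#endif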

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
                                int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);

#include "raid1-10.c"

#define NULL_CMD
#define cmd_before(conf, cmd) \
        do { \
                write_sequnlock_irq(&(conf)->resync_lock); \
                cmd; \
        } while (0)
#define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)

#define wait_event_barrier_cmd(conf, cond, cmd) \
        wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
                       cmd_after(conf))

#define wait_event_barrier(conf, cond) \
        wait_event_barrier_cmd(conf, cond, NULL_CMD)

/*
 * for resync bio, r10bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r10bio *get_resync_r10bio(struct bio *bio)
{
        return get_resync_pages(bio)->raid_bio;
}

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct r10conf *conf = data;
        int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);

        /* allocate a r10bio with room for raid_disks entries in the
         * bios array */
        return kzalloc(size, gfp_flags);
}

#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 */
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct r10conf *conf = data;
        struct r10bio *r10_bio;
        struct bio *bio;
        int j;
        int nalloc, nalloc_rp;
        struct resync_pages *rps;

        r10_bio = r10bio_pool_alloc(gfp_flags, conf);
        if (!r10_bio)
                return NULL;

        if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
            test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
                nalloc = conf->copies; /* resync */
        else
                nalloc = 2; /* recovery */

        /* allocate once for all bios */
        if (!conf->have_replacement)
                nalloc_rp = nalloc;
        else
                nalloc_rp = nalloc * 2;
        rps = kmalloc_objs(struct resync_pages, nalloc_rp, gfp_flags);
        if (!rps)
                goto out_free_r10bio;

        /*
         * Allocate bios.
         */
        for (j = nalloc ; j-- ; ) {
                bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
                if (!bio)
                        goto out_free_bio;
                bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
                r10_bio->devs[j].bio = bio;
                if (!conf->have_replacement)
                        continue;
                bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
                if (!bio)
                        goto out_free_bio;
                bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
                r10_bio->devs[j].repl_bio = bio;
        }
        /*
         * Allocate RESYNC_PAGES data pages and attach them
         * where needed.
         */
        for (j = 0; j < nalloc; j++) {
                struct bio *rbio = r10_bio->devs[j].repl_bio;
                struct resync_pages *rp, *rp_repl;

                rp = &rps[j];
                if (rbio)
                        rp_repl = &rps[nalloc + j];

                bio = r10_bio->devs[j].bio;

                if (!j || test_bit(MD_RECOVERY_SYNC,
                                   &conf->mddev->recovery)) {
                        if (resync_alloc_pages(rp, gfp_flags))
                                goto out_free_pages;
                } else {
                        memcpy(rp, &rps[0], sizeof(*rp));
                        resync_get_all_pages(rp);
                }

                rp->raid_bio = r10_bio;
                bio->bi_private = rp;
                if (rbio) {
                        memcpy(rp_repl, rp, sizeof(*rp));
                        rbio->bi_private = rp_repl;
                }
        }

        return r10_bio;

out_free_pages:
        while (--j >= 0)
                resync_free_pages(&rps[j]);

        j = 0;
out_free_bio:
        for ( ; j < nalloc; j++) {
                if (r10_bio->devs[j].bio)
                        bio_uninit(r10_bio->devs[j].bio);
                kfree(r10_bio->devs[j].bio);
                if (r10_bio->devs[j].repl_bio)
                        bio_uninit(r10_bio->devs[j].repl_bio);
                kfree(r10_bio->devs[j].repl_bio);
        }
        kfree(rps);
out_free_r10bio:
        rbio_pool_free(r10_bio, conf);
        return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
        struct r10conf *conf = data;
        struct r10bio *r10bio = __r10_bio;
        int j;
        struct resync_pages *rp = NULL;

        for (j = conf->copies; j--; ) {
                struct bio *bio = r10bio->devs[j].bio;

                if (bio) {
                        rp = get_resync_pages(bio);
                        resync_free_pages(rp);
                        bio_uninit(bio);
                        kfree(bio);
                }

                bio = r10bio->devs[j].repl_bio;
                if (bio) {
                        bio_uninit(bio);
                        kfree(bio);
                }
        }

        /* resync pages array stored in the 1st bio's .bi_private */
        kfree(rp);

        rbio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
        int i;

        for (i = 0; i < conf->geo.raid_disks; i++) {
                struct bio **bio = &r10_bio->devs[i].bio;
                if (!BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
                bio = &r10_bio->devs[i].repl_bio;
                if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
        }
}

static void free_r10bio(struct r10bio *r10_bio)
{
        struct r10conf *conf = r10_bio->mddev->private;

        put_all_bios(conf, r10_bio);
        mempool_free(r10_bio, &conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
        struct r10conf *conf = r10_bio->mddev->private;

        mempool_free(r10_bio, &conf->r10buf_pool);

        lower_barrier(conf);
}

static void wake_up_barrier(struct r10conf *conf)
{
        if (wq_has_sleeper(&conf->wait_barrier))
                wake_up(&conf->wait_barrier);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
        unsigned long flags;
        struct mddev *mddev = r10_bio->mddev;
        struct r10conf *conf = mddev->private;

        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&r10_bio->retry_list, &conf->retry_list);
        conf->nr_queued++;
        spin_unlock_irqrestore(&conf->device_lock, flags);

        /* wake up frozen array... */
        wake_up(&conf->wait_barrier);

        md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
        struct bio *bio = r10_bio->master_bio;
        struct r10conf *conf = r10_bio->mddev->private;

        if (!test_and_set_bit(R10BIO_Returned, &r10_bio->state)) {
                if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
                        bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
        }

        /*
         * Wake up any possible resync thread that waits for the device
         * to go idle.
         */
        allow_barrier(conf);

        free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
        struct r10conf *conf = r10_bio->mddev->private;

        conf->mirrors[r10_bio->devs[slot].devnum].head_position =
                r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
                         struct bio *bio, int *slotp, int *replp)
{
        int slot;
        int repl = 0;

        for (slot = 0; slot < conf->geo.raid_disks; slot++) {
                if (r10_bio->devs[slot].bio == bio)
                        break;
                if (r10_bio->devs[slot].repl_bio == bio) {
                        repl = 1;
                        break;
                }
        }

        update_head_pos(slot, r10_bio);

        if (slotp)
                *slotp = slot;
        if (replp)
                *replp = repl;
        return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio)
{
        int uptodate = !bio->bi_status;
        struct r10bio *r10_bio = bio->bi_private;
        int slot;
        struct md_rdev *rdev;
        struct r10conf *conf = r10_bio->mddev->private;

        slot = r10_bio->read_slot;
        rdev = r10_bio->devs[slot].rdev;
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        update_head_pos(slot, r10_bio);

        if (uptodate) {
                /*
                 * Set R10BIO_Uptodate in our master bio, so that
                 * we will return a good error code to the higher
                 * levels even if IO on some other mirrored buffer fails.
                 *
                 * The 'master' represents the composite IO operation to
                 * user-side. So if something waits for IO, then it will
                 * wait for the 'master' bio.
                 */
                set_bit(R10BIO_Uptodate, &r10_bio->state);
        } else if (!raid1_should_handle_error(bio)) {
                uptodate = 1;
        } else {
                /* If all other devices that store this block have
                 * failed, we want to return the error upwards rather
                 * than fail the last device. Here we redefine
                 * "uptodate" to mean "Don't want to retry"
                 */
                if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
                             rdev->raid_disk))
                        uptodate = 1;
        }
        if (uptodate) {
                raid_end_bio_io(r10_bio);
                rdev_dec_pending(rdev, conf->mddev);
        } else {
                /*
                 * oops, read error - keep the refcount on the rdev
                 */
                pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
                                   mdname(conf->mddev),
                                   rdev->bdev,
                                   (unsigned long long)r10_bio->sector);
                set_bit(R10BIO_ReadError, &r10_bio->state);
                reschedule_retry(r10_bio);
        }
}

static void close_write(struct r10bio *r10_bio)
{
        struct mddev *mddev = r10_bio->mddev;

        md_write_end(mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
        if (atomic_dec_and_test(&r10_bio->remaining)) {
                if (test_bit(R10BIO_WriteError, &r10_bio->state))
                        reschedule_retry(r10_bio);
                else {
                        close_write(r10_bio);
                        if (test_bit(R10BIO_MadeGood, &r10_bio->state))
                                reschedule_retry(r10_bio);
                        else
                                raid_end_bio_io(r10_bio);
                }
        }
}

static void raid10_end_write_request(struct bio *bio)
{
        struct r10bio *r10_bio = bio->bi_private;
        int dev;
        int dec_rdev = 1;
        struct r10conf *conf = r10_bio->mddev->private;
        int slot, repl;
        struct md_rdev *rdev = NULL;
        struct bio *to_put = NULL;
        bool ignore_error = !raid1_should_handle_error(bio) ||
                (bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);

        dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

        if (repl)
                rdev = conf->mirrors[dev].replacement;
        if (!rdev) {
                smp_rmb();
                repl = 0;
                rdev = conf->mirrors[dev].rdev;
        }
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        if (bio->bi_status && !ignore_error) {
                if (repl)
                        /* Never record new bad blocks to replacement,
                         * just fail it.
                         */
                        md_error(rdev->mddev, rdev);
                else {
                        set_bit(WriteErrorSeen, &rdev->flags);
                        if (!test_and_set_bit(WantReplacement, &rdev->flags))
                                set_bit(MD_RECOVERY_NEEDED,
                                        &rdev->mddev->recovery);

                        dec_rdev = 0;
                        if (test_bit(FailFast, &rdev->flags) &&
                            (bio->bi_opf & MD_FAILFAST)) {
                                md_error(rdev->mddev, rdev);
                        }

                        /*
                         * When the device is faulty, it is not necessary to
                         * handle write error.
                         */
                        if (!test_bit(Faulty, &rdev->flags))
                                set_bit(R10BIO_WriteError, &r10_bio->state);
                        else {
                                /* Fail the request */
                                r10_bio->devs[slot].bio = NULL;
                                to_put = bio;
                                dec_rdev = 1;
                        }
                }
        } else {
                /*
                 * Set R10BIO_Uptodate in our master bio, so that
                 * we will return a good error code to the higher
                 * levels even if IO on some other mirrored buffer fails.
                 *
                 * The 'master' represents the composite IO operation to
                 * user-side. So if something waits for IO, then it will
                 * wait for the 'master' bio.
                 *
                 * Do not set R10BIO_Uptodate if the current device is
                 * rebuilding or Faulty. This is because we cannot use
                 * such device for properly reading the data back (we could
                 * potentially use it, if the current write would have fallen
                 * before rdev->recovery_offset, but for simplicity we don't
                 * check this here).
                 */
                if (test_bit(In_sync, &rdev->flags) &&
                    !test_bit(Faulty, &rdev->flags))
                        set_bit(R10BIO_Uptodate, &r10_bio->state);

                /* Maybe we can clear some bad blocks. */
                if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
                                      r10_bio->sectors) &&
                    !ignore_error) {
                        bio_put(bio);
                        if (repl)
                                r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
                        else
                                r10_bio->devs[slot].bio = IO_MADE_GOOD;
                        dec_rdev = 0;
                        set_bit(R10BIO_MadeGood, &r10_bio->state);
                }
        }

        /*
         * Let's see if all mirrored write operations have finished
         * already.
         */
        one_write_done(r10_bio);
        if (dec_rdev)
                rdev_dec_pending(rdev, conf->mddev);
        if (to_put)
                bio_put(to_put);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
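
/*
 * Editor's worked example for the mapping below (assumed geometry, for
 * illustration only): near_copies=2, far_copies=1, raid_disks=4 and a
 * 32KiB chunk (64 sectors, chunk_shift=6). Virtual sector 300 lies in
 * chunk 4 (300 >> 6) at offset 44 (300 & 63). chunk * near_copies = 8,
 * and dividing by raid_disks gives stripe 2 starting at device 0, so
 * the two copies land on dev0 and dev1 at sector (2 << 6) + 44 = 172.
 */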

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
        int n, f;
        sector_t sector;
        sector_t chunk;
        sector_t stripe;
        int dev;
        int slot = 0;
        int last_far_set_start, last_far_set_size;

        last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
        last_far_set_start *= geo->far_set_size;

        last_far_set_size = geo->far_set_size;
        last_far_set_size += (geo->raid_disks % geo->far_set_size);

        /* now calculate first sector/dev */
        chunk = r10bio->sector >> geo->chunk_shift;
        sector = r10bio->sector & geo->chunk_mask;

        chunk *= geo->near_copies;
        stripe = chunk;
        dev = sector_div(stripe, geo->raid_disks);
        if (geo->far_offset)
                stripe *= geo->far_copies;

        sector += stripe << geo->chunk_shift;

        /* and calculate all the others */
        for (n = 0; n < geo->near_copies; n++) {
                int d = dev;
                int set;
                sector_t s = sector;
                r10bio->devs[slot].devnum = d;
                r10bio->devs[slot].addr = s;
                slot++;

                for (f = 1; f < geo->far_copies; f++) {
                        set = d / geo->far_set_size;
                        d += geo->near_copies;

                        if ((geo->raid_disks % geo->far_set_size) &&
                            (d > last_far_set_start)) {
                                d -= last_far_set_start;
                                d %= last_far_set_size;
                                d += last_far_set_start;
                        } else {
                                d %= geo->far_set_size;
                                d += geo->far_set_size * set;
                        }
                        s += geo->stride;
                        r10bio->devs[slot].devnum = d;
                        r10bio->devs[slot].addr = s;
                        slot++;
                }
                dev++;
                if (dev >= geo->raid_disks) {
                        dev = 0;
                        sector += (geo->chunk_mask + 1);
                }
        }
}

static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
        struct geom *geo = &conf->geo;

        if (conf->reshape_progress != MaxSector &&
            ((r10bio->sector >= conf->reshape_progress) !=
             conf->mddev->reshape_backwards)) {
                set_bit(R10BIO_Previous, &r10bio->state);
                geo = &conf->prev;
        } else
                clear_bit(R10BIO_Previous, &r10bio->state);

        __raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
        sector_t offset, chunk, vchunk;
        /* Never use conf->prev as this is only called during resync
         * or recovery, so reshape isn't happening
         */
        struct geom *geo = &conf->geo;
        int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
        int far_set_size = geo->far_set_size;
        int last_far_set_start;

        if (geo->raid_disks % geo->far_set_size) {
                last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
                last_far_set_start *= geo->far_set_size;

                if (dev >= last_far_set_start) {
                        far_set_size = geo->far_set_size;
                        far_set_size += (geo->raid_disks % geo->far_set_size);
                        far_set_start = last_far_set_start;
                }
        }

        offset = sector & geo->chunk_mask;
        if (geo->far_offset) {
                int fc;
                chunk = sector >> geo->chunk_shift;
                fc = sector_div(chunk, geo->far_copies);
                dev -= fc * geo->near_copies;
                if (dev < far_set_start)
                        dev += far_set_size;
        } else {
                while (sector >= geo->stride) {
                        sector -= geo->stride;
                        if (dev < (geo->near_copies + far_set_start))
                                dev += far_set_size - geo->near_copies;
                        else
                                dev -= geo->near_copies;
                }
                chunk = sector >> geo->chunk_shift;
        }
        vchunk = chunk * geo->raid_disks + dev;
        sector_div(vchunk, geo->near_copies);
        return (vchunk << geo->chunk_shift) + offset;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts; both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device, based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink read balancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
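
/*
 * Editor's sketch (compiled out, not driver code) of the three distance
 * rules read_balance() applies below when balancing is allowed.
 */
#if 0
static sector_t example_read_distance(struct geom *geo, sector_t addr,
                                      sector_t head_position,
                                      unsigned int pending)
{
        if (geo->near_copies > 1 && !pending)
                return 0;               /* idle disk: best possible choice */
        if (geo->far_copies > 1)
                return addr;            /* always prefer the lowest address */
        return abs(addr - head_position);       /* classic seek distance */
}
#endif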
static struct md_rdev *read_balance(struct r10conf *conf,
                                    struct r10bio *r10_bio,
                                    int *max_sectors)
{
        const sector_t this_sector = r10_bio->sector;
        int disk, slot;
        int sectors = r10_bio->sectors;
        int best_good_sectors;
        sector_t new_distance, best_dist;
        struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
        int do_balance;
        int best_dist_slot, best_pending_slot;
        bool has_nonrot_disk = false;
        unsigned int min_pending;
        struct geom *geo = &conf->geo;

        raid10_find_phys(conf, r10_bio);
        best_dist_slot = -1;
        min_pending = UINT_MAX;
        best_dist_rdev = NULL;
        best_pending_rdev = NULL;
        best_dist = MaxSector;
        best_good_sectors = 0;
        do_balance = 1;
        clear_bit(R10BIO_FailFast, &r10_bio->state);

        if (raid1_should_read_first(conf->mddev, this_sector, sectors))
                do_balance = 0;

        for (slot = 0; slot < conf->copies ; slot++) {
                sector_t first_bad;
                sector_t bad_sectors;
                sector_t dev_sector;
                unsigned int pending;
                bool nonrot;

                if (r10_bio->devs[slot].bio == IO_BLOCKED)
                        continue;
                disk = r10_bio->devs[slot].devnum;
                rdev = conf->mirrors[disk].replacement;
                if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
                    r10_bio->devs[slot].addr + sectors >
                    rdev->recovery_offset)
                        rdev = conf->mirrors[disk].rdev;
                if (rdev == NULL ||
                    test_bit(Faulty, &rdev->flags))
                        continue;
                if (!test_bit(In_sync, &rdev->flags) &&
                    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
                        continue;

                dev_sector = r10_bio->devs[slot].addr;
                if (is_badblock(rdev, dev_sector, sectors,
                                &first_bad, &bad_sectors)) {
                        if (best_dist < MaxSector)
                                /* Already have a better slot */
                                continue;
                        if (first_bad <= dev_sector) {
                                /* Cannot read here. If this is the
                                 * 'primary' device, then we must not read
                                 * beyond 'bad_sectors' from another device.
                                 */
                                bad_sectors -= (dev_sector - first_bad);
                                if (!do_balance && sectors > bad_sectors)
                                        sectors = bad_sectors;
                                if (best_good_sectors > sectors)
                                        best_good_sectors = sectors;
                        } else {
                                sector_t good_sectors =
                                        first_bad - dev_sector;
                                if (good_sectors > best_good_sectors) {
                                        best_good_sectors = good_sectors;
                                        best_dist_slot = slot;
                                        best_dist_rdev = rdev;
                                }
                                if (!do_balance)
                                        /* Must read from here */
                                        break;
                        }
                        continue;
                } else
                        best_good_sectors = sectors;

                if (!do_balance)
                        break;

                nonrot = bdev_nonrot(rdev->bdev);
                has_nonrot_disk |= nonrot;
                pending = atomic_read(&rdev->nr_pending);
                if (min_pending > pending && nonrot) {
                        min_pending = pending;
                        best_pending_slot = slot;
                        best_pending_rdev = rdev;
                }

                if (best_dist_slot >= 0)
                        /* At least 2 disks to choose from so failfast is OK */
                        set_bit(R10BIO_FailFast, &r10_bio->state);
                /* This optimisation is debatable, and completely destroys
                 * sequential read speed for 'far copies' arrays. So only
                 * keep it for 'near' arrays, and review those later.
                 */
                if (geo->near_copies > 1 && !pending)
                        new_distance = 0;

                /* for far > 1 always use the lowest address */
                else if (geo->far_copies > 1)
                        new_distance = r10_bio->devs[slot].addr;
                else
                        new_distance = abs(r10_bio->devs[slot].addr -
                                           conf->mirrors[disk].head_position);

                if (new_distance < best_dist) {
                        best_dist = new_distance;
                        best_dist_slot = slot;
                        best_dist_rdev = rdev;
                }
        }
        if (slot >= conf->copies) {
                if (has_nonrot_disk) {
                        slot = best_pending_slot;
                        rdev = best_pending_rdev;
                } else {
                        slot = best_dist_slot;
                        rdev = best_dist_rdev;
                }
        }

        if (slot >= 0) {
                atomic_inc(&rdev->nr_pending);
                r10_bio->read_slot = slot;
        } else
                rdev = NULL;
        *max_sectors = best_good_sectors;

        return rdev;
}

static void flush_pending_writes(struct r10conf *conf)
{
        /* Any writes that have been queued but are awaiting
         * bitmap updates get flushed here.
         */
        spin_lock_irq(&conf->device_lock);

        if (conf->pending_bio_list.head) {
                struct blk_plug plug;
                struct bio *bio;

                bio = bio_list_get(&conf->pending_bio_list);
                spin_unlock_irq(&conf->device_lock);

                /*
                 * As this is called in a wait_event() loop (see freeze_array),
                 * current->state might be TASK_UNINTERRUPTIBLE which will
                 * cause a warning when we prepare to wait again. As it is
                 * rare that this path is taken, it is perfectly safe to force
                 * us to go around the wait_event() loop again, so the warning
                 * is a false-positive. Silence the warning by resetting
                 * thread state
                 */
                __set_current_state(TASK_RUNNING);

                blk_start_plug(&plug);
                raid1_prepare_flush_writes(conf->mddev);
                wake_up(&conf->wait_barrier);

                while (bio) { /* submit pending writes */
                        struct bio *next = bio->bi_next;

                        raid1_submit_write(bio);
                        bio = next;
                        cond_resched();
                }
                blk_finish_plug(&plug);
        } else
                spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
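
/*
 * Editor's illustrative pairing (compiled out, not driver code): the
 * rules above mean every user of the barrier follows one of these two
 * patterns.
 */
#if 0
static void example_regular_io(struct r10conf *conf)
{
        wait_barrier(conf, false);      /* blocks while background IO holds the barrier */
        /* ... issue normal IO ... */
        allow_barrier(conf);            /* drops nr_pending, may wake a waiting resync */
}

static void example_background_io(struct r10conf *conf)
{
        raise_barrier(conf, 0);         /* waits for nr_pending to drain */
        /* ... issue resync/recovery IO ... */
        lower_barrier(conf);            /* wakes any regular IO waiting on the barrier */
}
#endif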

static void raise_barrier(struct r10conf *conf, int force)
{
        write_seqlock_irq(&conf->resync_lock);

        if (WARN_ON_ONCE(force && !conf->barrier))
                force = false;

        /* Wait until no block IO is waiting (unless 'force') */
        wait_event_barrier(conf, force || !conf->nr_waiting);

        /* block any new IO from starting */
        WRITE_ONCE(conf->barrier, conf->barrier + 1);

        /* Now wait for all pending IO to complete */
        wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
                                 conf->barrier < RESYNC_DEPTH);

        write_sequnlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
        unsigned long flags;

        write_seqlock_irqsave(&conf->resync_lock, flags);
        WRITE_ONCE(conf->barrier, conf->barrier - 1);
        write_sequnlock_irqrestore(&conf->resync_lock, flags);
        wake_up(&conf->wait_barrier);
}

static bool stop_waiting_barrier(struct r10conf *conf)
{
        struct bio_list *bio_list = current->bio_list;
        struct md_thread *thread;

        /* barrier is dropped */
        if (!conf->barrier)
                return true;

        /*
         * If there are already pending requests (preventing the barrier from
         * rising completely), and the pre-process bio queue isn't empty, then
         * don't wait, as we need to empty that queue to get the nr_pending
         * count down.
         */
        if (atomic_read(&conf->nr_pending) && bio_list &&
            (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
                return true;

        /* daemon thread must exist while handling io */
        thread = rcu_dereference_protected(conf->mddev->thread, true);
        /*
         * move on if io is issued from raid10d(), nr_pending is not released
         * from original io(see handle_read_error()). All raise barrier is
         * blocked until this io is done.
         */
        if (thread->tsk == current) {
                WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
                return true;
        }

        return false;
}

static bool wait_barrier_nolock(struct r10conf *conf)
{
        unsigned int seq = read_seqbegin(&conf->resync_lock);

        if (READ_ONCE(conf->barrier))
                return false;

        atomic_inc(&conf->nr_pending);
        if (!read_seqretry(&conf->resync_lock, seq))
                return true;

        if (atomic_dec_and_test(&conf->nr_pending))
                wake_up_barrier(conf);

        return false;
}

static bool wait_barrier(struct r10conf *conf, bool nowait)
{
        bool ret = true;

        if (wait_barrier_nolock(conf))
                return true;

        write_seqlock_irq(&conf->resync_lock);
        if (conf->barrier) {
                /* Return false when nowait flag is set */
                if (nowait) {
                        ret = false;
                } else {
                        conf->nr_waiting++;
                        mddev_add_trace_msg(conf->mddev, "raid10 wait barrier");
                        wait_event_barrier(conf, stop_waiting_barrier(conf));
                        conf->nr_waiting--;
                }
                if (!conf->nr_waiting)
                        wake_up(&conf->wait_barrier);
        }
        /* Only increment nr_pending when we wait */
        if (ret)
                atomic_inc(&conf->nr_pending);
        write_sequnlock_irq(&conf->resync_lock);
        return ret;
}

static void allow_barrier(struct r10conf *conf)
{
        if ((atomic_dec_and_test(&conf->nr_pending)) ||
            (conf->array_freeze_pending))
                wake_up_barrier(conf);
}

static void freeze_array(struct r10conf *conf, int extra)
{
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We increment barrier and nr_waiting, and then
         * wait until nr_pending matches nr_queued+extra.
         * This is called in the context of one normal IO request
         * that has failed. Thus any sync request that might be pending
         * will be blocked by nr_pending, and we need to wait for
         * pending IO requests to complete or be queued for re-try.
         * Thus the number queued (nr_queued) plus this request (extra)
         * must match the number of pending IOs (nr_pending) before
         * we continue.
         */
        write_seqlock_irq(&conf->resync_lock);
        conf->array_freeze_pending++;
        WRITE_ONCE(conf->barrier, conf->barrier + 1);
        conf->nr_waiting++;
        wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) ==
                        conf->nr_queued + extra, flush_pending_writes(conf));
        conf->array_freeze_pending--;
        write_sequnlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r10conf *conf)
{
        /* reverse the effect of the freeze */
        write_seqlock_irq(&conf->resync_lock);
        WRITE_ONCE(conf->barrier, conf->barrier - 1);
        conf->nr_waiting--;
        wake_up(&conf->wait_barrier);
        write_sequnlock_irq(&conf->resync_lock);
}

static sector_t choose_data_offset(struct r10bio *r10_bio,
                                   struct md_rdev *rdev)
{
        if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
            test_bit(R10BIO_Previous, &r10_bio->state))
                return rdev->data_offset;
        else
                return rdev->new_data_offset;
}

static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
        struct mddev *mddev = plug->cb.data;
        struct r10conf *conf = mddev->private;
        struct bio *bio;

        if (from_schedule) {
                spin_lock_irq(&conf->device_lock);
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                spin_unlock_irq(&conf->device_lock);
                wake_up_barrier(conf);
                md_wakeup_thread(mddev->thread);
                kfree(plug);
                return;
        }

        /* we aren't scheduling, so we can do the write-out directly. */
        bio = bio_list_get(&plug->pending);
        raid1_prepare_flush_writes(mddev);
        wake_up_barrier(conf);

        while (bio) { /* submit pending writes */
                struct bio *next = bio->bi_next;

                raid1_submit_write(bio);
                bio = next;
                cond_resched();
        }
        kfree(plug);
}

/*
 * 1. Register the new request and wait if the reconstruction thread has put
 *    up a bar for new requests. Continue immediately if no resync is active
 *    currently.
 * 2. If IO spans the reshape position, we need to wait for reshape to pass.
 */
static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
                                 struct bio *bio, sector_t sectors)
{
        /* Bail out if REQ_NOWAIT is set for the bio */
        if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
                bio_wouldblock_error(bio);
                return false;
        }
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio->bi_iter.bi_sector < conf->reshape_progress &&
            bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
                allow_barrier(conf);
                if (bio->bi_opf & REQ_NOWAIT) {
                        bio_wouldblock_error(bio);
                        return false;
                }
                mddev_add_trace_msg(conf->mddev, "raid10 wait reshape");
                wait_event(conf->wait_barrier,
                           conf->reshape_progress <= bio->bi_iter.bi_sector ||
                           conf->reshape_progress >= bio->bi_iter.bi_sector +
                           sectors);
                wait_barrier(conf, false);
        }
        return true;
}

static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                                struct r10bio *r10_bio, bool io_accounting)
{
        struct r10conf *conf = mddev->private;
        struct bio *read_bio;
        int max_sectors;
        struct md_rdev *rdev;
        char b[BDEVNAME_SIZE];
        int slot = r10_bio->read_slot;
        struct md_rdev *err_rdev = NULL;
        gfp_t gfp = GFP_NOIO;

        if (slot >= 0 && r10_bio->devs[slot].rdev) {
                /*
                 * This is an error retry, but we cannot
                 * safely dereference the rdev in the r10_bio,
                 * we must use the one in conf.
                 * If it has already been disconnected (unlikely)
                 * we lose the device name in error messages.
                 */
                int disk;
                /*
                 * As we are blocking raid10, it is a little safer to
                 * use __GFP_HIGH.
                 */
                gfp = GFP_NOIO | __GFP_HIGH;

                disk = r10_bio->devs[slot].devnum;
                err_rdev = conf->mirrors[disk].rdev;
                if (err_rdev)
                        snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
                else {
                        strcpy(b, "???");
                        /* This never gets dereferenced */
                        err_rdev = r10_bio->devs[slot].rdev;
                }
        }

        if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) {
                raid_end_bio_io(r10_bio);
                return;
        }

        rdev = read_balance(conf, r10_bio, &max_sectors);
        if (!rdev) {
                if (err_rdev) {
                        pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
                                            mdname(mddev), b,
                                            (unsigned long long)r10_bio->sector);
                }
                raid_end_bio_io(r10_bio);
                return;
        }
        if (err_rdev)
                pr_err_ratelimited("md/raid10:%s: %pg: redirecting sector %llu to another mirror\n",
                                   mdname(mddev),
                                   rdev->bdev,
                                   (unsigned long long)r10_bio->sector);
        if (max_sectors < bio_sectors(bio)) {
                allow_barrier(conf);
                bio = bio_submit_split_bioset(bio, max_sectors,
                                              &conf->bio_split);
                wait_barrier(conf, false);
                if (!bio) {
                        set_bit(R10BIO_Returned, &r10_bio->state);
                        goto err_handle;
                }

                r10_bio->master_bio = bio;
                r10_bio->sectors = max_sectors;
        }
        slot = r10_bio->read_slot;

        if (io_accounting) {
                md_account_bio(mddev, &bio);
                r10_bio->master_bio = bio;
        }
        read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
        read_bio->bi_opf &= ~REQ_NOWAIT;

        r10_bio->devs[slot].bio = read_bio;
        r10_bio->devs[slot].rdev = rdev;

        read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
                choose_data_offset(r10_bio, rdev);
        read_bio->bi_end_io = raid10_end_read_request;
        if (test_bit(FailFast, &rdev->flags) &&
            test_bit(R10BIO_FailFast, &r10_bio->state))
                read_bio->bi_opf |= MD_FAILFAST;
        read_bio->bi_private = r10_bio;
        mddev_trace_remap(mddev, read_bio, r10_bio->sector);
        submit_bio_noacct(read_bio);
        return;
err_handle:
        atomic_dec(&rdev->nr_pending);
        raid_end_bio_io(r10_bio);
}

static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
                                  struct bio *bio, bool replacement,
                                  int n_copy)
{
        unsigned long flags;
        struct r10conf *conf = mddev->private;
        struct md_rdev *rdev;
        int devnum = r10_bio->devs[n_copy].devnum;
        struct bio *mbio;

        rdev = replacement ? conf->mirrors[devnum].replacement :
                             conf->mirrors[devnum].rdev;

        mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
        mbio->bi_opf &= ~REQ_NOWAIT;
        if (replacement)
                r10_bio->devs[n_copy].repl_bio = mbio;
        else
                r10_bio->devs[n_copy].bio = mbio;

        mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
                                   choose_data_offset(r10_bio, rdev));
        mbio->bi_end_io = raid10_end_write_request;
        if (!replacement && test_bit(FailFast,
                                     &conf->mirrors[devnum].rdev->flags)
                         && enough(conf, devnum))
                mbio->bi_opf |= MD_FAILFAST;
        mbio->bi_private = r10_bio;
        mddev_trace_remap(mddev, mbio, r10_bio->sector);
        /* flush_pending_writes() needs access to the rdev so...*/
        mbio->bi_bdev = (void *)rdev;

        atomic_inc(&r10_bio->remaining);

        if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) {
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
                spin_unlock_irqrestore(&conf->device_lock, flags);
                md_wakeup_thread(mddev->thread);
        }
}

static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
{
        struct r10conf *conf = mddev->private;
        struct md_rdev *blocked_rdev;
        int i;

retry_wait:
        blocked_rdev = NULL;
        for (i = 0; i < conf->copies; i++) {
                struct md_rdev *rdev, *rrdev;

                rdev = conf->mirrors[i].rdev;
                if (rdev) {
                        sector_t dev_sector = r10_bio->devs[i].addr;

                        /*
                         * Discard requests don't care about the write result,
                         * so they don't need to wait for a blocked disk here.
                         */
                        if (test_bit(WriteErrorSeen, &rdev->flags) &&
                            r10_bio->sectors &&
                            rdev_has_badblock(rdev, dev_sector,
                                              r10_bio->sectors) < 0)
                                /*
                                 * Mustn't write here until the bad
                                 * block is acknowledged
                                 */
                                set_bit(BlockedBadBlocks, &rdev->flags);

                        if (rdev_blocked(rdev)) {
                                blocked_rdev = rdev;
                                atomic_inc(&rdev->nr_pending);
                                break;
                        }
                }

                rrdev = conf->mirrors[i].replacement;
                if (rrdev && rdev_blocked(rrdev)) {
                        atomic_inc(&rrdev->nr_pending);
                        blocked_rdev = rrdev;
                        break;
                }
        }

        if (unlikely(blocked_rdev)) {
                /* Have to wait for this device to get unblocked, then retry */
                allow_barrier(conf);
                mddev_add_trace_msg(conf->mddev,
                                    "raid10 %s wait rdev %d blocked",
                                    __func__, blocked_rdev->raid_disk);
                md_wait_for_blocked_rdev(blocked_rdev, mddev);
                wait_barrier(conf, false);
                goto retry_wait;
        }
}

static void raid10_write_request(struct mddev *mddev, struct bio *bio,
                                 struct r10bio *r10_bio)
{
        struct r10conf *conf = mddev->private;
        int i, k;
        sector_t sectors;
        int max_sectors;

        if ((mddev_is_clustered(mddev) &&
             mddev->cluster_ops->area_resyncing(mddev, WRITE,
                                                bio->bi_iter.bi_sector,
                                                bio_end_sector(bio)))) {
                DEFINE_WAIT(w);
                /* Bail out if REQ_NOWAIT is set for the bio */
                if (bio->bi_opf & REQ_NOWAIT) {
                        bio_wouldblock_error(bio);
                        return;
                }
                for (;;) {
                        prepare_to_wait(&conf->wait_barrier,
                                        &w, TASK_IDLE);
                        if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
                                 bio->bi_iter.bi_sector, bio_end_sector(bio)))
                                break;
                        schedule();
                }
                finish_wait(&conf->wait_barrier, &w);
        }

        sectors = r10_bio->sectors;
        if (!regular_request_wait(mddev, conf, bio, sectors)) {
                raid_end_bio_io(r10_bio);
                return;
        }

        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            (mddev->reshape_backwards
             ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
                bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
             : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
                bio->bi_iter.bi_sector < conf->reshape_progress))) {
                /* Need to update reshape_position in metadata */
                mddev->reshape_position = conf->reshape_progress;
                set_mask_bits(&mddev->sb_flags, 0,
                              BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
                md_wakeup_thread(mddev->thread);
                if (bio->bi_opf & REQ_NOWAIT) {
                        allow_barrier(conf);
                        bio_wouldblock_error(bio);
                        return;
                }
                mddev_add_trace_msg(conf->mddev,
                                    "raid10 wait reshape metadata");
                wait_event(mddev->sb_wait,
                           !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));

                conf->reshape_safe = mddev->reshape_position;
        }

        /* first select target devices under rcu_lock and
         * inc refcount on their rdev. Record them by setting
         * bios[x] to bio
         * If there are known/acknowledged bad blocks on any device
         * on which we have seen a write error, we want to avoid
         * writing to those blocks. This potentially requires several
         * writes to write around the bad blocks. Each set of writes
         * gets its own r10_bio with a set of bios attached.
         */

        r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
        raid10_find_phys(conf, r10_bio);

        wait_blocked_dev(mddev, r10_bio);

        max_sectors = r10_bio->sectors;

        for (i = 0; i < conf->copies; i++) {
                int d = r10_bio->devs[i].devnum;
                struct md_rdev *rdev, *rrdev;

                rdev = conf->mirrors[d].rdev;
                rrdev = conf->mirrors[d].replacement;
                if (rdev && (test_bit(Faulty, &rdev->flags)))
                        rdev = NULL;
                if (rrdev && (test_bit(Faulty, &rrdev->flags)))
                        rrdev = NULL;

                r10_bio->devs[i].bio = NULL;
                r10_bio->devs[i].repl_bio = NULL;

                if (!rdev && !rrdev)
                        continue;
                if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
                        sector_t first_bad;
                        sector_t dev_sector = r10_bio->devs[i].addr;
                        sector_t bad_sectors;
                        int is_bad;

                        is_bad = is_badblock(rdev, dev_sector, max_sectors,
                                             &first_bad, &bad_sectors);
                        if (is_bad && first_bad <= dev_sector) {
                                /* Cannot write here at all */
                                bad_sectors -= (dev_sector - first_bad);
                                if (bad_sectors < max_sectors)
                                        /* Mustn't write more than bad_sectors
                                         * to other devices yet
                                         */
                                        max_sectors = bad_sectors;
                                continue;
                        }
                        if (is_bad) {
                                int good_sectors;

                                /*
                                 * We cannot atomically write this, so just
                                 * error in that case. It could be possible to
                                 * atomically write other mirrors, but the
                                 * complexity of supporting that is not worth
                                 * the benefit.
                                 */
                                if (bio->bi_opf & REQ_ATOMIC)
                                        goto err_handle;

                                good_sectors = first_bad - dev_sector;
                                if (good_sectors < max_sectors)
                                        max_sectors = good_sectors;
                        }
                }
                if (rdev) {
                        r10_bio->devs[i].bio = bio;
                        atomic_inc(&rdev->nr_pending);
                }
                if (rrdev) {
                        r10_bio->devs[i].repl_bio = bio;
                        atomic_inc(&rrdev->nr_pending);
                }
        }

        if (max_sectors < r10_bio->sectors)
                r10_bio->sectors = max_sectors;

        if (r10_bio->sectors < bio_sectors(bio)) {
                allow_barrier(conf);
                bio = bio_submit_split_bioset(bio, r10_bio->sectors,
                                              &conf->bio_split);
                wait_barrier(conf, false);
                if (!bio) {
                        set_bit(R10BIO_Returned, &r10_bio->state);
                        goto err_handle;
                }

                r10_bio->master_bio = bio;
        }

        md_account_bio(mddev, &bio);
        r10_bio->master_bio = bio;
        atomic_set(&r10_bio->remaining, 1);

        for (i = 0; i < conf->copies; i++) {
                if (r10_bio->devs[i].bio)
                        raid10_write_one_disk(mddev, r10_bio, bio, false, i);
                if (r10_bio->devs[i].repl_bio)
                        raid10_write_one_disk(mddev, r10_bio, bio, true, i);
        }
        one_write_done(r10_bio);
        return;
err_handle:
        for (k = 0; k < i; k++) {
                int d = r10_bio->devs[k].devnum;
                struct md_rdev *rdev = conf->mirrors[d].rdev;
                struct md_rdev *rrdev = conf->mirrors[d].replacement;

                if (r10_bio->devs[k].bio) {
                        rdev_dec_pending(rdev, mddev);
                        r10_bio->devs[k].bio = NULL;
                }
                if (r10_bio->devs[k].repl_bio) {
                        rdev_dec_pending(rrdev, mddev);
                        r10_bio->devs[k].repl_bio = NULL;
                }
        }

        raid_end_bio_io(r10_bio);
}

static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
{
        struct r10conf *conf = mddev->private;
        struct r10bio *r10_bio;

        r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);

        r10_bio->master_bio = bio;
        r10_bio->sectors = sectors;

        r10_bio->mddev = mddev;
        r10_bio->sector = bio->bi_iter.bi_sector;
        r10_bio->state = 0;
        r10_bio->read_slot = -1;
        memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
                        conf->geo.raid_disks);

        if (bio_data_dir(bio) == READ)
                raid10_read_request(mddev, bio, r10_bio, true);
        else
                raid10_write_request(mddev, bio, r10_bio);
}

static void raid_end_discard_bio(struct r10bio *r10bio)
{
        struct r10conf *conf = r10bio->mddev->private;
        struct r10bio *first_r10bio;

        while (atomic_dec_and_test(&r10bio->remaining)) {

                allow_barrier(conf);

                if (!test_bit(R10BIO_Discard, &r10bio->state)) {
                        first_r10bio = (struct r10bio *)r10bio->master_bio;
                        free_r10bio(r10bio);
                        r10bio = first_r10bio;
                } else {
                        md_write_end(r10bio->mddev);
                        bio_endio(r10bio->master_bio);
                        free_r10bio(r10bio);
                        break;
                }
        }
}

static void raid10_end_discard_request(struct bio *bio)
{
        struct r10bio *r10_bio = bio->bi_private;
        struct r10conf *conf = r10_bio->mddev->private;
        struct md_rdev *rdev = NULL;
        int dev;
        int slot, repl;

        /*
         * We don't care about the return value of a discard bio
         */
        if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
                set_bit(R10BIO_Uptodate, &r10_bio->state);

        dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
        rdev = repl ? conf->mirrors[dev].replacement :
                      conf->mirrors[dev].rdev;

        raid_end_discard_bio(r10_bio);
        rdev_dec_pending(rdev, conf->mddev);
}

/*
 * There are some limitations to handling discard bios:
 * 1st, the discard size must be bigger than stripe_size * 2.
 * 2nd, if the discard bio spans reshape progress, we use the old way to
 * handle discard bios
 */
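
/*
 * Editor's worked example for the first limit above (assumed geometry,
 * illustration only): near_copies=2, raid_disks=4 and 512KiB chunks
 * (1024 sectors, chunk_shift=10) give stripe_data_disks = 4/2 = 2 and
 * stripe_size = 2 << 10 = 2048 sectors, so only discard bios of at
 * least 4096 sectors (2MiB) take this fast path.
 */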
static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
{
        struct r10conf *conf = mddev->private;
        struct geom *geo = &conf->geo;
        int far_copies = geo->far_copies;
        bool first_copy = true;
        struct r10bio *r10_bio, *first_r10bio;
        struct bio *split;
        int disk;
        sector_t chunk;
        unsigned int stripe_size;
        unsigned int stripe_data_disks;
        sector_t split_size;
        sector_t bio_start, bio_end;
        sector_t first_stripe_index, last_stripe_index;
        sector_t start_disk_offset;
        unsigned int start_disk_index;
        sector_t end_disk_offset;
        unsigned int end_disk_index;
        unsigned int remainder;

        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                return -EAGAIN;

        if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
                bio_wouldblock_error(bio);
                return 0;
        }

        /*
         * Check reshape again to avoid a reshape happening after checking
         * MD_RECOVERY_RESHAPE and before wait_barrier
         */
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                goto out;

        if (geo->near_copies)
                stripe_data_disks = geo->raid_disks / geo->near_copies +
                                    geo->raid_disks % geo->near_copies;
        else
                stripe_data_disks = geo->raid_disks;

        stripe_size = stripe_data_disks << geo->chunk_shift;

        bio_start = bio->bi_iter.bi_sector;
        bio_end = bio_end_sector(bio);

        /*
         * Maybe one discard bio is smaller than stripe size, or it crosses
         * a stripe boundary while the discard region is larger than one
         * stripe size. For the far offset layout, if the discard region is
         * not aligned with stripe size, there is a hole when we submit the
         * discard bio to the member disk. For simplicity, we only handle
         * discard bios whose discard region is bigger than stripe_size * 2
         */
        if (bio_sectors(bio) < stripe_size*2)
                goto out;

        /*
         * Keep the bio aligned with stripe size.
         */
        div_u64_rem(bio_start, stripe_size, &remainder);
        if (remainder) {
                split_size = stripe_size - remainder;
                split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
                if (IS_ERR(split)) {
                        bio->bi_status = errno_to_blk_status(PTR_ERR(split));
                        bio_endio(bio);
                        return 0;
                }

                bio_chain(split, bio);
                trace_block_split(split, bio->bi_iter.bi_sector);
                allow_barrier(conf);
                /* Resend the first split part */
                submit_bio_noacct(split);
                wait_barrier(conf, false);
        }
        div_u64_rem(bio_end, stripe_size, &remainder);
        if (remainder) {
                split_size = bio_sectors(bio) - remainder;
                split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
                if (IS_ERR(split)) {
                        bio->bi_status = errno_to_blk_status(PTR_ERR(split));
                        bio_endio(bio);
                        return 0;
                }

                bio_chain(split, bio);
                trace_block_split(split, bio->bi_iter.bi_sector);
                allow_barrier(conf);
                /* Resend the second split part */
                submit_bio_noacct(bio);
                bio = split;
                wait_barrier(conf, false);
        }
1698
1699 bio_start = bio->bi_iter.bi_sector;
1700 bio_end = bio_end_sector(bio);
1701
1702 /*
1703 * Raid10 uses chunk as the unit to store data. It's similar like raid0.
1704 * One stripe contains the chunks from all member disk (one chunk from
1705 * one disk at the same HBA address). For layout detail, see 'man md 4'
1706 */
1707 chunk = bio_start >> geo->chunk_shift;
1708 chunk *= geo->near_copies;
1709 first_stripe_index = chunk;
1710 start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
1711 if (geo->far_offset)
1712 first_stripe_index *= geo->far_copies;
1713 start_disk_offset = (bio_start & geo->chunk_mask) +
1714 (first_stripe_index << geo->chunk_shift);
1715
1716 chunk = bio_end >> geo->chunk_shift;
1717 chunk *= geo->near_copies;
1718 last_stripe_index = chunk;
1719 end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
1720 if (geo->far_offset)
1721 last_stripe_index *= geo->far_copies;
1722 end_disk_offset = (bio_end & geo->chunk_mask) +
1723 (last_stripe_index << geo->chunk_shift);
1724
1725 retry_discard:
1726 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1727 r10_bio->mddev = mddev;
1728 r10_bio->state = 0;
1729 r10_bio->sectors = 0;
1730 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1731 wait_blocked_dev(mddev, r10_bio);
1732
1733 /*
1734 * For far layout it needs more than one r10bio to cover all regions.
1735 * Inspired by raid10_sync_request, we can use the first r10bio->master_bio
1736 * to record the discard bio. Other r10bio->master_bio record the first
1737 * r10bio. The first r10bio only release after all other r10bios finish.
1738 * The discard bio returns only first r10bio finishes
1739 */
1740 if (first_copy) {
1741 md_account_bio(mddev, &bio);
1742 r10_bio->master_bio = bio;
1743 set_bit(R10BIO_Discard, &r10_bio->state);
1744 first_copy = false;
1745 first_r10bio = r10_bio;
1746 } else
1747 r10_bio->master_bio = (struct bio *)first_r10bio;
1748
1749 /*
1750 * first select target devices under rcu_lock and
1751 * inc refcount on their rdev. Record them by setting
1752 * bios[x] to bio
1753 */
1754 for (disk = 0; disk < geo->raid_disks; disk++) {
1755 struct md_rdev *rdev, *rrdev;
1756
1757 rdev = conf->mirrors[disk].rdev;
1758 rrdev = conf->mirrors[disk].replacement;
1759 r10_bio->devs[disk].bio = NULL;
1760 r10_bio->devs[disk].repl_bio = NULL;
1761
1762 if (rdev && (test_bit(Faulty, &rdev->flags)))
1763 rdev = NULL;
1764 if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1765 rrdev = NULL;
1766 if (!rdev && !rrdev)
1767 continue;
1768
1769 if (rdev) {
1770 r10_bio->devs[disk].bio = bio;
1771 atomic_inc(&rdev->nr_pending);
1772 }
1773 if (rrdev) {
1774 r10_bio->devs[disk].repl_bio = bio;
1775 atomic_inc(&rrdev->nr_pending);
1776 }
1777 }
1778
1779 atomic_set(&r10_bio->remaining, 1);
1780 for (disk = 0; disk < geo->raid_disks; disk++) {
1781 sector_t dev_start, dev_end;
1782 struct bio *mbio, *rbio = NULL;
1783
1784 /*
1785 * Now calculate the start and end address for each disk.
1786 * The space between dev_start and dev_end is the discard region.
1787 *
1788 * For dev_start, three cases need to be considered:
1789 * First, the disk is before start_disk_index; think of the disk as
1790 * being in the next stripe, so dev_start is the start address of
1791 * the next stripe.
1792 * Second, the disk is after start_disk_index; the disk is in the
1793 * same stripe as the first disk.
1794 * Third, the disk is the first disk itself; start_disk_offset can be used directly.
1795 */
1796 if (disk < start_disk_index)
1797 dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
1798 else if (disk > start_disk_index)
1799 dev_start = first_stripe_index * mddev->chunk_sectors;
1800 else
1801 dev_start = start_disk_offset;
1802
1803 if (disk < end_disk_index)
1804 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
1805 else if (disk > end_disk_index)
1806 dev_end = last_stripe_index * mddev->chunk_sectors;
1807 else
1808 dev_end = end_disk_offset;
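/*
 * Continuing the illustrative geometry above (raid_disks = 4,
 * 128-sector chunks, start_disk_index = 2, first_stripe_index = 3,
 * start_disk_offset = 488): disk 0 lies before the start disk, so
 * dev_start = (3 + 1) * 128 = 512; disk 3 lies after it, so
 * dev_start = 3 * 128 = 384; disk 2 itself starts at 488.
 */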
1809
1810 /*
1811 * Only discard bios whose size is >= the stripe size are handled, so
1812 * dev_end > dev_start always holds.
1813 * There is no need to take a lock to get the rdev here; we already
1814 * incremented rdev->nr_pending in the first loop.
1815 */
1816 if (r10_bio->devs[disk].bio) {
1817 struct md_rdev *rdev = conf->mirrors[disk].rdev;
1818 mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1819 &mddev->bio_set);
1820 mbio->bi_end_io = raid10_end_discard_request;
1821 mbio->bi_private = r10_bio;
1822 r10_bio->devs[disk].bio = mbio;
1823 r10_bio->devs[disk].devnum = disk;
1824 atomic_inc(&r10_bio->remaining);
1825 md_submit_discard_bio(mddev, rdev, mbio,
1826 dev_start + choose_data_offset(r10_bio, rdev),
1827 dev_end - dev_start);
1828 bio_endio(mbio);
1829 }
1830 if (r10_bio->devs[disk].repl_bio) {
1831 struct md_rdev *rrdev = conf->mirrors[disk].replacement;
1832 rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1833 &mddev->bio_set);
1834 rbio->bi_end_io = raid10_end_discard_request;
1835 rbio->bi_private = r10_bio;
1836 r10_bio->devs[disk].repl_bio = rbio;
1837 r10_bio->devs[disk].devnum = disk;
1838 atomic_inc(&r10_bio->remaining);
1839 md_submit_discard_bio(mddev, rrdev, rbio,
1840 dev_start + choose_data_offset(r10_bio, rrdev),
1841 dev_end - dev_start);
1842 bio_endio(rbio);
1843 }
1844 }
1845
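/*
 * For a 'far' layout without far_offset (illustrative example:
 * far_copies = 2), each member disk holds far_copies sections of
 * geo->stride sectors. After the near copies in section 0 have been
 * discarded, the block below advances every index and offset by one
 * stride and retries, so the same logical range is discarded in
 * section 1 as well.
 */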
1846 if (!geo->far_offset && --far_copies) {
1847 first_stripe_index += geo->stride >> geo->chunk_shift;
1848 start_disk_offset += geo->stride;
1849 last_stripe_index += geo->stride >> geo->chunk_shift;
1850 end_disk_offset += geo->stride;
1851 atomic_inc(&first_r10bio->remaining);
1852 raid_end_discard_bio(r10_bio);
1853 wait_barrier(conf, false);
1854 goto retry_discard;
1855 }
1856
1857 raid_end_discard_bio(r10_bio);
1858
1859 return 0;
1860 out:
1861 allow_barrier(conf);
1862 return -EAGAIN;
1863 }
1864
1865 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1866 {
1867 struct r10conf *conf = mddev->private;
1868 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1869 int chunk_sects = chunk_mask + 1;
1870 int sectors = bio_sectors(bio);
1871
1872 if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1873 && md_flush_request(mddev, bio))
1874 return true;
1875
1876 md_write_start(mddev, bio);
1877
1878 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1879 if (!raid10_handle_discard(mddev, bio))
1880 return true;
1881
1882 /*
1883 * If this request crosses a chunk boundary, we need to split
1884 * it.
1885 */
1886 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1887 sectors > chunk_sects
1888 && (conf->geo.near_copies < conf->geo.raid_disks
1889 || conf->prev.near_copies <
1890 conf->prev.raid_disks)))
1891 sectors = chunk_sects -
1892 (bio->bi_iter.bi_sector &
1893 (chunk_sects - 1));
1894 __make_request(mddev, bio, sectors);
1895
1896 /* In case raid10d snuck in to freeze_array */
1897 wake_up_barrier(conf);
1898 return true;
1899 }
1900
1901 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1902 {
1903 struct r10conf *conf = mddev->private;
1904 int i;
1905
1906 lockdep_assert_held(&mddev->lock);
1907
1908 if (conf->geo.near_copies < conf->geo.raid_disks)
1909 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1910 if (conf->geo.near_copies > 1)
1911 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1912 if (conf->geo.far_copies > 1) {
1913 if (conf->geo.far_offset)
1914 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1915 else
1916 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1917 if (conf->geo.far_set_size != conf->geo.raid_disks)
1918 seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1919 }
1920 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1921 conf->geo.raid_disks - mddev->degraded);
1922 for (i = 0; i < conf->geo.raid_disks; i++) {
1923 struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
1924
1925 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1926 }
1927 seq_printf(seq, "]");
1928 }
1929
1930 /* check if there are enough drives for
1931 * every block to appear on at least one.
1932 * Don't consider the device numbered 'ignore'
1933 * as we might be about to remove it.
1934 */
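/* For example (illustrative geometry): with raid_disks = 4,
 * near_copies = 2 and far_copies = 1, the loop below walks the copy
 * sets {0,1} and {2,3}; every block lives on one such pair, so the
 * array survives as long as each pair keeps one In_sync member, but
 * losing both members of a pair leaves some blocks with no copy.
 */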
1935 static int _enough(struct r10conf *conf, int previous, int ignore)
1936 {
1937 int first = 0;
1938 int has_enough = 0;
1939 int disks, ncopies;
1940 if (previous) {
1941 disks = conf->prev.raid_disks;
1942 ncopies = conf->prev.near_copies;
1943 } else {
1944 disks = conf->geo.raid_disks;
1945 ncopies = conf->geo.near_copies;
1946 }
1947
1948 do {
1949 int n = conf->copies;
1950 int cnt = 0;
1951 int this = first;
1952 while (n--) {
1953 struct md_rdev *rdev;
1954 if (this != ignore &&
1955 (rdev = conf->mirrors[this].rdev) &&
1956 test_bit(In_sync, &rdev->flags))
1957 cnt++;
1958 this = (this+1) % disks;
1959 }
1960 if (cnt == 0)
1961 goto out;
1962 first = (first + ncopies) % disks;
1963 } while (first != 0);
1964 has_enough = 1;
1965 out:
1966 return has_enough;
1967 }
1968
1969 static int enough(struct r10conf *conf, int ignore)
1970 {
1971 /* when calling 'enough', both 'prev' and 'geo' must
1972 * be stable.
1973 * This is ensured if ->reconfig_mutex or ->device_lock
1974 * is held.
1975 */
1976 return _enough(conf, 0, ignore) &&
1977 _enough(conf, 1, ignore);
1978 }
1979
1980 /**
1981 * raid10_error() - RAID10 error handler.
1982 * @mddev: affected md device.
1983 * @rdev: member device to fail.
1984 *
1985 * The routine acknowledges &rdev failure and determines new @mddev state.
1986 * If it failed, then:
1987 * - &MD_BROKEN flag is set in &mddev->flags.
1988 * Otherwise, it must be degraded:
1989 * - recovery is interrupted.
1990 * - &mddev->degraded is bumped.
1991 *
1992 * @rdev is marked as &Faulty, except when the array is failed and
1993 * MD_FAILLAST_DEV is not set.
1994 */
1995 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1996 {
1997 struct r10conf *conf = mddev->private;
1998 unsigned long flags;
1999
2000 spin_lock_irqsave(&conf->device_lock, flags);
2001
2002 if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
2003 set_bit(MD_BROKEN, &mddev->flags);
2004
2005 if (!test_bit(MD_FAILLAST_DEV, &mddev->flags)) {
2006 spin_unlock_irqrestore(&conf->device_lock, flags);
2007 return;
2008 }
2009 }
2010 if (test_and_clear_bit(In_sync, &rdev->flags))
2011 mddev->degraded++;
2012
2013 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2014 set_bit(Blocked, &rdev->flags);
2015 set_bit(Faulty, &rdev->flags);
2016 set_mask_bits(&mddev->sb_flags, 0,
2017 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2018 spin_unlock_irqrestore(&conf->device_lock, flags);
2019 pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
2020 "md/raid10:%s: Operation continuing on %d devices.\n",
2021 mdname(mddev), rdev->bdev,
2022 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
2023 }
2024
2025 static void print_conf(struct r10conf *conf)
2026 {
2027 int i;
2028 struct md_rdev *rdev;
2029
2030 pr_debug("RAID10 conf printout:\n");
2031 if (!conf) {
2032 pr_debug("(!conf)\n");
2033 return;
2034 }
2035 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2036 conf->geo.raid_disks);
2037
2038 lockdep_assert_held(&conf->mddev->reconfig_mutex);
2039 for (i = 0; i < conf->geo.raid_disks; i++) {
2040 rdev = conf->mirrors[i].rdev;
2041 if (rdev)
2042 pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
2043 i, !test_bit(In_sync, &rdev->flags),
2044 !test_bit(Faulty, &rdev->flags),
2045 rdev->bdev);
2046 }
2047 }
2048
2049 static void close_sync(struct r10conf *conf)
2050 {
2051 wait_barrier(conf, false);
2052 allow_barrier(conf);
2053
2054 mempool_exit(&conf->r10buf_pool);
2055 }
2056
2057 static int raid10_spare_active(struct mddev *mddev)
2058 {
2059 int i;
2060 struct r10conf *conf = mddev->private;
2061 struct raid10_info *tmp;
2062 int count = 0;
2063 unsigned long flags;
2064
2065 /*
2066 * Find all non-in_sync disks within the RAID10 configuration
2067 * and mark them in_sync
2068 */
2069 for (i = 0; i < conf->geo.raid_disks; i++) {
2070 tmp = conf->mirrors + i;
2071 if (tmp->replacement
2072 && tmp->replacement->recovery_offset == MaxSector
2073 && !test_bit(Faulty, &tmp->replacement->flags)
2074 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
2075 /* Replacement has just become active */
2076 if (!tmp->rdev
2077 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
2078 count++;
2079 if (tmp->rdev) {
2080 /* Replaced device not technically faulty,
2081 * but we need to be sure it gets removed
2082 * and never re-added.
2083 */
2084 set_bit(Faulty, &tmp->rdev->flags);
2085 sysfs_notify_dirent_safe(
2086 tmp->rdev->sysfs_state);
2087 }
2088 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
2089 } else if (tmp->rdev
2090 && tmp->rdev->recovery_offset == MaxSector
2091 && !test_bit(Faulty, &tmp->rdev->flags)
2092 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
2093 count++;
2094 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
2095 }
2096 }
2097 spin_lock_irqsave(&conf->device_lock, flags);
2098 mddev->degraded -= count;
2099 spin_unlock_irqrestore(&conf->device_lock, flags);
2100
2101 print_conf(conf);
2102 return count;
2103 }
2104
2105 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2106 {
2107 struct r10conf *conf = mddev->private;
2108 int err = -EEXIST;
2109 int mirror, repl_slot = -1;
2110 int first = 0;
2111 int last = conf->geo.raid_disks - 1;
2112 struct raid10_info *p;
2113
2114 if (mddev->resync_offset < MaxSector)
2115 /* only hot-add to in-sync arrays, as recovery is
2116 * very different from resync
2117 */
2118 return -EBUSY;
2119 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2120 return -EINVAL;
2121
2122 if (rdev->raid_disk >= 0)
2123 first = last = rdev->raid_disk;
2124
2125 if (rdev->saved_raid_disk >= first &&
2126 rdev->saved_raid_disk < conf->geo.raid_disks &&
2127 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2128 mirror = rdev->saved_raid_disk;
2129 else
2130 mirror = first;
2131 for ( ; mirror <= last ; mirror++) {
2132 p = &conf->mirrors[mirror];
2133 if (p->rdev) {
2134 if (test_bit(WantReplacement, &p->rdev->flags) &&
2135 p->replacement == NULL && repl_slot < 0)
2136 repl_slot = mirror;
2137 continue;
2138 }
2139
2140 err = mddev_stack_new_rdev(mddev, rdev);
2141 if (err)
2142 return err;
2143 p->head_position = 0;
2144 rdev->raid_disk = mirror;
2145 err = 0;
2146 if (rdev->saved_raid_disk != mirror)
2147 conf->fullsync = 1;
2148 WRITE_ONCE(p->rdev, rdev);
2149 break;
2150 }
2151
2152 if (err && repl_slot >= 0) {
2153 p = &conf->mirrors[repl_slot];
2154 clear_bit(In_sync, &rdev->flags);
2155 set_bit(Replacement, &rdev->flags);
2156 rdev->raid_disk = repl_slot;
2157 err = mddev_stack_new_rdev(mddev, rdev);
2158 if (err)
2159 return err;
2160 conf->fullsync = 1;
2161 WRITE_ONCE(p->replacement, rdev);
2162 }
2163
2164 print_conf(conf);
2165 return err;
2166 }
2167
2168 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2169 {
2170 struct r10conf *conf = mddev->private;
2171 int err = 0;
2172 int number = rdev->raid_disk;
2173 struct md_rdev **rdevp;
2174 struct raid10_info *p;
2175
2176 print_conf(conf);
2177 if (unlikely(number >= mddev->raid_disks))
2178 return 0;
2179 p = conf->mirrors + number;
2180 if (rdev == p->rdev)
2181 rdevp = &p->rdev;
2182 else if (rdev == p->replacement)
2183 rdevp = &p->replacement;
2184 else
2185 return 0;
2186
2187 if (test_bit(In_sync, &rdev->flags) ||
2188 atomic_read(&rdev->nr_pending)) {
2189 err = -EBUSY;
2190 goto abort;
2191 }
2192 /* Only remove non-faulty devices if recovery
2193 * is not possible.
2194 */
2195 if (!test_bit(Faulty, &rdev->flags) &&
2196 (!p->replacement || p->replacement == rdev) &&
2197 number < conf->geo.raid_disks &&
2198 enough(conf, -1)) {
2199 err = -EBUSY;
2200 goto abort;
2201 }
2202 WRITE_ONCE(*rdevp, NULL);
2203 if (p->replacement) {
2204 /* We must have just cleared 'rdev' */
2205 WRITE_ONCE(p->rdev, p->replacement);
2206 clear_bit(Replacement, &p->replacement->flags);
2207 WRITE_ONCE(p->replacement, NULL);
2208 }
2209
2210 clear_bit(WantReplacement, &rdev->flags);
2211 err = md_integrity_register(mddev);
2212
2213 abort:
2214
2215 print_conf(conf);
2216 return err;
2217 }
2218
2219 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2220 {
2221 struct r10conf *conf = r10_bio->mddev->private;
2222
2223 if (!bio->bi_status)
2224 set_bit(R10BIO_Uptodate, &r10_bio->state);
2225 else
2226 /* The write handler will notice the lack of
2227 * R10BIO_Uptodate and record any errors etc
2228 */
2229 atomic_add(r10_bio->sectors,
2230 &conf->mirrors[d].rdev->corrected_errors);
2231
2232 /* for reconstruct, we always reschedule after a read.
2233 * for resync, only after all reads
2234 */
2235 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2236 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2237 atomic_dec_and_test(&r10_bio->remaining)) {
2238 /* we have read all the blocks,
2239 * do the comparison in process context in raid10d
2240 */
2241 reschedule_retry(r10_bio);
2242 }
2243 }
2244
2245 static void end_sync_read(struct bio *bio)
2246 {
2247 struct r10bio *r10_bio = get_resync_r10bio(bio);
2248 struct r10conf *conf = r10_bio->mddev->private;
2249 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2250
2251 __end_sync_read(r10_bio, bio, d);
2252 }
2253
2254 static void end_reshape_read(struct bio *bio)
2255 {
2256 /* reshape read bio isn't allocated from r10buf_pool */
2257 struct r10bio *r10_bio = bio->bi_private;
2258
2259 __end_sync_read(r10_bio, bio, r10_bio->read_slot);
2260 }
2261
2262 static void end_sync_request(struct r10bio *r10_bio)
2263 {
2264 struct mddev *mddev = r10_bio->mddev;
2265
2266 while (atomic_dec_and_test(&r10_bio->remaining)) {
2267 if (r10_bio->master_bio == NULL) {
2268 /* the primary of several recovery bios */
2269 sector_t s = r10_bio->sectors;
2270 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2271 test_bit(R10BIO_WriteError, &r10_bio->state))
2272 reschedule_retry(r10_bio);
2273 else
2274 put_buf(r10_bio);
2275 md_done_sync(mddev, s);
2276 break;
2277 } else {
2278 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2279 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2280 test_bit(R10BIO_WriteError, &r10_bio->state))
2281 reschedule_retry(r10_bio);
2282 else
2283 put_buf(r10_bio);
2284 r10_bio = r10_bio2;
2285 }
2286 }
2287 }
2288
2289 static void end_sync_write(struct bio *bio)
2290 {
2291 struct r10bio *r10_bio = get_resync_r10bio(bio);
2292 struct mddev *mddev = r10_bio->mddev;
2293 struct r10conf *conf = mddev->private;
2294 int d;
2295 int slot;
2296 int repl;
2297 struct md_rdev *rdev = NULL;
2298
2299 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2300 if (repl)
2301 rdev = conf->mirrors[d].replacement;
2302 else
2303 rdev = conf->mirrors[d].rdev;
2304
2305 if (bio->bi_status) {
2306 if (repl)
2307 md_error(mddev, rdev);
2308 else {
2309 set_bit(WriteErrorSeen, &rdev->flags);
2310 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2311 set_bit(MD_RECOVERY_NEEDED,
2312 &rdev->mddev->recovery);
2313 set_bit(R10BIO_WriteError, &r10_bio->state);
2314 }
2315 } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
2316 r10_bio->sectors)) {
2317 set_bit(R10BIO_MadeGood, &r10_bio->state);
2318 }
2319
2320 rdev_dec_pending(rdev, mddev);
2321
2322 end_sync_request(r10_bio);
2323 }
2324
2325 /*
2326 * Note: sync and recovery are handled very differently for raid10.
2327 * This code is for resync.
2328 * For resync, we read through virtual addresses and read all blocks.
2329 * If there is any error, we schedule a write. The lowest numbered
2330 * drive is authoritative.
2331 * However, requests come in for physical addresses, so we need to map.
2332 * For every physical address there are raid_disks/copies virtual addresses,
2333 * which is always at least one, but is not necessarily an integer.
2334 * This means that a physical address can span multiple chunks, so we may
2335 * have to submit multiple io requests for a single sync request.
2336 */
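/* For example: with raid_disks = 5 and copies = 2, each physical
 * address maps to 5/2 = 2.5 virtual addresses, so a chunk-sized sync
 * range can straddle a virtual chunk boundary and must be split into
 * two io requests.
 */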
2337 /*
2338 * We check if all blocks are in-sync and only write to blocks that
2339 * aren't in sync
2340 */
2341 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2342 {
2343 struct r10conf *conf = mddev->private;
2344 int i, first;
2345 struct bio *tbio, *fbio;
2346 int vcnt;
2347 struct page **tpages, **fpages;
2348
2349 atomic_set(&r10_bio->remaining, 1);
2350
2351 /* find the first device with a block */
2352 for (i=0; i<conf->copies; i++)
2353 if (!r10_bio->devs[i].bio->bi_status)
2354 break;
2355
2356 if (i == conf->copies)
2357 goto done;
2358
2359 first = i;
2360 fbio = r10_bio->devs[i].bio;
2361 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2362 fbio->bi_iter.bi_idx = 0;
2363 fpages = get_resync_pages(fbio)->pages;
2364
2365 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
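/* e.g. with 4K pages, 100 sectors gives vcnt = (100 + 8 - 1) >> 3 = 13 pages (illustrative) */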
2366 /* now find blocks with errors */
2367 for (i=0 ; i < conf->copies ; i++) {
2368 int j, d;
2369 struct md_rdev *rdev;
2370 struct resync_pages *rp;
2371
2372 tbio = r10_bio->devs[i].bio;
2373
2374 if (tbio->bi_end_io != end_sync_read)
2375 continue;
2376 if (i == first)
2377 continue;
2378
2379 tpages = get_resync_pages(tbio)->pages;
2380 d = r10_bio->devs[i].devnum;
2381 rdev = conf->mirrors[d].rdev;
2382 if (!r10_bio->devs[i].bio->bi_status) {
2383 /* We know that the bi_io_vec layout is the same for
2384 * both 'first' and 'i', so we just compare them.
2385 * All vec entries are PAGE_SIZE;
2386 */
2387 int sectors = r10_bio->sectors;
2388 for (j = 0; j < vcnt; j++) {
2389 int len = PAGE_SIZE;
2390 if (sectors < (len / 512))
2391 len = sectors * 512;
2392 if (memcmp(page_address(fpages[j]),
2393 page_address(tpages[j]),
2394 len))
2395 break;
2396 sectors -= len/512;
2397 }
2398 if (j == vcnt)
2399 continue;
2400 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2401 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2402 /* Don't fix anything. */
2403 continue;
2404 } else if (test_bit(FailFast, &rdev->flags)) {
2405 /* Just give up on this device */
2406 md_error(rdev->mddev, rdev);
2407 continue;
2408 }
2409 /* Ok, we need to write this bio, either to correct an
2410 * inconsistency or to correct an unreadable block.
2411 * First we need to fixup bv_offset, bv_len and
2412 * bi_vecs, as the read request might have corrupted these
2413 */
2414 rp = get_resync_pages(tbio);
2415 bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2416
2417 md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2418
2419 rp->raid_bio = r10_bio;
2420 tbio->bi_private = rp;
2421 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2422 tbio->bi_end_io = end_sync_write;
2423
2424 bio_copy_data(tbio, fbio);
2425
2426 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2427 atomic_inc(&r10_bio->remaining);
2428
2429 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2430 tbio->bi_opf |= MD_FAILFAST;
2431 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2432 submit_bio_noacct(tbio);
2433 }
2434
2435 /* Now write out to any replacement devices
2436 * that are active
2437 */
2438 for (i = 0; i < conf->copies; i++) {
2439 tbio = r10_bio->devs[i].repl_bio;
2440 if (!tbio || !tbio->bi_end_io)
2441 continue;
2442 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2443 && r10_bio->devs[i].bio != fbio)
2444 bio_copy_data(tbio, fbio);
2445 atomic_inc(&r10_bio->remaining);
2446 submit_bio_noacct(tbio);
2447 }
2448
2449 done:
2450 if (atomic_dec_and_test(&r10_bio->remaining)) {
2451 md_done_sync(mddev, r10_bio->sectors);
2452 put_buf(r10_bio);
2453 }
2454 }
2455
2456 /*
2457 * Now for the recovery code.
2458 * Recovery happens across physical sectors.
2459 * We recover all non-in_sync drives by finding the virtual address of
2460 * each, and then choose a working drive that also has that virt address.
2461 * There is a separate r10_bio for each non-in_sync drive.
2462 * Only the first two slots are in use. The first for reading,
2463 * the second for writing.
2464 *
2465 */
2466 static void fix_recovery_read_error(struct r10bio *r10_bio)
2467 {
2468 /* We got a read error during recovery.
2469 * We repeat the read in smaller page-sized sections.
2470 * If a read succeeds, write it to the new device or record
2471 * a bad block if we cannot.
2472 * If a read fails, record a bad block on both old and
2473 * new devices.
2474 */
2475 struct mddev *mddev = r10_bio->mddev;
2476 struct r10conf *conf = mddev->private;
2477 struct bio *bio = r10_bio->devs[0].bio;
2478 sector_t sect = 0;
2479 int sectors = r10_bio->sectors;
2480 int idx = 0;
2481 int dr = r10_bio->devs[0].devnum;
2482 int dw = r10_bio->devs[1].devnum;
2483 struct page **pages = get_resync_pages(bio)->pages;
2484
2485 while (sectors) {
2486 int s = sectors;
2487 struct md_rdev *rdev;
2488 sector_t addr;
2489 int ok;
2490
2491 if (s > (PAGE_SIZE>>9))
2492 s = PAGE_SIZE >> 9;
2493
2494 rdev = conf->mirrors[dr].rdev;
2495 addr = r10_bio->devs[0].addr + sect;
2496 ok = sync_page_io(rdev,
2497 addr,
2498 s << 9,
2499 pages[idx],
2500 REQ_OP_READ, false);
2501 if (ok) {
2502 rdev = conf->mirrors[dw].rdev;
2503 addr = r10_bio->devs[1].addr + sect;
2504 ok = sync_page_io(rdev,
2505 addr,
2506 s << 9,
2507 pages[idx],
2508 REQ_OP_WRITE, false);
2509 if (!ok) {
2510 set_bit(WriteErrorSeen, &rdev->flags);
2511 if (!test_and_set_bit(WantReplacement,
2512 &rdev->flags))
2513 set_bit(MD_RECOVERY_NEEDED,
2514 &rdev->mddev->recovery);
2515 }
2516 }
2517 if (!ok) {
2518 /* We don't worry if we cannot set a bad block -
2519 * it really is bad so there is no loss in not
2520 * recording it yet
2521 */
2522 rdev_set_badblocks(rdev, addr, s, 0);
2523
2524 if (rdev != conf->mirrors[dw].rdev) {
2525 /* need bad block on destination too */
2526 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2527 addr = r10_bio->devs[1].addr + sect;
2528 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2529 if (!ok) {
2530 /* just abort the recovery */
2531 pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2532 mdname(mddev));
2533
2534 set_bit(MD_RECOVERY_INTR,
2535 &mddev->recovery);
2536 break;
2537 }
2538 }
2539 }
2540
2541 sectors -= s;
2542 sect += s;
2543 idx++;
2544 }
2545 }
2546
2547 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2548 {
2549 struct r10conf *conf = mddev->private;
2550 int d;
2551 struct bio *wbio = r10_bio->devs[1].bio;
2552 struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2553
2554 /* Need to test wbio2->bi_end_io before we call
2555 * submit_bio_noacct as if the former is NULL,
2556 * the latter is free to free wbio2.
2557 */
2558 if (wbio2 && !wbio2->bi_end_io)
2559 wbio2 = NULL;
2560
2561 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2562 fix_recovery_read_error(r10_bio);
2563 if (wbio->bi_end_io)
2564 end_sync_request(r10_bio);
2565 if (wbio2)
2566 end_sync_request(r10_bio);
2567 return;
2568 }
2569
2570 /*
2571 * share the pages with the first bio
2572 * and submit the write request
2573 */
2574 d = r10_bio->devs[1].devnum;
2575 if (wbio->bi_end_io) {
2576 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2577 submit_bio_noacct(wbio);
2578 }
2579 if (wbio2) {
2580 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2581 submit_bio_noacct(wbio2);
2582 }
2583 }
2584
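/*
 * A note on return values of r10_sync_page_io() below: -1 means the IO
 * was refused outright because the range overlaps a known bad block
 * (always for reads; for writes only once the device has seen a write
 * error), 1 means the synchronous IO succeeded, and 0 means it failed
 * and a bad block (plus WriteErrorSeen for writes) was recorded.
 */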
2585 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2586 int sectors, struct page *page, enum req_op op)
2587 {
2588 if (rdev_has_badblock(rdev, sector, sectors) &&
2589 (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
2590 return -1;
2591 if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
2592 /* success */
2593 return 1;
2594 if (op == REQ_OP_WRITE) {
2595 set_bit(WriteErrorSeen, &rdev->flags);
2596 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2597 set_bit(MD_RECOVERY_NEEDED,
2598 &rdev->mddev->recovery);
2599 }
2600 /* need to record an error - either for the block or the device */
2601 rdev_set_badblocks(rdev, sector, sectors, 0);
2602 return 0;
2603 }
2604
2605 /*
2606 * This is a kernel thread which:
2607 *
2608 * 1. Retries failed read operations on working mirrors.
2609 * 2. Updates the raid superblock when problems are encountered.
2610 * 3. Performs writes following reads for array synchronising.
2611 */
2612
2613 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2614 {
2615 int sect = 0; /* Offset from r10_bio->sector */
2616 int sectors = r10_bio->sectors, slot = r10_bio->read_slot;
2617 struct md_rdev *rdev;
2618 int d = r10_bio->devs[slot].devnum;
2619
2620 /* still own a reference to this rdev, so it cannot
2621 * have been cleared recently.
2622 */
2623 rdev = conf->mirrors[d].rdev;
2624
2625 if (test_bit(Faulty, &rdev->flags))
2626 /* drive has already been failed, just ignore any
2627 more fix_read_error() attempts */
2628 return;
2629
2630 if (exceed_read_errors(mddev, rdev)) {
2631 r10_bio->devs[slot].bio = IO_BLOCKED;
2632 return;
2633 }
2634
2635 while(sectors) {
2636 int s = sectors;
2637 int sl = slot;
2638 int success = 0;
2639 int start;
2640
2641 if (s > (PAGE_SIZE>>9))
2642 s = PAGE_SIZE >> 9;
2643
2644 do {
2645 d = r10_bio->devs[sl].devnum;
2646 rdev = conf->mirrors[d].rdev;
2647 if (rdev &&
2648 test_bit(In_sync, &rdev->flags) &&
2649 !test_bit(Faulty, &rdev->flags) &&
2650 rdev_has_badblock(rdev,
2651 r10_bio->devs[sl].addr + sect,
2652 s) == 0) {
2653 atomic_inc(&rdev->nr_pending);
2654 success = sync_page_io(rdev,
2655 r10_bio->devs[sl].addr +
2656 sect,
2657 s<<9,
2658 conf->tmppage,
2659 REQ_OP_READ, false);
2660 rdev_dec_pending(rdev, mddev);
2661 if (success)
2662 break;
2663 }
2664 sl++;
2665 if (sl == conf->copies)
2666 sl = 0;
2667 } while (sl != slot);
2668
2669 if (!success) {
2670 /* Cannot read from anywhere, just mark the block
2671 * as bad on the first device to discourage future
2672 * reads.
2673 */
2674 int dn = r10_bio->devs[slot].devnum;
2675 rdev = conf->mirrors[dn].rdev;
2676
2677 if (!rdev_set_badblocks(
2678 rdev,
2679 r10_bio->devs[slot].addr
2680 + sect,
2681 s, 0)) {
2682 r10_bio->devs[slot].bio
2683 = IO_BLOCKED;
2684 }
2685 break;
2686 }
2687
2688 start = sl;
2689 /* write it back and re-read */
2690 while (sl != slot) {
2691 if (sl==0)
2692 sl = conf->copies;
2693 sl--;
2694 d = r10_bio->devs[sl].devnum;
2695 rdev = conf->mirrors[d].rdev;
2696 if (!rdev ||
2697 test_bit(Faulty, &rdev->flags) ||
2698 !test_bit(In_sync, &rdev->flags))
2699 continue;
2700
2701 atomic_inc(&rdev->nr_pending);
2702 if (r10_sync_page_io(rdev,
2703 r10_bio->devs[sl].addr +
2704 sect,
2705 s, conf->tmppage, REQ_OP_WRITE)
2706 == 0) {
2707 /* Well, this device is dead */
2708 pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
2709 mdname(mddev), s,
2710 (unsigned long long)(
2711 sect +
2712 choose_data_offset(r10_bio,
2713 rdev)),
2714 rdev->bdev);
2715 pr_notice("md/raid10:%s: %pg: failing drive\n",
2716 mdname(mddev),
2717 rdev->bdev);
2718 }
2719 rdev_dec_pending(rdev, mddev);
2720 }
2721 sl = start;
2722 while (sl != slot) {
2723 if (sl==0)
2724 sl = conf->copies;
2725 sl--;
2726 d = r10_bio->devs[sl].devnum;
2727 rdev = conf->mirrors[d].rdev;
2728 if (!rdev ||
2729 test_bit(Faulty, &rdev->flags) ||
2730 !test_bit(In_sync, &rdev->flags))
2731 continue;
2732
2733 atomic_inc(&rdev->nr_pending);
2734 switch (r10_sync_page_io(rdev,
2735 r10_bio->devs[sl].addr +
2736 sect,
2737 s, conf->tmppage, REQ_OP_READ)) {
2738 case 0:
2739 /* Well, this device is dead */
2740 pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
2741 mdname(mddev), s,
2742 (unsigned long long)(
2743 sect +
2744 choose_data_offset(r10_bio, rdev)),
2745 rdev->bdev);
2746 pr_notice("md/raid10:%s: %pg: failing drive\n",
2747 mdname(mddev),
2748 rdev->bdev);
2749 break;
2750 case 1:
2751 pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %pg)\n",
2752 mdname(mddev), s,
2753 (unsigned long long)(
2754 sect +
2755 choose_data_offset(r10_bio, rdev)),
2756 rdev->bdev);
2757 atomic_add(s, &rdev->corrected_errors);
2758 }
2759
2760 rdev_dec_pending(rdev, mddev);
2761 }
2762
2763 sectors -= s;
2764 sect += s;
2765 }
2766 }
2767
2768 static void narrow_write_error(struct r10bio *r10_bio, int i)
2769 {
2770 struct bio *bio = r10_bio->master_bio;
2771 struct mddev *mddev = r10_bio->mddev;
2772 struct r10conf *conf = mddev->private;
2773 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2774 /* bio has the data to be written to slot 'i' where
2775 * we just recently had a write error.
2776 * We repeatedly clone the bio and trim down to one block,
2777 * then try the write. Where the write fails we record
2778 * a bad block.
2779 * It is conceivable that the bio doesn't exactly align with
2780 * blocks. We must handle this.
2781 *
2782 * We currently own a reference to the rdev.
2783 */
2784
2785 int block_sectors, lbs = bdev_logical_block_size(rdev->bdev) >> 9;
2786 sector_t sector;
2787 int sectors;
2788 int sect_to_write = r10_bio->sectors;
2789
2790 if (rdev->badblocks.shift < 0)
2791 block_sectors = lbs;
2792 else
2793 block_sectors = roundup(1 << rdev->badblocks.shift, lbs);
2794
2795 sector = r10_bio->sector;
2796 sectors = ((r10_bio->sector + block_sectors)
2797 & ~(sector_t)(block_sectors - 1))
2798 - sector;
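/*
 * Worked example with illustrative values: a 4K logical block
 * (lbs = 8 sectors) and badblocks.shift = 3 give block_sectors =
 * roundup(8, 8) = 8. For r10_bio->sector = 21, the first pass writes
 * ((21 + 8) & ~7) - 21 = 3 sectors to reach the next 8-sector
 * boundary; subsequent passes write full 8-sector blocks.
 */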
2799
2800 while (sect_to_write) {
2801 struct bio *wbio;
2802 sector_t wsector;
2803 if (sectors > sect_to_write)
2804 sectors = sect_to_write;
2805 /* Write at 'sector' for 'sectors' */
2806 wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2807 &mddev->bio_set);
2808 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2809 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2810 wbio->bi_iter.bi_sector = wsector +
2811 choose_data_offset(r10_bio, rdev);
2812 wbio->bi_opf = REQ_OP_WRITE;
2813
2814 if (submit_bio_wait(wbio) &&
2815 !rdev_set_badblocks(rdev, wsector, sectors, 0)) {
2816 /*
2817 * Badblocks set failed, disk marked Faulty.
2818 * No further operations needed.
2819 */
2820 bio_put(wbio);
2821 break;
2822 }
2823
2824 bio_put(wbio);
2825 sect_to_write -= sectors;
2826 sector += sectors;
2827 sectors = block_sectors;
2828 }
2829 }
2830
2831 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2832 {
2833 int slot = r10_bio->read_slot;
2834 struct bio *bio;
2835 struct r10conf *conf = mddev->private;
2836 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2837
2838 /* we got a read error. Maybe the drive is bad. Maybe just
2839 * the block and we can fix it.
2840 * We freeze all other IO, and try reading the block from
2841 * other devices. When we find one, we re-write
2842 * the data and check whether that fixes the read error.
2843 * This is all done synchronously while the array is
2844 * frozen.
2845 */
2846 bio = r10_bio->devs[slot].bio;
2847 bio_put(bio);
2848 r10_bio->devs[slot].bio = NULL;
2849
2850 if (mddev->ro)
2851 r10_bio->devs[slot].bio = IO_BLOCKED;
2852 else if (!test_bit(FailFast, &rdev->flags)) {
2853 freeze_array(conf, 1);
2854 fix_read_error(conf, mddev, r10_bio);
2855 unfreeze_array(conf);
2856 } else
2857 md_error(mddev, rdev);
2858
2859 rdev_dec_pending(rdev, mddev);
2860 r10_bio->state = 0;
2861 raid10_read_request(mddev, r10_bio->master_bio, r10_bio, false);
2862 /*
2863 * allow_barrier after re-submit to ensure no sync io
2864 * can be issued while regular io pending.
2865 */
2866 allow_barrier(conf);
2867 }
2868
2869 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2870 {
2871 /* Some sort of write request has finished and it
2872 * succeeded in writing where we thought there was a
2873 * bad block. So forget the bad block.
2874 * Or possibly it failed, and we need to record
2875 * a bad block.
2876 */
2877 int m;
2878 struct md_rdev *rdev;
2879
2880 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2881 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2882 for (m = 0; m < conf->copies; m++) {
2883 int dev = r10_bio->devs[m].devnum;
2884 rdev = conf->mirrors[dev].rdev;
2885 if (r10_bio->devs[m].bio == NULL ||
2886 r10_bio->devs[m].bio->bi_end_io == NULL)
2887 continue;
2888 if (!r10_bio->devs[m].bio->bi_status)
2889 rdev_clear_badblocks(
2890 rdev,
2891 r10_bio->devs[m].addr,
2892 r10_bio->sectors, 0);
2893 else
2894 rdev_set_badblocks(rdev,
2895 r10_bio->devs[m].addr,
2896 r10_bio->sectors, 0);
2897 rdev = conf->mirrors[dev].replacement;
2898 if (r10_bio->devs[m].repl_bio == NULL ||
2899 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2900 continue;
2901
2902 if (!r10_bio->devs[m].repl_bio->bi_status)
2903 rdev_clear_badblocks(
2904 rdev,
2905 r10_bio->devs[m].addr,
2906 r10_bio->sectors, 0);
2907 else
2908 rdev_set_badblocks(rdev,
2909 r10_bio->devs[m].addr,
2910 r10_bio->sectors, 0);
2911 }
2912 put_buf(r10_bio);
2913 } else {
2914 bool fail = false;
2915 for (m = 0; m < conf->copies; m++) {
2916 int dev = r10_bio->devs[m].devnum;
2917 struct bio *bio = r10_bio->devs[m].bio;
2918 rdev = conf->mirrors[dev].rdev;
2919 if (bio == IO_MADE_GOOD) {
2920 rdev_clear_badblocks(
2921 rdev,
2922 r10_bio->devs[m].addr,
2923 r10_bio->sectors, 0);
2924 rdev_dec_pending(rdev, conf->mddev);
2925 } else if (bio != NULL && bio->bi_status) {
2926 fail = true;
2927 narrow_write_error(r10_bio, m);
2928 rdev_dec_pending(rdev, conf->mddev);
2929 }
2930 bio = r10_bio->devs[m].repl_bio;
2931 rdev = conf->mirrors[dev].replacement;
2932 if (rdev && bio == IO_MADE_GOOD) {
2933 rdev_clear_badblocks(
2934 rdev,
2935 r10_bio->devs[m].addr,
2936 r10_bio->sectors, 0);
2937 rdev_dec_pending(rdev, conf->mddev);
2938 }
2939 }
2940 if (fail) {
2941 spin_lock_irq(&conf->device_lock);
2942 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2943 conf->nr_queued++;
2944 spin_unlock_irq(&conf->device_lock);
2945 /*
2946 * In case freeze_array() is waiting for condition
2947 * nr_pending == nr_queued + extra to be true.
2948 */
2949 wake_up(&conf->wait_barrier);
2950 md_wakeup_thread(conf->mddev->thread);
2951 } else {
2952 if (test_bit(R10BIO_WriteError,
2953 &r10_bio->state))
2954 close_write(r10_bio);
2955 raid_end_bio_io(r10_bio);
2956 }
2957 }
2958 }
2959
2960 static void raid10d(struct md_thread *thread)
2961 {
2962 struct mddev *mddev = thread->mddev;
2963 struct r10bio *r10_bio;
2964 unsigned long flags;
2965 struct r10conf *conf = mddev->private;
2966 struct list_head *head = &conf->retry_list;
2967 struct blk_plug plug;
2968
2969 md_check_recovery(mddev);
2970
2971 if (!list_empty_careful(&conf->bio_end_io_list) &&
2972 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2973 LIST_HEAD(tmp);
2974 spin_lock_irqsave(&conf->device_lock, flags);
2975 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2976 while (!list_empty(&conf->bio_end_io_list)) {
2977 list_move(conf->bio_end_io_list.prev, &tmp);
2978 conf->nr_queued--;
2979 }
2980 }
2981 spin_unlock_irqrestore(&conf->device_lock, flags);
2982 while (!list_empty(&tmp)) {
2983 r10_bio = list_first_entry(&tmp, struct r10bio,
2984 retry_list);
2985 list_del(&r10_bio->retry_list);
2986
2987 if (test_bit(R10BIO_WriteError,
2988 &r10_bio->state))
2989 close_write(r10_bio);
2990 raid_end_bio_io(r10_bio);
2991 }
2992 }
2993
2994 blk_start_plug(&plug);
2995 for (;;) {
2996
2997 flush_pending_writes(conf);
2998
2999 spin_lock_irqsave(&conf->device_lock, flags);
3000 if (list_empty(head)) {
3001 spin_unlock_irqrestore(&conf->device_lock, flags);
3002 break;
3003 }
3004 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
3005 list_del(head->prev);
3006 conf->nr_queued--;
3007 spin_unlock_irqrestore(&conf->device_lock, flags);
3008
3009 mddev = r10_bio->mddev;
3010 conf = mddev->private;
3011 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3012 test_bit(R10BIO_WriteError, &r10_bio->state))
3013 handle_write_completed(conf, r10_bio);
3014 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3015 reshape_request_write(mddev, r10_bio);
3016 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3017 sync_request_write(mddev, r10_bio);
3018 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3019 recovery_request_write(mddev, r10_bio);
3020 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3021 handle_read_error(mddev, r10_bio);
3022 else
3023 WARN_ON_ONCE(1);
3024
3025 cond_resched();
3026 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
3027 md_check_recovery(mddev);
3028 }
3029 blk_finish_plug(&plug);
3030 }
3031
3032 static int init_resync(struct r10conf *conf)
3033 {
3034 int ret, buffs, i;
3035
3036 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
3037 BUG_ON(mempool_initialized(&conf->r10buf_pool));
3038 conf->have_replacement = 0;
3039 for (i = 0; i < conf->geo.raid_disks; i++)
3040 if (conf->mirrors[i].replacement)
3041 conf->have_replacement = 1;
3042 ret = mempool_init(&conf->r10buf_pool, buffs,
3043 r10buf_pool_alloc, r10buf_pool_free, conf);
3044 if (ret)
3045 return ret;
3046 conf->next_resync = 0;
3047 return 0;
3048 }
3049
3050 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
3051 {
3052 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
3053 struct resync_pages *rp;
3054 struct bio *bio;
3055 int nalloc;
3056 int i;
3057
3058 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3059 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3060 nalloc = conf->copies; /* resync */
3061 else
3062 nalloc = 2; /* recovery */
3063
3064 for (i = 0; i < nalloc; i++) {
3065 bio = r10bio->devs[i].bio;
3066 rp = bio->bi_private;
3067 bio_reset(bio, NULL, 0);
3068 bio->bi_private = rp;
3069 bio = r10bio->devs[i].repl_bio;
3070 if (bio) {
3071 rp = bio->bi_private;
3072 bio_reset(bio, NULL, 0);
3073 bio->bi_private = rp;
3074 }
3075 }
3076 return r10bio;
3077 }
3078
3079 /*
3080 * Set cluster_sync_high since we need other nodes to add the
3081 * range [cluster_sync_low, cluster_sync_high] to suspend list.
3082 */
3083 static void raid10_set_cluster_sync_high(struct r10conf *conf)
3084 {
3085 sector_t window_size;
3086 int extra_chunk, chunks;
3087
3088 /*
3089 * First, here we define a "stripe" as a unit which crosses
3090 * all member devices once, so we get the chunk count by using
3091 * raid_disks / near_copies. Otherwise, if near_copies is
3092 * close to raid_disks, then the resync window could increase
3093 * linearly with the increase of raid_disks, which means
3094 * we would suspend a really large IO window while it is not
3095 * necessary. If raid_disks is not divisible by near_copies,
3096 * an extra chunk is needed to ensure the whole "stripe" is
3097 * covered.
3098 */
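/*
 * Worked example with illustrative values: raid_disks = 10,
 * near_copies = 3 and 512K chunks (1024 sectors) give
 * chunks = 10 / 3 = 3 plus one extra chunk for the remainder, so
 * window_size = (3 + 1) * 1024 = 4096 sectors (2M). That is smaller
 * than CLUSTER_RESYNC_WINDOW_SECTORS, so the 32M minimum below is
 * used instead.
 */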
3099
3100 chunks = conf->geo.raid_disks / conf->geo.near_copies;
3101 if (conf->geo.raid_disks % conf->geo.near_copies == 0)
3102 extra_chunk = 0;
3103 else
3104 extra_chunk = 1;
3105 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
3106
3107 /*
3108 * At least use a 32M window to align with raid1's resync window
3109 */
3110 window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
3111 CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
3112
3113 conf->cluster_sync_high = conf->cluster_sync_low + window_size;
3114 }
3115
3116 /*
3117 * perform a "sync" on one "block"
3118 *
3119 * We need to make sure that no normal I/O request - particularly write
3120 * requests - conflict with active sync requests.
3121 *
3122 * This is achieved by tracking pending requests and a 'barrier' concept
3123 * that can be installed to exclude normal IO requests.
3124 *
3125 * Resync and recovery are handled very differently.
3126 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
3127 *
3128 * For resync, we iterate over virtual addresses, read all copies,
3129 * and update if there are differences. If only one copy is live,
3130 * skip it.
3131 * For recovery, we iterate over physical addresses, read a good
3132 * value for each non-in_sync drive, and over-write.
3133 *
3134 * So, for recovery we may have several outstanding complex requests for a
3135 * given address, one for each out-of-sync device. We model this by allocating
3136 * a number of r10_bio structures, one for each out-of-sync device.
3137 * As we set up these structures, we collect all bios together into a list
3138 * which we then process collectively to add pages, and then process again
3139 * to pass to submit_bio_noacct.
3140 *
3141 * The r10_bio structures are linked using a borrowed master_bio pointer.
3142 * This link is counted in ->remaining. When the r10_bio that points to NULL
3143 * has its remaining count decremented to 0, the whole complex operation
3144 * is complete.
3145 *
3146 */
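/* For instance, recovering two out-of-sync devices at one virtual
 * address allocates two chained r10_bios: the second one's master_bio
 * points at the first, the first's master_bio is NULL, and each link
 * increments ->remaining on the r10_bio it points to, so the whole
 * operation completes only when the NULL-terminated head drops to 0.
 */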
3147
3148 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3149 sector_t max_sector, int *skipped)
3150 {
3151 struct r10conf *conf = mddev->private;
3152 struct r10bio *r10_bio;
3153 struct bio *biolist = NULL, *bio;
3154 sector_t nr_sectors;
3155 int i;
3156 int max_sync;
3157 sector_t sync_blocks;
3158 sector_t chunk_mask = conf->geo.chunk_mask;
3159 int page_idx = 0;
3160
3161 /*
3162 * Allow skipping a full rebuild for incremental assembly
3163 * of a clean array, like RAID1 does.
3164 */
3165 if (mddev->bitmap == NULL &&
3166 mddev->resync_offset == MaxSector &&
3167 mddev->reshape_position == MaxSector &&
3168 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
3169 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3170 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
3171 conf->fullsync == 0) {
3172 *skipped = 1;
3173 return mddev->dev_sectors - sector_nr;
3174 }
3175
3176 if (!mempool_initialized(&conf->r10buf_pool))
3177 if (init_resync(conf))
3178 return 0;
3179
3180 if (sector_nr >= max_sector) {
3181 conf->cluster_sync_low = 0;
3182 conf->cluster_sync_high = 0;
3183
3184 /* If we aborted, we need to abort the
3185 * sync on the 'current' bitmap chunks (there can
3186 * be several when recovering multiple devices),
3187 * as we may have started syncing them but not finished.
3188 * We can find the current address in
3189 * mddev->curr_resync, but for recovery,
3190 * we need to convert that to several
3191 * virtual addresses.
3192 */
3193 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3194 end_reshape(conf);
3195 close_sync(conf);
3196 return 0;
3197 }
3198
3199 if (mddev->curr_resync < max_sector) { /* aborted */
3200 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3201 md_bitmap_end_sync(mddev, mddev->curr_resync,
3202 &sync_blocks);
3203 else for (i = 0; i < conf->geo.raid_disks; i++) {
3204 sector_t sect =
3205 raid10_find_virt(conf, mddev->curr_resync, i);
3206
3207 md_bitmap_end_sync(mddev, sect, &sync_blocks);
3208 }
3209 } else {
3210 /* completed sync */
3211 if ((!mddev->bitmap || conf->fullsync)
3212 && conf->have_replacement
3213 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3214 /* Completed a full sync so the replacements
3215 * are now fully recovered.
3216 */
3217 for (i = 0; i < conf->geo.raid_disks; i++) {
3218 struct md_rdev *rdev =
3219 conf->mirrors[i].replacement;
3220
3221 if (rdev)
3222 rdev->recovery_offset = MaxSector;
3223 }
3224 }
3225 conf->fullsync = 0;
3226 }
3227 if (md_bitmap_enabled(mddev, false))
3228 mddev->bitmap_ops->close_sync(mddev);
3229 close_sync(conf);
3230 *skipped = 1;
3231 return 0;
3232 }
3233
3234 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3235 return reshape_request(mddev, sector_nr, skipped);
3236
3237 if (max_sector > mddev->resync_max)
3238 max_sector = mddev->resync_max; /* Don't do IO beyond here */
3239
3240 /* make sure the whole request will fit in a chunk - if chunks
3241 * are meaningful
3242 */
3243 if (conf->geo.near_copies < conf->geo.raid_disks &&
3244 max_sector > (sector_nr | chunk_mask))
3245 max_sector = (sector_nr | chunk_mask) + 1;
3246
3247 /*
3248 * If there is non-resync activity waiting for a turn, then let it
3249 * through before starting on this new sync request.
3250 */
3251 if (conf->nr_waiting)
3252 schedule_timeout_uninterruptible(1);
3253
3254 /* Again, very different code for resync and recovery.
3255 * Both must result in an r10bio with a list of bios that
3256 * have bi_end_io, bi_sector, bi_bdev set,
3257 * and bi_private set to the r10bio.
3258 * For recovery, we may actually create several r10bios
3259 * with 2 bios in each, that correspond to the bios in the main one.
3260 * In this case, the subordinate r10bios link back through a
3261 * borrowed master_bio pointer, and the counter in the master
3262 * includes a ref from each subordinate.
3263 */
3264 /* First, we decide what to do and set ->bi_end_io
3265 * To end_sync_read if we want to read, and
3266 * end_sync_write if we will want to write.
3267 */
3268
3269 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3270 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3271 /* recovery... the complicated one */
3272 int j;
3273 r10_bio = NULL;
3274
3275 for (i = 0 ; i < conf->geo.raid_disks; i++) {
3276 bool still_degraded;
3277 struct r10bio *rb2;
3278 sector_t sect;
3279 bool must_sync;
3280 int any_working;
3281 struct raid10_info *mirror = &conf->mirrors[i];
3282 struct md_rdev *mrdev, *mreplace;
3283
3284 mrdev = mirror->rdev;
3285 mreplace = mirror->replacement;
3286
3287 if (mrdev && (test_bit(Faulty, &mrdev->flags) ||
3288 test_bit(In_sync, &mrdev->flags)))
3289 mrdev = NULL;
3290 if (mreplace && test_bit(Faulty, &mreplace->flags))
3291 mreplace = NULL;
3292
3293 if (!mrdev && !mreplace)
3294 continue;
3295
3296 still_degraded = false;
3297 /* want to reconstruct this device */
3298 rb2 = r10_bio;
3299 sect = raid10_find_virt(conf, sector_nr, i);
3300 if (sect >= mddev->resync_max_sectors)
3301 /* last stripe is not complete - don't
3302 * try to recover this sector.
3303 */
3304 continue;
3305 /* Unless we are doing a full sync or a replacement,
3306 * we only need to recover the block if it is set in
3307 * the bitmap
3308 */
3309 must_sync = md_bitmap_start_sync(mddev, sect,
3310 &sync_blocks, true);
3311 if (sync_blocks < max_sync)
3312 max_sync = sync_blocks;
3313 if (!must_sync &&
3314 mreplace == NULL &&
3315 !conf->fullsync) {
3316 /* yep, skip the sync_blocks here, but don't assume
3317 * that there will never be anything to do here
3318 */
3319 continue;
3320 }
3321 if (mrdev)
3322 atomic_inc(&mrdev->nr_pending);
3323 if (mreplace)
3324 atomic_inc(&mreplace->nr_pending);
3325
3326 r10_bio = raid10_alloc_init_r10buf(conf);
3327 r10_bio->state = 0;
3328 raise_barrier(conf, rb2 != NULL);
3329 atomic_set(&r10_bio->remaining, 0);
3330
3331 r10_bio->master_bio = (struct bio*)rb2;
3332 if (rb2)
3333 atomic_inc(&rb2->remaining);
3334 r10_bio->mddev = mddev;
3335 set_bit(R10BIO_IsRecover, &r10_bio->state);
3336 r10_bio->sector = sect;
3337
3338 raid10_find_phys(conf, r10_bio);
3339
3340 /* Need to check if the array will still be
3341 * degraded
3342 */
3343 for (j = 0; j < conf->geo.raid_disks; j++) {
3344 struct md_rdev *rdev = conf->mirrors[j].rdev;
3345
3346 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3347 still_degraded = true;
3348 break;
3349 }
3350 }
3351
3352 md_bitmap_start_sync(mddev, sect, &sync_blocks,
3353 still_degraded);
3354 any_working = 0;
3355 for (j=0; j<conf->copies;j++) {
3356 int k;
3357 int d = r10_bio->devs[j].devnum;
3358 sector_t from_addr, to_addr;
3359 struct md_rdev *rdev = conf->mirrors[d].rdev;
3360 sector_t sector, first_bad;
3361 sector_t bad_sectors;
3362 if (!rdev ||
3363 !test_bit(In_sync, &rdev->flags))
3364 continue;
3365 /* This is where we read from */
3366 sector = r10_bio->devs[j].addr;
3367
3368 if (is_badblock(rdev, sector, max_sync,
3369 &first_bad, &bad_sectors)) {
3370 if (first_bad > sector)
3371 max_sync = first_bad - sector;
3372 else {
3373 bad_sectors -= (sector
3374 - first_bad);
3375 if (max_sync > bad_sectors)
3376 max_sync = bad_sectors;
3377 continue;
3378 }
3379 }
3380 any_working = 1;
3381 bio = r10_bio->devs[0].bio;
3382 bio->bi_next = biolist;
3383 biolist = bio;
3384 bio->bi_end_io = end_sync_read;
3385 bio->bi_opf = REQ_OP_READ;
3386 if (test_bit(FailFast, &rdev->flags))
3387 bio->bi_opf |= MD_FAILFAST;
3388 from_addr = r10_bio->devs[j].addr;
3389 bio->bi_iter.bi_sector = from_addr +
3390 rdev->data_offset;
3391 bio_set_dev(bio, rdev->bdev);
3392 atomic_inc(&rdev->nr_pending);
3393 /* and we write to 'i' (if not in_sync) */
3394
3395 for (k=0; k<conf->copies; k++)
3396 if (r10_bio->devs[k].devnum == i)
3397 break;
3398 BUG_ON(k == conf->copies);
3399 to_addr = r10_bio->devs[k].addr;
3400 r10_bio->devs[0].devnum = d;
3401 r10_bio->devs[0].addr = from_addr;
3402 r10_bio->devs[1].devnum = i;
3403 r10_bio->devs[1].addr = to_addr;
3404
3405 if (mrdev) {
3406 bio = r10_bio->devs[1].bio;
3407 bio->bi_next = biolist;
3408 biolist = bio;
3409 bio->bi_end_io = end_sync_write;
3410 bio->bi_opf = REQ_OP_WRITE;
3411 bio->bi_iter.bi_sector = to_addr
3412 + mrdev->data_offset;
3413 bio_set_dev(bio, mrdev->bdev);
3414 atomic_inc(&r10_bio->remaining);
3415 } else
3416 r10_bio->devs[1].bio->bi_end_io = NULL;
3417
3418 /* and maybe write to replacement */
3419 bio = r10_bio->devs[1].repl_bio;
3420 if (bio)
3421 bio->bi_end_io = NULL;
3422 /* Note: if mreplace is not NULL, then bio
3423 * cannot be NULL as r10buf_pool_alloc will
3424 * have allocated it.
3425 */
3426 if (!mreplace)
3427 break;
3428 bio->bi_next = biolist;
3429 biolist = bio;
3430 bio->bi_end_io = end_sync_write;
3431 bio->bi_opf = REQ_OP_WRITE;
3432 bio->bi_iter.bi_sector = to_addr +
3433 mreplace->data_offset;
3434 bio_set_dev(bio, mreplace->bdev);
3435 atomic_inc(&r10_bio->remaining);
3436 break;
3437 }
3438 if (j == conf->copies) {
3439 /* Cannot recover, so abort the recovery or
3440 * record a bad block */
3441 if (any_working) {
3442 /* problem is that there are bad blocks
3443 * on other device(s)
3444 */
3445 int k;
3446 for (k = 0; k < conf->copies; k++)
3447 if (r10_bio->devs[k].devnum == i)
3448 break;
3449 if (mrdev &&
3450 !test_bit(In_sync, &mrdev->flags))
3451 rdev_set_badblocks(
3452 mrdev,
3453 r10_bio->devs[k].addr,
3454 max_sync, 0);
3455 if (mreplace)
3456 rdev_set_badblocks(
3457 mreplace,
3458 r10_bio->devs[k].addr,
3459 max_sync, 0);
3460 pr_warn("md/raid10:%s: cannot recover sector %llu + %d.\n",
3461 mdname(mddev), (unsigned long long)r10_bio->devs[k].addr, max_sync);
3462 }
3463 put_buf(r10_bio);
3464 if (rb2)
3465 atomic_dec(&rb2->remaining);
3466 r10_bio = rb2;
3467 if (mrdev)
3468 rdev_dec_pending(mrdev, mddev);
3469 if (mreplace)
3470 rdev_dec_pending(mreplace, mddev);
3471 break;
3472 }
3473 if (mrdev)
3474 rdev_dec_pending(mrdev, mddev);
3475 if (mreplace)
3476 rdev_dec_pending(mreplace, mddev);
3477 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3478 /* Only want this if there is elsewhere to
3479 * read from. 'j' is currently the first
3480 * readable copy.
3481 */
3482 int targets = 1;
3483 for (; j < conf->copies; j++) {
3484 int d = r10_bio->devs[j].devnum;
3485 if (conf->mirrors[d].rdev &&
3486 test_bit(In_sync,
3487 &conf->mirrors[d].rdev->flags))
3488 targets++;
3489 }
3490 if (targets == 1)
3491 r10_bio->devs[0].bio->bi_opf
3492 &= ~MD_FAILFAST;
3493 }
3494 }
3495 if (biolist == NULL) {
3496 while (r10_bio) {
3497 struct r10bio *rb2 = r10_bio;
3498 r10_bio = (struct r10bio*) rb2->master_bio;
3499 rb2->master_bio = NULL;
3500 put_buf(rb2);
3501 }
3502 *skipped = 1;
3503 return max_sync;
3504 }
3505 } else {
3506 /* resync. Schedule a read for every block at this virt offset */
3507 int count = 0;
3508
3509 /*
3510 * Since curr_resync_completed may not be updated in time, and
3511 * we set cluster_sync_low based on it, let's check against
3512 * "sector_nr + 2 * RESYNC_SECTORS" for safety, which
3513 * ensures curr_resync_completed is updated in
3514 * bitmap_cond_end_sync.
3515 */
3516 if (md_bitmap_enabled(mddev, false))
3517 mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
3518 mddev_is_clustered(mddev) &&
3519 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3520
3521 if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks,
3522 mddev->degraded) &&
3523 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3524 &mddev->recovery)) {
3525 /* We can skip this block */
3526 *skipped = 1;
3527 return sync_blocks;
3528 }
3529 if (sync_blocks < max_sync)
3530 max_sync = sync_blocks;
3531 r10_bio = raid10_alloc_init_r10buf(conf);
3532 r10_bio->state = 0;
3533
3534 r10_bio->mddev = mddev;
3535 atomic_set(&r10_bio->remaining, 0);
3536 raise_barrier(conf, 0);
3537 conf->next_resync = sector_nr;
3538
3539 r10_bio->master_bio = NULL;
3540 r10_bio->sector = sector_nr;
3541 set_bit(R10BIO_IsSync, &r10_bio->state);
3542 raid10_find_phys(conf, r10_bio);
3543 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
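/* e.g. sector_nr = 1000, chunk_mask = 127 (illustrative): (1000 | 127) - 1000 + 1 = 24 sectors to the chunk boundary */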
3544
3545 for (i = 0; i < conf->copies; i++) {
3546 int d = r10_bio->devs[i].devnum;
3547 sector_t first_bad, sector;
3548 sector_t bad_sectors;
3549 struct md_rdev *rdev;
3550
3551 if (r10_bio->devs[i].repl_bio)
3552 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3553
3554 bio = r10_bio->devs[i].bio;
3555 bio->bi_status = BLK_STS_IOERR;
3556 rdev = conf->mirrors[d].rdev;
3557 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3558 continue;
3559
3560 sector = r10_bio->devs[i].addr;
3561 if (is_badblock(rdev, sector, max_sync,
3562 &first_bad, &bad_sectors)) {
3563 if (first_bad > sector)
3564 max_sync = first_bad - sector;
3565 else {
3566 bad_sectors -= (sector - first_bad);
3567 if (max_sync > bad_sectors)
3568 max_sync = bad_sectors;
3569 continue;
3570 }
3571 }
3572 atomic_inc(&rdev->nr_pending);
3573 atomic_inc(&r10_bio->remaining);
3574 bio->bi_next = biolist;
3575 biolist = bio;
3576 bio->bi_end_io = end_sync_read;
3577 bio->bi_opf = REQ_OP_READ;
3578 if (test_bit(FailFast, &rdev->flags))
3579 bio->bi_opf |= MD_FAILFAST;
3580 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3581 bio_set_dev(bio, rdev->bdev);
3582 count++;
3583
3584 rdev = conf->mirrors[d].replacement;
3585 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3586 continue;
3587
3588 atomic_inc(&rdev->nr_pending);
3589
3590 /* Need to set up for writing to the replacement */
3591 bio = r10_bio->devs[i].repl_bio;
3592 bio->bi_status = BLK_STS_IOERR;
3593
3594 sector = r10_bio->devs[i].addr;
3595 bio->bi_next = biolist;
3596 biolist = bio;
3597 bio->bi_end_io = end_sync_write;
3598 bio->bi_opf = REQ_OP_WRITE;
3599 if (test_bit(FailFast, &rdev->flags))
3600 bio->bi_opf |= MD_FAILFAST;
3601 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3602 bio_set_dev(bio, rdev->bdev);
3603 count++;
3604 }
3605
3606 if (count < 2) {
3607 for (i=0; i<conf->copies; i++) {
3608 int d = r10_bio->devs[i].devnum;
3609 if (r10_bio->devs[i].bio->bi_end_io)
3610 rdev_dec_pending(conf->mirrors[d].rdev,
3611 mddev);
3612 if (r10_bio->devs[i].repl_bio &&
3613 r10_bio->devs[i].repl_bio->bi_end_io)
3614 rdev_dec_pending(
3615 conf->mirrors[d].replacement,
3616 mddev);
3617 }
3618 put_buf(r10_bio);
3619 *skipped = 1;
3620 return max_sync;
3621 }
3622 }

	nr_sectors = 0;
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		for (bio = biolist; bio; bio = bio->bi_next) {
			struct resync_pages *rp = get_resync_pages(bio);
			page = resync_fetch_page(rp, page_idx);
			if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
				bio->bi_status = BLK_STS_RESOURCE;
				bio_endio(bio);
				*skipped = 1;
				return max_sync;
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
	} while (++page_idx < RESYNC_PAGES);
	r10_bio->sectors = nr_sectors;

	if (mddev_is_clustered(mddev) &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* It is resync not recovery */
		if (conf->cluster_sync_high < sector_nr + nr_sectors) {
			conf->cluster_sync_low = mddev->curr_resync_completed;
			raid10_set_cluster_sync_high(conf);
			/* Send resync message */
			mddev->cluster_ops->resync_info_update(mddev,
						conf->cluster_sync_low,
						conf->cluster_sync_high);
		}
	} else if (mddev_is_clustered(mddev)) {
		/* This is recovery not resync */
		sector_t sect_va1, sect_va2;
		bool broadcast_msg = false;

		for (i = 0; i < conf->geo.raid_disks; i++) {
			/*
			 * sector_nr is a device address during recovery, so
			 * we need to translate it to an array address before
			 * comparing with cluster_sync_high.
			 */
			sect_va1 = raid10_find_virt(conf, sector_nr, i);

			if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
				broadcast_msg = true;
				/*
				 * curr_resync_completed is, like sector_nr,
				 * a device address, so translate it too.
				 */
				sect_va2 = raid10_find_virt(conf,
					mddev->curr_resync_completed, i);

				if (conf->cluster_sync_low == 0 ||
				    conf->cluster_sync_low > sect_va2)
					conf->cluster_sync_low = sect_va2;
			}
		}
		if (broadcast_msg) {
			raid10_set_cluster_sync_high(conf);
			mddev->cluster_ops->resync_info_update(mddev,
					conf->cluster_sync_low,
					conf->cluster_sync_high);
		}
	}

	while (biolist) {
		bio = biolist;
		biolist = biolist->bi_next;

		bio->bi_next = NULL;
		r10_bio = get_resync_r10bio(bio);
		r10_bio->sectors = nr_sectors;

		if (bio->bi_end_io == end_sync_read) {
			bio->bi_status = 0;
			submit_bio_noacct(bio);
		}
	}

	return nr_sectors;
}

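/*
 * Compute the usable array size for a given per-device size and disk
 * count.  Illustrative example (not from the original source): with
 * 4 disks, near_copies = 2, far_copies = 1 and 1024 chunks per device,
 * the array holds (1024 * 4) / 2 = 2048 data chunks, i.e. two devices'
 * worth of data.
 */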
static sector_t
raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t size;
	struct r10conf *conf = mddev->private;

	if (!raid_disks)
		raid_disks = min(conf->geo.raid_disks,
				 conf->prev.raid_disks);
	if (!sectors)
		sectors = conf->dev_sectors;

	size = sectors >> conf->geo.chunk_shift;
	sector_div(size, conf->geo.far_copies);
	size = size * raid_disks;
	sector_div(size, conf->geo.near_copies);

	return size << conf->geo.chunk_shift;
}

static void calc_sectors(struct r10conf *conf, sector_t size)
{
	/* Calculate the number of sectors-per-device that will
	 * actually be used, and set conf->dev_sectors and
	 * conf->stride
	 */

	size = size >> conf->geo.chunk_shift;
	sector_div(size, conf->geo.far_copies);
	size = size * conf->geo.raid_disks;
	sector_div(size, conf->geo.near_copies);
	/* 'size' is now the number of chunks in the array */
	/* calculate "used chunks per device" */
	size = size * conf->copies;
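	/*
	 * Worked example (illustrative): 4 disks, near_copies = 2,
	 * far_copies = 1, 1024 chunks per device.  The array holds
	 * 2048 data chunks, so 4096 chunk copies, which round up to
	 * 1024 used chunks on each of the 4 devices.
	 */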

	/* We need to round up when dividing by raid_disks to
	 * get the stride size.
	 */
	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);

	conf->dev_sectors = size << conf->geo.chunk_shift;

	if (conf->geo.far_offset)
		conf->geo.stride = 1 << conf->geo.chunk_shift;
	else {
		sector_div(size, conf->geo.far_copies);
		conf->geo.stride = size << conf->geo.chunk_shift;
	}
}

enum geo_type {geo_new, geo_old, geo_start};
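/*
 * Decode the layout word into a struct geom.  Illustrative example
 * (not from the original source): layout 0x102 means near_copies = 2
 * (low byte), far_copies = 1 (second byte), far_offset clear (bit 16),
 * and with bits 17-18 clear the original far-set handling is used.
 */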
static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
{
	int nc, fc, fo;
	int layout, chunk, disks;
	switch (new) {
	case geo_old:
		layout = mddev->layout;
		chunk = mddev->chunk_sectors;
		disks = mddev->raid_disks - mddev->delta_disks;
		break;
	case geo_new:
		layout = mddev->new_layout;
		chunk = mddev->new_chunk_sectors;
		disks = mddev->raid_disks;
		break;
	default: /* avoid 'may be unused' warnings */
	case geo_start: /* new when starting reshape - raid_disks not
			 * updated yet. */
		layout = mddev->new_layout;
		chunk = mddev->new_chunk_sectors;
		disks = mddev->raid_disks + mddev->delta_disks;
		break;
	}
	if (layout >> 19)
		return -1;
	if (chunk < (PAGE_SIZE >> 9) ||
	    !is_power_of_2(chunk))
		return -2;
	nc = layout & 255;
	fc = (layout >> 8) & 255;
	fo = layout & (1<<16);
	geo->raid_disks = disks;
	geo->near_copies = nc;
	geo->far_copies = fc;
	geo->far_offset = fo;
	switch (layout >> 17) {
	case 0:	/* original layout. simple but not always optimal */
		geo->far_set_size = disks;
		break;
	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
		 * actually using this, but leave code here just in case.*/
		geo->far_set_size = disks/fc;
		WARN(geo->far_set_size < fc,
		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
		break;
	case 2: /* "improved" layout fixed to match documentation */
		geo->far_set_size = fc * nc;
		break;
	default: /* Not a valid layout */
		return -1;
	}
	geo->chunk_mask = chunk - 1;
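	/* chunk is a power of 2, so ffz(~chunk) is log2(chunk) */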
	geo->chunk_shift = ffz(~chunk);
	return nc*fc;
}

static void raid10_free_conf(struct r10conf *conf)
{
	if (!conf)
		return;

	mempool_exit(&conf->r10bio_pool);
	kfree(conf->mirrors);
	kfree(conf->mirrors_old);
	kfree(conf->mirrors_new);
	safe_put_page(conf->tmppage);
	bioset_exit(&conf->bio_split);
	kfree(conf);
}

static struct r10conf *setup_conf(struct mddev *mddev)
{
	struct r10conf *conf = NULL;
	int err = -EINVAL;
	struct geom geo;
	int copies;

	copies = setup_geo(&geo, mddev, geo_new);

	if (copies == -2) {
		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
			mdname(mddev), PAGE_SIZE);
		goto out;
	}

	if (copies < 2 || copies > mddev->raid_disks) {
		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
			mdname(mddev), mddev->new_layout);
		goto out;
	}

	err = -ENOMEM;
	conf = kzalloc_obj(struct r10conf);
	if (!conf)
		goto out;

	/* FIXME calc properly */
	conf->mirrors = kzalloc_objs(struct raid10_info,
				     mddev->raid_disks + max(0, -mddev->delta_disks));
	if (!conf->mirrors)
		goto out;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out;

	conf->geo = geo;
	conf->copies = copies;
	err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
			   rbio_pool_free, conf);
	if (err)
		goto out;

	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
	if (err)
		goto out;

	calc_sectors(conf, mddev->dev_sectors);
	if (mddev->reshape_position == MaxSector) {
		conf->prev = conf->geo;
		conf->reshape_progress = MaxSector;
	} else {
		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
			err = -EINVAL;
			goto out;
		}
		conf->reshape_progress = mddev->reshape_position;
		if (conf->prev.far_offset)
			conf->prev.stride = 1 << conf->prev.chunk_shift;
		else
			/* far_copies must be 1 */
			conf->prev.stride = conf->dev_sectors;
	}
	conf->reshape_safe = conf->reshape_progress;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);
	INIT_LIST_HEAD(&conf->bio_end_io_list);

	seqlock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);
	atomic_set(&conf->nr_pending, 0);

	err = -ENOMEM;
	rcu_assign_pointer(conf->thread,
			   md_register_thread(raid10d, mddev, "raid10"));
	if (!conf->thread)
		goto out;

	conf->mddev = mddev;
	return conf;

out:
	raid10_free_conf(conf);
	return ERR_PTR(err);
}

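/*
 * Number of data stripes across the array, used below to size io_opt.
 * If raid_disks is not a multiple of near_copies the layout is not a
 * clean grid, so fall back to counting every disk.
 */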
static unsigned int raid10_nr_stripes(struct r10conf *conf)
{
	unsigned int raid_disks = conf->geo.raid_disks;

	if (conf->geo.raid_disks % conf->geo.near_copies)
		return raid_disks;
	return raid_disks / conf->geo.near_copies;
}

static int raid10_set_queue_limits(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;
	struct queue_limits lim;
	int err;

	md_init_stacking_limits(&lim);
	lim.max_write_zeroes_sectors = 0;
	lim.max_hw_wzeroes_unmap_sectors = 0;
	lim.logical_block_size = mddev->logical_block_size;
	lim.io_min = mddev->chunk_sectors << 9;
	lim.chunk_sectors = mddev->chunk_sectors;
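	/* optimal I/O covers one chunk on every data stripe */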
	lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
	lim.features |= BLK_FEAT_ATOMIC_WRITES;
	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
	if (err)
		return err;
	return queue_limits_set(mddev->gendisk->queue, &lim);
}

static int raid10_run(struct mddev *mddev)
{
	struct r10conf *conf;
	int i, disk_idx;
	struct raid10_info *disk;
	struct md_rdev *rdev;
	sector_t size;
	sector_t min_offset_diff = 0;
	int first = 1;
	int ret = -EIO;

	if (mddev->private == NULL) {
		conf = setup_conf(mddev);
		if (IS_ERR(conf))
			return PTR_ERR(conf);
		mddev->private = conf;
	}
	conf = mddev->private;
	if (!conf)
		goto out;

	rcu_assign_pointer(mddev->thread, conf->thread);
	rcu_assign_pointer(conf->thread, NULL);

	if (mddev_is_clustered(conf->mddev)) {
		int fc, fo;

		fc = (mddev->layout >> 8) & 255;
		fo = mddev->layout & (1<<16);
		if (fc > 1 || fo > 0) {
			pr_err("only near layout is supported by clustered raid10\n");
			goto out_free_conf;
		}
	}

	rdev_for_each(rdev, mddev) {
		long long diff;

		disk_idx = rdev->raid_disk;
		if (disk_idx < 0)
			continue;
		if (disk_idx >= conf->geo.raid_disks &&
		    disk_idx >= conf->prev.raid_disks)
			continue;
		disk = conf->mirrors + disk_idx;

		if (test_bit(Replacement, &rdev->flags)) {
			if (disk->replacement)
				goto out_free_conf;
			disk->replacement = rdev;
		} else {
			if (disk->rdev)
				goto out_free_conf;
			disk->rdev = rdev;
		}
		diff = (rdev->new_data_offset - rdev->data_offset);
		if (!mddev->reshape_backwards)
			diff = -diff;
		if (diff < 0)
			diff = 0;
		if (first || diff < min_offset_diff)
			min_offset_diff = diff;

		disk->head_position = 0;
		first = 0;
	}

	if (!mddev_is_dm(conf->mddev)) {
		int err = raid10_set_queue_limits(mddev);

		if (err) {
			ret = err;
			goto out_free_conf;
		}
	}

	/* need to check that every block has at least one working mirror */
	if (!enough(conf, -1)) {
		pr_err("md/raid10:%s: not enough operational mirrors.\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	if (conf->reshape_progress != MaxSector) {
		/* must ensure that shape change is supported */
		if (conf->geo.far_copies != 1 &&
		    conf->geo.far_offset == 0)
			goto out_free_conf;
		if (conf->prev.far_copies != 1 &&
		    conf->prev.far_offset == 0)
			goto out_free_conf;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->geo.raid_disks || i < conf->prev.raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev && disk->replacement) {
			/* The replacement is all we have - use it */
			disk->rdev = disk->replacement;
			disk->replacement = NULL;
			clear_bit(Replacement, &disk->rdev->flags);
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
			if (disk->rdev &&
			    disk->rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
		}

		if (disk->replacement &&
		    !test_bit(In_sync, &disk->replacement->flags) &&
		    disk->replacement->saved_raid_disk < 0) {
			conf->fullsync = 1;
		}
	}

	if (mddev->resync_offset != MaxSector)
		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
			  mdname(mddev));
	pr_info("md/raid10:%s: active with %d out of %d devices\n",
		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
		conf->geo.raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->dev_sectors = conf->dev_sectors;
	size = raid10_size(mddev, 0, 0);
	md_set_array_sectors(mddev, size);
	mddev->resync_max_sectors = size;
	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);

	if (md_integrity_register(mddev))
		goto out_free_conf;

	if (conf->reshape_progress != MaxSector) {
		unsigned long before_length, after_length;

		before_length = ((1 << conf->prev.chunk_shift) *
				 conf->prev.far_copies);
		after_length = ((1 << conf->geo.chunk_shift) *
				conf->geo.far_copies);

		if (max(before_length, after_length) > min_offset_diff) {
			/* This cannot work */
			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
			goto out_free_conf;
		}
		conf->offset_diff = min_offset_diff;

		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}

	return 0;

out_free_conf:
	md_unregister_thread(mddev, &mddev->thread);
	raid10_free_conf(conf);
	mddev->private = NULL;
out:
	return ret;
}

static void raid10_free(struct mddev *mddev, void *priv)
{
	raid10_free_conf(priv);
}

static void raid10_quiesce(struct mddev *mddev, int quiesce)
{
	struct r10conf *conf = mddev->private;

	if (quiesce)
		raise_barrier(conf, 0);
	else
		lower_barrier(conf);
}

static int raid10_resize(struct mddev *mddev, sector_t sectors)
{
	/* Resize of 'far' arrays is not supported.
	 * For 'near' and 'offset' arrays we can set the
	 * number of sectors used to be an appropriate multiple
	 * of the chunk size.
	 * For 'offset', this is far_copies*chunksize.
	 * For 'near' the multiplier is the LCM of
	 * near_copies and raid_disks.
	 * So if far_copies > 1 && !far_offset, fail.
	 * Else find LCM(raid_disks, near_copies)*far_copies and
	 * multiply by chunk_size.  Then round to this number.
	 * This is mostly done by raid10_size().
	 */
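	/*
	 * Illustrative example: near_copies = 2 on 3 disks gives
	 * LCM(3, 2) = 6, so the usable size is rounded to a multiple
	 * of 6 chunks (times far_copies).
	 */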
	struct r10conf *conf = mddev->private;
	sector_t oldsize, size;

	if (mddev->reshape_position != MaxSector)
		return -EBUSY;

	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
		return -EINVAL;

	oldsize = raid10_size(mddev, 0, 0);
	size = raid10_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > size)
		return -EINVAL;

	if (md_bitmap_enabled(mddev, false)) {
		int ret = mddev->bitmap_ops->resize(mddev, size, 0);

		if (ret)
			return ret;
	}

	md_set_array_sectors(mddev, size);
	if (sectors > mddev->dev_sectors &&
	    mddev->resync_offset > oldsize) {
		mddev->resync_offset = oldsize;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	calc_sectors(conf, sectors);
	mddev->dev_sectors = conf->dev_sectors;
	mddev->resync_max_sectors = size;
	return 0;
}

static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
{
	struct md_rdev *rdev;
	struct r10conf *conf;

	if (mddev->degraded > 0) {
		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	sector_div(size, devs);

	/* Set new parameters */
	mddev->new_level = 10;
	/* new layout: far_copies = 1, near_copies = 2 */
	mddev->new_layout = (1<<8) + 2;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = mddev->raid_disks;
	mddev->raid_disks *= 2;
	/* make sure it will be not marked as dirty */
	mddev->resync_offset = MaxSector;
	mddev->dev_sectors = size;

	conf = setup_conf(mddev);
	if (!IS_ERR(conf)) {
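		/*
		 * The old raid0 members land on the even slots; the
		 * odd slots (their new mirrors) start out empty.
		 */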
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk >= 0) {
				rdev->new_raid_disk = rdev->raid_disk * 2;
				rdev->sectors = size;
			}
	}

	return conf;
}

static void *raid10_takeover(struct mddev *mddev)
{
	struct r0conf *raid0_conf;

	/* raid10 can take over:
	 *  raid0 - providing it has only two drives
	 */
	if (mddev->level == 0) {
		/* for raid0 takeover only one zone is supported */
		raid0_conf = mddev->private;
		if (raid0_conf->nr_strip_zones > 1) {
			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		return raid10_takeover_raid0(mddev,
			raid0_conf->strip_zone->zone_end,
			raid0_conf->strip_zone->nb_dev);
	}
	return ERR_PTR(-EINVAL);
}

static int raid10_check_reshape(struct mddev *mddev)
{
	/* Called when there is a request to change
	 * - layout (to ->new_layout)
	 * - chunk size (to ->new_chunk_sectors)
	 * - raid_disks (by delta_disks)
	 * or when trying to restart a reshape that was ongoing.
	 *
	 * We need to validate the request and possibly allocate
	 * space if that might be an issue later.
	 *
	 * Currently we reject any reshape of a 'far' mode array,
	 * allow chunk size to change if new is generally acceptable,
	 * allow raid_disks to increase, and allow
	 * a switch between 'near' mode and 'offset' mode.
	 */
	struct r10conf *conf = mddev->private;
	struct geom geo;

	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
		return -EINVAL;

	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
		/* mustn't change number of copies */
		return -EINVAL;
	if (geo.far_copies > 1 && !geo.far_offset)
		/* Cannot switch to 'far' mode */
		return -EINVAL;

	if (mddev->array_sectors & geo.chunk_mask)
		/* not factor of array size */
		return -EINVAL;

	if (!enough(conf, -1))
		return -EINVAL;

	kfree(conf->mirrors_new);
	conf->mirrors_new = NULL;
	if (mddev->delta_disks > 0) {
		/* allocate new 'mirrors' list */
		conf->mirrors_new =
			kzalloc_objs(struct raid10_info,
				     mddev->raid_disks + mddev->delta_disks);
		if (!conf->mirrors_new)
			return -ENOMEM;
	}
	return 0;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be in_sync in the section most affected by failed devices.
 */
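/*
 * Illustrative example: when shrinking from 6 to 4 disks, a failed
 * device in slot 5 counts against the 'prev' geometry only, since the
 * new geometry no longer includes that slot.
 */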
static int calc_degraded(struct r10conf *conf)
{
	int degraded, degraded2;
	int i;

	degraded = 0;
	/* 'prev' section first */
	for (i = 0; i < conf->prev.raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;

		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (!test_bit(In_sync, &rdev->flags))
			/* When we can reduce the number of devices in
			 * an array, this might not contribute to
			 * 'degraded'.  It does now.
			 */
			degraded++;
	}
	if (conf->geo.raid_disks == conf->prev.raid_disks)
		return degraded;
	degraded2 = 0;
	for (i = 0; i < conf->geo.raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;

		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (!test_bit(In_sync, &rdev->flags)) {
			/* If reshape is increasing the number of devices,
			 * this section has already been recovered, so
			 * it doesn't contribute to degraded.
			 * else it does.
			 */
			if (conf->geo.raid_disks <= conf->prev.raid_disks)
				degraded2++;
		}
	}
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

static int raid10_start_reshape(struct mddev *mddev)
{
	/* A 'reshape' has been requested.  This commits
	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
	 * This also checks if there are enough spares and adds them
	 * to the array.
	 * We currently require enough spares to make the final
	 * array non-degraded.  We also require that the difference
	 * between old and new data_offset - on each device - is
	 * enough that we never risk over-writing.
	 */

	unsigned long before_length, after_length;
	sector_t min_offset_diff = 0;
	int first = 1;
	struct geom new;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev;
	int spares = 0;
	int ret;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (setup_geo(&new, mddev, geo_start) != conf->copies)
		return -EINVAL;

	before_length = ((1 << conf->prev.chunk_shift) *
			 conf->prev.far_copies);
	after_length = ((1 << conf->geo.chunk_shift) *
			conf->geo.far_copies);

	rdev_for_each(rdev, mddev) {
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;
		if (rdev->raid_disk >= 0) {
			long long diff = (rdev->new_data_offset
					  - rdev->data_offset);
			if (!mddev->reshape_backwards)
				diff = -diff;
			if (diff < 0)
				diff = 0;
			if (first || diff < min_offset_diff)
				min_offset_diff = diff;
			first = 0;
		}
	}

	if (max(before_length, after_length) > min_offset_diff)
		return -EINVAL;

	if (spares < mddev->delta_disks)
		return -EINVAL;

	conf->offset_diff = min_offset_diff;
	spin_lock_irq(&conf->device_lock);
	if (conf->mirrors_new) {
		memcpy(conf->mirrors_new, conf->mirrors,
		       sizeof(struct raid10_info)*conf->prev.raid_disks);
		smp_mb();
		kfree(conf->mirrors_old);
		conf->mirrors_old = conf->mirrors;
		conf->mirrors = conf->mirrors_new;
		conf->mirrors_new = NULL;
	}
	setup_geo(&conf->geo, mddev, geo_start);
	smp_mb();
	if (mddev->reshape_backwards) {
		sector_t size = raid10_size(mddev, 0, 0);
		if (size < mddev->array_sectors) {
			spin_unlock_irq(&conf->device_lock);
			pr_warn("md/raid10:%s: array size must be reduced before the number of disks\n",
				mdname(mddev));
			return -EINVAL;
		}
		mddev->resync_max_sectors = size;
		conf->reshape_progress = size;
	} else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	spin_unlock_irq(&conf->device_lock);

	if (mddev->delta_disks && mddev->bitmap) {
		struct mdp_superblock_1 *sb = NULL;
		sector_t oldsize, newsize;

		oldsize = raid10_size(mddev, 0, 0);
		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);

		if (!mddev_is_clustered(mddev) &&
		    md_bitmap_enabled(mddev, false)) {
			ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
			if (ret)
				goto abort;
			else
				goto out;
		}

		rdev_for_each(rdev, mddev) {
			if (rdev->raid_disk > -1 &&
			    !test_bit(Faulty, &rdev->flags))
				sb = page_address(rdev->sb_page);
		}

		/*
		 * If another node is already performing the reshape there
		 * is no need to call bitmap_ops->resize again, since it
		 * will be called when the BITMAP_RESIZE msg is received.
		 */
		if ((sb && (le32_to_cpu(sb->feature_map) &
			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
			goto out;

		/* cluster can't be setup without bitmap */
		ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
		if (ret)
			goto abort;

		ret = mddev->cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
		if (ret) {
			mddev->bitmap_ops->resize(mddev, oldsize, 0);
			goto abort;
		}
	}
out:
	if (mddev->delta_disks > 0) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid10_add_disk(mddev, rdev) == 0) {
					if (rdev->raid_disk >=
					    conf->prev.raid_disks)
						set_bit(In_sync, &rdev->flags);
					else
						rdev->recovery_offset = 0;

					/* Failure here is OK */
					sysfs_link_rdev(mddev, rdev);
				}
			} else if (rdev->raid_disk >= conf->prev.raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
			}
	}
	/* When a reshape changes the number of devices,
	 * ->degraded is measured against the larger of the
	 * pre and post numbers.
	 */
	spin_lock_irq(&conf->device_lock);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irq(&conf->device_lock);
	mddev->raid_disks = conf->geo.raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	conf->reshape_checkpoint = jiffies;
	md_new_event();
	return 0;

abort:
	mddev->recovery = 0;
	spin_lock_irq(&conf->device_lock);
	conf->geo = conf->prev;
	mddev->raid_disks = conf->geo.raid_disks;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	smp_wmb();
	conf->reshape_progress = MaxSector;
	conf->reshape_safe = MaxSector;
	mddev->reshape_position = MaxSector;
	spin_unlock_irq(&conf->device_lock);
	return ret;
}

/* Calculate the last device-address that could contain
 * any block from the chunk that includes the array-address 's'
 * and report the next address.
 * i.e. the address returned will be chunk-aligned and after
 * any data that is in the chunk containing 's'.
 */
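/*
 * Illustrative example: near = 2, far = 1, 4 disks, 64-sector chunks.
 * s = 100 rounds up to 128 array sectors = 2 chunks = 4 chunk copies,
 * which ceil-divide over 4 disks to 1 chunk per device, i.e. device
 * address 64.
 */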
static sector_t last_dev_address(sector_t s, struct geom *geo)
{
	s = (s | geo->chunk_mask) + 1;
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

/* Calculate the first device-address that could contain
 * any block from the chunk that includes the array-address 's'.
 * This too will be the start of a chunk.
 */
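/*
 * With the same example geometry, s = 100 is in array chunk 1, whose
 * 2 copies start at device chunk 2/4 = 0, i.e. device address 0.
 */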
static sector_t first_dev_address(sector_t s, struct geom *geo)
{
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	sector_div(s, geo->raid_disks);
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped)
{
	/* We simply copy at most one chunk (smallest of old and new)
	 * at a time, possibly less if that exceeds RESYNC_PAGES,
	 * or we hit a bad block or something.
	 * This might mean we pause for normal IO in the middle of
	 * a chunk, but that is not a problem as mddev->reshape_position
	 * can record any location.
	 *
	 * If we will want to write to a location that isn't
	 * yet recorded as 'safe' (i.e. in metadata on disk) then
	 * we need to flush all reshape requests and update the metadata.
	 *
	 * When reshaping forwards (e.g. to more devices), we interpret
	 * 'safe' as the earliest block which might not have been copied
	 * down yet.  We divide this by previous stripe size and multiply
	 * by previous stripe length to get lowest device offset that we
	 * cannot write to yet.
	 * We interpret 'sector_nr' as an address that we want to write to.
	 * From this we use last_dev_address() to find where we might
	 * write to, and first_dev_address() on the 'safe' position.
	 * If this 'next' write position is after the 'safe' position,
	 * we must update the metadata to increase the 'safe' position.
	 *
	 * When reshaping backwards, we round in the opposite direction
	 * and perform the reverse test: next write position must not be
	 * less than current safe position.
	 *
	 * In all this the minimum difference in data offsets
	 * (conf->offset_diff - always positive) allows a bit of slack,
	 * so next can be after 'safe', but not by more than offset_diff.
	 *
	 * We need to prepare all the bios here before we start any IO
	 * to ensure the size we choose is acceptable to all devices.
	 * That means one for each copy for write-out and an extra one for
	 * read-in.
	 * We store the read-in bio in ->master_bio and the others in
	 * ->devs[x].bio and ->devs[x].repl_bio.
	 */
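	/*
	 * Illustrative numbers for the forward case: with offset_diff
	 * = 128, next = 1100 and safe = 900, next > safe + 128, so the
	 * metadata must be flushed before this chunk can be written.
	 */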
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	sector_t next, safe, last;
	int max_sectors;
	int nr_sectors;
	int s;
	struct md_rdev *rdev;
	int need_flush = 0;
	struct bio *blist;
	struct bio *bio, *read_bio;
	int sectors_done = 0;
	struct page **pages;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->reshape_backwards &&
		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
			sector_nr = (raid10_size(mddev, 0, 0)
				     - conf->reshape_progress);
		} else if (!mddev->reshape_backwards &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		if (sector_nr) {
			mddev->curr_resync_completed = sector_nr;
			sysfs_notify_dirent_safe(mddev->sysfs_completed);
			*skipped = 1;
			return sector_nr;
		}
	}

	/* We don't use sector_nr to track where we are up to
	 * as that doesn't work well for ->reshape_backwards.
	 * So just use ->reshape_progress.
	 */
	if (mddev->reshape_backwards) {
		/* 'next' is the earliest device address that we might
		 * write to for this chunk in the new layout
		 */
		next = first_dev_address(conf->reshape_progress - 1,
					 &conf->geo);

		/* 'safe' is the last device address that we might read from
		 * in the old layout after a restart
		 */
		safe = last_dev_address(conf->reshape_safe - 1,
					&conf->prev);

		if (next + conf->offset_diff < safe)
			need_flush = 1;

		last = conf->reshape_progress - 1;
		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
					       & conf->prev.chunk_mask);
		if (sector_nr + RESYNC_SECTORS < last)
			sector_nr = last + 1 - RESYNC_SECTORS;
	} else {
		/* 'next' is after the last device address that we
		 * might write to for this chunk in the new layout
		 */
		next = last_dev_address(conf->reshape_progress, &conf->geo);

		/* 'safe' is the earliest device address that we might
		 * read from in the old layout after a restart
		 */
		safe = first_dev_address(conf->reshape_safe, &conf->prev);

		/* Need to update metadata if 'next' might be beyond 'safe'
		 * as that would possibly corrupt data
		 */
		if (next > safe + conf->offset_diff)
			need_flush = 1;

		sector_nr = conf->reshape_progress;
		last = sector_nr | (conf->geo.chunk_mask
				    & conf->prev.chunk_mask);

		if (sector_nr + RESYNC_SECTORS <= last)
			last = sector_nr + RESYNC_SECTORS - 1;
	}

	if (need_flush ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Need to update reshape_position in metadata */
		wait_barrier(conf, false);
		mddev->reshape_position = conf->reshape_progress;
		if (mddev->reshape_backwards)
			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
				- conf->reshape_progress;
		else
			mddev->curr_resync_completed = conf->reshape_progress;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			allow_barrier(conf);
			return sectors_done;
		}
		conf->reshape_safe = mddev->reshape_position;
		allow_barrier(conf);
	}

	raise_barrier(conf, 0);
read_more:
	/* Now schedule reads for blocks from sector_nr to last */
	r10_bio = raid10_alloc_init_r10buf(conf);
	r10_bio->state = 0;
	raise_barrier(conf, 1);
	atomic_set(&r10_bio->remaining, 0);
	r10_bio->mddev = mddev;
	r10_bio->sector = sector_nr;
	set_bit(R10BIO_IsReshape, &r10_bio->state);
	r10_bio->sectors = last - sector_nr + 1;
	rdev = read_balance(conf, r10_bio, &max_sectors);
	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));

	if (!rdev) {
		/* Cannot read from here, so need to record bad blocks
		 * on all the target devices.
		 */
		// FIXME
		mempool_free(r10_bio, &conf->r10buf_pool);
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return sectors_done;
	}

	read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
				    GFP_KERNEL, &mddev->bio_set);
	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
				       + rdev->data_offset);
	read_bio->bi_private = r10_bio;
	read_bio->bi_end_io = end_reshape_read;
	r10_bio->master_bio = read_bio;
	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;

	/*
	 * Broadcast a RESYNC message to the other nodes so that they
	 * do not write to this region and cause conflicts.
	 */
	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
		struct mdp_superblock_1 *sb = NULL;
		int sb_reshape_pos = 0;

		conf->cluster_sync_low = sector_nr;
		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
		sb = page_address(rdev->sb_page);
		if (sb) {
			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
			/*
			 * Pull cluster_sync_low back if the next reshape
			 * address is below it; we can't advance
			 * cluster_sync_low until that region has finished
			 * reshaping.
			 */
			if (sb_reshape_pos < conf->cluster_sync_low)
				conf->cluster_sync_low = sb_reshape_pos;
		}

		mddev->cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
						       conf->cluster_sync_high);
	}

	/* Now find the locations in the new layout */
	__raid10_find_phys(&conf->geo, r10_bio);

	blist = read_bio;
	read_bio->bi_next = NULL;

	for (s = 0; s < conf->copies*2; s++) {
		struct bio *b;
		int d = r10_bio->devs[s/2].devnum;
		struct md_rdev *rdev2;
		if (s&1) {
			rdev2 = conf->mirrors[d].replacement;
			b = r10_bio->devs[s/2].repl_bio;
		} else {
			rdev2 = conf->mirrors[d].rdev;
			b = r10_bio->devs[s/2].bio;
		}
		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
			continue;

		bio_set_dev(b, rdev2->bdev);
		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
			rdev2->new_data_offset;
		b->bi_end_io = end_reshape_write;
		b->bi_opf = REQ_OP_WRITE;
		b->bi_next = blist;
		blist = b;
	}

	/* Now add as many pages as possible to all of these bios. */

	nr_sectors = 0;
	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
	for (s = 0; s < max_sectors; s += PAGE_SIZE >> 9) {
		struct page *page = pages[s / (PAGE_SIZE >> 9)];
		int len = (max_sectors - s) << 9;
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		for (bio = blist; bio; bio = bio->bi_next) {
			if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
				bio->bi_status = BLK_STS_RESOURCE;
				bio_endio(bio);
				return sectors_done;
			}
		}
		sector_nr += len >> 9;
		nr_sectors += len >> 9;
	}
	r10_bio->sectors = nr_sectors;

	/* Now submit the read */
	atomic_inc(&r10_bio->remaining);
	read_bio->bi_next = NULL;
	submit_bio_noacct(read_bio);
	sectors_done += nr_sectors;
	if (sector_nr <= last)
		goto read_more;

	lower_barrier(conf);

	/* Now that we have done the whole section we can
	 * update reshape_progress
	 */
	if (mddev->reshape_backwards)
		conf->reshape_progress -= sectors_done;
	else
		conf->reshape_progress += sectors_done;

	return sectors_done;
}

static void end_reshape_request(struct r10bio *r10_bio);
static int handle_reshape_read_error(struct mddev *mddev,
				     struct r10bio *r10_bio);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
	/* Reshape read completed.  Hopefully we have a block
	 * to write out.
	 * If we got a read error then we do sync 1-page reads from
	 * elsewhere until we find the data - or give up.
	 */
	struct r10conf *conf = mddev->private;
	int s;

	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
			/* Reshape has been aborted */
			md_done_sync(mddev, r10_bio->sectors);
			md_sync_error(mddev);
			return;
		}

	/* We definitely have the data in the pages, schedule the
	 * writes.
	 */
	atomic_set(&r10_bio->remaining, 1);
	for (s = 0; s < conf->copies*2; s++) {
		struct bio *b;
		int d = r10_bio->devs[s/2].devnum;
		struct md_rdev *rdev;
		if (s&1) {
			rdev = conf->mirrors[d].replacement;
			b = r10_bio->devs[s/2].repl_bio;
		} else {
			rdev = conf->mirrors[d].rdev;
			b = r10_bio->devs[s/2].bio;
		}
		if (!rdev || test_bit(Faulty, &rdev->flags))
			continue;

		atomic_inc(&rdev->nr_pending);
		atomic_inc(&r10_bio->remaining);
		b->bi_next = NULL;
		submit_bio_noacct(b);
	}
	end_reshape_request(r10_bio);
}

static void end_reshape(struct r10conf *conf)
{
	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
		return;

	spin_lock_irq(&conf->device_lock);
	conf->prev = conf->geo;
	md_finish_reshape(conf->mddev);
	smp_wmb();
	conf->reshape_progress = MaxSector;
	conf->reshape_safe = MaxSector;
	spin_unlock_irq(&conf->device_lock);

	mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf));
	conf->fullsync = 0;
}

static void raid10_update_reshape_pos(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;
	sector_t lo, hi;

	mddev->cluster_ops->resync_info_get(mddev, &lo, &hi);
	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
	    || mddev->reshape_position == MaxSector)
		conf->reshape_progress = mddev->reshape_position;
	else
		WARN_ON_ONCE(1);
}

static int handle_reshape_read_error(struct mddev *mddev,
				     struct r10bio *r10_bio)
{
	/* Use sync reads to get the blocks from somewhere else */
	int sectors = r10_bio->sectors;
	struct r10conf *conf = mddev->private;
	struct r10bio *r10b;
	int slot = 0;
	int idx = 0;
	struct page **pages;

	r10b = kmalloc_flex(*r10b, devs, conf->copies, GFP_NOIO);
	if (!r10b) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return -ENOMEM;
	}

	/* reshape IOs share pages from .devs[0].bio */
	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;

	r10b->sector = r10_bio->sector;
	__raid10_find_phys(&conf->prev, r10b);

	while (sectors) {
		int s = sectors;
		int success = 0;
		int first_slot = slot;

		if (s > (PAGE_SIZE >> 9))
			s = PAGE_SIZE >> 9;

		while (!success) {
			int d = r10b->devs[slot].devnum;
			struct md_rdev *rdev = conf->mirrors[d].rdev;
			sector_t addr;
			if (rdev == NULL ||
			    test_bit(Faulty, &rdev->flags) ||
			    !test_bit(In_sync, &rdev->flags))
				goto failed;

			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
			atomic_inc(&rdev->nr_pending);
			success = sync_page_io(rdev,
					       addr,
					       s << 9,
					       pages[idx],
					       REQ_OP_READ, false);
			rdev_dec_pending(rdev, mddev);
			if (success)
				break;
failed:
			slot++;
			if (slot >= conf->copies)
				slot = 0;
			if (slot == first_slot)
				break;
		}
		if (!success) {
			/* couldn't read this block, must give up */
			set_bit(MD_RECOVERY_INTR,
				&mddev->recovery);
			kfree(r10b);
			return -EIO;
		}
		sectors -= s;
		idx++;
	}
	kfree(r10b);
	return 0;
}

static void end_reshape_write(struct bio *bio)
{
	struct r10bio *r10_bio = get_resync_r10bio(bio);
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;
	int d;
	int slot;
	int repl;
	struct md_rdev *rdev = NULL;

	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
	rdev = repl ? conf->mirrors[d].replacement :
		      conf->mirrors[d].rdev;

	if (bio->bi_status) {
		/* FIXME should record badblock */
		md_error(mddev, rdev);
	}

	rdev_dec_pending(rdev, mddev);
	end_reshape_request(r10_bio);
}

static void end_reshape_request(struct r10bio *r10_bio)
{
	if (!atomic_dec_and_test(&r10_bio->remaining))
		return;
	md_done_sync(r10_bio->mddev, r10_bio->sectors);
	bio_put(r10_bio->master_bio);
	put_buf(r10_bio);
}

static void raid10_finish_reshape(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;

	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
		return;

	if (mddev->delta_disks > 0) {
		if (mddev->resync_offset > mddev->resync_max_sectors) {
			mddev->resync_offset = mddev->resync_max_sectors;
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		}
		mddev->resync_max_sectors = mddev->array_sectors;
	} else {
		int d;
		for (d = conf->geo.raid_disks;
		     d < conf->geo.raid_disks - mddev->delta_disks;
		     d++) {
			struct md_rdev *rdev = conf->mirrors[d].rdev;
			if (rdev)
				clear_bit(In_sync, &rdev->flags);
			rdev = conf->mirrors[d].replacement;
			if (rdev)
				clear_bit(In_sync, &rdev->flags);
		}
	}
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
	mddev->reshape_position = MaxSector;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
}

static struct md_personality raid10_personality = {
	.head = {
		.type	= MD_PERSONALITY,
		.id	= ID_RAID10,
		.name	= "raid10",
		.owner	= THIS_MODULE,
	},

	.make_request	= raid10_make_request,
	.run		= raid10_run,
	.free		= raid10_free,
	.status		= raid10_status,
	.error_handler	= raid10_error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk = raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= raid10_sync_request,
	.quiesce	= raid10_quiesce,
	.size		= raid10_size,
	.resize		= raid10_resize,
	.takeover	= raid10_takeover,
	.check_reshape	= raid10_check_reshape,
	.start_reshape	= raid10_start_reshape,
	.finish_reshape	= raid10_finish_reshape,
	.update_reshape_pos = raid10_update_reshape_pos,
};

static int __init raid10_init(void)
{
	return register_md_submodule(&raid10_personality.head);
}

static void __exit raid10_exit(void)
{
	unregister_md_submodule(&raid10_personality.head);
}

module_init(raid10_init);
module_exit(raid10_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");