1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * raid10.c : Multiple Devices driver for Linux
4 *
5 * Copyright (C) 2000-2004 Neil Brown
6 *
7 * RAID-10 support for md.
8 *
9 * Based on code in raid1.c. See raid1.c for further copyright information.
10 */
11
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14 #include <linux/blkdev.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 #include <linux/ratelimit.h>
18 #include <linux/kthread.h>
19 #include <linux/raid/md_p.h>
20 #include <trace/events/block.h>
21 #include "md.h"
22
23 #define RAID_1_10_NAME "raid10"
24 #include "raid10.h"
25 #include "raid0.h"
26 #include "md-bitmap.h"
27 #include "md-cluster.h"
28
29 /*
30 * RAID10 provides a combination of RAID0 and RAID1 functionality.
31 * The layout of data is defined by
32 * chunk_size
33 * raid_disks
34 * near_copies (stored in low byte of layout)
35 * far_copies (stored in second byte of layout)
36 * far_offset (stored in bit 16 of layout )
37 * use_far_sets (stored in bit 17 of layout )
38 * use_far_sets_bugfixed (stored in bit 18 of layout )
39 *
40 * The data to be stored is divided into chunks using chunksize. Each device
41 * is divided into far_copies sections. In each section, chunks are laid out
42 * in a style similar to raid0, but near_copies copies of each chunk are stored
43 * (each on a different drive). The starting device for each section is offset
44 * near_copies from the starting device of the previous section. Thus there
45 * are (near_copies * far_copies) copies of each chunk, and each is on a different
46 * drive. near_copies and far_copies must be at least one, and their product
47 * is at most raid_disks.
48 *
49 * If far_offset is true, then the far_copies are handled a bit differently.
50 * The copies are still in different stripes, but instead of being very far
51 * apart on disk, they are adjacent stripes.
52 *
53 * The far and offset algorithms are handled slightly differently if
54 * 'use_far_sets' is true. In this case, the array's devices are grouped into
55 * sets that are (near_copies * far_copies) in size. The far copied stripes
56 * are still shifted by 'near_copies' devices, but this shifting stays confined
57 * to the set rather than the entire array. This is done to improve the number
58 * of device combinations that can fail without causing the array to fail.
59 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
60 * on a device):
61 * A B C D A B C D E
62 * ... ...
63 * D A B C E A B C D
64 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
65 * [A B] [C D] [A B] [C D E]
66 * |...| |...| |...| | ... |
67 * [B A] [D C] [B A] [E C D]
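 *
 * Example 'near' layout for comparison (near_copies=2, far_copies=1 on 4
 * devices; each column is a device, each letter a chunk):
 *   A A B B
 *   C C D D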
68 */
69
70 static void allow_barrier(struct r10conf *conf);
71 static void lower_barrier(struct r10conf *conf);
72 static int _enough(struct r10conf *conf, int previous, int ignore);
73 static int enough(struct r10conf *conf, int ignore);
74 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
75 int *skipped);
76 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
77 static void end_reshape_write(struct bio *bio);
78 static void end_reshape(struct r10conf *conf);
79
80 #include "raid1-10.c"
81
82 #define NULL_CMD
83 #define cmd_before(conf, cmd) \
84 do { \
85 write_sequnlock_irq(&(conf)->resync_lock); \
86 cmd; \
87 } while (0)
88 #define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)
89
90 #define wait_event_barrier_cmd(conf, cond, cmd) \
91 wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
92 cmd_after(conf))
93
94 #define wait_event_barrier(conf, cond) \
95 wait_event_barrier_cmd(conf, cond, NULL_CMD)
96
97 /*
98 * For a resync bio, the r10bio pointer can be retrieved from the per-bio
99 * 'struct resync_pages'.
100 */
101 static inline struct r10bio *get_resync_r10bio(struct bio *bio)
102 {
103 return get_resync_pages(bio)->raid_bio;
104 }
105
106 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
107 {
108 struct r10conf *conf = data;
109 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
110
111 /* allocate a r10bio with room for raid_disks entries in the
112 * bios array */
113 return kzalloc(size, gfp_flags);
114 }
115
116 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
117 /* amount of memory to reserve for resync requests */
118 #define RESYNC_WINDOW (1024*1024)
119 /* maximum number of concurrent requests, memory permitting */
120 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
121 #define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
122 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
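/*
 * Sizing note (assuming the 64 KiB RESYNC_BLOCK_SIZE defined in raid1-10.c):
 * RESYNC_SECTORS is then 128 sectors per block, and RESYNC_DEPTH evaluates
 * to 32 MiB / 64 KiB = 512 concurrent resync requests at most, memory
 * permitting.
 */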
123
124 /*
125 * When performing a resync, we need to read and compare, so
126 * we need as many pages as there are copies.
127 * When performing a recovery, we need 2 bios, one for read,
128 * one for write (we recover only one drive per r10buf)
129 *
130 */
131 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
132 {
133 struct r10conf *conf = data;
134 struct r10bio *r10_bio;
135 struct bio *bio;
136 int j;
137 int nalloc, nalloc_rp;
138 struct resync_pages *rps;
139
140 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
141 if (!r10_bio)
142 return NULL;
143
144 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
145 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
146 nalloc = conf->copies; /* resync */
147 else
148 nalloc = 2; /* recovery */
149
150 /* allocate once for all bios */
151 if (!conf->have_replacement)
152 nalloc_rp = nalloc;
153 else
154 nalloc_rp = nalloc * 2;
155 rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
156 if (!rps)
157 goto out_free_r10bio;
158
159 /*
160 * Allocate bios.
161 */
162 for (j = nalloc ; j-- ; ) {
163 bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
164 if (!bio)
165 goto out_free_bio;
166 bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
167 r10_bio->devs[j].bio = bio;
168 if (!conf->have_replacement)
169 continue;
170 bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
171 if (!bio)
172 goto out_free_bio;
173 bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
174 r10_bio->devs[j].repl_bio = bio;
175 }
176 /*
177 * Allocate RESYNC_PAGES data pages and attach them
178 * where needed.
179 */
180 for (j = 0; j < nalloc; j++) {
181 struct bio *rbio = r10_bio->devs[j].repl_bio;
182 struct resync_pages *rp, *rp_repl;
183
184 rp = &rps[j];
185 if (rbio)
186 rp_repl = &rps[nalloc + j];
187
188 bio = r10_bio->devs[j].bio;
189
190 if (!j || test_bit(MD_RECOVERY_SYNC,
191 &conf->mddev->recovery)) {
192 if (resync_alloc_pages(rp, gfp_flags))
193 goto out_free_pages;
194 } else {
195 memcpy(rp, &rps[0], sizeof(*rp));
196 resync_get_all_pages(rp);
197 }
198
199 rp->raid_bio = r10_bio;
200 bio->bi_private = rp;
201 if (rbio) {
202 memcpy(rp_repl, rp, sizeof(*rp));
203 rbio->bi_private = rp_repl;
204 }
205 }
206
207 return r10_bio;
208
209 out_free_pages:
210 while (--j >= 0)
211 resync_free_pages(&rps[j]);
212
213 j = 0;
214 out_free_bio:
215 for ( ; j < nalloc; j++) {
216 if (r10_bio->devs[j].bio)
217 bio_uninit(r10_bio->devs[j].bio);
218 kfree(r10_bio->devs[j].bio);
219 if (r10_bio->devs[j].repl_bio)
220 bio_uninit(r10_bio->devs[j].repl_bio);
221 kfree(r10_bio->devs[j].repl_bio);
222 }
223 kfree(rps);
224 out_free_r10bio:
225 rbio_pool_free(r10_bio, conf);
226 return NULL;
227 }
228
229 static void r10buf_pool_free(void *__r10_bio, void *data)
230 {
231 struct r10conf *conf = data;
232 struct r10bio *r10bio = __r10_bio;
233 int j;
234 struct resync_pages *rp = NULL;
235
236 for (j = conf->copies; j--; ) {
237 struct bio *bio = r10bio->devs[j].bio;
238
239 if (bio) {
240 rp = get_resync_pages(bio);
241 resync_free_pages(rp);
242 bio_uninit(bio);
243 kfree(bio);
244 }
245
246 bio = r10bio->devs[j].repl_bio;
247 if (bio) {
248 bio_uninit(bio);
249 kfree(bio);
250 }
251 }
252
253 /* resync pages array stored in the 1st bio's .bi_private */
254 kfree(rp);
255
256 rbio_pool_free(r10bio, conf);
257 }
258
259 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
260 {
261 int i;
262
263 for (i = 0; i < conf->geo.raid_disks; i++) {
264 struct bio **bio = & r10_bio->devs[i].bio;
265 if (!BIO_SPECIAL(*bio))
266 bio_put(*bio);
267 *bio = NULL;
268 bio = &r10_bio->devs[i].repl_bio;
269 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
270 bio_put(*bio);
271 *bio = NULL;
272 }
273 }
274
275 static void free_r10bio(struct r10bio *r10_bio)
276 {
277 struct r10conf *conf = r10_bio->mddev->private;
278
279 put_all_bios(conf, r10_bio);
280 mempool_free(r10_bio, &conf->r10bio_pool);
281 }
282
283 static void put_buf(struct r10bio *r10_bio)
284 {
285 struct r10conf *conf = r10_bio->mddev->private;
286
287 mempool_free(r10_bio, &conf->r10buf_pool);
288
289 lower_barrier(conf);
290 }
291
292 static void wake_up_barrier(struct r10conf *conf)
293 {
294 if (wq_has_sleeper(&conf->wait_barrier))
295 wake_up(&conf->wait_barrier);
296 }
297
298 static void reschedule_retry(struct r10bio *r10_bio)
299 {
300 unsigned long flags;
301 struct mddev *mddev = r10_bio->mddev;
302 struct r10conf *conf = mddev->private;
303
304 spin_lock_irqsave(&conf->device_lock, flags);
305 list_add(&r10_bio->retry_list, &conf->retry_list);
306 conf->nr_queued ++;
307 spin_unlock_irqrestore(&conf->device_lock, flags);
308
309 /* wake up frozen array... */
310 wake_up(&conf->wait_barrier);
311
312 md_wakeup_thread(mddev->thread);
313 }
314
315 /*
316 * raid_end_bio_io() is called when we have finished servicing a mirrored
317 * operation and are ready to return a success/failure code to the buffer
318 * cache layer.
319 */
320 static void raid_end_bio_io(struct r10bio *r10_bio)
321 {
322 struct bio *bio = r10_bio->master_bio;
323 struct r10conf *conf = r10_bio->mddev->private;
324
325 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
326 bio->bi_status = BLK_STS_IOERR;
327
328 bio_endio(bio);
329 /*
330 * Wake up any possible resync thread that waits for the device
331 * to go idle.
332 */
333 allow_barrier(conf);
334
335 free_r10bio(r10_bio);
336 }
337
338 /*
339 * Update disk head position estimator based on IRQ completion info.
340 */
341 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
342 {
343 struct r10conf *conf = r10_bio->mddev->private;
344
345 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
346 r10_bio->devs[slot].addr + (r10_bio->sectors);
347 }
348
349 /*
350 * Find the disk number which triggered the given bio
351 */
352 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
353 struct bio *bio, int *slotp, int *replp)
354 {
355 int slot;
356 int repl = 0;
357
358 for (slot = 0; slot < conf->geo.raid_disks; slot++) {
359 if (r10_bio->devs[slot].bio == bio)
360 break;
361 if (r10_bio->devs[slot].repl_bio == bio) {
362 repl = 1;
363 break;
364 }
365 }
366
367 update_head_pos(slot, r10_bio);
368
369 if (slotp)
370 *slotp = slot;
371 if (replp)
372 *replp = repl;
373 return r10_bio->devs[slot].devnum;
374 }
375
376 static void raid10_end_read_request(struct bio *bio)
377 {
378 int uptodate = !bio->bi_status;
379 struct r10bio *r10_bio = bio->bi_private;
380 int slot;
381 struct md_rdev *rdev;
382 struct r10conf *conf = r10_bio->mddev->private;
383
384 slot = r10_bio->read_slot;
385 rdev = r10_bio->devs[slot].rdev;
386 /*
387 * this branch is our 'one mirror IO has finished' event handler:
388 */
389 update_head_pos(slot, r10_bio);
390
391 if (uptodate) {
392 /*
393 * Set R10BIO_Uptodate in our master bio, so that
394 * we will return a good error code to the higher
395 * levels even if IO on some other mirrored buffer fails.
396 *
397 * The 'master' represents the composite IO operation to
398 * user-side. So if something waits for IO, then it will
399 * wait for the 'master' bio.
400 */
401 set_bit(R10BIO_Uptodate, &r10_bio->state);
402 } else if (!raid1_should_handle_error(bio)) {
403 uptodate = 1;
404 } else {
405 /* If all other devices that store this block have
406 * failed, we want to return the error upwards rather
407 * than fail the last device. Here we redefine
408 * "uptodate" to mean "Don't want to retry"
409 */
410 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
411 rdev->raid_disk))
412 uptodate = 1;
413 }
414 if (uptodate) {
415 raid_end_bio_io(r10_bio);
416 rdev_dec_pending(rdev, conf->mddev);
417 } else {
418 /*
419 * oops, read error - keep the refcount on the rdev
420 */
421 pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
422 mdname(conf->mddev),
423 rdev->bdev,
424 (unsigned long long)r10_bio->sector);
425 set_bit(R10BIO_ReadError, &r10_bio->state);
426 reschedule_retry(r10_bio);
427 }
428 }
429
430 static void close_write(struct r10bio *r10_bio)
431 {
432 struct mddev *mddev = r10_bio->mddev;
433
434 md_write_end(mddev);
435 }
436
437 static void one_write_done(struct r10bio *r10_bio)
438 {
439 if (atomic_dec_and_test(&r10_bio->remaining)) {
440 if (test_bit(R10BIO_WriteError, &r10_bio->state))
441 reschedule_retry(r10_bio);
442 else {
443 close_write(r10_bio);
444 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
445 reschedule_retry(r10_bio);
446 else
447 raid_end_bio_io(r10_bio);
448 }
449 }
450 }
451
452 static void raid10_end_write_request(struct bio *bio)
453 {
454 struct r10bio *r10_bio = bio->bi_private;
455 int dev;
456 int dec_rdev = 1;
457 struct r10conf *conf = r10_bio->mddev->private;
458 int slot, repl;
459 struct md_rdev *rdev = NULL;
460 struct bio *to_put = NULL;
461 bool ignore_error = !raid1_should_handle_error(bio) ||
462 (bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
463
464 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
465
466 if (repl)
467 rdev = conf->mirrors[dev].replacement;
468 if (!rdev) {
469 smp_rmb();
470 repl = 0;
471 rdev = conf->mirrors[dev].rdev;
472 }
473 /*
474 * this branch is our 'one mirror IO has finished' event handler:
475 */
476 if (bio->bi_status && !ignore_error) {
477 if (repl)
478 /* Never record new bad blocks to replacement,
479 * just fail it.
480 */
481 md_error(rdev->mddev, rdev);
482 else {
483 set_bit(WriteErrorSeen, &rdev->flags);
484 if (!test_and_set_bit(WantReplacement, &rdev->flags))
485 set_bit(MD_RECOVERY_NEEDED,
486 &rdev->mddev->recovery);
487
488 dec_rdev = 0;
489 if (test_bit(FailFast, &rdev->flags) &&
490 (bio->bi_opf & MD_FAILFAST)) {
491 md_error(rdev->mddev, rdev);
492 }
493
494 /*
495 * When the device is faulty, it is not necessary to
496 * handle write error.
497 */
498 if (!test_bit(Faulty, &rdev->flags))
499 set_bit(R10BIO_WriteError, &r10_bio->state);
500 else {
501 /* Fail the request */
502 r10_bio->devs[slot].bio = NULL;
503 to_put = bio;
504 dec_rdev = 1;
505 }
506 }
507 } else {
508 /*
509 * Set R10BIO_Uptodate in our master bio, so that
510 * we will return a good error code to the higher
511 * levels even if IO on some other mirrored buffer fails.
512 *
513 * The 'master' represents the composite IO operation to
514 * user-side. So if something waits for IO, then it will
515 * wait for the 'master' bio.
516 *
517 * Do not set R10BIO_Uptodate if the current device is
518 * rebuilding or Faulty. This is because we cannot use
519 * such a device for properly reading the data back (we could
520 * potentially use it, if the current write fell
521 * before rdev->recovery_offset, but for simplicity we don't
522 * check this here).
523 */
524 if (test_bit(In_sync, &rdev->flags) &&
525 !test_bit(Faulty, &rdev->flags))
526 set_bit(R10BIO_Uptodate, &r10_bio->state);
527
528 /* Maybe we can clear some bad blocks. */
529 if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
530 r10_bio->sectors) &&
531 !ignore_error) {
532 bio_put(bio);
533 if (repl)
534 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
535 else
536 r10_bio->devs[slot].bio = IO_MADE_GOOD;
537 dec_rdev = 0;
538 set_bit(R10BIO_MadeGood, &r10_bio->state);
539 }
540 }
541
542 /*
543 *
544 * Let's see if all mirrored write operations have finished
545 * already.
546 */
547 one_write_done(r10_bio);
548 if (dec_rdev)
549 rdev_dec_pending(rdev, conf->mddev);
550 if (to_put)
551 bio_put(to_put);
552 }
553
554 /*
555 * RAID10 layout manager
556 * As well as the chunksize and raid_disks count, there are two
557 * parameters: near_copies and far_copies.
558 * near_copies * far_copies must be <= raid_disks.
559 * Normally one of these will be 1.
560 * If both are 1, we get raid0.
561 * If near_copies == raid_disks, we get raid1.
562 *
563 * Chunks are laid out in raid0 style with near_copies copies of the
564 * first chunk, followed by near_copies copies of the next chunk and
565 * so on.
566 * If far_copies > 1, then after 1/far_copies of the array has been assigned
567 * as described above, we start again with a device offset of near_copies.
568 * So we effectively have another copy of the whole array further down all
569 * the drives, but with blocks on different drives.
570 * With this layout, a block is never stored twice on the same device.
571 *
572 * raid10_find_phys finds the sector offset of a given virtual sector
573 * on each device that it is on.
574 *
575 * raid10_find_virt does the reverse mapping, from a device and a
576 * sector offset to a virtual address
577 */
578
579 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
580 {
581 int n,f;
582 sector_t sector;
583 sector_t chunk;
584 sector_t stripe;
585 int dev;
586 int slot = 0;
587 int last_far_set_start, last_far_set_size;
588
589 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
590 last_far_set_start *= geo->far_set_size;
591
592 last_far_set_size = geo->far_set_size;
593 last_far_set_size += (geo->raid_disks % geo->far_set_size);
594
595 /* now calculate first sector/dev */
596 chunk = r10bio->sector >> geo->chunk_shift;
597 sector = r10bio->sector & geo->chunk_mask;
598
599 chunk *= geo->near_copies;
600 stripe = chunk;
601 dev = sector_div(stripe, geo->raid_disks);
602 if (geo->far_offset)
603 stripe *= geo->far_copies;
604
605 sector += stripe << geo->chunk_shift;
606
607 /* and calculate all the others */
608 for (n = 0; n < geo->near_copies; n++) {
609 int d = dev;
610 int set;
611 sector_t s = sector;
612 r10bio->devs[slot].devnum = d;
613 r10bio->devs[slot].addr = s;
614 slot++;
615
616 for (f = 1; f < geo->far_copies; f++) {
617 set = d / geo->far_set_size;
618 d += geo->near_copies;
619
620 if ((geo->raid_disks % geo->far_set_size) &&
621 (d > last_far_set_start)) {
622 d -= last_far_set_start;
623 d %= last_far_set_size;
624 d += last_far_set_start;
625 } else {
626 d %= geo->far_set_size;
627 d += geo->far_set_size * set;
628 }
629 s += geo->stride;
630 r10bio->devs[slot].devnum = d;
631 r10bio->devs[slot].addr = s;
632 slot++;
633 }
634 dev++;
635 if (dev >= geo->raid_disks) {
636 dev = 0;
637 sector += (geo->chunk_mask + 1);
638 }
639 }
640 }
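/*
 * Worked example for __raid10_find_phys() with a hypothetical geometry (not
 * taken from any particular array): raid_disks=4, near_copies=2,
 * far_copies=1, 128-sector chunks (chunk_shift=7, chunk_mask=127).  For
 * virtual sector 300: chunk = 2, in-chunk offset = 44; chunk * near_copies
 * = 4, so dev = 0 and stripe = 1, giving a physical address of
 * 44 + (1 << 7) = 172.  The two slots map to devices 0 and 1, both at
 * physical sector 172.
 */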
641
642 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
643 {
644 struct geom *geo = &conf->geo;
645
646 if (conf->reshape_progress != MaxSector &&
647 ((r10bio->sector >= conf->reshape_progress) !=
648 conf->mddev->reshape_backwards)) {
649 set_bit(R10BIO_Previous, &r10bio->state);
650 geo = &conf->prev;
651 } else
652 clear_bit(R10BIO_Previous, &r10bio->state);
653
654 __raid10_find_phys(geo, r10bio);
655 }
656
657 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
658 {
659 sector_t offset, chunk, vchunk;
660 /* Never use conf->prev as this is only called during resync
661 * or recovery, so reshape isn't happening
662 */
663 struct geom *geo = &conf->geo;
664 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
665 int far_set_size = geo->far_set_size;
666 int last_far_set_start;
667
668 if (geo->raid_disks % geo->far_set_size) {
669 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
670 last_far_set_start *= geo->far_set_size;
671
672 if (dev >= last_far_set_start) {
673 far_set_size = geo->far_set_size;
674 far_set_size += (geo->raid_disks % geo->far_set_size);
675 far_set_start = last_far_set_start;
676 }
677 }
678
679 offset = sector & geo->chunk_mask;
680 if (geo->far_offset) {
681 int fc;
682 chunk = sector >> geo->chunk_shift;
683 fc = sector_div(chunk, geo->far_copies);
684 dev -= fc * geo->near_copies;
685 if (dev < far_set_start)
686 dev += far_set_size;
687 } else {
688 while (sector >= geo->stride) {
689 sector -= geo->stride;
690 if (dev < (geo->near_copies + far_set_start))
691 dev += far_set_size - geo->near_copies;
692 else
693 dev -= geo->near_copies;
694 }
695 chunk = sector >> geo->chunk_shift;
696 }
697 vchunk = chunk * geo->raid_disks + dev;
698 sector_div(vchunk, geo->near_copies);
699 return (vchunk << geo->chunk_shift) + offset;
700 }
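/*
 * Continuing the hypothetical example above (and assuming the stride is
 * larger than 172 sectors): raid10_find_virt(conf, 172, 0) gives
 * offset = 44, chunk = 1, vchunk = 1 * 4 + 0 = 4, then 4 / near_copies = 2,
 * and (2 << 7) + 44 = 300, the original virtual sector.
 */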
701
702 /*
703 * This routine returns the disk from which the requested read should
704 * be done. There is a per-array 'next expected sequential IO' sector
705 * number - if this matches on the next IO then we use the last disk.
706 * There is also a per-disk 'last known head position' sector that is
707 * maintained from IRQ contexts, both the normal and the resync IO
708 * completion handlers update this position correctly. If there is no
709 * perfect sequential match then we pick the disk whose head is closest.
710 *
711 * If there are 2 mirrors in the same 2 devices, performance degrades
712 * because position is mirror, not device based.
713 *
714 * The rdev for the device selected will have nr_pending incremented.
715 */
716
717 /*
718 * FIXME: possibly should rethink readbalancing and do it differently
719 * depending on near_copies / far_copies geometry.
720 */
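/*
 * Rough summary of the balancing policy below: missing or faulty rdevs are
 * skipped and known bad-block ranges are avoided where possible; when
 * balancing is allowed, a non-rotational device with the fewest pending
 * requests is preferred if one exists, otherwise the device whose head is
 * closest to the target is used ('near' arrays prefer an idle device,
 * 'far' arrays the lowest address).
 */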
721 static struct md_rdev *read_balance(struct r10conf *conf,
722 struct r10bio *r10_bio,
723 int *max_sectors)
724 {
725 const sector_t this_sector = r10_bio->sector;
726 int disk, slot;
727 int sectors = r10_bio->sectors;
728 int best_good_sectors;
729 sector_t new_distance, best_dist;
730 struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
731 int do_balance;
732 int best_dist_slot, best_pending_slot;
733 bool has_nonrot_disk = false;
734 unsigned int min_pending;
735 struct geom *geo = &conf->geo;
736
737 raid10_find_phys(conf, r10_bio);
738 best_dist_slot = -1;
739 min_pending = UINT_MAX;
740 best_dist_rdev = NULL;
741 best_pending_rdev = NULL;
742 best_dist = MaxSector;
743 best_good_sectors = 0;
744 do_balance = 1;
745 clear_bit(R10BIO_FailFast, &r10_bio->state);
746
747 if (raid1_should_read_first(conf->mddev, this_sector, sectors))
748 do_balance = 0;
749
750 for (slot = 0; slot < conf->copies ; slot++) {
751 sector_t first_bad;
752 sector_t bad_sectors;
753 sector_t dev_sector;
754 unsigned int pending;
755 bool nonrot;
756
757 if (r10_bio->devs[slot].bio == IO_BLOCKED)
758 continue;
759 disk = r10_bio->devs[slot].devnum;
760 rdev = conf->mirrors[disk].replacement;
761 if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
762 r10_bio->devs[slot].addr + sectors >
763 rdev->recovery_offset)
764 rdev = conf->mirrors[disk].rdev;
765 if (rdev == NULL ||
766 test_bit(Faulty, &rdev->flags))
767 continue;
768 if (!test_bit(In_sync, &rdev->flags) &&
769 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
770 continue;
771
772 dev_sector = r10_bio->devs[slot].addr;
773 if (is_badblock(rdev, dev_sector, sectors,
774 &first_bad, &bad_sectors)) {
775 if (best_dist < MaxSector)
776 /* Already have a better slot */
777 continue;
778 if (first_bad <= dev_sector) {
779 /* Cannot read here. If this is the
780 * 'primary' device, then we must not read
781 * beyond 'bad_sectors' from another device.
782 */
783 bad_sectors -= (dev_sector - first_bad);
784 if (!do_balance && sectors > bad_sectors)
785 sectors = bad_sectors;
786 if (best_good_sectors > sectors)
787 best_good_sectors = sectors;
788 } else {
789 sector_t good_sectors =
790 first_bad - dev_sector;
791 if (good_sectors > best_good_sectors) {
792 best_good_sectors = good_sectors;
793 best_dist_slot = slot;
794 best_dist_rdev = rdev;
795 }
796 if (!do_balance)
797 /* Must read from here */
798 break;
799 }
800 continue;
801 } else
802 best_good_sectors = sectors;
803
804 if (!do_balance)
805 break;
806
807 nonrot = bdev_nonrot(rdev->bdev);
808 has_nonrot_disk |= nonrot;
809 pending = atomic_read(&rdev->nr_pending);
810 if (min_pending > pending && nonrot) {
811 min_pending = pending;
812 best_pending_slot = slot;
813 best_pending_rdev = rdev;
814 }
815
816 if (best_dist_slot >= 0)
817 /* At least 2 disks to choose from so failfast is OK */
818 set_bit(R10BIO_FailFast, &r10_bio->state);
819 /* This optimisation is debatable, and completely destroys
820 * sequential read speed for 'far copies' arrays. So only
821 * keep it for 'near' arrays, and review those later.
822 */
823 if (geo->near_copies > 1 && !pending)
824 new_distance = 0;
825
826 /* for far > 1 always use the lowest address */
827 else if (geo->far_copies > 1)
828 new_distance = r10_bio->devs[slot].addr;
829 else
830 new_distance = abs(r10_bio->devs[slot].addr -
831 conf->mirrors[disk].head_position);
832
833 if (new_distance < best_dist) {
834 best_dist = new_distance;
835 best_dist_slot = slot;
836 best_dist_rdev = rdev;
837 }
838 }
839 if (slot >= conf->copies) {
840 if (has_nonrot_disk) {
841 slot = best_pending_slot;
842 rdev = best_pending_rdev;
843 } else {
844 slot = best_dist_slot;
845 rdev = best_dist_rdev;
846 }
847 }
848
849 if (slot >= 0) {
850 atomic_inc(&rdev->nr_pending);
851 r10_bio->read_slot = slot;
852 } else
853 rdev = NULL;
854 *max_sectors = best_good_sectors;
855
856 return rdev;
857 }
858
859 static void flush_pending_writes(struct r10conf *conf)
860 {
861 /* Any writes that have been queued but are awaiting
862 * bitmap updates get flushed here.
863 */
864 spin_lock_irq(&conf->device_lock);
865
866 if (conf->pending_bio_list.head) {
867 struct blk_plug plug;
868 struct bio *bio;
869
870 bio = bio_list_get(&conf->pending_bio_list);
871 spin_unlock_irq(&conf->device_lock);
872
873 /*
874 * As this is called in a wait_event() loop (see freeze_array),
875 * current->state might be TASK_UNINTERRUPTIBLE which will
876 * cause a warning when we prepare to wait again. As it is
877 * rare that this path is taken, it is perfectly safe to force
878 * us to go around the wait_event() loop again, so the warning
879 * is a false-positive. Silence the warning by resetting
880 * thread state
881 */
882 __set_current_state(TASK_RUNNING);
883
884 blk_start_plug(&plug);
885 raid1_prepare_flush_writes(conf->mddev);
886 wake_up(&conf->wait_barrier);
887
888 while (bio) { /* submit pending writes */
889 struct bio *next = bio->bi_next;
890
891 raid1_submit_write(bio);
892 bio = next;
893 cond_resched();
894 }
895 blk_finish_plug(&plug);
896 } else
897 spin_unlock_irq(&conf->device_lock);
898 }
899
900 /* Barriers....
901 * Sometimes we need to suspend IO while we do something else,
902 * either some resync/recovery, or reconfigure the array.
903 * To do this we raise a 'barrier'.
904 * The 'barrier' is a counter that can be raised multiple times
905 * to count how many activities are happening which preclude
906 * normal IO.
907 * We can only raise the barrier if there is no pending IO.
908 * i.e. if nr_pending == 0.
909 * We choose only to raise the barrier if no-one is waiting for the
910 * barrier to go down. This means that as soon as an IO request
911 * is ready, no other operations which require a barrier will start
912 * until the IO request has had a chance.
913 *
914 * So: regular IO calls 'wait_barrier'. When that returns there
915 * is no background IO happening. It must arrange to call
916 * allow_barrier when it has finished its IO.
917 * background IO calls must call raise_barrier. Once that returns
918 * there is no normal IO happening. It must arrange to call
919 * lower_barrier when the particular background IO completes.
920 */
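/*
 * In addition to the wait_barrier()/allow_barrier() and
 * raise_barrier()/lower_barrier() pairs described above, error handling
 * uses freeze_array()/unfreeze_array() (further below) to quiesce the
 * array completely while a failed request is handled.
 */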
921
922 static void raise_barrier(struct r10conf *conf, int force)
923 {
924 write_seqlock_irq(&conf->resync_lock);
925
926 if (WARN_ON_ONCE(force && !conf->barrier))
927 force = false;
928
929 /* Wait until no block IO is waiting (unless 'force') */
930 wait_event_barrier(conf, force || !conf->nr_waiting);
931
932 /* block any new IO from starting */
933 WRITE_ONCE(conf->barrier, conf->barrier + 1);
934
935 /* Now wait for all pending IO to complete */
936 wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
937 conf->barrier < RESYNC_DEPTH);
938
939 write_sequnlock_irq(&conf->resync_lock);
940 }
941
942 static void lower_barrier(struct r10conf *conf)
943 {
944 unsigned long flags;
945
946 write_seqlock_irqsave(&conf->resync_lock, flags);
947 WRITE_ONCE(conf->barrier, conf->barrier - 1);
948 write_sequnlock_irqrestore(&conf->resync_lock, flags);
949 wake_up(&conf->wait_barrier);
950 }
951
952 static bool stop_waiting_barrier(struct r10conf *conf)
953 {
954 struct bio_list *bio_list = current->bio_list;
955 struct md_thread *thread;
956
957 /* barrier is dropped */
958 if (!conf->barrier)
959 return true;
960
961 /*
962 * If there are already pending requests (preventing the barrier from
963 * rising completely), and the pre-process bio queue isn't empty, then
964 * don't wait, as we need to empty that queue to get the nr_pending
965 * count down.
966 */
967 if (atomic_read(&conf->nr_pending) && bio_list &&
968 (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
969 return true;
970
971 /* daemon thread must exist while handling io */
972 thread = rcu_dereference_protected(conf->mddev->thread, true);
973 /*
974 * move on if io is issued from raid10d(), nr_pending is not released
975 * from the original io (see handle_read_error()). All raise_barrier calls are
976 * blocked until this io is done.
977 */
978 if (thread->tsk == current) {
979 WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
980 return true;
981 }
982
983 return false;
984 }
985
986 static bool wait_barrier_nolock(struct r10conf *conf)
987 {
988 unsigned int seq = read_seqbegin(&conf->resync_lock);
989
990 if (READ_ONCE(conf->barrier))
991 return false;
992
993 atomic_inc(&conf->nr_pending);
994 if (!read_seqretry(&conf->resync_lock, seq))
995 return true;
996
997 if (atomic_dec_and_test(&conf->nr_pending))
998 wake_up_barrier(conf);
999
1000 return false;
1001 }
1002
1003 static bool wait_barrier(struct r10conf *conf, bool nowait)
1004 {
1005 bool ret = true;
1006
1007 if (wait_barrier_nolock(conf))
1008 return true;
1009
1010 write_seqlock_irq(&conf->resync_lock);
1011 if (conf->barrier) {
1012 /* Return false when nowait flag is set */
1013 if (nowait) {
1014 ret = false;
1015 } else {
1016 conf->nr_waiting++;
1017 mddev_add_trace_msg(conf->mddev, "raid10 wait barrier");
1018 wait_event_barrier(conf, stop_waiting_barrier(conf));
1019 conf->nr_waiting--;
1020 }
1021 if (!conf->nr_waiting)
1022 wake_up(&conf->wait_barrier);
1023 }
1024 /* Only increment nr_pending when we wait */
1025 if (ret)
1026 atomic_inc(&conf->nr_pending);
1027 write_sequnlock_irq(&conf->resync_lock);
1028 return ret;
1029 }
1030
1031 static void allow_barrier(struct r10conf *conf)
1032 {
1033 if ((atomic_dec_and_test(&conf->nr_pending)) ||
1034 (conf->array_freeze_pending))
1035 wake_up_barrier(conf);
1036 }
1037
1038 static void freeze_array(struct r10conf *conf, int extra)
1039 {
1040 /* stop sync IO and normal IO and wait for everything to
1041 * go quiet.
1042 * We increment barrier and nr_waiting, and then
1043 * wait until nr_pending matches nr_queued+extra
1044 * This is called in the context of one normal IO request
1045 * that has failed. Thus any sync request that might be pending
1046 * will be blocked by nr_pending, and we need to wait for
1047 * pending IO requests to complete or be queued for re-try.
1048 * Thus the number queued (nr_queued) plus this request (extra)
1049 * must match the number of pending IOs (nr_pending) before
1050 * we continue.
1051 */
1052 write_seqlock_irq(&conf->resync_lock);
1053 conf->array_freeze_pending++;
1054 WRITE_ONCE(conf->barrier, conf->barrier + 1);
1055 conf->nr_waiting++;
1056 wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) ==
1057 conf->nr_queued + extra, flush_pending_writes(conf));
1058 conf->array_freeze_pending--;
1059 write_sequnlock_irq(&conf->resync_lock);
1060 }
1061
1062 static void unfreeze_array(struct r10conf *conf)
1063 {
1064 /* reverse the effect of the freeze */
1065 write_seqlock_irq(&conf->resync_lock);
1066 WRITE_ONCE(conf->barrier, conf->barrier - 1);
1067 conf->nr_waiting--;
1068 wake_up(&conf->wait_barrier);
1069 write_sequnlock_irq(&conf->resync_lock);
1070 }
1071
1072 static sector_t choose_data_offset(struct r10bio *r10_bio,
1073 struct md_rdev *rdev)
1074 {
1075 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1076 test_bit(R10BIO_Previous, &r10_bio->state))
1077 return rdev->data_offset;
1078 else
1079 return rdev->new_data_offset;
1080 }
1081
1082 static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1083 {
1084 struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
1085 struct mddev *mddev = plug->cb.data;
1086 struct r10conf *conf = mddev->private;
1087 struct bio *bio;
1088
1089 if (from_schedule) {
1090 spin_lock_irq(&conf->device_lock);
1091 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1092 spin_unlock_irq(&conf->device_lock);
1093 wake_up_barrier(conf);
1094 md_wakeup_thread(mddev->thread);
1095 kfree(plug);
1096 return;
1097 }
1098
1099 /* we aren't scheduling, so we can do the write-out directly. */
1100 bio = bio_list_get(&plug->pending);
1101 raid1_prepare_flush_writes(mddev);
1102 wake_up_barrier(conf);
1103
1104 while (bio) { /* submit pending writes */
1105 struct bio *next = bio->bi_next;
1106
1107 raid1_submit_write(bio);
1108 bio = next;
1109 cond_resched();
1110 }
1111 kfree(plug);
1112 }
1113
1114 /*
1115 * 1. Register the new request and wait if the reconstruction thread has put
1116 * up a bar for new requests. Continue immediately if no resync is active
1117 * currently.
1118 * 2. If the IO spans the reshape position, wait for the reshape to pass.
1119 */
1120 static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1121 struct bio *bio, sector_t sectors)
1122 {
1123 /* Bail out if REQ_NOWAIT is set for the bio */
1124 if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
1125 bio_wouldblock_error(bio);
1126 return false;
1127 }
1128 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1129 bio->bi_iter.bi_sector < conf->reshape_progress &&
1130 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1131 allow_barrier(conf);
1132 if (bio->bi_opf & REQ_NOWAIT) {
1133 bio_wouldblock_error(bio);
1134 return false;
1135 }
1136 mddev_add_trace_msg(conf->mddev, "raid10 wait reshape");
1137 wait_event(conf->wait_barrier,
1138 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1139 conf->reshape_progress >= bio->bi_iter.bi_sector +
1140 sectors);
1141 wait_barrier(conf, false);
1142 }
1143 return true;
1144 }
1145
1146 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1147 struct r10bio *r10_bio, bool io_accounting)
1148 {
1149 struct r10conf *conf = mddev->private;
1150 struct bio *read_bio;
1151 int max_sectors;
1152 struct md_rdev *rdev;
1153 char b[BDEVNAME_SIZE];
1154 int slot = r10_bio->read_slot;
1155 struct md_rdev *err_rdev = NULL;
1156 gfp_t gfp = GFP_NOIO;
1157 int error;
1158
1159 if (slot >= 0 && r10_bio->devs[slot].rdev) {
1160 /*
1161 * This is an error retry, but we cannot
1162 * safely dereference the rdev in the r10_bio,
1163 * we must use the one in conf.
1164 * If it has already been disconnected (unlikely)
1165 * we lose the device name in error messages.
1166 */
1167 int disk;
1168 /*
1169 * As we are blocking raid10, it is a little safer to
1170 * use __GFP_HIGH.
1171 */
1172 gfp = GFP_NOIO | __GFP_HIGH;
1173
1174 disk = r10_bio->devs[slot].devnum;
1175 err_rdev = conf->mirrors[disk].rdev;
1176 if (err_rdev)
1177 snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
1178 else {
1179 strcpy(b, "???");
1180 /* This never gets dereferenced */
1181 err_rdev = r10_bio->devs[slot].rdev;
1182 }
1183 }
1184
1185 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) {
1186 raid_end_bio_io(r10_bio);
1187 return;
1188 }
1189
1190 rdev = read_balance(conf, r10_bio, &max_sectors);
1191 if (!rdev) {
1192 if (err_rdev) {
1193 pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
1194 mdname(mddev), b,
1195 (unsigned long long)r10_bio->sector);
1196 }
1197 raid_end_bio_io(r10_bio);
1198 return;
1199 }
1200 if (err_rdev)
1201 pr_err_ratelimited("md/raid10:%s: %pg: redirecting sector %llu to another mirror\n",
1202 mdname(mddev),
1203 rdev->bdev,
1204 (unsigned long long)r10_bio->sector);
1205 if (max_sectors < bio_sectors(bio)) {
1206 struct bio *split = bio_split(bio, max_sectors,
1207 gfp, &conf->bio_split);
1208 if (IS_ERR(split)) {
1209 error = PTR_ERR(split);
1210 goto err_handle;
1211 }
1212 bio_chain(split, bio);
1213 allow_barrier(conf);
1214 submit_bio_noacct(bio);
1215 wait_barrier(conf, false);
1216 bio = split;
1217 r10_bio->master_bio = bio;
1218 r10_bio->sectors = max_sectors;
1219 }
1220 slot = r10_bio->read_slot;
1221
1222 if (io_accounting) {
1223 md_account_bio(mddev, &bio);
1224 r10_bio->master_bio = bio;
1225 }
1226 read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
1227 read_bio->bi_opf &= ~REQ_NOWAIT;
1228
1229 r10_bio->devs[slot].bio = read_bio;
1230 r10_bio->devs[slot].rdev = rdev;
1231
1232 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1233 choose_data_offset(r10_bio, rdev);
1234 read_bio->bi_end_io = raid10_end_read_request;
1235 if (test_bit(FailFast, &rdev->flags) &&
1236 test_bit(R10BIO_FailFast, &r10_bio->state))
1237 read_bio->bi_opf |= MD_FAILFAST;
1238 read_bio->bi_private = r10_bio;
1239 mddev_trace_remap(mddev, read_bio, r10_bio->sector);
1240 submit_bio_noacct(read_bio);
1241 return;
1242 err_handle:
1243 atomic_dec(&rdev->nr_pending);
1244 bio->bi_status = errno_to_blk_status(error);
1245 set_bit(R10BIO_Uptodate, &r10_bio->state);
1246 raid_end_bio_io(r10_bio);
1247 }
1248
1249 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1250 struct bio *bio, bool replacement,
1251 int n_copy)
1252 {
1253 unsigned long flags;
1254 struct r10conf *conf = mddev->private;
1255 struct md_rdev *rdev;
1256 int devnum = r10_bio->devs[n_copy].devnum;
1257 struct bio *mbio;
1258
1259 rdev = replacement ? conf->mirrors[devnum].replacement :
1260 conf->mirrors[devnum].rdev;
1261
1262 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
1263 mbio->bi_opf &= ~REQ_NOWAIT;
1264 if (replacement)
1265 r10_bio->devs[n_copy].repl_bio = mbio;
1266 else
1267 r10_bio->devs[n_copy].bio = mbio;
1268
1269 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
1270 choose_data_offset(r10_bio, rdev));
1271 mbio->bi_end_io = raid10_end_write_request;
1272 if (!replacement && test_bit(FailFast,
1273 &conf->mirrors[devnum].rdev->flags)
1274 && enough(conf, devnum))
1275 mbio->bi_opf |= MD_FAILFAST;
1276 mbio->bi_private = r10_bio;
1277 mddev_trace_remap(mddev, mbio, r10_bio->sector);
1278 /* flush_pending_writes() needs access to the rdev so...*/
1279 mbio->bi_bdev = (void *)rdev;
1280
1281 atomic_inc(&r10_bio->remaining);
1282
1283 if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) {
1284 spin_lock_irqsave(&conf->device_lock, flags);
1285 bio_list_add(&conf->pending_bio_list, mbio);
1286 spin_unlock_irqrestore(&conf->device_lock, flags);
1287 md_wakeup_thread(mddev->thread);
1288 }
1289 }
1290
1291 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
1292 {
1293 struct r10conf *conf = mddev->private;
1294 struct md_rdev *blocked_rdev;
1295 int i;
1296
1297 retry_wait:
1298 blocked_rdev = NULL;
1299 for (i = 0; i < conf->copies; i++) {
1300 struct md_rdev *rdev, *rrdev;
1301
1302 rdev = conf->mirrors[i].rdev;
1303 if (rdev) {
1304 sector_t dev_sector = r10_bio->devs[i].addr;
1305
1306 /*
1307 * A discard request doesn't care about the write result,
1308 * so it doesn't need to wait for a blocked disk here.
1309 */
1310 if (test_bit(WriteErrorSeen, &rdev->flags) &&
1311 r10_bio->sectors &&
1312 rdev_has_badblock(rdev, dev_sector,
1313 r10_bio->sectors) < 0)
1314 /*
1315 * Mustn't write here until the bad
1316 * block is acknowledged
1317 */
1318 set_bit(BlockedBadBlocks, &rdev->flags);
1319
1320 if (rdev_blocked(rdev)) {
1321 blocked_rdev = rdev;
1322 atomic_inc(&rdev->nr_pending);
1323 break;
1324 }
1325 }
1326
1327 rrdev = conf->mirrors[i].replacement;
1328 if (rrdev && rdev_blocked(rrdev)) {
1329 atomic_inc(&rrdev->nr_pending);
1330 blocked_rdev = rrdev;
1331 break;
1332 }
1333 }
1334
1335 if (unlikely(blocked_rdev)) {
1336 /* Have to wait for this device to get unblocked, then retry */
1337 allow_barrier(conf);
1338 mddev_add_trace_msg(conf->mddev,
1339 "raid10 %s wait rdev %d blocked",
1340 __func__, blocked_rdev->raid_disk);
1341 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1342 wait_barrier(conf, false);
1343 goto retry_wait;
1344 }
1345 }
1346
1347 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1348 struct r10bio *r10_bio)
1349 {
1350 struct r10conf *conf = mddev->private;
1351 int i, k;
1352 sector_t sectors;
1353 int max_sectors;
1354 int error;
1355
1356 if ((mddev_is_clustered(mddev) &&
1357 mddev->cluster_ops->area_resyncing(mddev, WRITE,
1358 bio->bi_iter.bi_sector,
1359 bio_end_sector(bio)))) {
1360 DEFINE_WAIT(w);
1361 /* Bail out if REQ_NOWAIT is set for the bio */
1362 if (bio->bi_opf & REQ_NOWAIT) {
1363 bio_wouldblock_error(bio);
1364 return;
1365 }
1366 for (;;) {
1367 prepare_to_wait(&conf->wait_barrier,
1368 &w, TASK_IDLE);
1369 if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
1370 bio->bi_iter.bi_sector, bio_end_sector(bio)))
1371 break;
1372 schedule();
1373 }
1374 finish_wait(&conf->wait_barrier, &w);
1375 }
1376
1377 sectors = r10_bio->sectors;
1378 if (!regular_request_wait(mddev, conf, bio, sectors)) {
1379 raid_end_bio_io(r10_bio);
1380 return;
1381 }
1382
1383 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1384 (mddev->reshape_backwards
1385 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1386 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1387 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1388 bio->bi_iter.bi_sector < conf->reshape_progress))) {
1389 /* Need to update reshape_position in metadata */
1390 mddev->reshape_position = conf->reshape_progress;
1391 set_mask_bits(&mddev->sb_flags, 0,
1392 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1393 md_wakeup_thread(mddev->thread);
1394 if (bio->bi_opf & REQ_NOWAIT) {
1395 allow_barrier(conf);
1396 bio_wouldblock_error(bio);
1397 return;
1398 }
1399 mddev_add_trace_msg(conf->mddev,
1400 "raid10 wait reshape metadata");
1401 wait_event(mddev->sb_wait,
1402 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1403
1404 conf->reshape_safe = mddev->reshape_position;
1405 }
1406
1407 /* first select target devices under rcu_lock and
1408 * inc refcount on their rdev. Record them by setting
1409 * bios[x] to bio
1410 * If there are known/acknowledged bad blocks on any device
1411 * on which we have seen a write error, we want to avoid
1412 * writing to those blocks. This potentially requires several
1413 * writes to write around the bad blocks. Each set of writes
1414 * gets its own r10_bio with a set of bios attached.
1415 */
1416
1417 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1418 raid10_find_phys(conf, r10_bio);
1419
1420 wait_blocked_dev(mddev, r10_bio);
1421
1422 max_sectors = r10_bio->sectors;
1423
1424 for (i = 0; i < conf->copies; i++) {
1425 int d = r10_bio->devs[i].devnum;
1426 struct md_rdev *rdev, *rrdev;
1427
1428 rdev = conf->mirrors[d].rdev;
1429 rrdev = conf->mirrors[d].replacement;
1430 if (rdev && (test_bit(Faulty, &rdev->flags)))
1431 rdev = NULL;
1432 if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1433 rrdev = NULL;
1434
1435 r10_bio->devs[i].bio = NULL;
1436 r10_bio->devs[i].repl_bio = NULL;
1437
1438 if (!rdev && !rrdev)
1439 continue;
1440 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1441 sector_t first_bad;
1442 sector_t dev_sector = r10_bio->devs[i].addr;
1443 sector_t bad_sectors;
1444 int is_bad;
1445
1446 is_bad = is_badblock(rdev, dev_sector, max_sectors,
1447 &first_bad, &bad_sectors);
1448 if (is_bad && first_bad <= dev_sector) {
1449 /* Cannot write here at all */
1450 bad_sectors -= (dev_sector - first_bad);
1451 if (bad_sectors < max_sectors)
1452 /* Mustn't write more than bad_sectors
1453 * to other devices yet
1454 */
1455 max_sectors = bad_sectors;
1456 continue;
1457 }
1458 if (is_bad) {
1459 int good_sectors;
1460
1461 /*
1462 * We cannot atomically write this, so just
1463 * error in that case. It could be possible to
1464 * atomically write other mirrors, but the
1465 * complexity of supporting that is not worth
1466 * the benefit.
1467 */
1468 if (bio->bi_opf & REQ_ATOMIC) {
1469 error = -EIO;
1470 goto err_handle;
1471 }
1472
1473 good_sectors = first_bad - dev_sector;
1474 if (good_sectors < max_sectors)
1475 max_sectors = good_sectors;
1476 }
1477 }
1478 if (rdev) {
1479 r10_bio->devs[i].bio = bio;
1480 atomic_inc(&rdev->nr_pending);
1481 }
1482 if (rrdev) {
1483 r10_bio->devs[i].repl_bio = bio;
1484 atomic_inc(&rrdev->nr_pending);
1485 }
1486 }
1487
1488 if (max_sectors < r10_bio->sectors)
1489 r10_bio->sectors = max_sectors;
1490
1491 if (r10_bio->sectors < bio_sectors(bio)) {
1492 struct bio *split = bio_split(bio, r10_bio->sectors,
1493 GFP_NOIO, &conf->bio_split);
1494 if (IS_ERR(split)) {
1495 error = PTR_ERR(split);
1496 goto err_handle;
1497 }
1498 bio_chain(split, bio);
1499 allow_barrier(conf);
1500 submit_bio_noacct(bio);
1501 wait_barrier(conf, false);
1502 bio = split;
1503 r10_bio->master_bio = bio;
1504 }
1505
1506 md_account_bio(mddev, &bio);
1507 r10_bio->master_bio = bio;
1508 atomic_set(&r10_bio->remaining, 1);
1509
1510 for (i = 0; i < conf->copies; i++) {
1511 if (r10_bio->devs[i].bio)
1512 raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1513 if (r10_bio->devs[i].repl_bio)
1514 raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1515 }
1516 one_write_done(r10_bio);
1517 return;
1518 err_handle:
1519 for (k = 0; k < i; k++) {
1520 int d = r10_bio->devs[k].devnum;
1521 struct md_rdev *rdev = conf->mirrors[d].rdev;
1522 struct md_rdev *rrdev = conf->mirrors[d].replacement;
1523
1524 if (r10_bio->devs[k].bio) {
1525 rdev_dec_pending(rdev, mddev);
1526 r10_bio->devs[k].bio = NULL;
1527 }
1528 if (r10_bio->devs[k].repl_bio) {
1529 rdev_dec_pending(rrdev, mddev);
1530 r10_bio->devs[k].repl_bio = NULL;
1531 }
1532 }
1533
1534 bio->bi_status = errno_to_blk_status(error);
1535 set_bit(R10BIO_Uptodate, &r10_bio->state);
1536 raid_end_bio_io(r10_bio);
1537 }
1538
1539 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1540 {
1541 struct r10conf *conf = mddev->private;
1542 struct r10bio *r10_bio;
1543
1544 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1545
1546 r10_bio->master_bio = bio;
1547 r10_bio->sectors = sectors;
1548
1549 r10_bio->mddev = mddev;
1550 r10_bio->sector = bio->bi_iter.bi_sector;
1551 r10_bio->state = 0;
1552 r10_bio->read_slot = -1;
1553 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
1554 conf->geo.raid_disks);
1555
1556 if (bio_data_dir(bio) == READ)
1557 raid10_read_request(mddev, bio, r10_bio, true);
1558 else
1559 raid10_write_request(mddev, bio, r10_bio);
1560 }
1561
1562 static void raid_end_discard_bio(struct r10bio *r10bio)
1563 {
1564 struct r10conf *conf = r10bio->mddev->private;
1565 struct r10bio *first_r10bio;
1566
1567 while (atomic_dec_and_test(&r10bio->remaining)) {
1568
1569 allow_barrier(conf);
1570
1571 if (!test_bit(R10BIO_Discard, &r10bio->state)) {
1572 first_r10bio = (struct r10bio *)r10bio->master_bio;
1573 free_r10bio(r10bio);
1574 r10bio = first_r10bio;
1575 } else {
1576 md_write_end(r10bio->mddev);
1577 bio_endio(r10bio->master_bio);
1578 free_r10bio(r10bio);
1579 break;
1580 }
1581 }
1582 }
1583
1584 static void raid10_end_discard_request(struct bio *bio)
1585 {
1586 struct r10bio *r10_bio = bio->bi_private;
1587 struct r10conf *conf = r10_bio->mddev->private;
1588 struct md_rdev *rdev = NULL;
1589 int dev;
1590 int slot, repl;
1591
1592 /*
1593 * We don't care about the return value of the discard bio.
1594 */
1595 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
1596 set_bit(R10BIO_Uptodate, &r10_bio->state);
1597
1598 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1599 rdev = repl ? conf->mirrors[dev].replacement :
1600 conf->mirrors[dev].rdev;
1601
1602 raid_end_discard_bio(r10_bio);
1603 rdev_dec_pending(rdev, conf->mddev);
1604 }
1605
1606 /*
1607 * There are some limitations on handling discard bios:
1608 * 1st, the discard size must be bigger than stripe_size*2.
1609 * 2nd, if the discard bio spans the reshape progress, we use the old way to
1610 * handle the discard bio.
1611 */
1612 static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
1613 {
1614 struct r10conf *conf = mddev->private;
1615 struct geom *geo = &conf->geo;
1616 int far_copies = geo->far_copies;
1617 bool first_copy = true;
1618 struct r10bio *r10_bio, *first_r10bio;
1619 struct bio *split;
1620 int disk;
1621 sector_t chunk;
1622 unsigned int stripe_size;
1623 unsigned int stripe_data_disks;
1624 sector_t split_size;
1625 sector_t bio_start, bio_end;
1626 sector_t first_stripe_index, last_stripe_index;
1627 sector_t start_disk_offset;
1628 unsigned int start_disk_index;
1629 sector_t end_disk_offset;
1630 unsigned int end_disk_index;
1631 unsigned int remainder;
1632
1633 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1634 return -EAGAIN;
1635
1636 if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
1637 bio_wouldblock_error(bio);
1638 return 0;
1639 }
1640
1641 /*
1642 * Check reshape again to avoid a reshape that happens after checking
1643 * MD_RECOVERY_RESHAPE and before wait_barrier
1644 */
1645 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1646 goto out;
1647
1648 if (geo->near_copies)
1649 stripe_data_disks = geo->raid_disks / geo->near_copies +
1650 geo->raid_disks % geo->near_copies;
1651 else
1652 stripe_data_disks = geo->raid_disks;
1653
1654 stripe_size = stripe_data_disks << geo->chunk_shift;
1655
1656 bio_start = bio->bi_iter.bi_sector;
1657 bio_end = bio_end_sector(bio);
1658
1659 /*
1660 * Maybe one discard bio is smaller than a stripe size, or it crosses one
1661 * stripe while the discard region is larger than one stripe size. For a far
1662 * offset layout, if the discard region is not aligned with the stripe
1663 * size, there is a hole when we submit the discard bio to a member disk.
1664 * For simplicity, we only handle discard bios whose discard region
1665 * is bigger than stripe_size * 2.
1666 */
1667 if (bio_sectors(bio) < stripe_size*2)
1668 goto out;
1669
1670 /*
1671 * Keep the bio aligned with the stripe size.
1672 */
1673 div_u64_rem(bio_start, stripe_size, &remainder);
1674 if (remainder) {
1675 split_size = stripe_size - remainder;
1676 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1677 if (IS_ERR(split)) {
1678 bio->bi_status = errno_to_blk_status(PTR_ERR(split));
1679 bio_endio(bio);
1680 return 0;
1681 }
1682 bio_chain(split, bio);
1683 allow_barrier(conf);
1684 /* Resend the first split part */
1685 submit_bio_noacct(split);
1686 wait_barrier(conf, false);
1687 }
1688 div_u64_rem(bio_end, stripe_size, &remainder);
1689 if (remainder) {
1690 split_size = bio_sectors(bio) - remainder;
1691 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1692 if (IS_ERR(split)) {
1693 bio->bi_status = errno_to_blk_status(PTR_ERR(split));
1694 bio_endio(bio);
1695 return 0;
1696 }
1697 bio_chain(split, bio);
1698 allow_barrier(conf);
1699 /* Resend the second split part */
1700 submit_bio_noacct(bio);
1701 bio = split;
1702 wait_barrier(conf, false);
1703 }
1704
1705 bio_start = bio->bi_iter.bi_sector;
1706 bio_end = bio_end_sector(bio);
1707
1708 /*
1709 * Raid10 uses the chunk as the unit to store data. It's similar to raid0.
1710 * One stripe contains the chunks from all member disks (one chunk from
1711 * each disk at the same LBA address). For layout details, see 'man md 4'.
1712 */
1713 chunk = bio_start >> geo->chunk_shift;
1714 chunk *= geo->near_copies;
1715 first_stripe_index = chunk;
1716 start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
1717 if (geo->far_offset)
1718 first_stripe_index *= geo->far_copies;
1719 start_disk_offset = (bio_start & geo->chunk_mask) +
1720 (first_stripe_index << geo->chunk_shift);
1721
1722 chunk = bio_end >> geo->chunk_shift;
1723 chunk *= geo->near_copies;
1724 last_stripe_index = chunk;
1725 end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
1726 if (geo->far_offset)
1727 last_stripe_index *= geo->far_copies;
1728 end_disk_offset = (bio_end & geo->chunk_mask) +
1729 (last_stripe_index << geo->chunk_shift);
1730
1731 retry_discard:
1732 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1733 r10_bio->mddev = mddev;
1734 r10_bio->state = 0;
1735 r10_bio->sectors = 0;
1736 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1737 wait_blocked_dev(mddev, r10_bio);
1738
1739 /*
1740 * For a far layout it needs more than one r10bio to cover all regions.
1741 * Inspired by raid10_sync_request, we can use the first r10bio->master_bio
1742 * to record the discard bio. The other r10bios' master_bio records the first
1743 * r10bio. The first r10bio is only released after all other r10bios finish.
1744 * The discard bio returns only when the first r10bio finishes.
1745 */
1746 if (first_copy) {
1747 md_account_bio(mddev, &bio);
1748 r10_bio->master_bio = bio;
1749 set_bit(R10BIO_Discard, &r10_bio->state);
1750 first_copy = false;
1751 first_r10bio = r10_bio;
1752 } else
1753 r10_bio->master_bio = (struct bio *)first_r10bio;
1754
1755 /*
1756 * first select target devices under rcu_lock and
1757 * inc refcount on their rdev. Record them by setting
1758 * bios[x] to bio
1759 */
1760 for (disk = 0; disk < geo->raid_disks; disk++) {
1761 struct md_rdev *rdev, *rrdev;
1762
1763 rdev = conf->mirrors[disk].rdev;
1764 rrdev = conf->mirrors[disk].replacement;
1765 r10_bio->devs[disk].bio = NULL;
1766 r10_bio->devs[disk].repl_bio = NULL;
1767
1768 if (rdev && (test_bit(Faulty, &rdev->flags)))
1769 rdev = NULL;
1770 if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1771 rrdev = NULL;
1772 if (!rdev && !rrdev)
1773 continue;
1774
1775 if (rdev) {
1776 r10_bio->devs[disk].bio = bio;
1777 atomic_inc(&rdev->nr_pending);
1778 }
1779 if (rrdev) {
1780 r10_bio->devs[disk].repl_bio = bio;
1781 atomic_inc(&rrdev->nr_pending);
1782 }
1783 }
1784
1785 atomic_set(&r10_bio->remaining, 1);
1786 for (disk = 0; disk < geo->raid_disks; disk++) {
1787 sector_t dev_start, dev_end;
1788 struct bio *mbio, *rbio = NULL;
1789
1790 /*
1791 * Now start to calculate the start and end address for each disk.
1792 * The space between dev_start and dev_end is the discard region.
1793 *
1794 * For dev_start there are three cases to consider:
1795 * 1st, the disk comes before start_disk: its first discarded data is in
1796 * the next stripe, so dev_start is the start address of the next stripe.
1797 * 2nd, the disk comes after start_disk: it is in the same stripe as the
1798 * first disk, so dev_start is the start address of the current stripe.
1799 * 3rd, the start disk itself: start_disk_offset can be used directly.
1800 * dev_end is derived the same way from end_disk_index/end_disk_offset.
1801 */
1802 if (disk < start_disk_index)
1803 dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
1804 else if (disk > start_disk_index)
1805 dev_start = first_stripe_index * mddev->chunk_sectors;
1806 else
1807 dev_start = start_disk_offset;
1808
1809 if (disk < end_disk_index)
1810 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
1811 else if (disk > end_disk_index)
1812 dev_end = last_stripe_index * mddev->chunk_sectors;
1813 else
1814 dev_end = end_disk_offset;
1815
1816 /*
1817 * Only discard bios that are at least a stripe in size are handled here,
1818 * so dev_end > dev_start always holds.
1819 * No RCU lock is needed to get the rdev here: rdev->nr_pending was
1820 * already incremented in the first loop.
1821 */
1822 if (r10_bio->devs[disk].bio) {
1823 struct md_rdev *rdev = conf->mirrors[disk].rdev;
1824 mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1825 &mddev->bio_set);
1826 mbio->bi_end_io = raid10_end_discard_request;
1827 mbio->bi_private = r10_bio;
1828 r10_bio->devs[disk].bio = mbio;
1829 r10_bio->devs[disk].devnum = disk;
1830 atomic_inc(&r10_bio->remaining);
1831 md_submit_discard_bio(mddev, rdev, mbio,
1832 dev_start + choose_data_offset(r10_bio, rdev),
1833 dev_end - dev_start);
1834 bio_endio(mbio);
1835 }
1836 if (r10_bio->devs[disk].repl_bio) {
1837 struct md_rdev *rrdev = conf->mirrors[disk].replacement;
1838 rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1839 &mddev->bio_set);
1840 rbio->bi_end_io = raid10_end_discard_request;
1841 rbio->bi_private = r10_bio;
1842 r10_bio->devs[disk].repl_bio = rbio;
1843 r10_bio->devs[disk].devnum = disk;
1844 atomic_inc(&r10_bio->remaining);
1845 md_submit_discard_bio(mddev, rrdev, rbio,
1846 dev_start + choose_data_offset(r10_bio, rrdev),
1847 dev_end - dev_start);
1848 bio_endio(rbio);
1849 }
1850 }
1851
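	/*
	 * For 'far' layouts without far_offset, each remaining far copy lives
	 * geo->stride sectors further into every member device, so advance
	 * the per-disk window by one stride and repeat the whole discard pass.
	 */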
1852 if (!geo->far_offset && --far_copies) {
1853 first_stripe_index += geo->stride >> geo->chunk_shift;
1854 start_disk_offset += geo->stride;
1855 last_stripe_index += geo->stride >> geo->chunk_shift;
1856 end_disk_offset += geo->stride;
1857 atomic_inc(&first_r10bio->remaining);
1858 raid_end_discard_bio(r10_bio);
1859 wait_barrier(conf, false);
1860 goto retry_discard;
1861 }
1862
1863 raid_end_discard_bio(r10_bio);
1864
1865 return 0;
1866 out:
1867 allow_barrier(conf);
1868 return -EAGAIN;
1869 }
1870
1871 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1872 {
1873 struct r10conf *conf = mddev->private;
1874 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1875 int chunk_sects = chunk_mask + 1;
1876 int sectors = bio_sectors(bio);
1877
1878 if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1879 && md_flush_request(mddev, bio))
1880 return true;
1881
1882 md_write_start(mddev, bio);
1883
1884 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1885 if (!raid10_handle_discard(mddev, bio))
1886 return true;
1887
1888 /*
1889 * If this request crosses a chunk boundary, we need to split
1890 * it.
1891 */
1892 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1893 sectors > chunk_sects
1894 && (conf->geo.near_copies < conf->geo.raid_disks
1895 || conf->prev.near_copies <
1896 conf->prev.raid_disks)))
1897 sectors = chunk_sects -
1898 (bio->bi_iter.bi_sector &
1899 (chunk_sects - 1));
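	/*
	 * Illustrative example: with 1024-sector chunks, a 2048-sector write
	 * starting at sector 1000 has (1000 & 1023) + 2048 > 1024, so sectors
	 * is clamped to 1024 - 1000 = 24 and only the part up to the chunk
	 * boundary is handed to __make_request() here; the remainder is split
	 * off and re-queued further down the request path.
	 */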
1900 __make_request(mddev, bio, sectors);
1901
1902 /* In case raid10d snuck in to freeze_array */
1903 wake_up_barrier(conf);
1904 return true;
1905 }
1906
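/*
 * Emits the raid10-specific part of the /proc/mdstat line, e.g. (roughly)
 * " 512K chunks 2 near-copies [4/4] [UUUU]" for a healthy four-device
 * near-2 array; failed or missing members show up as '_' in the final map.
 */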
1907 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1908 {
1909 struct r10conf *conf = mddev->private;
1910 int i;
1911
1912 lockdep_assert_held(&mddev->lock);
1913
1914 if (conf->geo.near_copies < conf->geo.raid_disks)
1915 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1916 if (conf->geo.near_copies > 1)
1917 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1918 if (conf->geo.far_copies > 1) {
1919 if (conf->geo.far_offset)
1920 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1921 else
1922 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1923 if (conf->geo.far_set_size != conf->geo.raid_disks)
1924 seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1925 }
1926 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1927 conf->geo.raid_disks - mddev->degraded);
1928 for (i = 0; i < conf->geo.raid_disks; i++) {
1929 struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
1930
1931 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1932 }
1933 seq_printf(seq, "]");
1934 }
1935
1936 /* Check whether there are enough drives for
1937 * every block to appear on at least one.
1938 * Don't consider the device numbered 'ignore'
1939 * as we might be about to remove it.
1940 */
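/* Example: in a 4-device near-2 layout the copies of each chunk live on an
 * adjacent pair of devices ({0,1} or {2,3}); the loop below walks those
 * groups of 'copies' devices and only reports success if every group still
 * has an In_sync member (other than 'ignore').
 */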
1941 static int _enough(struct r10conf *conf, int previous, int ignore)
1942 {
1943 int first = 0;
1944 int has_enough = 0;
1945 int disks, ncopies;
1946 if (previous) {
1947 disks = conf->prev.raid_disks;
1948 ncopies = conf->prev.near_copies;
1949 } else {
1950 disks = conf->geo.raid_disks;
1951 ncopies = conf->geo.near_copies;
1952 }
1953
1954 do {
1955 int n = conf->copies;
1956 int cnt = 0;
1957 int this = first;
1958 while (n--) {
1959 struct md_rdev *rdev;
1960 if (this != ignore &&
1961 (rdev = conf->mirrors[this].rdev) &&
1962 test_bit(In_sync, &rdev->flags))
1963 cnt++;
1964 this = (this+1) % disks;
1965 }
1966 if (cnt == 0)
1967 goto out;
1968 first = (first + ncopies) % disks;
1969 } while (first != 0);
1970 has_enough = 1;
1971 out:
1972 return has_enough;
1973 }
1974
1975 static int enough(struct r10conf *conf, int ignore)
1976 {
1977 /* when calling 'enough', both 'prev' and 'geo' must
1978 * be stable.
1979 * This is ensured if ->reconfig_mutex or ->device_lock
1980 * is held.
1981 */
1982 return _enough(conf, 0, ignore) &&
1983 _enough(conf, 1, ignore);
1984 }
1985
1986 /**
1987 * raid10_error() - RAID10 error handler.
1988 * @mddev: affected md device.
1989 * @rdev: member device to fail.
1990 *
1991 * The routine acknowledges &rdev failure and determines new @mddev state.
1992 * If it failed, then:
1993 * - &MD_BROKEN flag is set in &mddev->flags.
1994 * Otherwise, it must be degraded:
1995 * - recovery is interrupted.
1996 * - &mddev->degraded is bumped.
1997 *
1998 * @rdev is marked as &Faulty excluding case when array is failed and
1999 * &mddev->fail_last_dev is off.
2000 */
2001 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
2002 {
2003 struct r10conf *conf = mddev->private;
2004 unsigned long flags;
2005
2006 spin_lock_irqsave(&conf->device_lock, flags);
2007
2008 if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
2009 set_bit(MD_BROKEN, &mddev->flags);
2010
2011 if (!mddev->fail_last_dev) {
2012 spin_unlock_irqrestore(&conf->device_lock, flags);
2013 return;
2014 }
2015 }
2016 if (test_and_clear_bit(In_sync, &rdev->flags))
2017 mddev->degraded++;
2018
2019 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2020 set_bit(Blocked, &rdev->flags);
2021 set_bit(Faulty, &rdev->flags);
2022 set_mask_bits(&mddev->sb_flags, 0,
2023 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2024 spin_unlock_irqrestore(&conf->device_lock, flags);
2025 pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
2026 "md/raid10:%s: Operation continuing on %d devices.\n",
2027 mdname(mddev), rdev->bdev,
2028 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
2029 }
2030
2031 static void print_conf(struct r10conf *conf)
2032 {
2033 int i;
2034 struct md_rdev *rdev;
2035
2036 pr_debug("RAID10 conf printout:\n");
2037 if (!conf) {
2038 pr_debug("(!conf)\n");
2039 return;
2040 }
2041 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2042 conf->geo.raid_disks);
2043
2044 lockdep_assert_held(&conf->mddev->reconfig_mutex);
2045 for (i = 0; i < conf->geo.raid_disks; i++) {
2046 rdev = conf->mirrors[i].rdev;
2047 if (rdev)
2048 pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
2049 i, !test_bit(In_sync, &rdev->flags),
2050 !test_bit(Faulty, &rdev->flags),
2051 rdev->bdev);
2052 }
2053 }
2054
2055 static void close_sync(struct r10conf *conf)
2056 {
2057 wait_barrier(conf, false);
2058 allow_barrier(conf);
2059
2060 mempool_exit(&conf->r10buf_pool);
2061 }
2062
2063 static int raid10_spare_active(struct mddev *mddev)
2064 {
2065 int i;
2066 struct r10conf *conf = mddev->private;
2067 struct raid10_info *tmp;
2068 int count = 0;
2069 unsigned long flags;
2070
2071 /*
2072 * Find all non-in_sync disks within the RAID10 configuration
2073 * and mark them in_sync
2074 */
2075 for (i = 0; i < conf->geo.raid_disks; i++) {
2076 tmp = conf->mirrors + i;
2077 if (tmp->replacement
2078 && tmp->replacement->recovery_offset == MaxSector
2079 && !test_bit(Faulty, &tmp->replacement->flags)
2080 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
2081 /* Replacement has just become active */
2082 if (!tmp->rdev
2083 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
2084 count++;
2085 if (tmp->rdev) {
2086 /* Replaced device not technically faulty,
2087 * but we need to be sure it gets removed
2088 * and never re-added.
2089 */
2090 set_bit(Faulty, &tmp->rdev->flags);
2091 sysfs_notify_dirent_safe(
2092 tmp->rdev->sysfs_state);
2093 }
2094 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
2095 } else if (tmp->rdev
2096 && tmp->rdev->recovery_offset == MaxSector
2097 && !test_bit(Faulty, &tmp->rdev->flags)
2098 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
2099 count++;
2100 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
2101 }
2102 }
2103 spin_lock_irqsave(&conf->device_lock, flags);
2104 mddev->degraded -= count;
2105 spin_unlock_irqrestore(&conf->device_lock, flags);
2106
2107 print_conf(conf);
2108 return count;
2109 }
2110
2111 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2112 {
2113 struct r10conf *conf = mddev->private;
2114 int err = -EEXIST;
2115 int mirror, repl_slot = -1;
2116 int first = 0;
2117 int last = conf->geo.raid_disks - 1;
2118 struct raid10_info *p;
2119
2120 if (mddev->recovery_cp < MaxSector)
2121 /* only hot-add to in-sync arrays, as recovery is
2122 * very different from resync
2123 */
2124 return -EBUSY;
2125 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2126 return -EINVAL;
2127
2128 if (rdev->raid_disk >= 0)
2129 first = last = rdev->raid_disk;
2130
2131 if (rdev->saved_raid_disk >= first &&
2132 rdev->saved_raid_disk < conf->geo.raid_disks &&
2133 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2134 mirror = rdev->saved_raid_disk;
2135 else
2136 mirror = first;
2137 for ( ; mirror <= last ; mirror++) {
2138 p = &conf->mirrors[mirror];
2139 if (p->recovery_disabled == mddev->recovery_disabled)
2140 continue;
2141 if (p->rdev) {
2142 if (test_bit(WantReplacement, &p->rdev->flags) &&
2143 p->replacement == NULL && repl_slot < 0)
2144 repl_slot = mirror;
2145 continue;
2146 }
2147
2148 err = mddev_stack_new_rdev(mddev, rdev);
2149 if (err)
2150 return err;
2151 p->head_position = 0;
2152 p->recovery_disabled = mddev->recovery_disabled - 1;
2153 rdev->raid_disk = mirror;
2154 err = 0;
2155 if (rdev->saved_raid_disk != mirror)
2156 conf->fullsync = 1;
2157 WRITE_ONCE(p->rdev, rdev);
2158 break;
2159 }
2160
2161 if (err && repl_slot >= 0) {
2162 p = &conf->mirrors[repl_slot];
2163 clear_bit(In_sync, &rdev->flags);
2164 set_bit(Replacement, &rdev->flags);
2165 rdev->raid_disk = repl_slot;
2166 err = mddev_stack_new_rdev(mddev, rdev);
2167 if (err)
2168 return err;
2169 conf->fullsync = 1;
2170 WRITE_ONCE(p->replacement, rdev);
2171 }
2172
2173 print_conf(conf);
2174 return err;
2175 }
2176
2177 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2178 {
2179 struct r10conf *conf = mddev->private;
2180 int err = 0;
2181 int number = rdev->raid_disk;
2182 struct md_rdev **rdevp;
2183 struct raid10_info *p;
2184
2185 print_conf(conf);
2186 if (unlikely(number >= mddev->raid_disks))
2187 return 0;
2188 p = conf->mirrors + number;
2189 if (rdev == p->rdev)
2190 rdevp = &p->rdev;
2191 else if (rdev == p->replacement)
2192 rdevp = &p->replacement;
2193 else
2194 return 0;
2195
2196 if (test_bit(In_sync, &rdev->flags) ||
2197 atomic_read(&rdev->nr_pending)) {
2198 err = -EBUSY;
2199 goto abort;
2200 }
2201 /* Only remove non-faulty devices if recovery
2202 * is not possible.
2203 */
2204 if (!test_bit(Faulty, &rdev->flags) &&
2205 mddev->recovery_disabled != p->recovery_disabled &&
2206 (!p->replacement || p->replacement == rdev) &&
2207 number < conf->geo.raid_disks &&
2208 enough(conf, -1)) {
2209 err = -EBUSY;
2210 goto abort;
2211 }
2212 WRITE_ONCE(*rdevp, NULL);
2213 if (p->replacement) {
2214 /* We must have just cleared 'rdev' */
2215 WRITE_ONCE(p->rdev, p->replacement);
2216 clear_bit(Replacement, &p->replacement->flags);
2217 WRITE_ONCE(p->replacement, NULL);
2218 }
2219
2220 clear_bit(WantReplacement, &rdev->flags);
2221 err = md_integrity_register(mddev);
2222
2223 abort:
2224
2225 print_conf(conf);
2226 return err;
2227 }
2228
2229 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2230 {
2231 struct r10conf *conf = r10_bio->mddev->private;
2232
2233 if (!bio->bi_status)
2234 set_bit(R10BIO_Uptodate, &r10_bio->state);
2235 else
2236 /* The write handler will notice the lack of
2237 * R10BIO_Uptodate and record any errors etc
2238 */
2239 atomic_add(r10_bio->sectors,
2240 &conf->mirrors[d].rdev->corrected_errors);
2241
2242 /* for reconstruct, we always reschedule after a read.
2243 * for resync, only after all reads
2244 */
2245 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2246 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2247 atomic_dec_and_test(&r10_bio->remaining)) {
2248 /* we have read all the blocks,
2249 * do the comparison in process context in raid10d
2250 */
2251 reschedule_retry(r10_bio);
2252 }
2253 }
2254
2255 static void end_sync_read(struct bio *bio)
2256 {
2257 struct r10bio *r10_bio = get_resync_r10bio(bio);
2258 struct r10conf *conf = r10_bio->mddev->private;
2259 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2260
2261 __end_sync_read(r10_bio, bio, d);
2262 }
2263
2264 static void end_reshape_read(struct bio *bio)
2265 {
2266 /* reshape read bio isn't allocated from r10buf_pool */
2267 struct r10bio *r10_bio = bio->bi_private;
2268
2269 __end_sync_read(r10_bio, bio, r10_bio->read_slot);
2270 }
2271
2272 static void end_sync_request(struct r10bio *r10_bio)
2273 {
2274 struct mddev *mddev = r10_bio->mddev;
2275
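	/*
	 * Walk the chain of r10_bios linked through master_bio during
	 * recovery: completing this one may drop the last reference on its
	 * "primary" r10_bio (master_bio == NULL), which is the one that
	 * finally accounts the synced sectors via md_done_sync().
	 */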
2276 while (atomic_dec_and_test(&r10_bio->remaining)) {
2277 if (r10_bio->master_bio == NULL) {
2278 /* the primary of several recovery bios */
2279 sector_t s = r10_bio->sectors;
2280 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2281 test_bit(R10BIO_WriteError, &r10_bio->state))
2282 reschedule_retry(r10_bio);
2283 else
2284 put_buf(r10_bio);
2285 md_done_sync(mddev, s, 1);
2286 break;
2287 } else {
2288 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2289 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2290 test_bit(R10BIO_WriteError, &r10_bio->state))
2291 reschedule_retry(r10_bio);
2292 else
2293 put_buf(r10_bio);
2294 r10_bio = r10_bio2;
2295 }
2296 }
2297 }
2298
2299 static void end_sync_write(struct bio *bio)
2300 {
2301 struct r10bio *r10_bio = get_resync_r10bio(bio);
2302 struct mddev *mddev = r10_bio->mddev;
2303 struct r10conf *conf = mddev->private;
2304 int d;
2305 int slot;
2306 int repl;
2307 struct md_rdev *rdev = NULL;
2308
2309 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2310 if (repl)
2311 rdev = conf->mirrors[d].replacement;
2312 else
2313 rdev = conf->mirrors[d].rdev;
2314
2315 if (bio->bi_status) {
2316 if (repl)
2317 md_error(mddev, rdev);
2318 else {
2319 set_bit(WriteErrorSeen, &rdev->flags);
2320 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2321 set_bit(MD_RECOVERY_NEEDED,
2322 &rdev->mddev->recovery);
2323 set_bit(R10BIO_WriteError, &r10_bio->state);
2324 }
2325 } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
2326 r10_bio->sectors)) {
2327 set_bit(R10BIO_MadeGood, &r10_bio->state);
2328 }
2329
2330 rdev_dec_pending(rdev, mddev);
2331
2332 end_sync_request(r10_bio);
2333 }
2334
2335 /*
2336 * Note: sync and recovery are handled very differently for raid10.
2337 * This code is for resync.
2338 * For resync, we read through virtual addresses and read all blocks.
2339 * If there is any error, we schedule a write. The lowest numbered
2340 * drive is authoritative.
2341 * However requests come in for physical addresses, so we need to map.
2342 * For every physical address there are raid_disks/copies virtual addresses,
2343 * which is always at least one, but is not necessarily an integer.
2344 * This means that a physical address can span multiple chunks, so we may
2345 * have to submit multiple io requests for a single sync request.
2346 */
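/*
 * Example of that mapping: with raid_disks = 5 and copies = 2, one chunk of
 * physical (device) address space, taken across the whole stripe, holds
 * 5/2 = 2.5 chunks' worth of distinct virtual (array) data, so the virtual
 * range backing a physical range need not be chunk aligned and can span
 * multiple chunks.
 */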
2347 /*
2348 * We check if all blocks are in-sync and only write to blocks that
2349 * aren't in sync
2350 */
2351 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2352 {
2353 struct r10conf *conf = mddev->private;
2354 int i, first;
2355 struct bio *tbio, *fbio;
2356 int vcnt;
2357 struct page **tpages, **fpages;
2358
2359 atomic_set(&r10_bio->remaining, 1);
2360
2361 /* find the first device with a block */
2362 for (i=0; i<conf->copies; i++)
2363 if (!r10_bio->devs[i].bio->bi_status)
2364 break;
2365
2366 if (i == conf->copies)
2367 goto done;
2368
2369 first = i;
2370 fbio = r10_bio->devs[i].bio;
2371 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2372 fbio->bi_iter.bi_idx = 0;
2373 fpages = get_resync_pages(fbio)->pages;
2374
2375 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2376 /* now find blocks with errors */
2377 for (i=0 ; i < conf->copies ; i++) {
2378 int j, d;
2379 struct md_rdev *rdev;
2380 struct resync_pages *rp;
2381
2382 tbio = r10_bio->devs[i].bio;
2383
2384 if (tbio->bi_end_io != end_sync_read)
2385 continue;
2386 if (i == first)
2387 continue;
2388
2389 tpages = get_resync_pages(tbio)->pages;
2390 d = r10_bio->devs[i].devnum;
2391 rdev = conf->mirrors[d].rdev;
2392 if (!r10_bio->devs[i].bio->bi_status) {
2393 /* We know that the bi_io_vec layout is the same for
2394 * both 'first' and 'i', so we just compare them.
2395 * All vec entries are PAGE_SIZE;
2396 */
2397 int sectors = r10_bio->sectors;
2398 for (j = 0; j < vcnt; j++) {
2399 int len = PAGE_SIZE;
2400 if (sectors < (len / 512))
2401 len = sectors * 512;
2402 if (memcmp(page_address(fpages[j]),
2403 page_address(tpages[j]),
2404 len))
2405 break;
2406 sectors -= len/512;
2407 }
2408 if (j == vcnt)
2409 continue;
2410 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2411 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2412 /* Don't fix anything. */
2413 continue;
2414 } else if (test_bit(FailFast, &rdev->flags)) {
2415 /* Just give up on this device */
2416 md_error(rdev->mddev, rdev);
2417 continue;
2418 }
2419 /* Ok, we need to write this bio, either to correct an
2420 * inconsistency or to correct an unreadable block.
2421 * First we need to fixup bv_offset, bv_len and
2422 * bi_vecs, as the read request might have corrupted these
2423 */
2424 rp = get_resync_pages(tbio);
2425 bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2426
2427 md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2428
2429 rp->raid_bio = r10_bio;
2430 tbio->bi_private = rp;
2431 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2432 tbio->bi_end_io = end_sync_write;
2433
2434 bio_copy_data(tbio, fbio);
2435
2436 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2437 atomic_inc(&r10_bio->remaining);
2438
2439 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2440 tbio->bi_opf |= MD_FAILFAST;
2441 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2442 submit_bio_noacct(tbio);
2443 }
2444
2445 /* Now write out to any replacement devices
2446 * that are active
2447 */
2448 for (i = 0; i < conf->copies; i++) {
2449 int d;
2450
2451 tbio = r10_bio->devs[i].repl_bio;
2452 if (!tbio || !tbio->bi_end_io)
2453 continue;
2454 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2455 && r10_bio->devs[i].bio != fbio)
2456 bio_copy_data(tbio, fbio);
2457 d = r10_bio->devs[i].devnum;
2458 atomic_inc(&r10_bio->remaining);
2459 submit_bio_noacct(tbio);
2460 }
2461
2462 done:
2463 if (atomic_dec_and_test(&r10_bio->remaining)) {
2464 md_done_sync(mddev, r10_bio->sectors, 1);
2465 put_buf(r10_bio);
2466 }
2467 }
2468
2469 /*
2470 * Now for the recovery code.
2471 * Recovery happens across physical sectors.
2472 * We recover all non-is_sync drives by finding the virtual address of
2473 * each, and then choose a working drive that also has that virt address.
2474 * There is a separate r10_bio for each non-in_sync drive.
2475 * Only the first two slots are in use. The first for reading,
2476 * The second for writing.
2477 *
2478 */
2479 static void fix_recovery_read_error(struct r10bio *r10_bio)
2480 {
2481 /* We got a read error during recovery.
2482 * We repeat the read in smaller page-sized sections.
2483 * If a read succeeds, write it to the new device or record
2484 * a bad block if we cannot.
2485 * If a read fails, record a bad block on both old and
2486 * new devices.
2487 */
2488 struct mddev *mddev = r10_bio->mddev;
2489 struct r10conf *conf = mddev->private;
2490 struct bio *bio = r10_bio->devs[0].bio;
2491 sector_t sect = 0;
2492 int sectors = r10_bio->sectors;
2493 int idx = 0;
2494 int dr = r10_bio->devs[0].devnum;
2495 int dw = r10_bio->devs[1].devnum;
2496 struct page **pages = get_resync_pages(bio)->pages;
2497
2498 while (sectors) {
2499 int s = sectors;
2500 struct md_rdev *rdev;
2501 sector_t addr;
2502 int ok;
2503
2504 if (s > (PAGE_SIZE>>9))
2505 s = PAGE_SIZE >> 9;
2506
2507 rdev = conf->mirrors[dr].rdev;
2508 addr = r10_bio->devs[0].addr + sect;
2509 ok = sync_page_io(rdev,
2510 addr,
2511 s << 9,
2512 pages[idx],
2513 REQ_OP_READ, false);
2514 if (ok) {
2515 rdev = conf->mirrors[dw].rdev;
2516 addr = r10_bio->devs[1].addr + sect;
2517 ok = sync_page_io(rdev,
2518 addr,
2519 s << 9,
2520 pages[idx],
2521 REQ_OP_WRITE, false);
2522 if (!ok) {
2523 set_bit(WriteErrorSeen, &rdev->flags);
2524 if (!test_and_set_bit(WantReplacement,
2525 &rdev->flags))
2526 set_bit(MD_RECOVERY_NEEDED,
2527 &rdev->mddev->recovery);
2528 }
2529 }
2530 if (!ok) {
2531 /* We don't worry if we cannot set a bad block -
2532 * it really is bad so there is no loss in not
2533 * recording it yet
2534 */
2535 rdev_set_badblocks(rdev, addr, s, 0);
2536
2537 if (rdev != conf->mirrors[dw].rdev) {
2538 /* need bad block on destination too */
2539 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2540 addr = r10_bio->devs[1].addr + sect;
2541 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2542 if (!ok) {
2543 /* just abort the recovery */
2544 pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2545 mdname(mddev));
2546
2547 conf->mirrors[dw].recovery_disabled
2548 = mddev->recovery_disabled;
2549 set_bit(MD_RECOVERY_INTR,
2550 &mddev->recovery);
2551 break;
2552 }
2553 }
2554 }
2555
2556 sectors -= s;
2557 sect += s;
2558 idx++;
2559 }
2560 }
2561
2562 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2563 {
2564 struct r10conf *conf = mddev->private;
2565 int d;
2566 struct bio *wbio = r10_bio->devs[1].bio;
2567 struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2568
2569 /* Need to test wbio2->bi_end_io before we call
2570 * submit_bio_noacct as if the former is NULL,
2571 * the latter is free to free wbio2.
2572 */
2573 if (wbio2 && !wbio2->bi_end_io)
2574 wbio2 = NULL;
2575
2576 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2577 fix_recovery_read_error(r10_bio);
2578 if (wbio->bi_end_io)
2579 end_sync_request(r10_bio);
2580 if (wbio2)
2581 end_sync_request(r10_bio);
2582 return;
2583 }
2584
2585 /*
2586 * share the pages with the first bio
2587 * and submit the write request
2588 */
2589 d = r10_bio->devs[1].devnum;
2590 if (wbio->bi_end_io) {
2591 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2592 submit_bio_noacct(wbio);
2593 }
2594 if (wbio2) {
2595 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2596 submit_bio_noacct(wbio2);
2597 }
2598 }
2599
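/*
 * Return convention relied on by fix_read_error() below:
 *   -1  the range is already known bad (or writes are disallowed), skip it
 *    1  the synchronous page I/O succeeded
 *    0  the I/O failed and a bad block (or device failure) was recorded
 */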
2600 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2601 int sectors, struct page *page, enum req_op op)
2602 {
2603 if (rdev_has_badblock(rdev, sector, sectors) &&
2604 (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
2605 return -1;
2606 if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
2607 /* success */
2608 return 1;
2609 if (op == REQ_OP_WRITE) {
2610 set_bit(WriteErrorSeen, &rdev->flags);
2611 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2612 set_bit(MD_RECOVERY_NEEDED,
2613 &rdev->mddev->recovery);
2614 }
2615 /* need to record an error - either for the block or the device */
2616 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2617 md_error(rdev->mddev, rdev);
2618 return 0;
2619 }
2620
2621 /*
2622 * This is a kernel thread which:
2623 *
2624 * 1. Retries failed read operations on working mirrors.
2625 * 2. Updates the raid superblock when problems are encountered.
2626 * 3. Performs writes following reads for array synchronising.
2627 */
2628
2629 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2630 {
2631 int sect = 0; /* Offset from r10_bio->sector */
2632 int sectors = r10_bio->sectors, slot = r10_bio->read_slot;
2633 struct md_rdev *rdev;
2634 int d = r10_bio->devs[slot].devnum;
2635
2636 /* still own a reference to this rdev, so it cannot
2637 * have been cleared recently.
2638 */
2639 rdev = conf->mirrors[d].rdev;
2640
2641 if (test_bit(Faulty, &rdev->flags))
2642 /* drive has already been failed, just ignore any
2643 more fix_read_error() attempts */
2644 return;
2645
2646 if (exceed_read_errors(mddev, rdev)) {
2647 r10_bio->devs[slot].bio = IO_BLOCKED;
2648 return;
2649 }
2650
2651 while(sectors) {
2652 int s = sectors;
2653 int sl = slot;
2654 int success = 0;
2655 int start;
2656
2657 if (s > (PAGE_SIZE>>9))
2658 s = PAGE_SIZE >> 9;
2659
2660 do {
2661 d = r10_bio->devs[sl].devnum;
2662 rdev = conf->mirrors[d].rdev;
2663 if (rdev &&
2664 test_bit(In_sync, &rdev->flags) &&
2665 !test_bit(Faulty, &rdev->flags) &&
2666 rdev_has_badblock(rdev,
2667 r10_bio->devs[sl].addr + sect,
2668 s) == 0) {
2669 atomic_inc(&rdev->nr_pending);
2670 success = sync_page_io(rdev,
2671 r10_bio->devs[sl].addr +
2672 sect,
2673 s<<9,
2674 conf->tmppage,
2675 REQ_OP_READ, false);
2676 rdev_dec_pending(rdev, mddev);
2677 if (success)
2678 break;
2679 }
2680 sl++;
2681 if (sl == conf->copies)
2682 sl = 0;
2683 } while (sl != slot);
2684
2685 if (!success) {
2686 /* Cannot read from anywhere, just mark the block
2687 * as bad on the first device to discourage future
2688 * reads.
2689 */
2690 int dn = r10_bio->devs[slot].devnum;
2691 rdev = conf->mirrors[dn].rdev;
2692
2693 if (!rdev_set_badblocks(
2694 rdev,
2695 r10_bio->devs[slot].addr
2696 + sect,
2697 s, 0)) {
2698 md_error(mddev, rdev);
2699 r10_bio->devs[slot].bio
2700 = IO_BLOCKED;
2701 }
2702 break;
2703 }
2704
2705 start = sl;
2706 /* write it back and re-read */
2707 while (sl != slot) {
2708 if (sl==0)
2709 sl = conf->copies;
2710 sl--;
2711 d = r10_bio->devs[sl].devnum;
2712 rdev = conf->mirrors[d].rdev;
2713 if (!rdev ||
2714 test_bit(Faulty, &rdev->flags) ||
2715 !test_bit(In_sync, &rdev->flags))
2716 continue;
2717
2718 atomic_inc(&rdev->nr_pending);
2719 if (r10_sync_page_io(rdev,
2720 r10_bio->devs[sl].addr +
2721 sect,
2722 s, conf->tmppage, REQ_OP_WRITE)
2723 == 0) {
2724 /* Well, this device is dead */
2725 pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
2726 mdname(mddev), s,
2727 (unsigned long long)(
2728 sect +
2729 choose_data_offset(r10_bio,
2730 rdev)),
2731 rdev->bdev);
2732 pr_notice("md/raid10:%s: %pg: failing drive\n",
2733 mdname(mddev),
2734 rdev->bdev);
2735 }
2736 rdev_dec_pending(rdev, mddev);
2737 }
2738 sl = start;
2739 while (sl != slot) {
2740 if (sl==0)
2741 sl = conf->copies;
2742 sl--;
2743 d = r10_bio->devs[sl].devnum;
2744 rdev = conf->mirrors[d].rdev;
2745 if (!rdev ||
2746 test_bit(Faulty, &rdev->flags) ||
2747 !test_bit(In_sync, &rdev->flags))
2748 continue;
2749
2750 atomic_inc(&rdev->nr_pending);
2751 switch (r10_sync_page_io(rdev,
2752 r10_bio->devs[sl].addr +
2753 sect,
2754 s, conf->tmppage, REQ_OP_READ)) {
2755 case 0:
2756 /* Well, this device is dead */
2757 pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
2758 mdname(mddev), s,
2759 (unsigned long long)(
2760 sect +
2761 choose_data_offset(r10_bio, rdev)),
2762 rdev->bdev);
2763 pr_notice("md/raid10:%s: %pg: failing drive\n",
2764 mdname(mddev),
2765 rdev->bdev);
2766 break;
2767 case 1:
2768 pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %pg)\n",
2769 mdname(mddev), s,
2770 (unsigned long long)(
2771 sect +
2772 choose_data_offset(r10_bio, rdev)),
2773 rdev->bdev);
2774 atomic_add(s, &rdev->corrected_errors);
2775 }
2776
2777 rdev_dec_pending(rdev, mddev);
2778 }
2779
2780 sectors -= s;
2781 sect += s;
2782 }
2783 }
2784
2785 static bool narrow_write_error(struct r10bio *r10_bio, int i)
2786 {
2787 struct bio *bio = r10_bio->master_bio;
2788 struct mddev *mddev = r10_bio->mddev;
2789 struct r10conf *conf = mddev->private;
2790 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2791 /* bio has the data to be written to slot 'i' where
2792 * we just recently had a write error.
2793 * We repeatedly clone the bio and trim down to one block,
2794 * then try the write. Where the write fails we record
2795 * a bad block.
2796 * It is conceivable that the bio doesn't exactly align with
2797 * blocks. We must handle this.
2798 *
2799 * We currently own a reference to the rdev.
2800 */
2801
2802 int block_sectors;
2803 sector_t sector;
2804 int sectors;
2805 int sect_to_write = r10_bio->sectors;
2806 bool ok = true;
2807
2808 if (rdev->badblocks.shift < 0)
2809 return false;
2810
2811 block_sectors = roundup(1 << rdev->badblocks.shift,
2812 bdev_logical_block_size(rdev->bdev) >> 9);
2813 sector = r10_bio->sector;
2814 sectors = ((r10_bio->sector + block_sectors)
2815 & ~(sector_t)(block_sectors - 1))
2816 - sector;
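	/*
	 * Illustrative example: with badblocks.shift = 3 (4KiB bad-block
	 * granularity) and 512-byte logical blocks, block_sectors is 8; a
	 * write starting at sector 10 gets a first sub-write of 6 sectors
	 * (10..15) so that every following sub-write is 8-sector aligned.
	 */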
2817
2818 while (sect_to_write) {
2819 struct bio *wbio;
2820 sector_t wsector;
2821 if (sectors > sect_to_write)
2822 sectors = sect_to_write;
2823 /* Write at 'sector' for 'sectors' */
2824 wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2825 &mddev->bio_set);
2826 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2827 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2828 wbio->bi_iter.bi_sector = wsector +
2829 choose_data_offset(r10_bio, rdev);
2830 wbio->bi_opf = REQ_OP_WRITE;
2831
2832 if (submit_bio_wait(wbio) < 0)
2833 /* Failure! */
2834 ok = rdev_set_badblocks(rdev, wsector,
2835 sectors, 0)
2836 && ok;
2837
2838 bio_put(wbio);
2839 sect_to_write -= sectors;
2840 sector += sectors;
2841 sectors = block_sectors;
2842 }
2843 return ok;
2844 }
2845
2846 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2847 {
2848 int slot = r10_bio->read_slot;
2849 struct bio *bio;
2850 struct r10conf *conf = mddev->private;
2851 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2852
2853 /* we got a read error. Maybe the drive is bad. Maybe just
2854 * the block and we can fix it.
2855 * We freeze all other IO, and try reading the block from
2856 * other devices. When we find one, we re-write
2857 * and check whether that fixes the read error.
2858 * This is all done synchronously while the array is
2859 * frozen.
2860 */
2861 bio = r10_bio->devs[slot].bio;
2862 bio_put(bio);
2863 r10_bio->devs[slot].bio = NULL;
2864
2865 if (mddev->ro)
2866 r10_bio->devs[slot].bio = IO_BLOCKED;
2867 else if (!test_bit(FailFast, &rdev->flags)) {
2868 freeze_array(conf, 1);
2869 fix_read_error(conf, mddev, r10_bio);
2870 unfreeze_array(conf);
2871 } else
2872 md_error(mddev, rdev);
2873
2874 rdev_dec_pending(rdev, mddev);
2875 r10_bio->state = 0;
2876 raid10_read_request(mddev, r10_bio->master_bio, r10_bio, false);
2877 /*
2878 * allow_barrier after re-submit to ensure no sync io
2879 * can be issued while regular io pending.
2880 */
2881 allow_barrier(conf);
2882 }
2883
2884 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2885 {
2886 /* Some sort of write request has finished and it
2887 * succeeded in writing where we thought there was a
2888 * bad block. So forget the bad block.
2889 * Or possibly it failed and we need to record
2890 * a bad block.
2891 */
2892 int m;
2893 struct md_rdev *rdev;
2894
2895 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2896 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2897 for (m = 0; m < conf->copies; m++) {
2898 int dev = r10_bio->devs[m].devnum;
2899 rdev = conf->mirrors[dev].rdev;
2900 if (r10_bio->devs[m].bio == NULL ||
2901 r10_bio->devs[m].bio->bi_end_io == NULL)
2902 continue;
2903 if (!r10_bio->devs[m].bio->bi_status) {
2904 rdev_clear_badblocks(
2905 rdev,
2906 r10_bio->devs[m].addr,
2907 r10_bio->sectors, 0);
2908 } else {
2909 if (!rdev_set_badblocks(
2910 rdev,
2911 r10_bio->devs[m].addr,
2912 r10_bio->sectors, 0))
2913 md_error(conf->mddev, rdev);
2914 }
2915 rdev = conf->mirrors[dev].replacement;
2916 if (r10_bio->devs[m].repl_bio == NULL ||
2917 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2918 continue;
2919
2920 if (!r10_bio->devs[m].repl_bio->bi_status) {
2921 rdev_clear_badblocks(
2922 rdev,
2923 r10_bio->devs[m].addr,
2924 r10_bio->sectors, 0);
2925 } else {
2926 if (!rdev_set_badblocks(
2927 rdev,
2928 r10_bio->devs[m].addr,
2929 r10_bio->sectors, 0))
2930 md_error(conf->mddev, rdev);
2931 }
2932 }
2933 put_buf(r10_bio);
2934 } else {
2935 bool fail = false;
2936 for (m = 0; m < conf->copies; m++) {
2937 int dev = r10_bio->devs[m].devnum;
2938 struct bio *bio = r10_bio->devs[m].bio;
2939 rdev = conf->mirrors[dev].rdev;
2940 if (bio == IO_MADE_GOOD) {
2941 rdev_clear_badblocks(
2942 rdev,
2943 r10_bio->devs[m].addr,
2944 r10_bio->sectors, 0);
2945 rdev_dec_pending(rdev, conf->mddev);
2946 } else if (bio != NULL && bio->bi_status) {
2947 fail = true;
2948 if (!narrow_write_error(r10_bio, m))
2949 md_error(conf->mddev, rdev);
2950 rdev_dec_pending(rdev, conf->mddev);
2951 }
2952 bio = r10_bio->devs[m].repl_bio;
2953 rdev = conf->mirrors[dev].replacement;
2954 if (rdev && bio == IO_MADE_GOOD) {
2955 rdev_clear_badblocks(
2956 rdev,
2957 r10_bio->devs[m].addr,
2958 r10_bio->sectors, 0);
2959 rdev_dec_pending(rdev, conf->mddev);
2960 }
2961 }
2962 if (fail) {
2963 spin_lock_irq(&conf->device_lock);
2964 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2965 conf->nr_queued++;
2966 spin_unlock_irq(&conf->device_lock);
2967 /*
2968 * In case freeze_array() is waiting for condition
2969 * nr_pending == nr_queued + extra to be true.
2970 */
2971 wake_up(&conf->wait_barrier);
2972 md_wakeup_thread(conf->mddev->thread);
2973 } else {
2974 if (test_bit(R10BIO_WriteError,
2975 &r10_bio->state))
2976 close_write(r10_bio);
2977 raid_end_bio_io(r10_bio);
2978 }
2979 }
2980 }
2981
2982 static void raid10d(struct md_thread *thread)
2983 {
2984 struct mddev *mddev = thread->mddev;
2985 struct r10bio *r10_bio;
2986 unsigned long flags;
2987 struct r10conf *conf = mddev->private;
2988 struct list_head *head = &conf->retry_list;
2989 struct blk_plug plug;
2990
2991 md_check_recovery(mddev);
2992
2993 if (!list_empty_careful(&conf->bio_end_io_list) &&
2994 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2995 LIST_HEAD(tmp);
2996 spin_lock_irqsave(&conf->device_lock, flags);
2997 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2998 while (!list_empty(&conf->bio_end_io_list)) {
2999 list_move(conf->bio_end_io_list.prev, &tmp);
3000 conf->nr_queued--;
3001 }
3002 }
3003 spin_unlock_irqrestore(&conf->device_lock, flags);
3004 while (!list_empty(&tmp)) {
3005 r10_bio = list_first_entry(&tmp, struct r10bio,
3006 retry_list);
3007 list_del(&r10_bio->retry_list);
3008
3009 if (test_bit(R10BIO_WriteError,
3010 &r10_bio->state))
3011 close_write(r10_bio);
3012 raid_end_bio_io(r10_bio);
3013 }
3014 }
3015
3016 blk_start_plug(&plug);
3017 for (;;) {
3018
3019 flush_pending_writes(conf);
3020
3021 spin_lock_irqsave(&conf->device_lock, flags);
3022 if (list_empty(head)) {
3023 spin_unlock_irqrestore(&conf->device_lock, flags);
3024 break;
3025 }
3026 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
3027 list_del(head->prev);
3028 conf->nr_queued--;
3029 spin_unlock_irqrestore(&conf->device_lock, flags);
3030
3031 mddev = r10_bio->mddev;
3032 conf = mddev->private;
3033 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3034 test_bit(R10BIO_WriteError, &r10_bio->state))
3035 handle_write_completed(conf, r10_bio);
3036 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3037 reshape_request_write(mddev, r10_bio);
3038 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3039 sync_request_write(mddev, r10_bio);
3040 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3041 recovery_request_write(mddev, r10_bio);
3042 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3043 handle_read_error(mddev, r10_bio);
3044 else
3045 WARN_ON_ONCE(1);
3046
3047 cond_resched();
3048 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
3049 md_check_recovery(mddev);
3050 }
3051 blk_finish_plug(&plug);
3052 }
3053
3054 static int init_resync(struct r10conf *conf)
3055 {
3056 int ret, buffs, i;
3057
3058 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
3059 BUG_ON(mempool_initialized(&conf->r10buf_pool));
3060 conf->have_replacement = 0;
3061 for (i = 0; i < conf->geo.raid_disks; i++)
3062 if (conf->mirrors[i].replacement)
3063 conf->have_replacement = 1;
3064 ret = mempool_init(&conf->r10buf_pool, buffs,
3065 r10buf_pool_alloc, r10buf_pool_free, conf);
3066 if (ret)
3067 return ret;
3068 conf->next_resync = 0;
3069 return 0;
3070 }
3071
3072 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
3073 {
3074 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
3075 struct resync_pages *rp;
3076 struct bio *bio;
3077 int nalloc;
3078 int i;
3079
3080 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3081 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3082 nalloc = conf->copies; /* resync */
3083 else
3084 nalloc = 2; /* recovery */
3085
3086 for (i = 0; i < nalloc; i++) {
3087 bio = r10bio->devs[i].bio;
3088 rp = bio->bi_private;
3089 bio_reset(bio, NULL, 0);
3090 bio->bi_private = rp;
3091 bio = r10bio->devs[i].repl_bio;
3092 if (bio) {
3093 rp = bio->bi_private;
3094 bio_reset(bio, NULL, 0);
3095 bio->bi_private = rp;
3096 }
3097 }
3098 return r10bio;
3099 }
3100
3101 /*
3102 * Set cluster_sync_high since we need other nodes to add the
3103 * range [cluster_sync_low, cluster_sync_high] to suspend list.
3104 */
3105 static void raid10_set_cluster_sync_high(struct r10conf *conf)
3106 {
3107 sector_t window_size;
3108 int extra_chunk, chunks;
3109
3110 /*
3111 * First, define a "stripe" here as a unit that spans all member
3112 * devices once, so the number of chunks per stripe is
3113 * raid_disks / near_copies. Otherwise, if near_copies were close to
3114 * raid_disks, the resync window would grow linearly with raid_disks,
3115 * and we would suspend a much larger IO window than necessary.
3116 * If raid_disks is not divisible by near_copies, an extra chunk is
3117 * needed to ensure the whole "stripe" is covered.
3120 */
3121
3122 chunks = conf->geo.raid_disks / conf->geo.near_copies;
3123 if (conf->geo.raid_disks % conf->geo.near_copies == 0)
3124 extra_chunk = 0;
3125 else
3126 extra_chunk = 1;
3127 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
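	/*
	 * Illustrative example: five devices with near_copies = 2 and
	 * 1024-sector chunks give chunks = 2, extra_chunk = 1 and
	 * window_size = 3 * 1024 sectors, which is then raised to the
	 * 32M minimum below.
	 */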
3128
3129 /*
3130 * At least use a 32M window to align with raid1's resync window
3131 */
3132 window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
3133 CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
3134
3135 conf->cluster_sync_high = conf->cluster_sync_low + window_size;
3136 }
3137
3138 /*
3139 * perform a "sync" on one "block"
3140 *
3141 * We need to make sure that no normal I/O request - particularly write
3142 * requests - conflict with active sync requests.
3143 *
3144 * This is achieved by tracking pending requests and a 'barrier' concept
3145 * that can be installed to exclude normal IO requests.
3146 *
3147 * Resync and recovery are handled very differently.
3148 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
3149 *
3150 * For resync, we iterate over virtual addresses, read all copies,
3151 * and update if there are differences. If only one copy is live,
3152 * skip it.
3153 * For recovery, we iterate over physical addresses, read a good
3154 * value for each non-in_sync drive, and over-write.
3155 *
3156 * So, for recovery we may have several outstanding complex requests for a
3157 * given address, one for each out-of-sync device. We model this by allocating
3158 * a number of r10_bio structures, one for each out-of-sync device.
3159 * As we set up these structures, we collect all bios together into a list
3160 * which we then process collectively to add pages, and then process again
3161 * to pass to submit_bio_noacct.
3162 *
3163 * The r10_bio structures are linked using a borrowed master_bio pointer.
3164 * This link is counted in ->remaining. When the r10_bio that points to NULL
3165 * has its remaining count decremented to 0, the whole complex operation
3166 * is complete.
3167 *
3168 */
3169
3170 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3171 sector_t max_sector, int *skipped)
3172 {
3173 struct r10conf *conf = mddev->private;
3174 struct r10bio *r10_bio;
3175 struct bio *biolist = NULL, *bio;
3176 sector_t nr_sectors;
3177 int i;
3178 int max_sync;
3179 sector_t sync_blocks;
3180 sector_t sectors_skipped = 0;
3181 int chunks_skipped = 0;
3182 sector_t chunk_mask = conf->geo.chunk_mask;
3183 int page_idx = 0;
3184 int error_disk = -1;
3185
3186 /*
3187 * Allow skipping a full rebuild for incremental assembly
3188 * of a clean array, like RAID1 does.
3189 */
3190 if (mddev->bitmap == NULL &&
3191 mddev->recovery_cp == MaxSector &&
3192 mddev->reshape_position == MaxSector &&
3193 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
3194 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3195 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
3196 conf->fullsync == 0) {
3197 *skipped = 1;
3198 return mddev->dev_sectors - sector_nr;
3199 }
3200
3201 if (!mempool_initialized(&conf->r10buf_pool))
3202 if (init_resync(conf))
3203 return 0;
3204
3205 skipped:
3206 if (sector_nr >= max_sector) {
3207 conf->cluster_sync_low = 0;
3208 conf->cluster_sync_high = 0;
3209
3210 /* If we aborted, we need to abort the
3211 * sync on the 'current' bitmap chunks (there can
3212 * be several when recovering multiple devices),
3213 * as we may have started syncing them but not finished.
3214 * We can find the current address in
3215 * mddev->curr_resync, but for recovery,
3216 * we need to convert that to several
3217 * virtual addresses.
3218 */
3219 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3220 end_reshape(conf);
3221 close_sync(conf);
3222 return 0;
3223 }
3224
3225 if (mddev->curr_resync < max_sector) { /* aborted */
3226 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3227 mddev->bitmap_ops->end_sync(mddev,
3228 mddev->curr_resync,
3229 &sync_blocks);
3230 else for (i = 0; i < conf->geo.raid_disks; i++) {
3231 sector_t sect =
3232 raid10_find_virt(conf, mddev->curr_resync, i);
3233
3234 mddev->bitmap_ops->end_sync(mddev, sect,
3235 &sync_blocks);
3236 }
3237 } else {
3238 /* completed sync */
3239 if ((!mddev->bitmap || conf->fullsync)
3240 && conf->have_replacement
3241 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3242 /* Completed a full sync so the replacements
3243 * are now fully recovered.
3244 */
3245 for (i = 0; i < conf->geo.raid_disks; i++) {
3246 struct md_rdev *rdev =
3247 conf->mirrors[i].replacement;
3248
3249 if (rdev)
3250 rdev->recovery_offset = MaxSector;
3251 }
3252 }
3253 conf->fullsync = 0;
3254 }
3255 mddev->bitmap_ops->close_sync(mddev);
3256 close_sync(conf);
3257 *skipped = 1;
3258 return sectors_skipped;
3259 }
3260
3261 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3262 return reshape_request(mddev, sector_nr, skipped);
3263
3264 if (chunks_skipped >= conf->geo.raid_disks) {
3265 pr_err("md/raid10:%s: %s fails\n", mdname(mddev),
3266 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? "resync" : "recovery");
3267 if (error_disk >= 0 &&
3268 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3269 /*
3270 * Recovery failed: set mirrors.recovery_disabled so the
3271 * device isn't added back there.
3272 */
3273 conf->mirrors[error_disk].recovery_disabled =
3274 mddev->recovery_disabled;
3275 return 0;
3276 }
3277 /*
3278 * if there has been nothing to do on any drive,
3279 * then there is nothing to do at all.
3280 */
3281 *skipped = 1;
3282 return (max_sector - sector_nr) + sectors_skipped;
3283 }
3284
3285 if (max_sector > mddev->resync_max)
3286 max_sector = mddev->resync_max; /* Don't do IO beyond here */
3287
3288 /* make sure whole request will fit in a chunk - if chunks
3289 * are meaningful
3290 */
3291 if (conf->geo.near_copies < conf->geo.raid_disks &&
3292 max_sector > (sector_nr | chunk_mask))
3293 max_sector = (sector_nr | chunk_mask) + 1;
3294
3295 /*
3296 * If there is non-resync activity waiting for a turn, then let it
3297 * through before starting on this new sync request.
3298 */
3299 if (conf->nr_waiting)
3300 schedule_timeout_uninterruptible(1);
3301
3302 /* Again, very different code for resync and recovery.
3303 * Both must result in an r10bio with a list of bios that
3304 * have bi_end_io, bi_sector, bi_bdev set,
3305 * and bi_private set to the r10bio.
3306 * For recovery, we may actually create several r10bios
3307 * with 2 bios in each, that correspond to the bios in the main one.
3308 * In this case, the subordinate r10bios link back through a
3309 * borrowed master_bio pointer, and the counter in the master
3310 * includes a ref from each subordinate.
3311 */
3312 /* First, we decide what to do and set ->bi_end_io
3313 * to end_sync_read if we want to read, and
3314 * end_sync_write if we will want to write.
3315 */
3316
3317 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3318 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3319 /* recovery... the complicated one */
3320 int j;
3321 r10_bio = NULL;
3322
3323 for (i = 0 ; i < conf->geo.raid_disks; i++) {
3324 bool still_degraded;
3325 struct r10bio *rb2;
3326 sector_t sect;
3327 bool must_sync;
3328 int any_working;
3329 struct raid10_info *mirror = &conf->mirrors[i];
3330 struct md_rdev *mrdev, *mreplace;
3331
3332 mrdev = mirror->rdev;
3333 mreplace = mirror->replacement;
3334
3335 if (mrdev && (test_bit(Faulty, &mrdev->flags) ||
3336 test_bit(In_sync, &mrdev->flags)))
3337 mrdev = NULL;
3338 if (mreplace && test_bit(Faulty, &mreplace->flags))
3339 mreplace = NULL;
3340
3341 if (!mrdev && !mreplace)
3342 continue;
3343
3344 still_degraded = false;
3345 /* want to reconstruct this device */
3346 rb2 = r10_bio;
3347 sect = raid10_find_virt(conf, sector_nr, i);
3348 if (sect >= mddev->resync_max_sectors)
3349 /* last stripe is not complete - don't
3350 * try to recover this sector.
3351 */
3352 continue;
3353 /* Unless we are doing a full sync, or a replacement
3354 * we only need to recover the block if it is set in
3355 * the bitmap
3356 */
3357 must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
3358 &sync_blocks,
3359 true);
3360 if (sync_blocks < max_sync)
3361 max_sync = sync_blocks;
3362 if (!must_sync &&
3363 mreplace == NULL &&
3364 !conf->fullsync) {
3365 /* yep, skip the sync_blocks here, but don't assume
3366 * that there will never be anything to do here
3367 */
3368 chunks_skipped = -1;
3369 continue;
3370 }
3371 if (mrdev)
3372 atomic_inc(&mrdev->nr_pending);
3373 if (mreplace)
3374 atomic_inc(&mreplace->nr_pending);
3375
3376 r10_bio = raid10_alloc_init_r10buf(conf);
3377 r10_bio->state = 0;
3378 raise_barrier(conf, rb2 != NULL);
3379 atomic_set(&r10_bio->remaining, 0);
3380
3381 r10_bio->master_bio = (struct bio*)rb2;
3382 if (rb2)
3383 atomic_inc(&rb2->remaining);
3384 r10_bio->mddev = mddev;
3385 set_bit(R10BIO_IsRecover, &r10_bio->state);
3386 r10_bio->sector = sect;
3387
3388 raid10_find_phys(conf, r10_bio);
3389
3390 /* Need to check if the array will still be
3391 * degraded
3392 */
3393 for (j = 0; j < conf->geo.raid_disks; j++) {
3394 struct md_rdev *rdev = conf->mirrors[j].rdev;
3395
3396 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3397 still_degraded = false;
3398 break;
3399 }
3400 }
3401
3402 must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
3403 &sync_blocks, still_degraded);
3404
3405 any_working = 0;
3406 for (j=0; j<conf->copies;j++) {
3407 int k;
3408 int d = r10_bio->devs[j].devnum;
3409 sector_t from_addr, to_addr;
3410 struct md_rdev *rdev = conf->mirrors[d].rdev;
3411 sector_t sector, first_bad;
3412 sector_t bad_sectors;
3413 if (!rdev ||
3414 !test_bit(In_sync, &rdev->flags))
3415 continue;
3416 /* This is where we read from */
3417 any_working = 1;
3418 sector = r10_bio->devs[j].addr;
3419
3420 if (is_badblock(rdev, sector, max_sync,
3421 &first_bad, &bad_sectors)) {
3422 if (first_bad > sector)
3423 max_sync = first_bad - sector;
3424 else {
3425 bad_sectors -= (sector
3426 - first_bad);
3427 if (max_sync > bad_sectors)
3428 max_sync = bad_sectors;
3429 continue;
3430 }
3431 }
3432 bio = r10_bio->devs[0].bio;
3433 bio->bi_next = biolist;
3434 biolist = bio;
3435 bio->bi_end_io = end_sync_read;
3436 bio->bi_opf = REQ_OP_READ;
3437 if (test_bit(FailFast, &rdev->flags))
3438 bio->bi_opf |= MD_FAILFAST;
3439 from_addr = r10_bio->devs[j].addr;
3440 bio->bi_iter.bi_sector = from_addr +
3441 rdev->data_offset;
3442 bio_set_dev(bio, rdev->bdev);
3443 atomic_inc(&rdev->nr_pending);
3444 /* and we write to 'i' (if not in_sync) */
3445
3446 for (k=0; k<conf->copies; k++)
3447 if (r10_bio->devs[k].devnum == i)
3448 break;
3449 BUG_ON(k == conf->copies);
3450 to_addr = r10_bio->devs[k].addr;
3451 r10_bio->devs[0].devnum = d;
3452 r10_bio->devs[0].addr = from_addr;
3453 r10_bio->devs[1].devnum = i;
3454 r10_bio->devs[1].addr = to_addr;
3455
3456 if (mrdev) {
3457 bio = r10_bio->devs[1].bio;
3458 bio->bi_next = biolist;
3459 biolist = bio;
3460 bio->bi_end_io = end_sync_write;
3461 bio->bi_opf = REQ_OP_WRITE;
3462 bio->bi_iter.bi_sector = to_addr
3463 + mrdev->data_offset;
3464 bio_set_dev(bio, mrdev->bdev);
3465 atomic_inc(&r10_bio->remaining);
3466 } else
3467 r10_bio->devs[1].bio->bi_end_io = NULL;
3468
3469 /* and maybe write to replacement */
3470 bio = r10_bio->devs[1].repl_bio;
3471 if (bio)
3472 bio->bi_end_io = NULL;
3473 /* Note: if replace is not NULL, then bio
3474 * cannot be NULL as r10buf_pool_alloc will
3475 * have allocated it.
3476 */
3477 if (!mreplace)
3478 break;
3479 bio->bi_next = biolist;
3480 biolist = bio;
3481 bio->bi_end_io = end_sync_write;
3482 bio->bi_opf = REQ_OP_WRITE;
3483 bio->bi_iter.bi_sector = to_addr +
3484 mreplace->data_offset;
3485 bio_set_dev(bio, mreplace->bdev);
3486 atomic_inc(&r10_bio->remaining);
3487 break;
3488 }
3489 if (j == conf->copies) {
3490 /* Cannot recover, so abort the recovery or
3491 * record a bad block */
3492 if (any_working) {
3493 /* problem is that there are bad blocks
3494 * on other device(s)
3495 */
3496 int k;
3497 for (k = 0; k < conf->copies; k++)
3498 if (r10_bio->devs[k].devnum == i)
3499 break;
3500 if (mrdev && !test_bit(In_sync,
3501 &mrdev->flags)
3502 && !rdev_set_badblocks(
3503 mrdev,
3504 r10_bio->devs[k].addr,
3505 max_sync, 0))
3506 any_working = 0;
3507 if (mreplace &&
3508 !rdev_set_badblocks(
3509 mreplace,
3510 r10_bio->devs[k].addr,
3511 max_sync, 0))
3512 any_working = 0;
3513 }
3514 if (!any_working) {
3515 if (!test_and_set_bit(MD_RECOVERY_INTR,
3516 &mddev->recovery))
3517 pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3518 mdname(mddev));
3519 mirror->recovery_disabled
3520 = mddev->recovery_disabled;
3521 } else {
3522 error_disk = i;
3523 }
3524 put_buf(r10_bio);
3525 if (rb2)
3526 atomic_dec(&rb2->remaining);
3527 r10_bio = rb2;
3528 if (mrdev)
3529 rdev_dec_pending(mrdev, mddev);
3530 if (mreplace)
3531 rdev_dec_pending(mreplace, mddev);
3532 break;
3533 }
3534 if (mrdev)
3535 rdev_dec_pending(mrdev, mddev);
3536 if (mreplace)
3537 rdev_dec_pending(mreplace, mddev);
3538 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3539 /* Only want this if there is elsewhere to
3540 * read from. 'j' is currently the first
3541 * readable copy.
3542 */
3543 int targets = 1;
3544 for (; j < conf->copies; j++) {
3545 int d = r10_bio->devs[j].devnum;
3546 if (conf->mirrors[d].rdev &&
3547 test_bit(In_sync,
3548 &conf->mirrors[d].rdev->flags))
3549 targets++;
3550 }
3551 if (targets == 1)
3552 r10_bio->devs[0].bio->bi_opf
3553 &= ~MD_FAILFAST;
3554 }
3555 }
3556 if (biolist == NULL) {
3557 while (r10_bio) {
3558 struct r10bio *rb2 = r10_bio;
3559 r10_bio = (struct r10bio*) rb2->master_bio;
3560 rb2->master_bio = NULL;
3561 put_buf(rb2);
3562 }
3563 goto giveup;
3564 }
3565 } else {
3566 /* resync. Schedule a read for every block at this virt offset */
3567 int count = 0;
3568
3569 /*
3570 * curr_resync_completed may not be updated in time, and
3571 * cluster_sync_low is set based on it, so check against
3572 * "sector_nr + 2 * RESYNC_SECTORS" as a safety margin; this
3573 * ensures curr_resync_completed gets updated in
3574 * bitmap_cond_end_sync.
3575 */
3576 mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
3577 mddev_is_clustered(mddev) &&
3578 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3579
3580 if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
3581 &sync_blocks,
3582 mddev->degraded) &&
3583 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3584 &mddev->recovery)) {
3585 /* We can skip this block */
3586 *skipped = 1;
3587 return sync_blocks + sectors_skipped;
3588 }
3589 if (sync_blocks < max_sync)
3590 max_sync = sync_blocks;
3591 r10_bio = raid10_alloc_init_r10buf(conf);
3592 r10_bio->state = 0;
3593
3594 r10_bio->mddev = mddev;
3595 atomic_set(&r10_bio->remaining, 0);
3596 raise_barrier(conf, 0);
3597 conf->next_resync = sector_nr;
3598
3599 r10_bio->master_bio = NULL;
3600 r10_bio->sector = sector_nr;
3601 set_bit(R10BIO_IsSync, &r10_bio->state);
3602 raid10_find_phys(conf, r10_bio);
3603 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
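	/*
	 * E.g. with 1024-sector chunks (chunk_mask = 1023) and
	 * sector_nr = 3100, r10_bio->sectors = (3100 | 1023) - 3100 + 1 = 996:
	 * the request covers at most up to the end of the current chunk.
	 */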
3604
3605 for (i = 0; i < conf->copies; i++) {
3606 int d = r10_bio->devs[i].devnum;
3607 sector_t first_bad, sector;
3608 sector_t bad_sectors;
3609 struct md_rdev *rdev;
3610
3611 if (r10_bio->devs[i].repl_bio)
3612 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3613
3614 bio = r10_bio->devs[i].bio;
3615 bio->bi_status = BLK_STS_IOERR;
3616 rdev = conf->mirrors[d].rdev;
3617 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3618 continue;
3619
3620 sector = r10_bio->devs[i].addr;
3621 if (is_badblock(rdev, sector, max_sync,
3622 &first_bad, &bad_sectors)) {
3623 if (first_bad > sector)
3624 max_sync = first_bad - sector;
3625 else {
3626 bad_sectors -= (sector - first_bad);
3627 if (max_sync > bad_sectors)
3628 max_sync = bad_sectors;
3629 continue;
3630 }
3631 }
3632 atomic_inc(&rdev->nr_pending);
3633 atomic_inc(&r10_bio->remaining);
3634 bio->bi_next = biolist;
3635 biolist = bio;
3636 bio->bi_end_io = end_sync_read;
3637 bio->bi_opf = REQ_OP_READ;
3638 if (test_bit(FailFast, &rdev->flags))
3639 bio->bi_opf |= MD_FAILFAST;
3640 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3641 bio_set_dev(bio, rdev->bdev);
3642 count++;
3643
3644 rdev = conf->mirrors[d].replacement;
3645 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3646 continue;
3647
3648 atomic_inc(&rdev->nr_pending);
3649
3650 /* Need to set up for writing to the replacement */
3651 bio = r10_bio->devs[i].repl_bio;
3652 bio->bi_status = BLK_STS_IOERR;
3653
3654 sector = r10_bio->devs[i].addr;
3655 bio->bi_next = biolist;
3656 biolist = bio;
3657 bio->bi_end_io = end_sync_write;
3658 bio->bi_opf = REQ_OP_WRITE;
3659 if (test_bit(FailFast, &rdev->flags))
3660 bio->bi_opf |= MD_FAILFAST;
3661 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3662 bio_set_dev(bio, rdev->bdev);
3663 count++;
3664 }
3665
3666 if (count < 2) {
3667 for (i = 0; i < conf->copies; i++) {
3668 int d = r10_bio->devs[i].devnum;
3669 if (r10_bio->devs[i].bio->bi_end_io)
3670 rdev_dec_pending(conf->mirrors[d].rdev,
3671 mddev);
3672 if (r10_bio->devs[i].repl_bio &&
3673 r10_bio->devs[i].repl_bio->bi_end_io)
3674 rdev_dec_pending(
3675 conf->mirrors[d].replacement,
3676 mddev);
3677 }
3678 put_buf(r10_bio);
3679 biolist = NULL;
3680 goto giveup;
3681 }
3682 }
3683
3684 nr_sectors = 0;
3685 if (sector_nr + max_sync < max_sector)
3686 max_sector = sector_nr + max_sync;
3687 do {
3688 struct page *page;
3689 int len = PAGE_SIZE;
3690 if (sector_nr + (len>>9) > max_sector)
3691 len = (max_sector - sector_nr) << 9;
3692 if (len == 0)
3693 break;
3694 for (bio = biolist; bio; bio = bio->bi_next) {
3695 struct resync_pages *rp = get_resync_pages(bio);
3696 page = resync_fetch_page(rp, page_idx);
3697 if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
3698 bio->bi_status = BLK_STS_RESOURCE;
3699 bio_endio(bio);
3700 goto giveup;
3701 }
3702 }
3703 nr_sectors += len>>9;
3704 sector_nr += len>>9;
3705 } while (++page_idx < RESYNC_PAGES);
3706 r10_bio->sectors = nr_sectors;
3707
3708 if (mddev_is_clustered(mddev) &&
3709 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3710 /* It is resync not recovery */
3711 if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3712 conf->cluster_sync_low = mddev->curr_resync_completed;
3713 raid10_set_cluster_sync_high(conf);
3714 /* Send resync message */
3715 mddev->cluster_ops->resync_info_update(mddev,
3716 conf->cluster_sync_low,
3717 conf->cluster_sync_high);
3718 }
3719 } else if (mddev_is_clustered(mddev)) {
3720 /* This is recovery not resync */
3721 sector_t sect_va1, sect_va2;
3722 bool broadcast_msg = false;
3723
3724 for (i = 0; i < conf->geo.raid_disks; i++) {
3725 /*
3726 * sector_nr is a device address for recovery, so we
3727 * need to translate it to an array address before comparing
3728 * it with cluster_sync_high.
3729 */
3730 sect_va1 = raid10_find_virt(conf, sector_nr, i);
3731
3732 if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3733 broadcast_msg = true;
3734 /*
3735 * curr_resync_completed is similar to
3736 * sector_nr, so translate it as well.
3737 */
3738 sect_va2 = raid10_find_virt(conf,
3739 mddev->curr_resync_completed, i);
3740
3741 if (conf->cluster_sync_low == 0 ||
3742 conf->cluster_sync_low > sect_va2)
3743 conf->cluster_sync_low = sect_va2;
3744 }
3745 }
3746 if (broadcast_msg) {
3747 raid10_set_cluster_sync_high(conf);
3748 mddev->cluster_ops->resync_info_update(mddev,
3749 conf->cluster_sync_low,
3750 conf->cluster_sync_high);
3751 }
3752 }
3753
3754 while (biolist) {
3755 bio = biolist;
3756 biolist = biolist->bi_next;
3757
3758 bio->bi_next = NULL;
3759 r10_bio = get_resync_r10bio(bio);
3760 r10_bio->sectors = nr_sectors;
3761
3762 if (bio->bi_end_io == end_sync_read) {
3763 bio->bi_status = 0;
3764 submit_bio_noacct(bio);
3765 }
3766 }
3767
3768 if (sectors_skipped)
3769 /* pretend they weren't skipped, it makes
3770 * no important difference in this case
3771 */
3772 md_done_sync(mddev, sectors_skipped, 1);
3773
3774 return sectors_skipped + nr_sectors;
3775 giveup:
3776 /* There is nowhere to write, so all non-sync
3777 * drives must be failed or in resync, or all drives
3778 * have a bad block; try the next chunk...
3779 */
3780 if (sector_nr + max_sync < max_sector)
3781 max_sector = sector_nr + max_sync;
3782
3783 sectors_skipped += (max_sector - sector_nr);
3784 chunks_skipped++;
3785 sector_nr = max_sector;
3786 goto skipped;
3787 }
3788
3789 static sector_t
3790 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3791 {
3792 sector_t size;
3793 struct r10conf *conf = mddev->private;
3794
3795 if (!raid_disks)
3796 raid_disks = min(conf->geo.raid_disks,
3797 conf->prev.raid_disks);
3798 if (!sectors)
3799 sectors = conf->dev_sectors;
3800
3801 size = sectors >> conf->geo.chunk_shift;
3802 sector_div(size, conf->geo.far_copies);
3803 size = size * raid_disks;
3804 sector_div(size, conf->geo.near_copies);
3805
3806 return size << conf->geo.chunk_shift;
3807 }
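/*
 * Illustrative example (not part of the driver, numbers assumed): with
 * raid_disks = 4, near_copies = 2, far_copies = 1 and 64KiB chunks
 * (128 sectors, chunk_shift = 7), dev_sectors = 131072 gives
 *   131072 >> 7 = 1024 chunks, / 1 = 1024, * 4 = 4096, / 2 = 2048,
 *   2048 << 7 = 262144 sectors of usable capacity,
 * i.e. half of the raw 4 * 131072 sectors, as expected with two copies
 * of every chunk.
 */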
3808
3809 static void calc_sectors(struct r10conf *conf, sector_t size)
3810 {
3811 /* Calculate the number of sectors-per-device that will
3812 * actually be used, and set conf->dev_sectors and
3813 * conf->stride
3814 */
3815
3816 size = size >> conf->geo.chunk_shift;
3817 sector_div(size, conf->geo.far_copies);
3818 size = size * conf->geo.raid_disks;
3819 sector_div(size, conf->geo.near_copies);
3820 /* 'size' is now the number of chunks in the array */
3821 /* calculate "used chunks per device" */
3822 size = size * conf->copies;
3823
3824 /* We need to round up when dividing by raid_disks to
3825 * get the stride size.
3826 */
3827 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3828
3829 conf->dev_sectors = size << conf->geo.chunk_shift;
3830
3831 if (conf->geo.far_offset)
3832 conf->geo.stride = 1 << conf->geo.chunk_shift;
3833 else {
3834 sector_div(size, conf->geo.far_copies);
3835 conf->geo.stride = size << conf->geo.chunk_shift;
3836 }
3837 }
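/*
 * Illustrative example (not part of the driver, numbers assumed): with
 * the same geometry as above (4 disks, near_copies = 2, far_copies = 1,
 * chunk_shift = 7) and 131072 sectors per device,
 *   131072 >> 7 = 1024, / 1 = 1024, * 4 = 4096, / 2 = 2048 chunks in the
 *   array; * copies (2) = 4096 used chunks; DIV_ROUND_UP(4096, 4) = 1024
 *   chunks per device, so dev_sectors = 1024 << 7 = 131072 and, with
 *   far_offset clear, stride = 131072 / far_copies = 131072.
 */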
3838
3839 enum geo_type {geo_new, geo_old, geo_start};
3840 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3841 {
3842 int nc, fc, fo;
3843 int layout, chunk, disks;
3844 switch (new) {
3845 case geo_old:
3846 layout = mddev->layout;
3847 chunk = mddev->chunk_sectors;
3848 disks = mddev->raid_disks - mddev->delta_disks;
3849 break;
3850 case geo_new:
3851 layout = mddev->new_layout;
3852 chunk = mddev->new_chunk_sectors;
3853 disks = mddev->raid_disks;
3854 break;
3855 default: /* avoid 'may be unused' warnings */
3856 case geo_start: /* new when starting reshape - raid_disks not
3857 * updated yet. */
3858 layout = mddev->new_layout;
3859 chunk = mddev->new_chunk_sectors;
3860 disks = mddev->raid_disks + mddev->delta_disks;
3861 break;
3862 }
3863 if (layout >> 19)
3864 return -1;
3865 if (chunk < (PAGE_SIZE >> 9) ||
3866 !is_power_of_2(chunk))
3867 return -2;
3868 nc = layout & 255;
3869 fc = (layout >> 8) & 255;
3870 fo = layout & (1<<16);
3871 geo->raid_disks = disks;
3872 geo->near_copies = nc;
3873 geo->far_copies = fc;
3874 geo->far_offset = fo;
3875 switch (layout >> 17) {
3876 case 0: /* original layout. simple but not always optimal */
3877 geo->far_set_size = disks;
3878 break;
3879 case 1: /* "improved" layout which was buggy. Hopefully no-one is
3880 * actually using this, but leave code here just in case. */
3881 geo->far_set_size = disks/fc;
3882 WARN(geo->far_set_size < fc,
3883 "This RAID10 layout does not provide data safety - please backup and create new array\n");
3884 break;
3885 case 2: /* "improved" layout fixed to match documentation */
3886 geo->far_set_size = fc * nc;
3887 break;
3888 default: /* Not a valid layout */
3889 return -1;
3890 }
3891 geo->chunk_mask = chunk - 1;
3892 geo->chunk_shift = ffz(~chunk);
3893 return nc*fc;
3894 }
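/*
 * Illustrative decoding (not part of the driver, layout value assumed):
 * the common near-2 layout word 0x102 gives near_copies = 0x102 & 255 = 2,
 * far_copies = (0x102 >> 8) & 255 = 1, far_offset = 0, and
 * layout >> 17 == 0 selects the original far_set_size = disks;
 * setup_geo() then returns nc * fc = 2 copies.
 */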
3895
3896 static void raid10_free_conf(struct r10conf *conf)
3897 {
3898 if (!conf)
3899 return;
3900
3901 mempool_exit(&conf->r10bio_pool);
3902 kfree(conf->mirrors);
3903 kfree(conf->mirrors_old);
3904 kfree(conf->mirrors_new);
3905 safe_put_page(conf->tmppage);
3906 bioset_exit(&conf->bio_split);
3907 kfree(conf);
3908 }
3909
3910 static struct r10conf *setup_conf(struct mddev *mddev)
3911 {
3912 struct r10conf *conf = NULL;
3913 int err = -EINVAL;
3914 struct geom geo;
3915 int copies;
3916
3917 copies = setup_geo(&geo, mddev, geo_new);
3918
3919 if (copies == -2) {
3920 pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3921 mdname(mddev), PAGE_SIZE);
3922 goto out;
3923 }
3924
3925 if (copies < 2 || copies > mddev->raid_disks) {
3926 pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3927 mdname(mddev), mddev->new_layout);
3928 goto out;
3929 }
3930
3931 err = -ENOMEM;
3932 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3933 if (!conf)
3934 goto out;
3935
3936 /* FIXME calc properly */
3937 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
3938 sizeof(struct raid10_info),
3939 GFP_KERNEL);
3940 if (!conf->mirrors)
3941 goto out;
3942
3943 conf->tmppage = alloc_page(GFP_KERNEL);
3944 if (!conf->tmppage)
3945 goto out;
3946
3947 conf->geo = geo;
3948 conf->copies = copies;
3949 err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
3950 rbio_pool_free, conf);
3951 if (err)
3952 goto out;
3953
3954 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3955 if (err)
3956 goto out;
3957
3958 calc_sectors(conf, mddev->dev_sectors);
3959 if (mddev->reshape_position == MaxSector) {
3960 conf->prev = conf->geo;
3961 conf->reshape_progress = MaxSector;
3962 } else {
3963 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3964 err = -EINVAL;
3965 goto out;
3966 }
3967 conf->reshape_progress = mddev->reshape_position;
3968 if (conf->prev.far_offset)
3969 conf->prev.stride = 1 << conf->prev.chunk_shift;
3970 else
3971 /* far_copies must be 1 */
3972 conf->prev.stride = conf->dev_sectors;
3973 }
3974 conf->reshape_safe = conf->reshape_progress;
3975 spin_lock_init(&conf->device_lock);
3976 INIT_LIST_HEAD(&conf->retry_list);
3977 INIT_LIST_HEAD(&conf->bio_end_io_list);
3978
3979 seqlock_init(&conf->resync_lock);
3980 init_waitqueue_head(&conf->wait_barrier);
3981 atomic_set(&conf->nr_pending, 0);
3982
3983 err = -ENOMEM;
3984 rcu_assign_pointer(conf->thread,
3985 md_register_thread(raid10d, mddev, "raid10"));
3986 if (!conf->thread)
3987 goto out;
3988
3989 conf->mddev = mddev;
3990 return conf;
3991
3992 out:
3993 raid10_free_conf(conf);
3994 return ERR_PTR(err);
3995 }
3996
3997 static unsigned int raid10_nr_stripes(struct r10conf *conf)
3998 {
3999 unsigned int raid_disks = conf->geo.raid_disks;
4000
4001 if (conf->geo.raid_disks % conf->geo.near_copies)
4002 return raid_disks;
4003 return raid_disks / conf->geo.near_copies;
4004 }
4005
4006 static int raid10_set_queue_limits(struct mddev *mddev)
4007 {
4008 struct r10conf *conf = mddev->private;
4009 struct queue_limits lim;
4010 int err;
4011
4012 md_init_stacking_limits(&lim);
4013 lim.max_write_zeroes_sectors = 0;
4014 lim.io_min = mddev->chunk_sectors << 9;
4015 lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
4016 lim.features |= BLK_FEAT_ATOMIC_WRITES;
4017 err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
4018 if (err)
4019 return err;
4020 return queue_limits_set(mddev->gendisk->queue, &lim);
4021 }
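/*
 * Illustrative example (not part of the driver, numbers assumed): with
 * 4 disks, near_copies = 2 and 512KiB chunks (chunk_sectors = 1024),
 * io_min = 1024 << 9 = 512KiB and raid10_nr_stripes() = 4 / 2 = 2,
 * so io_opt = 2 * 512KiB = 1MiB, i.e. one full stripe of unique data.
 */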
4022
4023 static int raid10_run(struct mddev *mddev)
4024 {
4025 struct r10conf *conf;
4026 int i, disk_idx;
4027 struct raid10_info *disk;
4028 struct md_rdev *rdev;
4029 sector_t size;
4030 sector_t min_offset_diff = 0;
4031 int first = 1;
4032 int ret = -EIO;
4033
4034 if (mddev->private == NULL) {
4035 conf = setup_conf(mddev);
4036 if (IS_ERR(conf))
4037 return PTR_ERR(conf);
4038 mddev->private = conf;
4039 }
4040 conf = mddev->private;
4041 if (!conf)
4042 goto out;
4043
4044 rcu_assign_pointer(mddev->thread, conf->thread);
4045 rcu_assign_pointer(conf->thread, NULL);
4046
4047 if (mddev_is_clustered(conf->mddev)) {
4048 int fc, fo;
4049
4050 fc = (mddev->layout >> 8) & 255;
4051 fo = mddev->layout & (1<<16);
4052 if (fc > 1 || fo > 0) {
4053 pr_err("only near layout is supported by clustered"
4054 " raid10\n");
4055 goto out_free_conf;
4056 }
4057 }
4058
4059 rdev_for_each(rdev, mddev) {
4060 long long diff;
4061
4062 disk_idx = rdev->raid_disk;
4063 if (disk_idx < 0)
4064 continue;
4065 if (disk_idx >= conf->geo.raid_disks &&
4066 disk_idx >= conf->prev.raid_disks)
4067 continue;
4068 disk = conf->mirrors + disk_idx;
4069
4070 if (test_bit(Replacement, &rdev->flags)) {
4071 if (disk->replacement)
4072 goto out_free_conf;
4073 disk->replacement = rdev;
4074 } else {
4075 if (disk->rdev)
4076 goto out_free_conf;
4077 disk->rdev = rdev;
4078 }
4079 diff = (rdev->new_data_offset - rdev->data_offset);
4080 if (!mddev->reshape_backwards)
4081 diff = -diff;
4082 if (diff < 0)
4083 diff = 0;
4084 if (first || diff < min_offset_diff)
4085 min_offset_diff = diff;
4086
4087 disk->head_position = 0;
4088 first = 0;
4089 }
4090
4091 if (!mddev_is_dm(conf->mddev)) {
4092 int err = raid10_set_queue_limits(mddev);
4093
4094 if (err) {
4095 ret = err;
4096 goto out_free_conf;
4097 }
4098 }
4099
4100 /* need to check that every block has at least one working mirror */
4101 if (!enough(conf, -1)) {
4102 pr_err("md/raid10:%s: not enough operational mirrors.\n",
4103 mdname(mddev));
4104 goto out_free_conf;
4105 }
4106
4107 if (conf->reshape_progress != MaxSector) {
4108 /* must ensure that shape change is supported */
4109 if (conf->geo.far_copies != 1 &&
4110 conf->geo.far_offset == 0)
4111 goto out_free_conf;
4112 if (conf->prev.far_copies != 1 &&
4113 conf->prev.far_offset == 0)
4114 goto out_free_conf;
4115 }
4116
4117 mddev->degraded = 0;
4118 for (i = 0;
4119 i < conf->geo.raid_disks
4120 || i < conf->prev.raid_disks;
4121 i++) {
4122
4123 disk = conf->mirrors + i;
4124
4125 if (!disk->rdev && disk->replacement) {
4126 /* The replacement is all we have - use it */
4127 disk->rdev = disk->replacement;
4128 disk->replacement = NULL;
4129 clear_bit(Replacement, &disk->rdev->flags);
4130 }
4131
4132 if (!disk->rdev ||
4133 !test_bit(In_sync, &disk->rdev->flags)) {
4134 disk->head_position = 0;
4135 mddev->degraded++;
4136 if (disk->rdev &&
4137 disk->rdev->saved_raid_disk < 0)
4138 conf->fullsync = 1;
4139 }
4140
4141 if (disk->replacement &&
4142 !test_bit(In_sync, &disk->replacement->flags) &&
4143 disk->replacement->saved_raid_disk < 0) {
4144 conf->fullsync = 1;
4145 }
4146
4147 disk->recovery_disabled = mddev->recovery_disabled - 1;
4148 }
4149
4150 if (mddev->recovery_cp != MaxSector)
4151 pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
4152 mdname(mddev));
4153 pr_info("md/raid10:%s: active with %d out of %d devices\n",
4154 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
4155 conf->geo.raid_disks);
4156 /*
4157 * Ok, everything is just fine now
4158 */
4159 mddev->dev_sectors = conf->dev_sectors;
4160 size = raid10_size(mddev, 0, 0);
4161 md_set_array_sectors(mddev, size);
4162 mddev->resync_max_sectors = size;
4163 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
4164
4165 if (md_integrity_register(mddev))
4166 goto out_free_conf;
4167
4168 if (conf->reshape_progress != MaxSector) {
4169 unsigned long before_length, after_length;
4170
4171 before_length = ((1 << conf->prev.chunk_shift) *
4172 conf->prev.far_copies);
4173 after_length = ((1 << conf->geo.chunk_shift) *
4174 conf->geo.far_copies);
4175
4176 if (max(before_length, after_length) > min_offset_diff) {
4177 /* This cannot work */
4178 pr_warn("md/raid10: offset difference not enough to continue reshape\n");
4179 goto out_free_conf;
4180 }
4181 conf->offset_diff = min_offset_diff;
4182
4183 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4184 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4185 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4186 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4187 }
4188
4189 return 0;
4190
4191 out_free_conf:
4192 md_unregister_thread(mddev, &mddev->thread);
4193 raid10_free_conf(conf);
4194 mddev->private = NULL;
4195 out:
4196 return ret;
4197 }
4198
4199 static void raid10_free(struct mddev *mddev, void *priv)
4200 {
4201 raid10_free_conf(priv);
4202 }
4203
4204 static void raid10_quiesce(struct mddev *mddev, int quiesce)
4205 {
4206 struct r10conf *conf = mddev->private;
4207
4208 if (quiesce)
4209 raise_barrier(conf, 0);
4210 else
4211 lower_barrier(conf);
4212 }
4213
4214 static int raid10_resize(struct mddev *mddev, sector_t sectors)
4215 {
4216 /* Resize of 'far' arrays is not supported.
4217 * For 'near' and 'offset' arrays we can set the
4218 * number of sectors used to be an appropriate multiple
4219 * of the chunk size.
4220 * For 'offset', this is far_copies*chunksize.
4221 * For 'near' the multiplier is the LCM of
4222 * near_copies and raid_disks.
4223 * So if far_copies > 1 && !far_offset, fail.
4224 * Else find LCM(raid_disks, near_copies)*far_copies and
4225 * multiply by chunk_size. Then round to this number.
4226 * This is mostly done by raid10_size()
4227 */
4228 struct r10conf *conf = mddev->private;
4229 sector_t oldsize, size;
4230 int ret;
4231
4232 if (mddev->reshape_position != MaxSector)
4233 return -EBUSY;
4234
4235 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
4236 return -EINVAL;
4237
4238 oldsize = raid10_size(mddev, 0, 0);
4239 size = raid10_size(mddev, sectors, 0);
4240 if (mddev->external_size &&
4241 mddev->array_sectors > size)
4242 return -EINVAL;
4243
4244 ret = mddev->bitmap_ops->resize(mddev, size, 0, false);
4245 if (ret)
4246 return ret;
4247
4248 md_set_array_sectors(mddev, size);
4249 if (sectors > mddev->dev_sectors &&
4250 mddev->recovery_cp > oldsize) {
4251 mddev->recovery_cp = oldsize;
4252 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4253 }
4254 calc_sectors(conf, sectors);
4255 mddev->dev_sectors = conf->dev_sectors;
4256 mddev->resync_max_sectors = size;
4257 return 0;
4258 }
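/*
 * Illustrative example (not part of the driver, numbers assumed): for a
 * 'near' array with raid_disks = 4, near_copies = 2, far_copies = 1 and
 * 64KiB chunks, the rounding multiple described above is
 * lcm(4, 2) * 1 * 64KiB = 256KiB, so the usable size reported by
 * raid10_size() is a multiple of 256KiB.
 */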
4259
4260 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4261 {
4262 struct md_rdev *rdev;
4263 struct r10conf *conf;
4264
4265 if (mddev->degraded > 0) {
4266 pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4267 mdname(mddev));
4268 return ERR_PTR(-EINVAL);
4269 }
4270 sector_div(size, devs);
4271
4272 /* Set new parameters */
4273 mddev->new_level = 10;
4274 /* new layout: far_copies = 1, near_copies = 2 */
4275 mddev->new_layout = (1<<8) + 2;
4276 mddev->new_chunk_sectors = mddev->chunk_sectors;
4277 mddev->delta_disks = mddev->raid_disks;
4278 mddev->raid_disks *= 2;
4279 /* make sure it will be not marked as dirty */
4280 mddev->recovery_cp = MaxSector;
4281 mddev->dev_sectors = size;
4282
4283 conf = setup_conf(mddev);
4284 if (!IS_ERR(conf)) {
4285 rdev_for_each(rdev, mddev)
4286 if (rdev->raid_disk >= 0) {
4287 rdev->new_raid_disk = rdev->raid_disk * 2;
4288 rdev->sectors = size;
4289 }
4290 }
4291
4292 return conf;
4293 }
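/*
 * Illustrative example (not part of the driver): taking over a 2-disk
 * raid0 yields a 4-disk raid10 with new_layout = (1 << 8) + 2 = 0x102
 * (near_copies = 2, far_copies = 1); each existing member keeps its data
 * and moves to slot raid_disk * 2, leaving the odd slots for mirrors to
 * be added later, and recovery_cp = MaxSector keeps the array from being
 * treated as dirty.
 */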
4294
4295 static void *raid10_takeover(struct mddev *mddev)
4296 {
4297 struct r0conf *raid0_conf;
4298
4299 /* raid10 can take over:
4300 * raid0 - providing it has only two drives
4301 */
4302 if (mddev->level == 0) {
4303 /* for raid0 takeover only one zone is supported */
4304 raid0_conf = mddev->private;
4305 if (raid0_conf->nr_strip_zones > 1) {
4306 pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4307 mdname(mddev));
4308 return ERR_PTR(-EINVAL);
4309 }
4310 return raid10_takeover_raid0(mddev,
4311 raid0_conf->strip_zone->zone_end,
4312 raid0_conf->strip_zone->nb_dev);
4313 }
4314 return ERR_PTR(-EINVAL);
4315 }
4316
4317 static int raid10_check_reshape(struct mddev *mddev)
4318 {
4319 /* Called when there is a request to change
4320 * - layout (to ->new_layout)
4321 * - chunk size (to ->new_chunk_sectors)
4322 * - raid_disks (by delta_disks)
4323 * or when trying to restart a reshape that was ongoing.
4324 *
4325 * We need to validate the request and possibly allocate
4326 * space if that might be an issue later.
4327 *
4328 * Currently we reject any reshape of a 'far' mode array,
4329 * allow chunk size to change if new is generally acceptable,
4330 * allow raid_disks to increase, and allow
4331 * a switch between 'near' mode and 'offset' mode.
4332 */
4333 struct r10conf *conf = mddev->private;
4334 struct geom geo;
4335
4336 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4337 return -EINVAL;
4338
4339 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4340 /* mustn't change number of copies */
4341 return -EINVAL;
4342 if (geo.far_copies > 1 && !geo.far_offset)
4343 /* Cannot switch to 'far' mode */
4344 return -EINVAL;
4345
4346 if (mddev->array_sectors & geo.chunk_mask)
4347 /* not factor of array size */
4348 return -EINVAL;
4349
4350 if (!enough(conf, -1))
4351 return -EINVAL;
4352
4353 kfree(conf->mirrors_new);
4354 conf->mirrors_new = NULL;
4355 if (mddev->delta_disks > 0) {
4356 /* allocate new 'mirrors' list */
4357 conf->mirrors_new =
4358 kcalloc(mddev->raid_disks + mddev->delta_disks,
4359 sizeof(struct raid10_info),
4360 GFP_KERNEL);
4361 if (!conf->mirrors_new)
4362 return -ENOMEM;
4363 }
4364 return 0;
4365 }
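/*
 * Illustrative example (not part of the driver): a 4-disk near-2 array
 * (near_copies = 2, far_copies = 1) may grow to 6 disks, change its
 * chunk size, or switch to offset-2 (near_copies = 1, far_copies = 2
 * with far_offset set), since the total number of copies stays 2;
 * switching to a plain 'far' layout (far_copies > 1 without far_offset)
 * is rejected above.
 */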
4366
4367 /*
4368 * Need to check if array has failed when deciding whether to:
4369 * - start an array
4370 * - remove non-faulty devices
4371 * - add a spare
4372 * - allow a reshape
4373 * This determination is simple when no reshape is happening.
4374 * However if there is a reshape, we need to carefully check
4375 * both the before and after sections.
4376 * This is because some failed devices may only affect one
4377 * of the two sections, and some non-in_sync devices may
4378 * be insync in the section most affected by failed devices.
4379 */
4380 static int calc_degraded(struct r10conf *conf)
4381 {
4382 int degraded, degraded2;
4383 int i;
4384
4385 degraded = 0;
4386 /* 'prev' section first */
4387 for (i = 0; i < conf->prev.raid_disks; i++) {
4388 struct md_rdev *rdev = conf->mirrors[i].rdev;
4389
4390 if (!rdev || test_bit(Faulty, &rdev->flags))
4391 degraded++;
4392 else if (!test_bit(In_sync, &rdev->flags))
4393 /* If we ever support reducing the number of devices
4394 * in an array, this might not contribute to
4395 * 'degraded'. For now it does.
4396 */
4397 degraded++;
4398 }
4399 if (conf->geo.raid_disks == conf->prev.raid_disks)
4400 return degraded;
4401 degraded2 = 0;
4402 for (i = 0; i < conf->geo.raid_disks; i++) {
4403 struct md_rdev *rdev = conf->mirrors[i].rdev;
4404
4405 if (!rdev || test_bit(Faulty, &rdev->flags))
4406 degraded2++;
4407 else if (!test_bit(In_sync, &rdev->flags)) {
4408 /* If reshape is increasing the number of devices,
4409 * this section has already been recovered, so
4410 * it doesn't contribute to degraded.
4411 * else it does.
4412 */
4413 if (conf->geo.raid_disks <= conf->prev.raid_disks)
4414 degraded2++;
4415 }
4416 }
4417 if (degraded2 > degraded)
4418 return degraded2;
4419 return degraded;
4420 }
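/*
 * Illustrative reading (not part of the driver): while growing the array,
 * a device that is present but not In_sync counts towards 'degraded' in
 * the 'prev' pass but not in the 'geo' pass, whereas a Faulty device
 * counts in both; the larger of the two totals is reported.
 */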
4421
4422 static int raid10_start_reshape(struct mddev *mddev)
4423 {
4424 /* A 'reshape' has been requested. This commits
4425 * the various 'new' fields and sets MD_RECOVERY_RESHAPE
4426 * This also checks if there are enough spares and adds them
4427 * to the array.
4428 * We currently require enough spares to make the final
4429 * array non-degraded. We also require that the difference
4430 * between old and new data_offset - on each device - is
4431 * enough that we never risk over-writing.
4432 */
4433
4434 unsigned long before_length, after_length;
4435 sector_t min_offset_diff = 0;
4436 int first = 1;
4437 struct geom new;
4438 struct r10conf *conf = mddev->private;
4439 struct md_rdev *rdev;
4440 int spares = 0;
4441 int ret;
4442
4443 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4444 return -EBUSY;
4445
4446 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4447 return -EINVAL;
4448
4449 before_length = ((1 << conf->prev.chunk_shift) *
4450 conf->prev.far_copies);
4451 after_length = ((1 << conf->geo.chunk_shift) *
4452 conf->geo.far_copies);
4453
4454 rdev_for_each(rdev, mddev) {
4455 if (!test_bit(In_sync, &rdev->flags)
4456 && !test_bit(Faulty, &rdev->flags))
4457 spares++;
4458 if (rdev->raid_disk >= 0) {
4459 long long diff = (rdev->new_data_offset
4460 - rdev->data_offset);
4461 if (!mddev->reshape_backwards)
4462 diff = -diff;
4463 if (diff < 0)
4464 diff = 0;
4465 if (first || diff < min_offset_diff)
4466 min_offset_diff = diff;
4467 first = 0;
4468 }
4469 }
4470
4471 if (max(before_length, after_length) > min_offset_diff)
4472 return -EINVAL;
4473
4474 if (spares < mddev->delta_disks)
4475 return -EINVAL;
4476
4477 conf->offset_diff = min_offset_diff;
4478 spin_lock_irq(&conf->device_lock);
4479 if (conf->mirrors_new) {
4480 memcpy(conf->mirrors_new, conf->mirrors,
4481 sizeof(struct raid10_info)*conf->prev.raid_disks);
4482 smp_mb();
4483 kfree(conf->mirrors_old);
4484 conf->mirrors_old = conf->mirrors;
4485 conf->mirrors = conf->mirrors_new;
4486 conf->mirrors_new = NULL;
4487 }
4488 setup_geo(&conf->geo, mddev, geo_start);
4489 smp_mb();
4490 if (mddev->reshape_backwards) {
4491 sector_t size = raid10_size(mddev, 0, 0);
4492 if (size < mddev->array_sectors) {
4493 spin_unlock_irq(&conf->device_lock);
4494 pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
4495 mdname(mddev));
4496 return -EINVAL;
4497 }
4498 mddev->resync_max_sectors = size;
4499 conf->reshape_progress = size;
4500 } else
4501 conf->reshape_progress = 0;
4502 conf->reshape_safe = conf->reshape_progress;
4503 spin_unlock_irq(&conf->device_lock);
4504
4505 if (mddev->delta_disks && mddev->bitmap) {
4506 struct mdp_superblock_1 *sb = NULL;
4507 sector_t oldsize, newsize;
4508
4509 oldsize = raid10_size(mddev, 0, 0);
4510 newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4511
4512 if (!mddev_is_clustered(mddev)) {
4513 ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
4514 if (ret)
4515 goto abort;
4516 else
4517 goto out;
4518 }
4519
4520 rdev_for_each(rdev, mddev) {
4521 if (rdev->raid_disk > -1 &&
4522 !test_bit(Faulty, &rdev->flags))
4523 sb = page_address(rdev->sb_page);
4524 }
4525
4526 /*
4527 * Some node is already performing the reshape; there is no need
4528 * to call bitmap_ops->resize again since it will be called when
4529 * the BITMAP_RESIZE msg is received.
4530 */
4531 if ((sb && (le32_to_cpu(sb->feature_map) &
4532 MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4533 goto out;
4534
4535 ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
4536 if (ret)
4537 goto abort;
4538
4539 ret = mddev->cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4540 if (ret) {
4541 mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
4542 goto abort;
4543 }
4544 }
4545 out:
4546 if (mddev->delta_disks > 0) {
4547 rdev_for_each(rdev, mddev)
4548 if (rdev->raid_disk < 0 &&
4549 !test_bit(Faulty, &rdev->flags)) {
4550 if (raid10_add_disk(mddev, rdev) == 0) {
4551 if (rdev->raid_disk >=
4552 conf->prev.raid_disks)
4553 set_bit(In_sync, &rdev->flags);
4554 else
4555 rdev->recovery_offset = 0;
4556
4557 /* Failure here is OK */
4558 sysfs_link_rdev(mddev, rdev);
4559 }
4560 } else if (rdev->raid_disk >= conf->prev.raid_disks
4561 && !test_bit(Faulty, &rdev->flags)) {
4562 /* This is a spare that was manually added */
4563 set_bit(In_sync, &rdev->flags);
4564 }
4565 }
4566 /* When a reshape changes the number of devices,
4567 * ->degraded is measured against the larger of the
4568 * pre and post numbers.
4569 */
4570 spin_lock_irq(&conf->device_lock);
4571 mddev->degraded = calc_degraded(conf);
4572 spin_unlock_irq(&conf->device_lock);
4573 mddev->raid_disks = conf->geo.raid_disks;
4574 mddev->reshape_position = conf->reshape_progress;
4575 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4576
4577 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4578 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4579 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4580 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4581 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4582 conf->reshape_checkpoint = jiffies;
4583 md_new_event();
4584 return 0;
4585
4586 abort:
4587 mddev->recovery = 0;
4588 spin_lock_irq(&conf->device_lock);
4589 conf->geo = conf->prev;
4590 mddev->raid_disks = conf->geo.raid_disks;
4591 rdev_for_each(rdev, mddev)
4592 rdev->new_data_offset = rdev->data_offset;
4593 smp_wmb();
4594 conf->reshape_progress = MaxSector;
4595 conf->reshape_safe = MaxSector;
4596 mddev->reshape_position = MaxSector;
4597 spin_unlock_irq(&conf->device_lock);
4598 return ret;
4599 }
4600
4601 /* Calculate the last device-address that could contain
4602 * any block from the chunk that includes the array-address 's'
4603 * and report the next address.
4604 * i.e. the address returned will be chunk-aligned and after
4605 * any data that is in the chunk containing 's'.
4606 */
4607 static sector_t last_dev_address(sector_t s, struct geom *geo)
4608 {
4609 s = (s | geo->chunk_mask) + 1;
4610 s >>= geo->chunk_shift;
4611 s *= geo->near_copies;
4612 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4613 s *= geo->far_copies;
4614 s <<= geo->chunk_shift;
4615 return s;
4616 }
4617
4618 /* Calculate the first device-address that could contain
4619 * any block from the chunk that includes the array-address 's'.
4620 * This too will be the start of a chunk
4621 */
4622 static sector_t first_dev_address(sector_t s, struct geom *geo)
4623 {
4624 s >>= geo->chunk_shift;
4625 s *= geo->near_copies;
4626 sector_div(s, geo->raid_disks);
4627 s *= geo->far_copies;
4628 s <<= geo->chunk_shift;
4629 return s;
4630 }
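/*
 * Illustrative example (not part of the driver, numbers assumed): with
 * near_copies = 2, far_copies = 1, raid_disks = 4 and 64KiB chunks
 * (chunk_mask = 127, chunk_shift = 7), the array address s = 1000 lies
 * in chunk 7:
 *   last_dev_address(1000):  (1000 | 127) + 1 = 1024, >> 7 = 8, * 2 = 16,
 *       DIV_ROUND_UP(16, 4) = 4, * 1 = 4, << 7 = 512;
 *   first_dev_address(1000): 1000 >> 7 = 7, * 2 = 14, / 4 = 3, * 1 = 3,
 *       << 7 = 384;
 * so every block of that chunk lives in device offsets [384, 512).
 */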
4631
4632 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4633 int *skipped)
4634 {
4635 /* We simply copy at most one chunk (smallest of old and new)
4636 * at a time, possibly less if that exceeds RESYNC_PAGES,
4637 * or we hit a bad block or something.
4638 * This might mean we pause for normal IO in the middle of
4639 * a chunk, but that is not a problem as mddev->reshape_position
4640 * can record any location.
4641 *
4642 * If we will want to write to a location that isn't
4643 * yet recorded as 'safe' (i.e. in metadata on disk) then
4644 * we need to flush all reshape requests and update the metadata.
4645 *
4646 * When reshaping forwards (e.g. to more devices), we interpret
4647 * 'safe' as the earliest block which might not have been copied
4648 * down yet. We divide this by previous stripe size and multiply
4649 * by previous stripe length to get lowest device offset that we
4650 * cannot write to yet.
4651 * We interpret 'sector_nr' as an address that we want to write to.
4652 * From this we use last_dev_address() to find where we might
4653 * write to, and first_dev_address() on the 'safe' position.
4654 * If this 'next' write position is after the 'safe' position,
4655 * we must update the metadata to increase the 'safe' position.
4656 *
4657 * When reshaping backwards, we round in the opposite direction
4658 * and perform the reverse test: next write position must not be
4659 * less than current safe position.
4660 *
4661 * In all this the minimum difference in data offsets
4662 * (conf->offset_diff - always positive) allows a bit of slack,
4663 * so next can be after 'safe', but not by more than offset_diff
4664 *
4665 * We need to prepare all the bios here before we start any IO
4666 * to ensure the size we choose is acceptable to all devices.
4667 * That means one for each copy for write-out and an extra one for
4668 * read-in.
4669 * We store the read-in bio in ->master_bio and the others in
4670 * ->devs[x].bio and ->devs[x].repl_bio.
4671 */
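/*
 * Illustrative example (not part of the driver, numbers assumed):
 * reshaping forwards from 4 to 6 disks, suppose reshape_progress = 4096
 * and reshape_safe = 2048 (array addresses).  Then 'next' is
 * last_dev_address(4096, &geo) in the new layout and 'safe' is
 * first_dev_address(2048, &prev) in the old one; if
 * next > safe + conf->offset_diff we could overwrite data that is not
 * yet recorded as copied, so need_flush is set and the metadata is
 * written out before any reads are scheduled.
 */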
4672 struct r10conf *conf = mddev->private;
4673 struct r10bio *r10_bio;
4674 sector_t next, safe, last;
4675 int max_sectors;
4676 int nr_sectors;
4677 int s;
4678 struct md_rdev *rdev;
4679 int need_flush = 0;
4680 struct bio *blist;
4681 struct bio *bio, *read_bio;
4682 int sectors_done = 0;
4683 struct page **pages;
4684
4685 if (sector_nr == 0) {
4686 /* If restarting in the middle, skip the initial sectors */
4687 if (mddev->reshape_backwards &&
4688 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4689 sector_nr = (raid10_size(mddev, 0, 0)
4690 - conf->reshape_progress);
4691 } else if (!mddev->reshape_backwards &&
4692 conf->reshape_progress > 0)
4693 sector_nr = conf->reshape_progress;
4694 if (sector_nr) {
4695 mddev->curr_resync_completed = sector_nr;
4696 sysfs_notify_dirent_safe(mddev->sysfs_completed);
4697 *skipped = 1;
4698 return sector_nr;
4699 }
4700 }
4701
4702 /* We don't use sector_nr to track where we are up to
4703 * as that doesn't work well for ->reshape_backwards.
4704 * So just use ->reshape_progress.
4705 */
4706 if (mddev->reshape_backwards) {
4707 /* 'next' is the earliest device address that we might
4708 * write to for this chunk in the new layout
4709 */
4710 next = first_dev_address(conf->reshape_progress - 1,
4711 &conf->geo);
4712
4713 /* 'safe' is the last device address that we might read from
4714 * in the old layout after a restart
4715 */
4716 safe = last_dev_address(conf->reshape_safe - 1,
4717 &conf->prev);
4718
4719 if (next + conf->offset_diff < safe)
4720 need_flush = 1;
4721
4722 last = conf->reshape_progress - 1;
4723 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4724 & conf->prev.chunk_mask);
4725 if (sector_nr + RESYNC_SECTORS < last)
4726 sector_nr = last + 1 - RESYNC_SECTORS;
4727 } else {
4728 /* 'next' is after the last device address that we
4729 * might write to for this chunk in the new layout
4730 */
4731 next = last_dev_address(conf->reshape_progress, &conf->geo);
4732
4733 /* 'safe' is the earliest device address that we might
4734 * read from in the old layout after a restart
4735 */
4736 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4737
4738 /* Need to update metadata if 'next' might be beyond 'safe'
4739 * as that would possibly corrupt data
4740 */
4741 if (next > safe + conf->offset_diff)
4742 need_flush = 1;
4743
4744 sector_nr = conf->reshape_progress;
4745 last = sector_nr | (conf->geo.chunk_mask
4746 & conf->prev.chunk_mask);
4747
4748 if (sector_nr + RESYNC_SECTORS <= last)
4749 last = sector_nr + RESYNC_SECTORS - 1;
4750 }
4751
4752 if (need_flush ||
4753 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4754 /* Need to update reshape_position in metadata */
4755 wait_barrier(conf, false);
4756 mddev->reshape_position = conf->reshape_progress;
4757 if (mddev->reshape_backwards)
4758 mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4759 - conf->reshape_progress;
4760 else
4761 mddev->curr_resync_completed = conf->reshape_progress;
4762 conf->reshape_checkpoint = jiffies;
4763 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4764 md_wakeup_thread(mddev->thread);
4765 wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4766 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4767 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4768 allow_barrier(conf);
4769 return sectors_done;
4770 }
4771 conf->reshape_safe = mddev->reshape_position;
4772 allow_barrier(conf);
4773 }
4774
4775 raise_barrier(conf, 0);
4776 read_more:
4777 /* Now schedule reads for blocks from sector_nr to last */
4778 r10_bio = raid10_alloc_init_r10buf(conf);
4779 r10_bio->state = 0;
4780 raise_barrier(conf, 1);
4781 atomic_set(&r10_bio->remaining, 0);
4782 r10_bio->mddev = mddev;
4783 r10_bio->sector = sector_nr;
4784 set_bit(R10BIO_IsReshape, &r10_bio->state);
4785 r10_bio->sectors = last - sector_nr + 1;
4786 rdev = read_balance(conf, r10_bio, &max_sectors);
4787 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4788
4789 if (!rdev) {
4790 /* Cannot read from here, so need to record bad blocks
4791 * on all the target devices.
4792 */
4793 // FIXME
4794 mempool_free(r10_bio, &conf->r10buf_pool);
4795 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4796 return sectors_done;
4797 }
4798
4799 read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
4800 GFP_KERNEL, &mddev->bio_set);
4801 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4802 + rdev->data_offset);
4803 read_bio->bi_private = r10_bio;
4804 read_bio->bi_end_io = end_reshape_read;
4805 r10_bio->master_bio = read_bio;
4806 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4807
4808 /*
4809 * Broadcast RESYNC message to other nodes, so all nodes would not
4810 * write to the region to avoid conflict.
4811 */
4812 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4813 struct mdp_superblock_1 *sb = NULL;
4814 int sb_reshape_pos = 0;
4815
4816 conf->cluster_sync_low = sector_nr;
4817 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4818 sb = page_address(rdev->sb_page);
4819 if (sb) {
4820 sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4821 /*
4822 * Set cluster_sync_low again if the next address for the array
4823 * reshape is less than cluster_sync_low, since we can't update
4824 * cluster_sync_low until the reshape has finished.
4825 */
4826 if (sb_reshape_pos < conf->cluster_sync_low)
4827 conf->cluster_sync_low = sb_reshape_pos;
4828 }
4829
4830 mddev->cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4831 conf->cluster_sync_high);
4832 }
4833
4834 /* Now find the locations in the new layout */
4835 __raid10_find_phys(&conf->geo, r10_bio);
4836
4837 blist = read_bio;
4838 read_bio->bi_next = NULL;
4839
4840 for (s = 0; s < conf->copies*2; s++) {
4841 struct bio *b;
4842 int d = r10_bio->devs[s/2].devnum;
4843 struct md_rdev *rdev2;
4844 if (s&1) {
4845 rdev2 = conf->mirrors[d].replacement;
4846 b = r10_bio->devs[s/2].repl_bio;
4847 } else {
4848 rdev2 = conf->mirrors[d].rdev;
4849 b = r10_bio->devs[s/2].bio;
4850 }
4851 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4852 continue;
4853
4854 bio_set_dev(b, rdev2->bdev);
4855 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4856 rdev2->new_data_offset;
4857 b->bi_end_io = end_reshape_write;
4858 b->bi_opf = REQ_OP_WRITE;
4859 b->bi_next = blist;
4860 blist = b;
4861 }
4862
4863 /* Now add as many pages as possible to all of these bios. */
4864
4865 nr_sectors = 0;
4866 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4867 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4868 struct page *page = pages[s / (PAGE_SIZE >> 9)];
4869 int len = (max_sectors - s) << 9;
4870 if (len > PAGE_SIZE)
4871 len = PAGE_SIZE;
4872 for (bio = blist; bio ; bio = bio->bi_next) {
4873 if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
4874 bio->bi_status = BLK_STS_RESOURCE;
4875 bio_endio(bio);
4876 return sectors_done;
4877 }
4878 }
4879 sector_nr += len >> 9;
4880 nr_sectors += len >> 9;
4881 }
4882 r10_bio->sectors = nr_sectors;
4883
4884 /* Now submit the read */
4885 atomic_inc(&r10_bio->remaining);
4886 read_bio->bi_next = NULL;
4887 submit_bio_noacct(read_bio);
4888 sectors_done += nr_sectors;
4889 if (sector_nr <= last)
4890 goto read_more;
4891
4892 lower_barrier(conf);
4893
4894 /* Now that we have done the whole section we can
4895 * update reshape_progress
4896 */
4897 if (mddev->reshape_backwards)
4898 conf->reshape_progress -= sectors_done;
4899 else
4900 conf->reshape_progress += sectors_done;
4901
4902 return sectors_done;
4903 }
4904
4905 static void end_reshape_request(struct r10bio *r10_bio);
4906 static int handle_reshape_read_error(struct mddev *mddev,
4907 struct r10bio *r10_bio);
4908 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4909 {
4910 /* Reshape read completed. Hopefully we have a block
4911 * to write out.
4912 * If we got a read error then we do sync 1-page reads from
4913 * elsewhere until we find the data - or give up.
4914 */
4915 struct r10conf *conf = mddev->private;
4916 int s;
4917
4918 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4919 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4920 /* Reshape has been aborted */
4921 md_done_sync(mddev, r10_bio->sectors, 0);
4922 return;
4923 }
4924
4925 /* We definitely have the data in the pages, schedule the
4926 * writes.
4927 */
4928 atomic_set(&r10_bio->remaining, 1);
4929 for (s = 0; s < conf->copies*2; s++) {
4930 struct bio *b;
4931 int d = r10_bio->devs[s/2].devnum;
4932 struct md_rdev *rdev;
4933 if (s&1) {
4934 rdev = conf->mirrors[d].replacement;
4935 b = r10_bio->devs[s/2].repl_bio;
4936 } else {
4937 rdev = conf->mirrors[d].rdev;
4938 b = r10_bio->devs[s/2].bio;
4939 }
4940 if (!rdev || test_bit(Faulty, &rdev->flags))
4941 continue;
4942
4943 atomic_inc(&rdev->nr_pending);
4944 atomic_inc(&r10_bio->remaining);
4945 b->bi_next = NULL;
4946 submit_bio_noacct(b);
4947 }
4948 end_reshape_request(r10_bio);
4949 }
4950
4951 static void end_reshape(struct r10conf *conf)
4952 {
4953 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4954 return;
4955
4956 spin_lock_irq(&conf->device_lock);
4957 conf->prev = conf->geo;
4958 md_finish_reshape(conf->mddev);
4959 smp_wmb();
4960 conf->reshape_progress = MaxSector;
4961 conf->reshape_safe = MaxSector;
4962 spin_unlock_irq(&conf->device_lock);
4963
4964 mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf));
4965 conf->fullsync = 0;
4966 }
4967
4968 static void raid10_update_reshape_pos(struct mddev *mddev)
4969 {
4970 struct r10conf *conf = mddev->private;
4971 sector_t lo, hi;
4972
4973 mddev->cluster_ops->resync_info_get(mddev, &lo, &hi);
4974 if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
4975 || mddev->reshape_position == MaxSector)
4976 conf->reshape_progress = mddev->reshape_position;
4977 else
4978 WARN_ON_ONCE(1);
4979 }
4980
4981 static int handle_reshape_read_error(struct mddev *mddev,
4982 struct r10bio *r10_bio)
4983 {
4984 /* Use sync reads to get the blocks from somewhere else */
4985 int sectors = r10_bio->sectors;
4986 struct r10conf *conf = mddev->private;
4987 struct r10bio *r10b;
4988 int slot = 0;
4989 int idx = 0;
4990 struct page **pages;
4991
4992 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
4993 if (!r10b) {
4994 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4995 return -ENOMEM;
4996 }
4997
4998 /* reshape IOs share pages from .devs[0].bio */
4999 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5000
5001 r10b->sector = r10_bio->sector;
5002 __raid10_find_phys(&conf->prev, r10b);
5003
5004 while (sectors) {
5005 int s = sectors;
5006 int success = 0;
5007 int first_slot = slot;
5008
5009 if (s > (PAGE_SIZE >> 9))
5010 s = PAGE_SIZE >> 9;
5011
5012 while (!success) {
5013 int d = r10b->devs[slot].devnum;
5014 struct md_rdev *rdev = conf->mirrors[d].rdev;
5015 sector_t addr;
5016 if (rdev == NULL ||
5017 test_bit(Faulty, &rdev->flags) ||
5018 !test_bit(In_sync, &rdev->flags))
5019 goto failed;
5020
5021 addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
5022 atomic_inc(&rdev->nr_pending);
5023 success = sync_page_io(rdev,
5024 addr,
5025 s << 9,
5026 pages[idx],
5027 REQ_OP_READ, false);
5028 rdev_dec_pending(rdev, mddev);
5029 if (success)
5030 break;
5031 failed:
5032 slot++;
5033 if (slot >= conf->copies)
5034 slot = 0;
5035 if (slot == first_slot)
5036 break;
5037 }
5038 if (!success) {
5039 /* couldn't read this block, must give up */
5040 set_bit(MD_RECOVERY_INTR,
5041 &mddev->recovery);
5042 kfree(r10b);
5043 return -EIO;
5044 }
5045 sectors -= s;
5046 idx++;
5047 }
5048 kfree(r10b);
5049 return 0;
5050 }
5051
5052 static void end_reshape_write(struct bio *bio)
5053 {
5054 struct r10bio *r10_bio = get_resync_r10bio(bio);
5055 struct mddev *mddev = r10_bio->mddev;
5056 struct r10conf *conf = mddev->private;
5057 int d;
5058 int slot;
5059 int repl;
5060 struct md_rdev *rdev = NULL;
5061
5062 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
5063 rdev = repl ? conf->mirrors[d].replacement :
5064 conf->mirrors[d].rdev;
5065
5066 if (bio->bi_status) {
5067 /* FIXME should record badblock */
5068 md_error(mddev, rdev);
5069 }
5070
5071 rdev_dec_pending(rdev, mddev);
5072 end_reshape_request(r10_bio);
5073 }
5074
5075 static void end_reshape_request(struct r10bio *r10_bio)
5076 {
5077 if (!atomic_dec_and_test(&r10_bio->remaining))
5078 return;
5079 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
5080 bio_put(r10_bio->master_bio);
5081 put_buf(r10_bio);
5082 }
5083
5084 static void raid10_finish_reshape(struct mddev *mddev)
5085 {
5086 struct r10conf *conf = mddev->private;
5087
5088 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5089 return;
5090
5091 if (mddev->delta_disks > 0) {
5092 if (mddev->recovery_cp > mddev->resync_max_sectors) {
5093 mddev->recovery_cp = mddev->resync_max_sectors;
5094 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5095 }
5096 mddev->resync_max_sectors = mddev->array_sectors;
5097 } else {
5098 int d;
5099 for (d = conf->geo.raid_disks ;
5100 d < conf->geo.raid_disks - mddev->delta_disks;
5101 d++) {
5102 struct md_rdev *rdev = conf->mirrors[d].rdev;
5103 if (rdev)
5104 clear_bit(In_sync, &rdev->flags);
5105 rdev = conf->mirrors[d].replacement;
5106 if (rdev)
5107 clear_bit(In_sync, &rdev->flags);
5108 }
5109 }
5110 mddev->layout = mddev->new_layout;
5111 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
5112 mddev->reshape_position = MaxSector;
5113 mddev->delta_disks = 0;
5114 mddev->reshape_backwards = 0;
5115 }
5116
5117 static struct md_personality raid10_personality =
5118 {
5119 .head = {
5120 .type = MD_PERSONALITY,
5121 .id = ID_RAID10,
5122 .name = "raid10",
5123 .owner = THIS_MODULE,
5124 },
5125
5126 .make_request = raid10_make_request,
5127 .run = raid10_run,
5128 .free = raid10_free,
5129 .status = raid10_status,
5130 .error_handler = raid10_error,
5131 .hot_add_disk = raid10_add_disk,
5132 .hot_remove_disk= raid10_remove_disk,
5133 .spare_active = raid10_spare_active,
5134 .sync_request = raid10_sync_request,
5135 .quiesce = raid10_quiesce,
5136 .size = raid10_size,
5137 .resize = raid10_resize,
5138 .takeover = raid10_takeover,
5139 .check_reshape = raid10_check_reshape,
5140 .start_reshape = raid10_start_reshape,
5141 .finish_reshape = raid10_finish_reshape,
5142 .update_reshape_pos = raid10_update_reshape_pos,
5143 };
5144
5145 static int __init raid10_init(void)
5146 {
5147 return register_md_submodule(&raid10_personality.head);
5148 }
5149
5150 static void __exit raid10_exit(void)
5151 {
5152 unregister_md_submodule(&raid10_personality.head);
5153 }
5154
5155 module_init(raid10_init);
5156 module_exit(raid10_exit);
5157 MODULE_LICENSE("GPL");
5158 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
5159 MODULE_ALIAS("md-personality-9"); /* RAID10 */
5160 MODULE_ALIAS("md-raid10");
5161 MODULE_ALIAS("md-level-10");
5162