xref: /linux/drivers/md/raid10.c (revision 8a61cb6e150ea907b580a1b5e705decb0a3ffc86)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * raid10.c : Multiple Devices driver for Linux
4  *
5  * Copyright (C) 2000-2004 Neil Brown
6  *
7  * RAID-10 support for md.
8  *
9  * Based on code in raid1.c.  See raid1.c for further copyright information.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14 #include <linux/blkdev.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 #include <linux/ratelimit.h>
18 #include <linux/kthread.h>
19 #include <linux/raid/md_p.h>
20 #include <trace/events/block.h>
21 #include "md.h"
22 
23 #define RAID_1_10_NAME "raid10"
24 #include "raid10.h"
25 #include "raid0.h"
26 #include "md-bitmap.h"
27 
28 /*
29  * RAID10 provides a combination of RAID0 and RAID1 functionality.
30  * The layout of data is defined by
31  *    chunk_size
32  *    raid_disks
33  *    near_copies (stored in low byte of layout)
34  *    far_copies (stored in second byte of layout)
35  *    far_offset (stored in bit 16 of layout )
36  *    use_far_sets (stored in bit 17 of layout )
37  *    use_far_sets_bugfixed (stored in bit 18 of layout )
38  *
39  * The data to be stored is divided into chunks using chunksize.  Each device
40  * is divided into far_copies sections.   In each section, chunks are laid out
41  * in a style similar to raid0, but near_copies copies of each chunk are stored
42  * (each on a different drive).  The starting device for each section is offset
43  * near_copies from the starting device of the previous section.  Thus there
44  * are (near_copies * far_copies) of each chunk, and each is on a different
45  * drive.  near_copies and far_copies must be at least one, and their product
46  * is at most raid_disks.
47  *
48  * If far_offset is true, then the far_copies are handled a bit differently.
49  * The copies are still in different stripes, but instead of being very far
50  * apart on disk, they are in adjacent stripes.
51  *
52  * The far and offset algorithms are handled slightly differently if
53  * 'use_far_sets' is true.  In this case, the array's devices are grouped into
54  * sets that are (near_copies * far_copies) in size.  The far copied stripes
55  * are still shifted by 'near_copies' devices, but this shifting stays confined
56  * to the set rather than the entire array.  This is done to improve the number
57  * of device combinations that can fail without causing the array to fail.
58  * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
59  * on a device):
60  *    A B C D    A B C D E
61  *      ...         ...
62  *    D A B C    E A B C D
63  * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
64  *    [A B] [C D]    [A B] [C D E]
65  *    |...| |...|    |...| | ... |
66  *    [B A] [D C]    [B A] [E C D]
67  */
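/*
 * Illustrative sketch only (not used by the driver): the helper below mirrors
 * the near/far placement described above for the simple case of
 * far_offset == 0 and use_far_sets == 0.  The function name, the
 * 'struct toy_copy' type and the parameter list are invented for this
 * example; the real mapping is done by __raid10_find_phys() further down.
 */
struct toy_copy { int devnum; sector_t addr; };

static void __maybe_unused toy_raid10_find_phys(int raid_disks, int near,
						int far, sector_t stride,
						int chunk_shift, sector_t virt,
						struct toy_copy *out)
{
	sector_t chunk = virt >> chunk_shift;
	sector_t offset = virt & (((sector_t)1 << chunk_shift) - 1);
	sector_t stripe = chunk * near;
	/* device holding the first copy of this chunk */
	int dev = sector_div(stripe, raid_disks);
	sector_t base = (stripe << chunk_shift) + offset;
	int slot = 0, n, f;

	for (n = 0; n < near; n++) {
		int d = dev;
		sector_t s = base;

		for (f = 0; f < far; f++) {
			/* record one of the near*far copies */
			out[slot].devnum = d;
			out[slot].addr = s;
			slot++;
			/* next far section: shift the device by near_copies... */
			d = (d + near) % raid_disks;
			/* ...and move 'stride' sectors down that device */
			s += stride;
		}
		dev = (dev + 1) % raid_disks;
		if (dev == 0)
			/* wrapped around: the next near copy starts a new stripe */
			base += (sector_t)1 << chunk_shift;
	}
}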
68 
69 static void allow_barrier(struct r10conf *conf);
70 static void lower_barrier(struct r10conf *conf);
71 static int _enough(struct r10conf *conf, int previous, int ignore);
72 static int enough(struct r10conf *conf, int ignore);
73 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
74 				int *skipped);
75 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
76 static void end_reshape_write(struct bio *bio);
77 static void end_reshape(struct r10conf *conf);
78 
79 #include "raid1-10.c"
80 
81 #define NULL_CMD
82 #define cmd_before(conf, cmd) \
83 	do { \
84 		write_sequnlock_irq(&(conf)->resync_lock); \
85 		cmd; \
86 	} while (0)
87 #define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)
88 
89 #define wait_event_barrier_cmd(conf, cond, cmd) \
90 	wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
91 		       cmd_after(conf))
92 
93 #define wait_event_barrier(conf, cond) \
94 	wait_event_barrier_cmd(conf, cond, NULL_CMD)
95 
96 /*
97  * For a resync bio, the r10bio pointer can be retrieved from the per-bio
98  * 'struct resync_pages'.
99  */
100 static inline struct r10bio *get_resync_r10bio(struct bio *bio)
101 {
102 	return get_resync_pages(bio)->raid_bio;
103 }
104 
105 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
106 {
107 	struct r10conf *conf = data;
108 	int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
109 
110 	/* allocate a r10bio with room for raid_disks entries in the
111 	 * bios array */
112 	return kzalloc(size, gfp_flags);
113 }
114 
115 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
116 /* amount of memory to reserve for resync requests */
117 #define RESYNC_WINDOW (1024*1024)
118 /* maximum number of concurrent requests, memory permitting */
119 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
120 #define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
121 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
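/*
 * Worked arithmetic (illustrative, assuming the usual RESYNC_BLOCK_SIZE of
 * 64 KiB from raid1-10.c): RESYNC_SECTORS is 128 sectors, RESYNC_DEPTH is
 * 32 MiB / 64 KiB = 512 in-flight resync requests, and CLUSTER_RESYNC_WINDOW
 * is 32 MiB, i.e. 65536 sectors.
 */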
122 
123 /*
124  * When performing a resync, we need to read and compare, so
125  * we need as many pages as there are copies.
126  * When performing a recovery, we need 2 bios, one for read,
127  * one for write (we recover only one drive per r10buf)
128  *
129  */
130 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
131 {
132 	struct r10conf *conf = data;
133 	struct r10bio *r10_bio;
134 	struct bio *bio;
135 	int j;
136 	int nalloc, nalloc_rp;
137 	struct resync_pages *rps;
138 
139 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
140 	if (!r10_bio)
141 		return NULL;
142 
143 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
144 	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
145 		nalloc = conf->copies; /* resync */
146 	else
147 		nalloc = 2; /* recovery */
148 
149 	/* allocate once for all bios */
150 	if (!conf->have_replacement)
151 		nalloc_rp = nalloc;
152 	else
153 		nalloc_rp = nalloc * 2;
154 	rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
155 	if (!rps)
156 		goto out_free_r10bio;
157 
158 	/*
159 	 * Allocate bios.
160 	 */
161 	for (j = nalloc ; j-- ; ) {
162 		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
163 		if (!bio)
164 			goto out_free_bio;
165 		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
166 		r10_bio->devs[j].bio = bio;
167 		if (!conf->have_replacement)
168 			continue;
169 		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
170 		if (!bio)
171 			goto out_free_bio;
172 		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
173 		r10_bio->devs[j].repl_bio = bio;
174 	}
175 	/*
176 	 * Allocate RESYNC_PAGES data pages and attach them
177 	 * where needed.
178 	 */
179 	for (j = 0; j < nalloc; j++) {
180 		struct bio *rbio = r10_bio->devs[j].repl_bio;
181 		struct resync_pages *rp, *rp_repl;
182 
183 		rp = &rps[j];
184 		if (rbio)
185 			rp_repl = &rps[nalloc + j];
186 
187 		bio = r10_bio->devs[j].bio;
188 
189 		if (!j || test_bit(MD_RECOVERY_SYNC,
190 				   &conf->mddev->recovery)) {
191 			if (resync_alloc_pages(rp, gfp_flags))
192 				goto out_free_pages;
193 		} else {
194 			memcpy(rp, &rps[0], sizeof(*rp));
195 			resync_get_all_pages(rp);
196 		}
197 
198 		rp->raid_bio = r10_bio;
199 		bio->bi_private = rp;
200 		if (rbio) {
201 			memcpy(rp_repl, rp, sizeof(*rp));
202 			rbio->bi_private = rp_repl;
203 		}
204 	}
205 
206 	return r10_bio;
207 
208 out_free_pages:
209 	while (--j >= 0)
210 		resync_free_pages(&rps[j]);
211 
212 	j = 0;
213 out_free_bio:
214 	for ( ; j < nalloc; j++) {
215 		if (r10_bio->devs[j].bio)
216 			bio_uninit(r10_bio->devs[j].bio);
217 		kfree(r10_bio->devs[j].bio);
218 		if (r10_bio->devs[j].repl_bio)
219 			bio_uninit(r10_bio->devs[j].repl_bio);
220 		kfree(r10_bio->devs[j].repl_bio);
221 	}
222 	kfree(rps);
223 out_free_r10bio:
224 	rbio_pool_free(r10_bio, conf);
225 	return NULL;
226 }
227 
228 static void r10buf_pool_free(void *__r10_bio, void *data)
229 {
230 	struct r10conf *conf = data;
231 	struct r10bio *r10bio = __r10_bio;
232 	int j;
233 	struct resync_pages *rp = NULL;
234 
235 	for (j = conf->copies; j--; ) {
236 		struct bio *bio = r10bio->devs[j].bio;
237 
238 		if (bio) {
239 			rp = get_resync_pages(bio);
240 			resync_free_pages(rp);
241 			bio_uninit(bio);
242 			kfree(bio);
243 		}
244 
245 		bio = r10bio->devs[j].repl_bio;
246 		if (bio) {
247 			bio_uninit(bio);
248 			kfree(bio);
249 		}
250 	}
251 
252 	/* resync pages array stored in the 1st bio's .bi_private */
253 	kfree(rp);
254 
255 	rbio_pool_free(r10bio, conf);
256 }
257 
258 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
259 {
260 	int i;
261 
262 	for (i = 0; i < conf->geo.raid_disks; i++) {
263 		struct bio **bio = & r10_bio->devs[i].bio;
264 		if (!BIO_SPECIAL(*bio))
265 			bio_put(*bio);
266 		*bio = NULL;
267 		bio = &r10_bio->devs[i].repl_bio;
268 		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
269 			bio_put(*bio);
270 		*bio = NULL;
271 	}
272 }
273 
274 static void free_r10bio(struct r10bio *r10_bio)
275 {
276 	struct r10conf *conf = r10_bio->mddev->private;
277 
278 	put_all_bios(conf, r10_bio);
279 	mempool_free(r10_bio, &conf->r10bio_pool);
280 }
281 
282 static void put_buf(struct r10bio *r10_bio)
283 {
284 	struct r10conf *conf = r10_bio->mddev->private;
285 
286 	mempool_free(r10_bio, &conf->r10buf_pool);
287 
288 	lower_barrier(conf);
289 }
290 
291 static void wake_up_barrier(struct r10conf *conf)
292 {
293 	if (wq_has_sleeper(&conf->wait_barrier))
294 		wake_up(&conf->wait_barrier);
295 }
296 
297 static void reschedule_retry(struct r10bio *r10_bio)
298 {
299 	unsigned long flags;
300 	struct mddev *mddev = r10_bio->mddev;
301 	struct r10conf *conf = mddev->private;
302 
303 	spin_lock_irqsave(&conf->device_lock, flags);
304 	list_add(&r10_bio->retry_list, &conf->retry_list);
305 	conf->nr_queued ++;
306 	spin_unlock_irqrestore(&conf->device_lock, flags);
307 
308 	/* wake up frozen array... */
309 	wake_up(&conf->wait_barrier);
310 
311 	md_wakeup_thread(mddev->thread);
312 }
313 
314 /*
315  * raid_end_bio_io() is called when we have finished servicing a mirrored
316  * operation and are ready to return a success/failure code to the buffer
317  * cache layer.
318  */
319 static void raid_end_bio_io(struct r10bio *r10_bio)
320 {
321 	struct bio *bio = r10_bio->master_bio;
322 	struct r10conf *conf = r10_bio->mddev->private;
323 
324 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
325 		bio->bi_status = BLK_STS_IOERR;
326 
327 	bio_endio(bio);
328 	/*
329 	 * Wake up any possible resync thread that waits for the device
330 	 * to go idle.
331 	 */
332 	allow_barrier(conf);
333 
334 	free_r10bio(r10_bio);
335 }
336 
337 /*
338  * Update disk head position estimator based on IRQ completion info.
339  */
340 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
341 {
342 	struct r10conf *conf = r10_bio->mddev->private;
343 
344 	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
345 		r10_bio->devs[slot].addr + (r10_bio->sectors);
346 }
347 
348 /*
349  * Find the disk number which triggered given bio
350  */
351 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
352 			 struct bio *bio, int *slotp, int *replp)
353 {
354 	int slot;
355 	int repl = 0;
356 
357 	for (slot = 0; slot < conf->geo.raid_disks; slot++) {
358 		if (r10_bio->devs[slot].bio == bio)
359 			break;
360 		if (r10_bio->devs[slot].repl_bio == bio) {
361 			repl = 1;
362 			break;
363 		}
364 	}
365 
366 	update_head_pos(slot, r10_bio);
367 
368 	if (slotp)
369 		*slotp = slot;
370 	if (replp)
371 		*replp = repl;
372 	return r10_bio->devs[slot].devnum;
373 }
374 
375 static void raid10_end_read_request(struct bio *bio)
376 {
377 	int uptodate = !bio->bi_status;
378 	struct r10bio *r10_bio = bio->bi_private;
379 	int slot;
380 	struct md_rdev *rdev;
381 	struct r10conf *conf = r10_bio->mddev->private;
382 
383 	slot = r10_bio->read_slot;
384 	rdev = r10_bio->devs[slot].rdev;
385 	/*
386 	 * this branch is our 'one mirror IO has finished' event handler:
387 	 */
388 	update_head_pos(slot, r10_bio);
389 
390 	if (uptodate) {
391 		/*
392 		 * Set R10BIO_Uptodate in our master bio, so that
393 		 * we will return a good error code to the higher
394 		 * levels even if IO on some other mirrored buffer fails.
395 		 *
396 		 * The 'master' represents the composite IO operation to
397 		 * user-side. So if something waits for IO, then it will
398 		 * wait for the 'master' bio.
399 		 */
400 		set_bit(R10BIO_Uptodate, &r10_bio->state);
401 	} else {
402 		/* If all other devices that store this block have
403 		 * failed, we want to return the error upwards rather
404 		 * than fail the last device.  Here we redefine
405 		 * "uptodate" to mean "Don't want to retry"
406 		 */
407 		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
408 			     rdev->raid_disk))
409 			uptodate = 1;
410 	}
411 	if (uptodate) {
412 		raid_end_bio_io(r10_bio);
413 		rdev_dec_pending(rdev, conf->mddev);
414 	} else {
415 		/*
416 		 * oops, read error - keep the refcount on the rdev
417 		 */
418 		pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
419 				   mdname(conf->mddev),
420 				   rdev->bdev,
421 				   (unsigned long long)r10_bio->sector);
422 		set_bit(R10BIO_ReadError, &r10_bio->state);
423 		reschedule_retry(r10_bio);
424 	}
425 }
426 
427 static void close_write(struct r10bio *r10_bio)
428 {
429 	struct mddev *mddev = r10_bio->mddev;
430 
431 	md_write_end(mddev);
432 }
433 
434 static void one_write_done(struct r10bio *r10_bio)
435 {
436 	if (atomic_dec_and_test(&r10_bio->remaining)) {
437 		if (test_bit(R10BIO_WriteError, &r10_bio->state))
438 			reschedule_retry(r10_bio);
439 		else {
440 			close_write(r10_bio);
441 			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
442 				reschedule_retry(r10_bio);
443 			else
444 				raid_end_bio_io(r10_bio);
445 		}
446 	}
447 }
448 
449 static void raid10_end_write_request(struct bio *bio)
450 {
451 	struct r10bio *r10_bio = bio->bi_private;
452 	int dev;
453 	int dec_rdev = 1;
454 	struct r10conf *conf = r10_bio->mddev->private;
455 	int slot, repl;
456 	struct md_rdev *rdev = NULL;
457 	struct bio *to_put = NULL;
458 	bool discard_error;
459 
460 	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
461 
462 	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
463 
464 	if (repl)
465 		rdev = conf->mirrors[dev].replacement;
466 	if (!rdev) {
467 		smp_rmb();
468 		repl = 0;
469 		rdev = conf->mirrors[dev].rdev;
470 	}
471 	/*
472 	 * this branch is our 'one mirror IO has finished' event handler:
473 	 */
474 	if (bio->bi_status && !discard_error) {
475 		if (repl)
476 			/* Never record new bad blocks to replacement,
477 			 * just fail it.
478 			 */
479 			md_error(rdev->mddev, rdev);
480 		else {
481 			set_bit(WriteErrorSeen,	&rdev->flags);
482 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
483 				set_bit(MD_RECOVERY_NEEDED,
484 					&rdev->mddev->recovery);
485 
486 			dec_rdev = 0;
487 			if (test_bit(FailFast, &rdev->flags) &&
488 			    (bio->bi_opf & MD_FAILFAST)) {
489 				md_error(rdev->mddev, rdev);
490 			}
491 
492 			/*
493 			 * When the device is faulty, it is not necessary to
494 			 * handle write error.
495 			 */
496 			if (!test_bit(Faulty, &rdev->flags))
497 				set_bit(R10BIO_WriteError, &r10_bio->state);
498 			else {
499 				/* Fail the request */
500 				r10_bio->devs[slot].bio = NULL;
501 				to_put = bio;
502 				dec_rdev = 1;
503 			}
504 		}
505 	} else {
506 		/*
507 		 * Set R10BIO_Uptodate in our master bio, so that
508 		 * we will return a good error code to the higher
509 		 * levels even if IO on some other mirrored buffer fails.
510 		 *
511 		 * The 'master' represents the composite IO operation to
512 		 * user-side. So if something waits for IO, then it will
513 		 * wait for the 'master' bio.
514 		 *
515 		 * Do not set R10BIO_Uptodate if the current device is
516 		 * rebuilding or Faulty. This is because we cannot use
517 		 * such a device for properly reading the data back (we could
518 		 * potentially use it, if the current write had fallen
519 		 * before rdev->recovery_offset, but for simplicity we don't
520 		 * check this here).
521 		 */
522 		if (test_bit(In_sync, &rdev->flags) &&
523 		    !test_bit(Faulty, &rdev->flags))
524 			set_bit(R10BIO_Uptodate, &r10_bio->state);
525 
526 		/* Maybe we can clear some bad blocks. */
527 		if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
528 				      r10_bio->sectors) &&
529 		    !discard_error) {
530 			bio_put(bio);
531 			if (repl)
532 				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
533 			else
534 				r10_bio->devs[slot].bio = IO_MADE_GOOD;
535 			dec_rdev = 0;
536 			set_bit(R10BIO_MadeGood, &r10_bio->state);
537 		}
538 	}
539 
540 	/*
541 	 *
542 	 * Let's see if all mirrored write operations have finished
543 	 * already.
544 	 */
545 	one_write_done(r10_bio);
546 	if (dec_rdev)
547 		rdev_dec_pending(rdev, conf->mddev);
548 	if (to_put)
549 		bio_put(to_put);
550 }
551 
552 /*
553  * RAID10 layout manager
554  * As well as the chunksize and raid_disks count, there are two
555  * parameters: near_copies and far_copies.
556  * near_copies * far_copies must be <= raid_disks.
557  * Normally one of these will be 1.
558  * If both are 1, we get raid0.
559  * If near_copies == raid_disks, we get raid1.
560  *
561  * Chunks are laid out in raid0 style with near_copies copies of the
562  * first chunk, followed by near_copies copies of the next chunk and
563  * so on.
564  * If far_copies > 1, then after 1/far_copies of the array has been assigned
565  * as described above, we start again with a device offset of near_copies.
566  * So we effectively have another copy of the whole array further down all
567  * the drives, but with blocks on different drives.
568  * With this layout, a block is never stored twice on the same device.
569  *
570  * raid10_find_phys finds the sector offset of a given virtual sector
571  * on each device that it is on.
572  *
573  * raid10_find_virt does the reverse mapping, from a device and a
574  * sector offset to a virtual address
575  */
576 
577 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
578 {
579 	int n,f;
580 	sector_t sector;
581 	sector_t chunk;
582 	sector_t stripe;
583 	int dev;
584 	int slot = 0;
585 	int last_far_set_start, last_far_set_size;
586 
587 	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
588 	last_far_set_start *= geo->far_set_size;
589 
590 	last_far_set_size = geo->far_set_size;
591 	last_far_set_size += (geo->raid_disks % geo->far_set_size);
592 
593 	/* now calculate first sector/dev */
594 	chunk = r10bio->sector >> geo->chunk_shift;
595 	sector = r10bio->sector & geo->chunk_mask;
596 
597 	chunk *= geo->near_copies;
598 	stripe = chunk;
599 	dev = sector_div(stripe, geo->raid_disks);
600 	if (geo->far_offset)
601 		stripe *= geo->far_copies;
602 
603 	sector += stripe << geo->chunk_shift;
604 
605 	/* and calculate all the others */
606 	for (n = 0; n < geo->near_copies; n++) {
607 		int d = dev;
608 		int set;
609 		sector_t s = sector;
610 		r10bio->devs[slot].devnum = d;
611 		r10bio->devs[slot].addr = s;
612 		slot++;
613 
614 		for (f = 1; f < geo->far_copies; f++) {
615 			set = d / geo->far_set_size;
616 			d += geo->near_copies;
617 
618 			if ((geo->raid_disks % geo->far_set_size) &&
619 			    (d > last_far_set_start)) {
620 				d -= last_far_set_start;
621 				d %= last_far_set_size;
622 				d += last_far_set_start;
623 			} else {
624 				d %= geo->far_set_size;
625 				d += geo->far_set_size * set;
626 			}
627 			s += geo->stride;
628 			r10bio->devs[slot].devnum = d;
629 			r10bio->devs[slot].addr = s;
630 			slot++;
631 		}
632 		dev++;
633 		if (dev >= geo->raid_disks) {
634 			dev = 0;
635 			sector += (geo->chunk_mask + 1);
636 		}
637 	}
638 }
639 
640 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
641 {
642 	struct geom *geo = &conf->geo;
643 
644 	if (conf->reshape_progress != MaxSector &&
645 	    ((r10bio->sector >= conf->reshape_progress) !=
646 	     conf->mddev->reshape_backwards)) {
647 		set_bit(R10BIO_Previous, &r10bio->state);
648 		geo = &conf->prev;
649 	} else
650 		clear_bit(R10BIO_Previous, &r10bio->state);
651 
652 	__raid10_find_phys(geo, r10bio);
653 }
654 
655 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
656 {
657 	sector_t offset, chunk, vchunk;
658 	/* Never use conf->prev as this is only called during resync
659 	 * or recovery, so reshape isn't happening
660 	 */
661 	struct geom *geo = &conf->geo;
662 	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
663 	int far_set_size = geo->far_set_size;
664 	int last_far_set_start;
665 
666 	if (geo->raid_disks % geo->far_set_size) {
667 		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
668 		last_far_set_start *= geo->far_set_size;
669 
670 		if (dev >= last_far_set_start) {
671 			far_set_size = geo->far_set_size;
672 			far_set_size += (geo->raid_disks % geo->far_set_size);
673 			far_set_start = last_far_set_start;
674 		}
675 	}
676 
677 	offset = sector & geo->chunk_mask;
678 	if (geo->far_offset) {
679 		int fc;
680 		chunk = sector >> geo->chunk_shift;
681 		fc = sector_div(chunk, geo->far_copies);
682 		dev -= fc * geo->near_copies;
683 		if (dev < far_set_start)
684 			dev += far_set_size;
685 	} else {
686 		while (sector >= geo->stride) {
687 			sector -= geo->stride;
688 			if (dev < (geo->near_copies + far_set_start))
689 				dev += far_set_size - geo->near_copies;
690 			else
691 				dev -= geo->near_copies;
692 		}
693 		chunk = sector >> geo->chunk_shift;
694 	}
695 	vchunk = chunk * geo->raid_disks + dev;
696 	sector_div(vchunk, geo->near_copies);
697 	return (vchunk << geo->chunk_shift) + offset;
698 }
699 
700 /*
701  * This routine returns the disk from which the requested read should
702  * be done. There is a per-array 'next expected sequential IO' sector
703  * number - if this matches on the next IO then we use the last disk.
704  * There is also a per-disk 'last known head position' sector that is
705  * maintained from IRQ contexts; both the normal and the resync IO
706  * completion handlers update this position correctly. If there is no
707  * perfect sequential match then we pick the disk whose head is closest.
708  *
709  * If there are 2 mirrors in the same 2 devices, performance degrades
710  * because head position is tracked per mirror, not per device.
711  *
712  * The rdev for the device selected will have nr_pending incremented.
713  */
714 
715 /*
716  * FIXME: possibly should rethink readbalancing and do it differently
717  * depending on near_copies / far_copies geometry.
718  */
719 static struct md_rdev *read_balance(struct r10conf *conf,
720 				    struct r10bio *r10_bio,
721 				    int *max_sectors)
722 {
723 	const sector_t this_sector = r10_bio->sector;
724 	int disk, slot;
725 	int sectors = r10_bio->sectors;
726 	int best_good_sectors;
727 	sector_t new_distance, best_dist;
728 	struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
729 	int do_balance;
730 	int best_dist_slot, best_pending_slot;
731 	bool has_nonrot_disk = false;
732 	unsigned int min_pending;
733 	struct geom *geo = &conf->geo;
734 
735 	raid10_find_phys(conf, r10_bio);
736 	best_dist_slot = -1;
737 	min_pending = UINT_MAX;
738 	best_dist_rdev = NULL;
739 	best_pending_rdev = NULL;
740 	best_dist = MaxSector;
741 	best_good_sectors = 0;
742 	do_balance = 1;
743 	clear_bit(R10BIO_FailFast, &r10_bio->state);
744 
745 	if (raid1_should_read_first(conf->mddev, this_sector, sectors))
746 		do_balance = 0;
747 
748 	for (slot = 0; slot < conf->copies ; slot++) {
749 		sector_t first_bad;
750 		int bad_sectors;
751 		sector_t dev_sector;
752 		unsigned int pending;
753 		bool nonrot;
754 
755 		if (r10_bio->devs[slot].bio == IO_BLOCKED)
756 			continue;
757 		disk = r10_bio->devs[slot].devnum;
758 		rdev = conf->mirrors[disk].replacement;
759 		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
760 		    r10_bio->devs[slot].addr + sectors >
761 		    rdev->recovery_offset)
762 			rdev = conf->mirrors[disk].rdev;
763 		if (rdev == NULL ||
764 		    test_bit(Faulty, &rdev->flags))
765 			continue;
766 		if (!test_bit(In_sync, &rdev->flags) &&
767 		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
768 			continue;
769 
770 		dev_sector = r10_bio->devs[slot].addr;
771 		if (is_badblock(rdev, dev_sector, sectors,
772 				&first_bad, &bad_sectors)) {
773 			if (best_dist < MaxSector)
774 				/* Already have a better slot */
775 				continue;
776 			if (first_bad <= dev_sector) {
777 				/* Cannot read here.  If this is the
778 				 * 'primary' device, then we must not read
779 				 * beyond 'bad_sectors' from another device.
780 				 */
781 				bad_sectors -= (dev_sector - first_bad);
782 				if (!do_balance && sectors > bad_sectors)
783 					sectors = bad_sectors;
784 				if (best_good_sectors > sectors)
785 					best_good_sectors = sectors;
786 			} else {
787 				sector_t good_sectors =
788 					first_bad - dev_sector;
789 				if (good_sectors > best_good_sectors) {
790 					best_good_sectors = good_sectors;
791 					best_dist_slot = slot;
792 					best_dist_rdev = rdev;
793 				}
794 				if (!do_balance)
795 					/* Must read from here */
796 					break;
797 			}
798 			continue;
799 		} else
800 			best_good_sectors = sectors;
801 
802 		if (!do_balance)
803 			break;
804 
805 		nonrot = bdev_nonrot(rdev->bdev);
806 		has_nonrot_disk |= nonrot;
807 		pending = atomic_read(&rdev->nr_pending);
808 		if (min_pending > pending && nonrot) {
809 			min_pending = pending;
810 			best_pending_slot = slot;
811 			best_pending_rdev = rdev;
812 		}
813 
814 		if (best_dist_slot >= 0)
815 			/* At least 2 disks to choose from so failfast is OK */
816 			set_bit(R10BIO_FailFast, &r10_bio->state);
817 		/* This optimisation is debatable, and completely destroys
818 		 * sequential read speed for 'far copies' arrays.  So only
819 		 * keep it for 'near' arrays, and review those later.
820 		 */
821 		if (geo->near_copies > 1 && !pending)
822 			new_distance = 0;
823 
824 		/* for far > 1 always use the lowest address */
825 		else if (geo->far_copies > 1)
826 			new_distance = r10_bio->devs[slot].addr;
827 		else
828 			new_distance = abs(r10_bio->devs[slot].addr -
829 					   conf->mirrors[disk].head_position);
830 
831 		if (new_distance < best_dist) {
832 			best_dist = new_distance;
833 			best_dist_slot = slot;
834 			best_dist_rdev = rdev;
835 		}
836 	}
837 	if (slot >= conf->copies) {
838 		if (has_nonrot_disk) {
839 			slot = best_pending_slot;
840 			rdev = best_pending_rdev;
841 		} else {
842 			slot = best_dist_slot;
843 			rdev = best_dist_rdev;
844 		}
845 	}
846 
847 	if (slot >= 0) {
848 		atomic_inc(&rdev->nr_pending);
849 		r10_bio->read_slot = slot;
850 	} else
851 		rdev = NULL;
852 	*max_sectors = best_good_sectors;
853 
854 	return rdev;
855 }
856 
857 static void flush_pending_writes(struct r10conf *conf)
858 {
859 	/* Any writes that have been queued but are awaiting
860 	 * bitmap updates get flushed here.
861 	 */
862 	spin_lock_irq(&conf->device_lock);
863 
864 	if (conf->pending_bio_list.head) {
865 		struct blk_plug plug;
866 		struct bio *bio;
867 
868 		bio = bio_list_get(&conf->pending_bio_list);
869 		spin_unlock_irq(&conf->device_lock);
870 
871 		/*
872 		 * As this is called in a wait_event() loop (see freeze_array),
873 		 * current->state might be TASK_UNINTERRUPTIBLE which will
874 		 * cause a warning when we prepare to wait again.  As it is
875 		 * rare that this path is taken, it is perfectly safe to force
876 		 * us to go around the wait_event() loop again, so the warning
877 		 * is a false-positive. Silence the warning by resetting
878 		 * thread state
879 		 */
880 		__set_current_state(TASK_RUNNING);
881 
882 		blk_start_plug(&plug);
883 		raid1_prepare_flush_writes(conf->mddev);
884 		wake_up(&conf->wait_barrier);
885 
886 		while (bio) { /* submit pending writes */
887 			struct bio *next = bio->bi_next;
888 
889 			raid1_submit_write(bio);
890 			bio = next;
891 			cond_resched();
892 		}
893 		blk_finish_plug(&plug);
894 	} else
895 		spin_unlock_irq(&conf->device_lock);
896 }
897 
898 /* Barriers....
899  * Sometimes we need to suspend IO while we do something else,
900  * either some resync/recovery, or reconfigure the array.
901  * To do this we raise a 'barrier'.
902  * The 'barrier' is a counter that can be raised multiple times
903  * to count how many activities are happening which preclude
904  * normal IO.
905  * We can only raise the barrier if there is no pending IO.
906  * i.e. if nr_pending == 0.
907  * We choose only to raise the barrier if no-one is waiting for the
908  * barrier to go down.  This means that as soon as an IO request
909  * is ready, no other operations which require a barrier will start
910  * until the IO request has had a chance.
911  *
912  * So: regular IO calls 'wait_barrier'.  When that returns there
913  *    is no background IO happening.  It must arrange to call
914  *    allow_barrier when it has finished its IO.
915  * background IO calls must call raise_barrier.  Once that returns
916  *    there is no normal IO happening.  It must arrange to call
917  *    lower_barrier when the particular background IO completes.
918  */
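/*
 * Usage sketch (illustrative only, not a real call site) of the pairing
 * rules described above:
 *
 *	resync/recovery thread:			regular IO path:
 *	    raise_barrier(conf, 0);		    wait_barrier(conf, false);
 *	    ... issue sync/recovery IO ...	    ... issue the request ...
 *	    lower_barrier(conf);		    allow_barrier(conf);
 */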
919 
920 static void raise_barrier(struct r10conf *conf, int force)
921 {
922 	write_seqlock_irq(&conf->resync_lock);
923 
924 	if (WARN_ON_ONCE(force && !conf->barrier))
925 		force = false;
926 
927 	/* Wait until no block IO is waiting (unless 'force') */
928 	wait_event_barrier(conf, force || !conf->nr_waiting);
929 
930 	/* block any new IO from starting */
931 	WRITE_ONCE(conf->barrier, conf->barrier + 1);
932 
933 	/* Now wait for all pending IO to complete */
934 	wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
935 				 conf->barrier < RESYNC_DEPTH);
936 
937 	write_sequnlock_irq(&conf->resync_lock);
938 }
939 
940 static void lower_barrier(struct r10conf *conf)
941 {
942 	unsigned long flags;
943 
944 	write_seqlock_irqsave(&conf->resync_lock, flags);
945 	WRITE_ONCE(conf->barrier, conf->barrier - 1);
946 	write_sequnlock_irqrestore(&conf->resync_lock, flags);
947 	wake_up(&conf->wait_barrier);
948 }
949 
950 static bool stop_waiting_barrier(struct r10conf *conf)
951 {
952 	struct bio_list *bio_list = current->bio_list;
953 	struct md_thread *thread;
954 
955 	/* barrier is dropped */
956 	if (!conf->barrier)
957 		return true;
958 
959 	/*
960 	 * If there are already pending requests (preventing the barrier from
961 	 * rising completely), and the pre-process bio queue isn't empty, then
962 	 * don't wait, as we need to empty that queue to get the nr_pending
963 	 * count down.
964 	 */
965 	if (atomic_read(&conf->nr_pending) && bio_list &&
966 	    (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
967 		return true;
968 
969 	/* daemon thread must exist while handling io */
970 	thread = rcu_dereference_protected(conf->mddev->thread, true);
971 	/*
972 	 * move on if the io is issued from raid10d(); nr_pending is not released
973 	 * by the original io (see handle_read_error()), so all raise_barrier calls
974 	 * are blocked until this io is done.
975 	 */
976 	if (thread->tsk == current) {
977 		WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
978 		return true;
979 	}
980 
981 	return false;
982 }
983 
984 static bool wait_barrier_nolock(struct r10conf *conf)
985 {
986 	unsigned int seq = read_seqbegin(&conf->resync_lock);
987 
988 	if (READ_ONCE(conf->barrier))
989 		return false;
990 
991 	atomic_inc(&conf->nr_pending);
992 	if (!read_seqretry(&conf->resync_lock, seq))
993 		return true;
994 
995 	if (atomic_dec_and_test(&conf->nr_pending))
996 		wake_up_barrier(conf);
997 
998 	return false;
999 }
1000 
1001 static bool wait_barrier(struct r10conf *conf, bool nowait)
1002 {
1003 	bool ret = true;
1004 
1005 	if (wait_barrier_nolock(conf))
1006 		return true;
1007 
1008 	write_seqlock_irq(&conf->resync_lock);
1009 	if (conf->barrier) {
1010 		/* Return false when nowait flag is set */
1011 		if (nowait) {
1012 			ret = false;
1013 		} else {
1014 			conf->nr_waiting++;
1015 			mddev_add_trace_msg(conf->mddev, "raid10 wait barrier");
1016 			wait_event_barrier(conf, stop_waiting_barrier(conf));
1017 			conf->nr_waiting--;
1018 		}
1019 		if (!conf->nr_waiting)
1020 			wake_up(&conf->wait_barrier);
1021 	}
1022 	/* Only increment nr_pending when we wait */
1023 	if (ret)
1024 		atomic_inc(&conf->nr_pending);
1025 	write_sequnlock_irq(&conf->resync_lock);
1026 	return ret;
1027 }
1028 
1029 static void allow_barrier(struct r10conf *conf)
1030 {
1031 	if ((atomic_dec_and_test(&conf->nr_pending)) ||
1032 			(conf->array_freeze_pending))
1033 		wake_up_barrier(conf);
1034 }
1035 
1036 static void freeze_array(struct r10conf *conf, int extra)
1037 {
1038 	/* stop syncio and normal IO and wait for everything to
1039 	 * go quiet.
1040 	 * We increment barrier and nr_waiting, and then
1041 	 * wait until nr_pending match nr_queued+extra
1042 	 * wait until nr_pending matches nr_queued+extra.
1043 	 * that has failed. Thus any sync request that might be pending
1044 	 * will be blocked by nr_pending, and we need to wait for
1045 	 * pending IO requests to complete or be queued for re-try.
1046 	 * Thus the number queued (nr_queued) plus this request (extra)
1047 	 * must match the number of pending IOs (nr_pending) before
1048 	 * we continue.
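	 * For example, with extra == 1 (this failed request) and two requests
	 * already sitting on the retry list (nr_queued == 2), we wait here
	 * until nr_pending has dropped to 3.  (Illustrative numbers only.)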
1049 	 */
1050 	write_seqlock_irq(&conf->resync_lock);
1051 	conf->array_freeze_pending++;
1052 	WRITE_ONCE(conf->barrier, conf->barrier + 1);
1053 	conf->nr_waiting++;
1054 	wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) ==
1055 			conf->nr_queued + extra, flush_pending_writes(conf));
1056 	conf->array_freeze_pending--;
1057 	write_sequnlock_irq(&conf->resync_lock);
1058 }
1059 
1060 static void unfreeze_array(struct r10conf *conf)
1061 {
1062 	/* reverse the effect of the freeze */
1063 	write_seqlock_irq(&conf->resync_lock);
1064 	WRITE_ONCE(conf->barrier, conf->barrier - 1);
1065 	conf->nr_waiting--;
1066 	wake_up(&conf->wait_barrier);
1067 	write_sequnlock_irq(&conf->resync_lock);
1068 }
1069 
1070 static sector_t choose_data_offset(struct r10bio *r10_bio,
1071 				   struct md_rdev *rdev)
1072 {
1073 	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1074 	    test_bit(R10BIO_Previous, &r10_bio->state))
1075 		return rdev->data_offset;
1076 	else
1077 		return rdev->new_data_offset;
1078 }
1079 
1080 static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1081 {
1082 	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
1083 	struct mddev *mddev = plug->cb.data;
1084 	struct r10conf *conf = mddev->private;
1085 	struct bio *bio;
1086 
1087 	if (from_schedule) {
1088 		spin_lock_irq(&conf->device_lock);
1089 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1090 		spin_unlock_irq(&conf->device_lock);
1091 		wake_up_barrier(conf);
1092 		md_wakeup_thread(mddev->thread);
1093 		kfree(plug);
1094 		return;
1095 	}
1096 
1097 	/* we aren't scheduling, so we can do the write-out directly. */
1098 	bio = bio_list_get(&plug->pending);
1099 	raid1_prepare_flush_writes(mddev);
1100 	wake_up_barrier(conf);
1101 
1102 	while (bio) { /* submit pending writes */
1103 		struct bio *next = bio->bi_next;
1104 
1105 		raid1_submit_write(bio);
1106 		bio = next;
1107 		cond_resched();
1108 	}
1109 	kfree(plug);
1110 }
1111 
1112 /*
1113  * 1. Register the new request and wait if the reconstruction thread has put
1114  * up a bar for new requests. Continue immediately if no resync is active
1115  * currently.
1116  * 2. If the IO spans the reshape position, wait for the reshape to pass.
1117  */
1118 static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1119 				 struct bio *bio, sector_t sectors)
1120 {
1121 	/* Bail out if REQ_NOWAIT is set for the bio */
1122 	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
1123 		bio_wouldblock_error(bio);
1124 		return false;
1125 	}
1126 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1127 	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1128 	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1129 		allow_barrier(conf);
1130 		if (bio->bi_opf & REQ_NOWAIT) {
1131 			bio_wouldblock_error(bio);
1132 			return false;
1133 		}
1134 		mddev_add_trace_msg(conf->mddev, "raid10 wait reshape");
1135 		wait_event(conf->wait_barrier,
1136 			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1137 			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1138 			   sectors);
1139 		wait_barrier(conf, false);
1140 	}
1141 	return true;
1142 }
1143 
1144 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1145 				struct r10bio *r10_bio, bool io_accounting)
1146 {
1147 	struct r10conf *conf = mddev->private;
1148 	struct bio *read_bio;
1149 	const enum req_op op = bio_op(bio);
1150 	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1151 	int max_sectors;
1152 	struct md_rdev *rdev;
1153 	char b[BDEVNAME_SIZE];
1154 	int slot = r10_bio->read_slot;
1155 	struct md_rdev *err_rdev = NULL;
1156 	gfp_t gfp = GFP_NOIO;
1157 	int error;
1158 
1159 	if (slot >= 0 && r10_bio->devs[slot].rdev) {
1160 		/*
1161 		 * This is an error retry, but we cannot
1162 		 * safely dereference the rdev in the r10_bio,
1163 		 * we must use the one in conf.
1164 		 * If it has already been disconnected (unlikely)
1165 		 * we lose the device name in error messages.
1166 		 */
1167 		int disk;
1168 		/*
1169 		 * As we are blocking raid10, it is a little safer to
1170 		 * use __GFP_HIGH.
1171 		 */
1172 		gfp = GFP_NOIO | __GFP_HIGH;
1173 
1174 		disk = r10_bio->devs[slot].devnum;
1175 		err_rdev = conf->mirrors[disk].rdev;
1176 		if (err_rdev)
1177 			snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
1178 		else {
1179 			strcpy(b, "???");
1180 			/* This never gets dereferenced */
1181 			err_rdev = r10_bio->devs[slot].rdev;
1182 		}
1183 	}
1184 
1185 	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
1186 		return;
1187 	rdev = read_balance(conf, r10_bio, &max_sectors);
1188 	if (!rdev) {
1189 		if (err_rdev) {
1190 			pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
1191 					    mdname(mddev), b,
1192 					    (unsigned long long)r10_bio->sector);
1193 		}
1194 		raid_end_bio_io(r10_bio);
1195 		return;
1196 	}
1197 	if (err_rdev)
1198 		pr_err_ratelimited("md/raid10:%s: %pg: redirecting sector %llu to another mirror\n",
1199 				   mdname(mddev),
1200 				   rdev->bdev,
1201 				   (unsigned long long)r10_bio->sector);
1202 	if (max_sectors < bio_sectors(bio)) {
1203 		struct bio *split = bio_split(bio, max_sectors,
1204 					      gfp, &conf->bio_split);
1205 		if (IS_ERR(split)) {
1206 			error = PTR_ERR(split);
1207 			goto err_handle;
1208 		}
1209 		bio_chain(split, bio);
1210 		allow_barrier(conf);
1211 		submit_bio_noacct(bio);
1212 		wait_barrier(conf, false);
1213 		bio = split;
1214 		r10_bio->master_bio = bio;
1215 		r10_bio->sectors = max_sectors;
1216 	}
1217 	slot = r10_bio->read_slot;
1218 
1219 	if (io_accounting) {
1220 		md_account_bio(mddev, &bio);
1221 		r10_bio->master_bio = bio;
1222 	}
1223 	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
1224 
1225 	r10_bio->devs[slot].bio = read_bio;
1226 	r10_bio->devs[slot].rdev = rdev;
1227 
1228 	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1229 		choose_data_offset(r10_bio, rdev);
1230 	read_bio->bi_end_io = raid10_end_read_request;
1231 	read_bio->bi_opf = op | do_sync;
1232 	if (test_bit(FailFast, &rdev->flags) &&
1233 	    test_bit(R10BIO_FailFast, &r10_bio->state))
1234 	        read_bio->bi_opf |= MD_FAILFAST;
1235 	read_bio->bi_private = r10_bio;
1236 	mddev_trace_remap(mddev, read_bio, r10_bio->sector);
1237 	submit_bio_noacct(read_bio);
1238 	return;
1239 err_handle:
1240 	atomic_dec(&rdev->nr_pending);
1241 	bio->bi_status = errno_to_blk_status(error);
1242 	set_bit(R10BIO_Uptodate, &r10_bio->state);
1243 	raid_end_bio_io(r10_bio);
1244 }
1245 
1246 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1247 				  struct bio *bio, bool replacement,
1248 				  int n_copy)
1249 {
1250 	const enum req_op op = bio_op(bio);
1251 	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1252 	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
1253 	const blk_opf_t do_atomic = bio->bi_opf & REQ_ATOMIC;
1254 	unsigned long flags;
1255 	struct r10conf *conf = mddev->private;
1256 	struct md_rdev *rdev;
1257 	int devnum = r10_bio->devs[n_copy].devnum;
1258 	struct bio *mbio;
1259 
1260 	rdev = replacement ? conf->mirrors[devnum].replacement :
1261 			     conf->mirrors[devnum].rdev;
1262 
1263 	mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
1264 	if (replacement)
1265 		r10_bio->devs[n_copy].repl_bio = mbio;
1266 	else
1267 		r10_bio->devs[n_copy].bio = mbio;
1268 
1269 	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
1270 				   choose_data_offset(r10_bio, rdev));
1271 	mbio->bi_end_io	= raid10_end_write_request;
1272 	mbio->bi_opf = op | do_sync | do_fua | do_atomic;
1273 	if (!replacement && test_bit(FailFast,
1274 				     &conf->mirrors[devnum].rdev->flags)
1275 			 && enough(conf, devnum))
1276 		mbio->bi_opf |= MD_FAILFAST;
1277 	mbio->bi_private = r10_bio;
1278 	mddev_trace_remap(mddev, mbio, r10_bio->sector);
1279 	/* flush_pending_writes() needs access to the rdev so...*/
1280 	mbio->bi_bdev = (void *)rdev;
1281 
1282 	atomic_inc(&r10_bio->remaining);
1283 
1284 	if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) {
1285 		spin_lock_irqsave(&conf->device_lock, flags);
1286 		bio_list_add(&conf->pending_bio_list, mbio);
1287 		spin_unlock_irqrestore(&conf->device_lock, flags);
1288 		md_wakeup_thread(mddev->thread);
1289 	}
1290 }
1291 
1292 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
1293 {
1294 	struct r10conf *conf = mddev->private;
1295 	struct md_rdev *blocked_rdev;
1296 	int i;
1297 
1298 retry_wait:
1299 	blocked_rdev = NULL;
1300 	for (i = 0; i < conf->copies; i++) {
1301 		struct md_rdev *rdev, *rrdev;
1302 
1303 		rdev = conf->mirrors[i].rdev;
1304 		if (rdev) {
1305 			sector_t dev_sector = r10_bio->devs[i].addr;
1306 
1307 			/*
1308 			 * A discard request doesn't care about the write result,
1309 			 * so it doesn't need to wait for a blocked disk here.
1310 			 */
1311 			if (test_bit(WriteErrorSeen, &rdev->flags) &&
1312 			    r10_bio->sectors &&
1313 			    rdev_has_badblock(rdev, dev_sector,
1314 					      r10_bio->sectors) < 0)
1315 				/*
1316 				 * Mustn't write here until the bad
1317 				 * block is acknowledged
1318 				 */
1319 				set_bit(BlockedBadBlocks, &rdev->flags);
1320 
1321 			if (rdev_blocked(rdev)) {
1322 				blocked_rdev = rdev;
1323 				atomic_inc(&rdev->nr_pending);
1324 				break;
1325 			}
1326 		}
1327 
1328 		rrdev = conf->mirrors[i].replacement;
1329 		if (rrdev && rdev_blocked(rrdev)) {
1330 			atomic_inc(&rrdev->nr_pending);
1331 			blocked_rdev = rrdev;
1332 			break;
1333 		}
1334 	}
1335 
1336 	if (unlikely(blocked_rdev)) {
1337 		/* Have to wait for this device to get unblocked, then retry */
1338 		allow_barrier(conf);
1339 		mddev_add_trace_msg(conf->mddev,
1340 			"raid10 %s wait rdev %d blocked",
1341 			__func__, blocked_rdev->raid_disk);
1342 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1343 		wait_barrier(conf, false);
1344 		goto retry_wait;
1345 	}
1346 }
1347 
1348 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1349 				 struct r10bio *r10_bio)
1350 {
1351 	struct r10conf *conf = mddev->private;
1352 	int i, k;
1353 	sector_t sectors;
1354 	int max_sectors;
1355 	int error;
1356 
1357 	if ((mddev_is_clustered(mddev) &&
1358 	     md_cluster_ops->area_resyncing(mddev, WRITE,
1359 					    bio->bi_iter.bi_sector,
1360 					    bio_end_sector(bio)))) {
1361 		DEFINE_WAIT(w);
1362 		/* Bail out if REQ_NOWAIT is set for the bio */
1363 		if (bio->bi_opf & REQ_NOWAIT) {
1364 			bio_wouldblock_error(bio);
1365 			return;
1366 		}
1367 		for (;;) {
1368 			prepare_to_wait(&conf->wait_barrier,
1369 					&w, TASK_IDLE);
1370 			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1371 				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
1372 				break;
1373 			schedule();
1374 		}
1375 		finish_wait(&conf->wait_barrier, &w);
1376 	}
1377 
1378 	sectors = r10_bio->sectors;
1379 	if (!regular_request_wait(mddev, conf, bio, sectors))
1380 		return;
1381 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1382 	    (mddev->reshape_backwards
1383 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1384 		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1385 	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1386 		bio->bi_iter.bi_sector < conf->reshape_progress))) {
1387 		/* Need to update reshape_position in metadata */
1388 		mddev->reshape_position = conf->reshape_progress;
1389 		set_mask_bits(&mddev->sb_flags, 0,
1390 			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1391 		md_wakeup_thread(mddev->thread);
1392 		if (bio->bi_opf & REQ_NOWAIT) {
1393 			allow_barrier(conf);
1394 			bio_wouldblock_error(bio);
1395 			return;
1396 		}
1397 		mddev_add_trace_msg(conf->mddev,
1398 			"raid10 wait reshape metadata");
1399 		wait_event(mddev->sb_wait,
1400 			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1401 
1402 		conf->reshape_safe = mddev->reshape_position;
1403 	}
1404 
1405 	/* first select target devices under rcu_lock and
1406 	 * inc refcount on their rdev.  Record them by setting
1407 	 * bios[x] to bio
1408 	 * If there are known/acknowledged bad blocks on any device
1409 	 * on which we have seen a write error, we want to avoid
1410 	 * writing to those blocks.  This potentially requires several
1411 	 * writes to write around the bad blocks.  Each set of writes
1412 	 * gets its own r10_bio with a set of bios attached.
1413 	 */
1414 
1415 	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1416 	raid10_find_phys(conf, r10_bio);
1417 
1418 	wait_blocked_dev(mddev, r10_bio);
1419 
1420 	max_sectors = r10_bio->sectors;
1421 
1422 	for (i = 0;  i < conf->copies; i++) {
1423 		int d = r10_bio->devs[i].devnum;
1424 		struct md_rdev *rdev, *rrdev;
1425 
1426 		rdev = conf->mirrors[d].rdev;
1427 		rrdev = conf->mirrors[d].replacement;
1428 		if (rdev && (test_bit(Faulty, &rdev->flags)))
1429 			rdev = NULL;
1430 		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1431 			rrdev = NULL;
1432 
1433 		r10_bio->devs[i].bio = NULL;
1434 		r10_bio->devs[i].repl_bio = NULL;
1435 
1436 		if (!rdev && !rrdev)
1437 			continue;
1438 		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1439 			sector_t first_bad;
1440 			sector_t dev_sector = r10_bio->devs[i].addr;
1441 			int bad_sectors;
1442 			int is_bad;
1443 
1444 			is_bad = is_badblock(rdev, dev_sector, max_sectors,
1445 					     &first_bad, &bad_sectors);
1446 			if (is_bad && first_bad <= dev_sector) {
1447 				/* Cannot write here at all */
1448 				bad_sectors -= (dev_sector - first_bad);
1449 				if (bad_sectors < max_sectors)
1450 					/* Mustn't write more than bad_sectors
1451 					 * to other devices yet
1452 					 */
1453 					max_sectors = bad_sectors;
1454 				continue;
1455 			}
1456 			if (is_bad) {
1457 				int good_sectors;
1458 
1459 				/*
1460 				 * We cannot atomically write this, so just
1461 				 * error in that case. It could be possible to
1462 				 * atomically write other mirrors, but the
1463 				 * complexity of supporting that is not worth
1464 				 * the benefit.
1465 				 */
1466 				if (bio->bi_opf & REQ_ATOMIC) {
1467 					error = -EIO;
1468 					goto err_handle;
1469 				}
1470 
1471 				good_sectors = first_bad - dev_sector;
1472 				if (good_sectors < max_sectors)
1473 					max_sectors = good_sectors;
1474 			}
1475 		}
1476 		if (rdev) {
1477 			r10_bio->devs[i].bio = bio;
1478 			atomic_inc(&rdev->nr_pending);
1479 		}
1480 		if (rrdev) {
1481 			r10_bio->devs[i].repl_bio = bio;
1482 			atomic_inc(&rrdev->nr_pending);
1483 		}
1484 	}
1485 
1486 	if (max_sectors < r10_bio->sectors)
1487 		r10_bio->sectors = max_sectors;
1488 
1489 	if (r10_bio->sectors < bio_sectors(bio)) {
1490 		struct bio *split = bio_split(bio, r10_bio->sectors,
1491 					      GFP_NOIO, &conf->bio_split);
1492 		if (IS_ERR(split)) {
1493 			error = PTR_ERR(split);
1494 			goto err_handle;
1495 		}
1496 		bio_chain(split, bio);
1497 		allow_barrier(conf);
1498 		submit_bio_noacct(bio);
1499 		wait_barrier(conf, false);
1500 		bio = split;
1501 		r10_bio->master_bio = bio;
1502 	}
1503 
1504 	md_account_bio(mddev, &bio);
1505 	r10_bio->master_bio = bio;
1506 	atomic_set(&r10_bio->remaining, 1);
1507 
1508 	for (i = 0; i < conf->copies; i++) {
1509 		if (r10_bio->devs[i].bio)
1510 			raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1511 		if (r10_bio->devs[i].repl_bio)
1512 			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1513 	}
1514 	one_write_done(r10_bio);
1515 	return;
1516 err_handle:
1517 	for (k = 0;  k < i; k++) {
1518 		int d = r10_bio->devs[k].devnum;
1519 		struct md_rdev *rdev = conf->mirrors[d].rdev;
1520 		struct md_rdev *rrdev = conf->mirrors[d].replacement;
1521 
1522 		if (r10_bio->devs[k].bio) {
1523 			rdev_dec_pending(rdev, mddev);
1524 			r10_bio->devs[k].bio = NULL;
1525 		}
1526 		if (r10_bio->devs[k].repl_bio) {
1527 			rdev_dec_pending(rrdev, mddev);
1528 			r10_bio->devs[k].repl_bio = NULL;
1529 		}
1530 	}
1531 
1532 	bio->bi_status = errno_to_blk_status(error);
1533 	set_bit(R10BIO_Uptodate, &r10_bio->state);
1534 	raid_end_bio_io(r10_bio);
1535 }
1536 
1537 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1538 {
1539 	struct r10conf *conf = mddev->private;
1540 	struct r10bio *r10_bio;
1541 
1542 	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1543 
1544 	r10_bio->master_bio = bio;
1545 	r10_bio->sectors = sectors;
1546 
1547 	r10_bio->mddev = mddev;
1548 	r10_bio->sector = bio->bi_iter.bi_sector;
1549 	r10_bio->state = 0;
1550 	r10_bio->read_slot = -1;
1551 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
1552 			conf->geo.raid_disks);
1553 
1554 	if (bio_data_dir(bio) == READ)
1555 		raid10_read_request(mddev, bio, r10_bio, true);
1556 	else
1557 		raid10_write_request(mddev, bio, r10_bio);
1558 }
1559 
1560 static void raid_end_discard_bio(struct r10bio *r10bio)
1561 {
1562 	struct r10conf *conf = r10bio->mddev->private;
1563 	struct r10bio *first_r10bio;
1564 
1565 	while (atomic_dec_and_test(&r10bio->remaining)) {
1566 
1567 		allow_barrier(conf);
1568 
1569 		if (!test_bit(R10BIO_Discard, &r10bio->state)) {
1570 			first_r10bio = (struct r10bio *)r10bio->master_bio;
1571 			free_r10bio(r10bio);
1572 			r10bio = first_r10bio;
1573 		} else {
1574 			md_write_end(r10bio->mddev);
1575 			bio_endio(r10bio->master_bio);
1576 			free_r10bio(r10bio);
1577 			break;
1578 		}
1579 	}
1580 }
1581 
1582 static void raid10_end_discard_request(struct bio *bio)
1583 {
1584 	struct r10bio *r10_bio = bio->bi_private;
1585 	struct r10conf *conf = r10_bio->mddev->private;
1586 	struct md_rdev *rdev = NULL;
1587 	int dev;
1588 	int slot, repl;
1589 
1590 	/*
1591 	 * We don't care about the return value of the discard bio
1592 	 */
1593 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
1594 		set_bit(R10BIO_Uptodate, &r10_bio->state);
1595 
1596 	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1597 	rdev = repl ? conf->mirrors[dev].replacement :
1598 		      conf->mirrors[dev].rdev;
1599 
1600 	raid_end_discard_bio(r10_bio);
1601 	rdev_dec_pending(rdev, conf->mddev);
1602 }
1603 
1604 /*
1605  * There are some limitations when handling a discard bio:
1606  * 1st, the discard size must be bigger than stripe_size*2.
1607  * 2nd, if the discard bio spans the reshape progress, we use the old way to
1608  * handle the discard bio
1609  */
1610 static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
1611 {
1612 	struct r10conf *conf = mddev->private;
1613 	struct geom *geo = &conf->geo;
1614 	int far_copies = geo->far_copies;
1615 	bool first_copy = true;
1616 	struct r10bio *r10_bio, *first_r10bio;
1617 	struct bio *split;
1618 	int disk;
1619 	sector_t chunk;
1620 	unsigned int stripe_size;
1621 	unsigned int stripe_data_disks;
1622 	sector_t split_size;
1623 	sector_t bio_start, bio_end;
1624 	sector_t first_stripe_index, last_stripe_index;
1625 	sector_t start_disk_offset;
1626 	unsigned int start_disk_index;
1627 	sector_t end_disk_offset;
1628 	unsigned int end_disk_index;
1629 	unsigned int remainder;
1630 
1631 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1632 		return -EAGAIN;
1633 
1634 	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
1635 		bio_wouldblock_error(bio);
1636 		return 0;
1637 	}
1638 	wait_barrier(conf, false);
1639 
1640 	/*
1641 	 * Check reshape again to avoid reshape happens after checking
1642 	 * MD_RECOVERY_RESHAPE and before wait_barrier
1643 	 */
1644 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1645 		goto out;
1646 
1647 	if (geo->near_copies)
1648 		stripe_data_disks = geo->raid_disks / geo->near_copies +
1649 					geo->raid_disks % geo->near_copies;
1650 	else
1651 		stripe_data_disks = geo->raid_disks;
1652 
1653 	stripe_size = stripe_data_disks << geo->chunk_shift;
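	/*
	 * Worked example (illustrative): with raid_disks = 4, near_copies = 2
	 * and 512 KiB chunks (chunk_shift == 10), stripe_data_disks is 2 and
	 * stripe_size is 2 << 10 = 2048 sectors, i.e. 1 MiB.
	 */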
1654 
1655 	bio_start = bio->bi_iter.bi_sector;
1656 	bio_end = bio_end_sector(bio);
1657 
1658 	/*
1659 	 * A discard bio may be smaller than the stripe size, or it may cross a
1660 	 * stripe boundary while the discard region is larger than one stripe. For
1661 	 * the far offset layout, if the discard region is not aligned with the
1662 	 * stripe size, there are holes when we submit the discard bio to a member
1663 	 * disk.  For simplicity, we only handle discard bios whose discard region
1664 	 * is bigger than stripe_size * 2
1665 	 */
1666 	if (bio_sectors(bio) < stripe_size*2)
1667 		goto out;
1668 
1669 	/*
1670 	 * Keep the bio aligned with the stripe size.
1671 	 */
1672 	div_u64_rem(bio_start, stripe_size, &remainder);
1673 	if (remainder) {
1674 		split_size = stripe_size - remainder;
1675 		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1676 		if (IS_ERR(split)) {
1677 			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
1678 			bio_endio(bio);
1679 			return 0;
1680 		}
1681 		bio_chain(split, bio);
1682 		allow_barrier(conf);
1683 		/* Resend the first split part */
1684 		submit_bio_noacct(split);
1685 		wait_barrier(conf, false);
1686 	}
1687 	div_u64_rem(bio_end, stripe_size, &remainder);
1688 	if (remainder) {
1689 		split_size = bio_sectors(bio) - remainder;
1690 		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1691 		if (IS_ERR(split)) {
1692 			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
1693 			bio_endio(bio);
1694 			return 0;
1695 		}
1696 		bio_chain(split, bio);
1697 		allow_barrier(conf);
1698 		/* Resend the second split part */
1699 		submit_bio_noacct(bio);
1700 		bio = split;
1701 		wait_barrier(conf, false);
1702 	}
1703 
1704 	bio_start = bio->bi_iter.bi_sector;
1705 	bio_end = bio_end_sector(bio);
1706 
1707 	/*
1708 	 * Raid10 uses the chunk as the unit to store data, similar to raid0.
1709 	 * One stripe contains the chunks from all member disks (one chunk from
1710 	 * each disk at the same HBA address). For layout details, see 'man md 4'
1711 	 */
1712 	chunk = bio_start >> geo->chunk_shift;
1713 	chunk *= geo->near_copies;
1714 	first_stripe_index = chunk;
1715 	start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
1716 	if (geo->far_offset)
1717 		first_stripe_index *= geo->far_copies;
1718 	start_disk_offset = (bio_start & geo->chunk_mask) +
1719 				(first_stripe_index << geo->chunk_shift);
1720 
1721 	chunk = bio_end >> geo->chunk_shift;
1722 	chunk *= geo->near_copies;
1723 	last_stripe_index = chunk;
1724 	end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
1725 	if (geo->far_offset)
1726 		last_stripe_index *= geo->far_copies;
1727 	end_disk_offset = (bio_end & geo->chunk_mask) +
1728 				(last_stripe_index << geo->chunk_shift);
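	/*
	 * Worked example with an assumed geometry (raid_disks = 4,
	 * near_copies = 2, chunk = 1024 sectors, far_offset = 0):
	 * for bio_start = 3072, chunk = 3072 >> 10 = 3, then chunk *= 2 = 6,
	 * so first_stripe_index = 6 / 4 = 1 and start_disk_index = 6 % 4 = 2.
	 * start_disk_offset = (3072 & 1023) + (1 << 10) = 1024, i.e. the
	 * discard starts on disk 2 at device offset 1024 sectors.
	 */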
1729 
1730 retry_discard:
1731 	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1732 	r10_bio->mddev = mddev;
1733 	r10_bio->state = 0;
1734 	r10_bio->sectors = 0;
1735 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1736 	wait_blocked_dev(mddev, r10_bio);
1737 
1738 	/*
1739 	 * For the far layout it needs more than one r10bio to cover all regions.
1740 	 * Inspired by raid10_sync_request, we can use the first r10bio->master_bio
1741 	 * to record the discard bio. The other r10bios' master_bio record the first
1742 	 * r10bio. The first r10bio is only released after all the other r10bios
1743 	 * finish, and the discard bio returns only when the first r10bio finishes
1744 	 */
1745 	if (first_copy) {
1746 		r10_bio->master_bio = bio;
1747 		set_bit(R10BIO_Discard, &r10_bio->state);
1748 		first_copy = false;
1749 		first_r10bio = r10_bio;
1750 	} else
1751 		r10_bio->master_bio = (struct bio *)first_r10bio;
1752 
1753 	/*
1754 	 * First select the target devices and increment the refcount
1755 	 * on their rdev.  Record them by setting
1756 	 * bios[x] to bio
1757 	 */
1758 	for (disk = 0; disk < geo->raid_disks; disk++) {
1759 		struct md_rdev *rdev, *rrdev;
1760 
1761 		rdev = conf->mirrors[disk].rdev;
1762 		rrdev = conf->mirrors[disk].replacement;
1763 		r10_bio->devs[disk].bio = NULL;
1764 		r10_bio->devs[disk].repl_bio = NULL;
1765 
1766 		if (rdev && (test_bit(Faulty, &rdev->flags)))
1767 			rdev = NULL;
1768 		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1769 			rrdev = NULL;
1770 		if (!rdev && !rrdev)
1771 			continue;
1772 
1773 		if (rdev) {
1774 			r10_bio->devs[disk].bio = bio;
1775 			atomic_inc(&rdev->nr_pending);
1776 		}
1777 		if (rrdev) {
1778 			r10_bio->devs[disk].repl_bio = bio;
1779 			atomic_inc(&rrdev->nr_pending);
1780 		}
1781 	}
1782 
1783 	atomic_set(&r10_bio->remaining, 1);
1784 	for (disk = 0; disk < geo->raid_disks; disk++) {
1785 		sector_t dev_start, dev_end;
1786 		struct bio *mbio, *rbio = NULL;
1787 
1788 		/*
1789 		 * Now start to calculate the start and end address for each disk.
1790 		 * The space between dev_start and dev_end is the discard region.
1791 		 *
1792 		 * For dev_start, there are three conditions to consider:
1793 		 * 1st, the disk is before start_disk; you can imagine the disk in
1794 		 * the next stripe, so dev_start is the start address of the next
1795 		 * stripe.
1796 		 * 2nd, the disk is after start_disk; it means the disk is in the
1797 		 * same stripe as the first disk.
1798 		 * 3rd, the first disk itself; we can use start_disk_offset directly.
1799 		 */
1800 		if (disk < start_disk_index)
1801 			dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
1802 		else if (disk > start_disk_index)
1803 			dev_start = first_stripe_index * mddev->chunk_sectors;
1804 		else
1805 			dev_start = start_disk_offset;
1806 
1807 		if (disk < end_disk_index)
1808 			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
1809 		else if (disk > end_disk_index)
1810 			dev_end = last_stripe_index * mddev->chunk_sectors;
1811 		else
1812 			dev_end = end_disk_offset;
1813 
1814 		/*
1815 		 * We only handle discard bios whose size is >= stripe size, so
1816 		 * dev_end > dev_start all the time.
1817 		 * There is no need to take the rcu lock to get the rdev here; we
1818 		 * already incremented rdev->nr_pending in the first loop.
1819 		 */
1820 		if (r10_bio->devs[disk].bio) {
1821 			struct md_rdev *rdev = conf->mirrors[disk].rdev;
1822 			mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1823 					       &mddev->bio_set);
1824 			mbio->bi_end_io = raid10_end_discard_request;
1825 			mbio->bi_private = r10_bio;
1826 			r10_bio->devs[disk].bio = mbio;
1827 			r10_bio->devs[disk].devnum = disk;
1828 			atomic_inc(&r10_bio->remaining);
1829 			md_submit_discard_bio(mddev, rdev, mbio,
1830 					dev_start + choose_data_offset(r10_bio, rdev),
1831 					dev_end - dev_start);
1832 			bio_endio(mbio);
1833 		}
1834 		if (r10_bio->devs[disk].repl_bio) {
1835 			struct md_rdev *rrdev = conf->mirrors[disk].replacement;
1836 			rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1837 					       &mddev->bio_set);
1838 			rbio->bi_end_io = raid10_end_discard_request;
1839 			rbio->bi_private = r10_bio;
1840 			r10_bio->devs[disk].repl_bio = rbio;
1841 			r10_bio->devs[disk].devnum = disk;
1842 			atomic_inc(&r10_bio->remaining);
1843 			md_submit_discard_bio(mddev, rrdev, rbio,
1844 					dev_start + choose_data_offset(r10_bio, rrdev),
1845 					dev_end - dev_start);
1846 			bio_endio(rbio);
1847 		}
1848 	}
1849 
1850 	if (!geo->far_offset && --far_copies) {
1851 		first_stripe_index += geo->stride >> geo->chunk_shift;
1852 		start_disk_offset += geo->stride;
1853 		last_stripe_index += geo->stride >> geo->chunk_shift;
1854 		end_disk_offset += geo->stride;
1855 		atomic_inc(&first_r10bio->remaining);
1856 		raid_end_discard_bio(r10_bio);
1857 		wait_barrier(conf, false);
1858 		goto retry_discard;
1859 	}
1860 
1861 	raid_end_discard_bio(r10_bio);
1862 
1863 	return 0;
1864 out:
1865 	allow_barrier(conf);
1866 	return -EAGAIN;
1867 }
1868 
1869 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1870 {
1871 	struct r10conf *conf = mddev->private;
1872 	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1873 	int chunk_sects = chunk_mask + 1;
1874 	int sectors = bio_sectors(bio);
1875 
1876 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1877 	    && md_flush_request(mddev, bio))
1878 		return true;
1879 
1880 	md_write_start(mddev, bio);
1881 
1882 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1883 		if (!raid10_handle_discard(mddev, bio))
1884 			return true;
1885 
1886 	/*
1887 	 * If this request crosses a chunk boundary, we need to split
1888 	 * it.
1889 	 */
1890 	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1891 		     sectors > chunk_sects
1892 		     && (conf->geo.near_copies < conf->geo.raid_disks
1893 			 || conf->prev.near_copies <
1894 			 conf->prev.raid_disks)))
1895 		sectors = chunk_sects -
1896 			(bio->bi_iter.bi_sector &
1897 			 (chunk_sects - 1));
1898 	__make_request(mddev, bio, sectors);
1899 
1900 	/* In case raid10d snuck in to freeze_array */
1901 	wake_up_barrier(conf);
1902 	return true;
1903 }
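/*
 * Chunk-boundary split, illustrated with assumed numbers: with
 * chunk_sects = 128 (64 KiB chunks) and near_copies < raid_disks, a
 * request at bi_sector = 120 for 32 sectors crosses the boundary at 128,
 * so it is trimmed above to sectors = 128 - (120 & 127) = 8, and
 * __make_request() later splits off and resubmits the remaining 24
 * sectors.
 */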
1904 
1905 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1906 {
1907 	struct r10conf *conf = mddev->private;
1908 	int i;
1909 
1910 	lockdep_assert_held(&mddev->lock);
1911 
1912 	if (conf->geo.near_copies < conf->geo.raid_disks)
1913 		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1914 	if (conf->geo.near_copies > 1)
1915 		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1916 	if (conf->geo.far_copies > 1) {
1917 		if (conf->geo.far_offset)
1918 			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1919 		else
1920 			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1921 		if (conf->geo.far_set_size != conf->geo.raid_disks)
1922 			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1923 	}
1924 	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1925 					conf->geo.raid_disks - mddev->degraded);
1926 	for (i = 0; i < conf->geo.raid_disks; i++) {
1927 		struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
1928 
1929 		seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1930 	}
1931 	seq_printf(seq, "]");
1932 }
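/*
 * For reference, the status line produced above looks roughly like
 * (values assumed for illustration):
 *   " 512K chunks 2 near-copies [4/4] [UUUU]"
 * with one 'U' per in-sync device and '_' per missing or failed one.
 */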
1933 
1934 /* Check if there are enough drives for
1935  * every block to appear on at least one.
1936  * Don't consider the device numbered 'ignore'
1937  * as we might be about to remove it.
1938  */
1939 static int _enough(struct r10conf *conf, int previous, int ignore)
1940 {
1941 	int first = 0;
1942 	int has_enough = 0;
1943 	int disks, ncopies;
1944 	if (previous) {
1945 		disks = conf->prev.raid_disks;
1946 		ncopies = conf->prev.near_copies;
1947 	} else {
1948 		disks = conf->geo.raid_disks;
1949 		ncopies = conf->geo.near_copies;
1950 	}
1951 
1952 	do {
1953 		int n = conf->copies;
1954 		int cnt = 0;
1955 		int this = first;
1956 		while (n--) {
1957 			struct md_rdev *rdev;
1958 			if (this != ignore &&
1959 			    (rdev = conf->mirrors[this].rdev) &&
1960 			    test_bit(In_sync, &rdev->flags))
1961 				cnt++;
1962 			this = (this+1) % disks;
1963 		}
1964 		if (cnt == 0)
1965 			goto out;
1966 		first = (first + ncopies) % disks;
1967 	} while (first != 0);
1968 	has_enough = 1;
1969 out:
1970 	return has_enough;
1971 }
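/*
 * Example of the check above, with assumed values: for a 'near 2' layout
 * on 4 disks (ncopies = 2, copies = 2), the groups checked are {0,1} and
 * {2,3}.  Losing disks 0 and 2 still leaves one In_sync member in each
 * group, so _enough() returns 1; losing disks 0 and 1 empties the first
 * group and it returns 0.
 */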
1972 
1973 static int enough(struct r10conf *conf, int ignore)
1974 {
1975 	/* when calling 'enough', both 'prev' and 'geo' must
1976 	 * be stable.
1977 	 * This is ensured if ->reconfig_mutex or ->device_lock
1978 	 * is held.
1979 	 */
1980 	return _enough(conf, 0, ignore) &&
1981 		_enough(conf, 1, ignore);
1982 }
1983 
1984 /**
1985  * raid10_error() - RAID10 error handler.
1986  * @mddev: affected md device.
1987  * @rdev: member device to fail.
1988  *
1989  * The routine acknowledges &rdev failure and determines new @mddev state.
1990  * If it failed, then:
1991  *	- &MD_BROKEN flag is set in &mddev->flags.
1992  * Otherwise, it must be degraded:
1993  *	- recovery is interrupted.
1994  *	- &mddev->degraded is bumped.
1995  *
1996  * @rdev is marked as &Faulty excluding case when array is failed and
1997  * &mddev->fail_last_dev is off.
1998  */
1999 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
2000 {
2001 	struct r10conf *conf = mddev->private;
2002 	unsigned long flags;
2003 
2004 	spin_lock_irqsave(&conf->device_lock, flags);
2005 
2006 	if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
2007 		set_bit(MD_BROKEN, &mddev->flags);
2008 
2009 		if (!mddev->fail_last_dev) {
2010 			spin_unlock_irqrestore(&conf->device_lock, flags);
2011 			return;
2012 		}
2013 	}
2014 	if (test_and_clear_bit(In_sync, &rdev->flags))
2015 		mddev->degraded++;
2016 
2017 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2018 	set_bit(Blocked, &rdev->flags);
2019 	set_bit(Faulty, &rdev->flags);
2020 	set_mask_bits(&mddev->sb_flags, 0,
2021 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2022 	spin_unlock_irqrestore(&conf->device_lock, flags);
2023 	pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
2024 		"md/raid10:%s: Operation continuing on %d devices.\n",
2025 		mdname(mddev), rdev->bdev,
2026 		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
2027 }
2028 
2029 static void print_conf(struct r10conf *conf)
2030 {
2031 	int i;
2032 	struct md_rdev *rdev;
2033 
2034 	pr_debug("RAID10 conf printout:\n");
2035 	if (!conf) {
2036 		pr_debug("(!conf)\n");
2037 		return;
2038 	}
2039 	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2040 		 conf->geo.raid_disks);
2041 
2042 	lockdep_assert_held(&conf->mddev->reconfig_mutex);
2043 	for (i = 0; i < conf->geo.raid_disks; i++) {
2044 		rdev = conf->mirrors[i].rdev;
2045 		if (rdev)
2046 			pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
2047 				 i, !test_bit(In_sync, &rdev->flags),
2048 				 !test_bit(Faulty, &rdev->flags),
2049 				 rdev->bdev);
2050 	}
2051 }
2052 
2053 static void close_sync(struct r10conf *conf)
2054 {
2055 	wait_barrier(conf, false);
2056 	allow_barrier(conf);
2057 
2058 	mempool_exit(&conf->r10buf_pool);
2059 }
2060 
2061 static int raid10_spare_active(struct mddev *mddev)
2062 {
2063 	int i;
2064 	struct r10conf *conf = mddev->private;
2065 	struct raid10_info *tmp;
2066 	int count = 0;
2067 	unsigned long flags;
2068 
2069 	/*
2070 	 * Find all non-in_sync disks within the RAID10 configuration
2071 	 * and mark them in_sync
2072 	 */
2073 	for (i = 0; i < conf->geo.raid_disks; i++) {
2074 		tmp = conf->mirrors + i;
2075 		if (tmp->replacement
2076 		    && tmp->replacement->recovery_offset == MaxSector
2077 		    && !test_bit(Faulty, &tmp->replacement->flags)
2078 		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
2079 			/* Replacement has just become active */
2080 			if (!tmp->rdev
2081 			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
2082 				count++;
2083 			if (tmp->rdev) {
2084 				/* Replaced device not technically faulty,
2085 				 * but we need to be sure it gets removed
2086 				 * and never re-added.
2087 				 */
2088 				set_bit(Faulty, &tmp->rdev->flags);
2089 				sysfs_notify_dirent_safe(
2090 					tmp->rdev->sysfs_state);
2091 			}
2092 			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
2093 		} else if (tmp->rdev
2094 			   && tmp->rdev->recovery_offset == MaxSector
2095 			   && !test_bit(Faulty, &tmp->rdev->flags)
2096 			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
2097 			count++;
2098 			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
2099 		}
2100 	}
2101 	spin_lock_irqsave(&conf->device_lock, flags);
2102 	mddev->degraded -= count;
2103 	spin_unlock_irqrestore(&conf->device_lock, flags);
2104 
2105 	print_conf(conf);
2106 	return count;
2107 }
2108 
2109 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2110 {
2111 	struct r10conf *conf = mddev->private;
2112 	int err = -EEXIST;
2113 	int mirror, repl_slot = -1;
2114 	int first = 0;
2115 	int last = conf->geo.raid_disks - 1;
2116 	struct raid10_info *p;
2117 
2118 	if (mddev->recovery_cp < MaxSector)
2119 		/* only hot-add to in-sync arrays, as recovery is
2120 		 * very different from resync
2121 		 */
2122 		return -EBUSY;
2123 	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2124 		return -EINVAL;
2125 
2126 	if (rdev->raid_disk >= 0)
2127 		first = last = rdev->raid_disk;
2128 
2129 	if (rdev->saved_raid_disk >= first &&
2130 	    rdev->saved_raid_disk < conf->geo.raid_disks &&
2131 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2132 		mirror = rdev->saved_raid_disk;
2133 	else
2134 		mirror = first;
2135 	for ( ; mirror <= last ; mirror++) {
2136 		p = &conf->mirrors[mirror];
2137 		if (p->recovery_disabled == mddev->recovery_disabled)
2138 			continue;
2139 		if (p->rdev) {
2140 			if (test_bit(WantReplacement, &p->rdev->flags) &&
2141 			    p->replacement == NULL && repl_slot < 0)
2142 				repl_slot = mirror;
2143 			continue;
2144 		}
2145 
2146 		err = mddev_stack_new_rdev(mddev, rdev);
2147 		if (err)
2148 			return err;
2149 		p->head_position = 0;
2150 		p->recovery_disabled = mddev->recovery_disabled - 1;
2151 		rdev->raid_disk = mirror;
2152 		err = 0;
2153 		if (rdev->saved_raid_disk != mirror)
2154 			conf->fullsync = 1;
2155 		WRITE_ONCE(p->rdev, rdev);
2156 		break;
2157 	}
2158 
2159 	if (err && repl_slot >= 0) {
2160 		p = &conf->mirrors[repl_slot];
2161 		clear_bit(In_sync, &rdev->flags);
2162 		set_bit(Replacement, &rdev->flags);
2163 		rdev->raid_disk = repl_slot;
2164 		err = mddev_stack_new_rdev(mddev, rdev);
2165 		if (err)
2166 			return err;
2167 		conf->fullsync = 1;
2168 		WRITE_ONCE(p->replacement, rdev);
2169 	}
2170 
2171 	print_conf(conf);
2172 	return err;
2173 }
2174 
2175 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2176 {
2177 	struct r10conf *conf = mddev->private;
2178 	int err = 0;
2179 	int number = rdev->raid_disk;
2180 	struct md_rdev **rdevp;
2181 	struct raid10_info *p;
2182 
2183 	print_conf(conf);
2184 	if (unlikely(number >= mddev->raid_disks))
2185 		return 0;
2186 	p = conf->mirrors + number;
2187 	if (rdev == p->rdev)
2188 		rdevp = &p->rdev;
2189 	else if (rdev == p->replacement)
2190 		rdevp = &p->replacement;
2191 	else
2192 		return 0;
2193 
2194 	if (test_bit(In_sync, &rdev->flags) ||
2195 	    atomic_read(&rdev->nr_pending)) {
2196 		err = -EBUSY;
2197 		goto abort;
2198 	}
2199 	/* Only remove non-faulty devices if recovery
2200 	 * is not possible.
2201 	 */
2202 	if (!test_bit(Faulty, &rdev->flags) &&
2203 	    mddev->recovery_disabled != p->recovery_disabled &&
2204 	    (!p->replacement || p->replacement == rdev) &&
2205 	    number < conf->geo.raid_disks &&
2206 	    enough(conf, -1)) {
2207 		err = -EBUSY;
2208 		goto abort;
2209 	}
2210 	WRITE_ONCE(*rdevp, NULL);
2211 	if (p->replacement) {
2212 		/* We must have just cleared 'rdev' */
2213 		WRITE_ONCE(p->rdev, p->replacement);
2214 		clear_bit(Replacement, &p->replacement->flags);
2215 		WRITE_ONCE(p->replacement, NULL);
2216 	}
2217 
2218 	clear_bit(WantReplacement, &rdev->flags);
2219 	err = md_integrity_register(mddev);
2220 
2221 abort:
2222 
2223 	print_conf(conf);
2224 	return err;
2225 }
2226 
2227 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2228 {
2229 	struct r10conf *conf = r10_bio->mddev->private;
2230 
2231 	if (!bio->bi_status)
2232 		set_bit(R10BIO_Uptodate, &r10_bio->state);
2233 	else
2234 		/* The write handler will notice the lack of
2235 		 * R10BIO_Uptodate and record any errors etc
2236 		 */
2237 		atomic_add(r10_bio->sectors,
2238 			   &conf->mirrors[d].rdev->corrected_errors);
2239 
2240 	/* for reconstruct, we always reschedule after a read.
2241 	 * for resync, only after all reads
2242 	 */
2243 	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2244 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2245 	    atomic_dec_and_test(&r10_bio->remaining)) {
2246 		/* we have read all the blocks,
2247 		 * do the comparison in process context in raid10d
2248 		 */
2249 		reschedule_retry(r10_bio);
2250 	}
2251 }
2252 
2253 static void end_sync_read(struct bio *bio)
2254 {
2255 	struct r10bio *r10_bio = get_resync_r10bio(bio);
2256 	struct r10conf *conf = r10_bio->mddev->private;
2257 	int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2258 
2259 	__end_sync_read(r10_bio, bio, d);
2260 }
2261 
2262 static void end_reshape_read(struct bio *bio)
2263 {
2264 	/* reshape read bio isn't allocated from r10buf_pool */
2265 	struct r10bio *r10_bio = bio->bi_private;
2266 
2267 	__end_sync_read(r10_bio, bio, r10_bio->read_slot);
2268 }
2269 
2270 static void end_sync_request(struct r10bio *r10_bio)
2271 {
2272 	struct mddev *mddev = r10_bio->mddev;
2273 
2274 	while (atomic_dec_and_test(&r10_bio->remaining)) {
2275 		if (r10_bio->master_bio == NULL) {
2276 			/* the primary of several recovery bios */
2277 			sector_t s = r10_bio->sectors;
2278 			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2279 			    test_bit(R10BIO_WriteError, &r10_bio->state))
2280 				reschedule_retry(r10_bio);
2281 			else
2282 				put_buf(r10_bio);
2283 			md_done_sync(mddev, s, 1);
2284 			break;
2285 		} else {
2286 			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2287 			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2288 			    test_bit(R10BIO_WriteError, &r10_bio->state))
2289 				reschedule_retry(r10_bio);
2290 			else
2291 				put_buf(r10_bio);
2292 			r10_bio = r10_bio2;
2293 		}
2294 	}
2295 }
2296 
2297 static void end_sync_write(struct bio *bio)
2298 {
2299 	struct r10bio *r10_bio = get_resync_r10bio(bio);
2300 	struct mddev *mddev = r10_bio->mddev;
2301 	struct r10conf *conf = mddev->private;
2302 	int d;
2303 	int slot;
2304 	int repl;
2305 	struct md_rdev *rdev = NULL;
2306 
2307 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2308 	if (repl)
2309 		rdev = conf->mirrors[d].replacement;
2310 	else
2311 		rdev = conf->mirrors[d].rdev;
2312 
2313 	if (bio->bi_status) {
2314 		if (repl)
2315 			md_error(mddev, rdev);
2316 		else {
2317 			set_bit(WriteErrorSeen, &rdev->flags);
2318 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
2319 				set_bit(MD_RECOVERY_NEEDED,
2320 					&rdev->mddev->recovery);
2321 			set_bit(R10BIO_WriteError, &r10_bio->state);
2322 		}
2323 	} else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
2324 				     r10_bio->sectors)) {
2325 		set_bit(R10BIO_MadeGood, &r10_bio->state);
2326 	}
2327 
2328 	rdev_dec_pending(rdev, mddev);
2329 
2330 	end_sync_request(r10_bio);
2331 }
2332 
2333 /*
2334  * Note: sync and recover are handled very differently for raid10.
2335  * This code is for resync.
2336  * For resync, we read through virtual addresses and read all blocks.
2337  * If there is any error, we schedule a write.  The lowest numbered
2338  * drive is authoritative.
2339  * However, requests come in for physical addresses, so we need to map.
2340  * For every physical address there are raid_disks/copies virtual addresses,
2341  * which is always at least one, but is not necessarily an integer.
2342  * This means that a physical address can span multiple chunks, so we may
2343  * have to submit multiple io requests for a single sync request.
2344  */
2345 /*
2346  * We check if all blocks are in-sync and only write to blocks that
2347  * aren't in sync
2348  */
2349 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2350 {
2351 	struct r10conf *conf = mddev->private;
2352 	int i, first;
2353 	struct bio *tbio, *fbio;
2354 	int vcnt;
2355 	struct page **tpages, **fpages;
2356 
2357 	atomic_set(&r10_bio->remaining, 1);
2358 
2359 	/* find the first device with a block */
2360 	for (i=0; i<conf->copies; i++)
2361 		if (!r10_bio->devs[i].bio->bi_status)
2362 			break;
2363 
2364 	if (i == conf->copies)
2365 		goto done;
2366 
2367 	first = i;
2368 	fbio = r10_bio->devs[i].bio;
2369 	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2370 	fbio->bi_iter.bi_idx = 0;
2371 	fpages = get_resync_pages(fbio)->pages;
2372 
2373 	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
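	/*
	 * e.g. with 4 KiB pages (PAGE_SHIFT = 12, assumed here for
	 * illustration) and r10_bio->sectors = 1024,
	 * vcnt = (1024 + 7) >> 3 = 128 page-sized vec entries per copy.
	 */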
2374 	/* now find blocks with errors */
2375 	for (i=0 ; i < conf->copies ; i++) {
2376 		int  j, d;
2377 		struct md_rdev *rdev;
2378 		struct resync_pages *rp;
2379 
2380 		tbio = r10_bio->devs[i].bio;
2381 
2382 		if (tbio->bi_end_io != end_sync_read)
2383 			continue;
2384 		if (i == first)
2385 			continue;
2386 
2387 		tpages = get_resync_pages(tbio)->pages;
2388 		d = r10_bio->devs[i].devnum;
2389 		rdev = conf->mirrors[d].rdev;
2390 		if (!r10_bio->devs[i].bio->bi_status) {
2391 			/* We know that the bi_io_vec layout is the same for
2392 			 * both 'first' and 'i', so we just compare them.
2393 			 * All vec entries are PAGE_SIZE;
2394 			 */
2395 			int sectors = r10_bio->sectors;
2396 			for (j = 0; j < vcnt; j++) {
2397 				int len = PAGE_SIZE;
2398 				if (sectors < (len / 512))
2399 					len = sectors * 512;
2400 				if (memcmp(page_address(fpages[j]),
2401 					   page_address(tpages[j]),
2402 					   len))
2403 					break;
2404 				sectors -= len/512;
2405 			}
2406 			if (j == vcnt)
2407 				continue;
2408 			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2409 			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2410 				/* Don't fix anything. */
2411 				continue;
2412 		} else if (test_bit(FailFast, &rdev->flags)) {
2413 			/* Just give up on this device */
2414 			md_error(rdev->mddev, rdev);
2415 			continue;
2416 		}
2417 		/* Ok, we need to write this bio, either to correct an
2418 		 * inconsistency or to correct an unreadable block.
2419 		 * First we need to fixup bv_offset, bv_len and
2420 		 * bi_vecs, as the read request might have corrupted these
2421 		 */
2422 		rp = get_resync_pages(tbio);
2423 		bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2424 
2425 		md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2426 
2427 		rp->raid_bio = r10_bio;
2428 		tbio->bi_private = rp;
2429 		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2430 		tbio->bi_end_io = end_sync_write;
2431 
2432 		bio_copy_data(tbio, fbio);
2433 
2434 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2435 		atomic_inc(&r10_bio->remaining);
2436 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2437 
2438 		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2439 			tbio->bi_opf |= MD_FAILFAST;
2440 		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2441 		submit_bio_noacct(tbio);
2442 	}
2443 
2444 	/* Now write out to any replacement devices
2445 	 * that are active
2446 	 */
2447 	for (i = 0; i < conf->copies; i++) {
2448 		int d;
2449 
2450 		tbio = r10_bio->devs[i].repl_bio;
2451 		if (!tbio || !tbio->bi_end_io)
2452 			continue;
2453 		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2454 		    && r10_bio->devs[i].bio != fbio)
2455 			bio_copy_data(tbio, fbio);
2456 		d = r10_bio->devs[i].devnum;
2457 		atomic_inc(&r10_bio->remaining);
2458 		md_sync_acct(conf->mirrors[d].replacement->bdev,
2459 			     bio_sectors(tbio));
2460 		submit_bio_noacct(tbio);
2461 	}
2462 
2463 done:
2464 	if (atomic_dec_and_test(&r10_bio->remaining)) {
2465 		md_done_sync(mddev, r10_bio->sectors, 1);
2466 		put_buf(r10_bio);
2467 	}
2468 }
2469 
2470 /*
2471  * Now for the recovery code.
2472  * Recovery happens across physical sectors.
2473  * We recover all non-in_sync drives by finding the virtual address of
2474  * each, and then choosing a working drive that also has that virt address.
2475  * There is a separate r10_bio for each non-in_sync drive.
2476  * Only the first two slots are in use: the first for reading,
2477  * the second for writing.
2478  *
2479  */
2480 static void fix_recovery_read_error(struct r10bio *r10_bio)
2481 {
2482 	/* We got a read error during recovery.
2483 	 * We repeat the read in smaller page-sized sections.
2484 	 * If a read succeeds, write it to the new device or record
2485 	 * a bad block if we cannot.
2486 	 * If a read fails, record a bad block on both old and
2487 	 * new devices.
2488 	 */
2489 	struct mddev *mddev = r10_bio->mddev;
2490 	struct r10conf *conf = mddev->private;
2491 	struct bio *bio = r10_bio->devs[0].bio;
2492 	sector_t sect = 0;
2493 	int sectors = r10_bio->sectors;
2494 	int idx = 0;
2495 	int dr = r10_bio->devs[0].devnum;
2496 	int dw = r10_bio->devs[1].devnum;
2497 	struct page **pages = get_resync_pages(bio)->pages;
2498 
2499 	while (sectors) {
2500 		int s = sectors;
2501 		struct md_rdev *rdev;
2502 		sector_t addr;
2503 		int ok;
2504 
2505 		if (s > (PAGE_SIZE>>9))
2506 			s = PAGE_SIZE >> 9;
2507 
2508 		rdev = conf->mirrors[dr].rdev;
2509 		addr = r10_bio->devs[0].addr + sect;
2510 		ok = sync_page_io(rdev,
2511 				  addr,
2512 				  s << 9,
2513 				  pages[idx],
2514 				  REQ_OP_READ, false);
2515 		if (ok) {
2516 			rdev = conf->mirrors[dw].rdev;
2517 			addr = r10_bio->devs[1].addr + sect;
2518 			ok = sync_page_io(rdev,
2519 					  addr,
2520 					  s << 9,
2521 					  pages[idx],
2522 					  REQ_OP_WRITE, false);
2523 			if (!ok) {
2524 				set_bit(WriteErrorSeen, &rdev->flags);
2525 				if (!test_and_set_bit(WantReplacement,
2526 						      &rdev->flags))
2527 					set_bit(MD_RECOVERY_NEEDED,
2528 						&rdev->mddev->recovery);
2529 			}
2530 		}
2531 		if (!ok) {
2532 			/* We don't worry if we cannot set a bad block -
2533 			 * it really is bad so there is no loss in not
2534 			 * recording it yet
2535 			 */
2536 			rdev_set_badblocks(rdev, addr, s, 0);
2537 
2538 			if (rdev != conf->mirrors[dw].rdev) {
2539 				/* need bad block on destination too */
2540 				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2541 				addr = r10_bio->devs[1].addr + sect;
2542 				ok = rdev_set_badblocks(rdev2, addr, s, 0);
2543 				if (!ok) {
2544 					/* just abort the recovery */
2545 					pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2546 						  mdname(mddev));
2547 
2548 					conf->mirrors[dw].recovery_disabled
2549 						= mddev->recovery_disabled;
2550 					set_bit(MD_RECOVERY_INTR,
2551 						&mddev->recovery);
2552 					break;
2553 				}
2554 			}
2555 		}
2556 
2557 		sectors -= s;
2558 		sect += s;
2559 		idx++;
2560 	}
2561 }
2562 
2563 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2564 {
2565 	struct r10conf *conf = mddev->private;
2566 	int d;
2567 	struct bio *wbio = r10_bio->devs[1].bio;
2568 	struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2569 
2570 	/* Need to test wbio2->bi_end_io before we call
2571 	 * submit_bio_noacct as if the former is NULL,
2572 	 * the latter is free to free wbio2.
2573 	 */
2574 	if (wbio2 && !wbio2->bi_end_io)
2575 		wbio2 = NULL;
2576 
2577 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2578 		fix_recovery_read_error(r10_bio);
2579 		if (wbio->bi_end_io)
2580 			end_sync_request(r10_bio);
2581 		if (wbio2)
2582 			end_sync_request(r10_bio);
2583 		return;
2584 	}
2585 
2586 	/*
2587 	 * share the pages with the first bio
2588 	 * and submit the write request
2589 	 */
2590 	d = r10_bio->devs[1].devnum;
2591 	if (wbio->bi_end_io) {
2592 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2593 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2594 		submit_bio_noacct(wbio);
2595 	}
2596 	if (wbio2) {
2597 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2598 		md_sync_acct(conf->mirrors[d].replacement->bdev,
2599 			     bio_sectors(wbio2));
2600 		submit_bio_noacct(wbio2);
2601 	}
2602 }
2603 
2604 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2605 			    int sectors, struct page *page, enum req_op op)
2606 {
2607 	if (rdev_has_badblock(rdev, sector, sectors) &&
2608 	    (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
2609 		return -1;
2610 	if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
2611 		/* success */
2612 		return 1;
2613 	if (op == REQ_OP_WRITE) {
2614 		set_bit(WriteErrorSeen, &rdev->flags);
2615 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2616 			set_bit(MD_RECOVERY_NEEDED,
2617 				&rdev->mddev->recovery);
2618 	}
2619 	/* need to record an error - either for the block or the device */
2620 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2621 		md_error(rdev->mddev, rdev);
2622 	return 0;
2623 }
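/*
 * Summary of the return values above: 1 on success; -1 when the range
 * overlaps a known bad block (for writes, only if a write error was
 * already seen) and is skipped; 0 when the I/O failed, after recording a
 * bad block or failing the device.
 */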
2624 
2625 /*
2626  * This is a kernel thread which:
2627  *
2628  *	1.	Retries failed read operations on working mirrors.
2629  *	2.	Updates the raid superblock when problems are encountered.
2630  *	3.	Performs writes following reads for array synchronising.
2631  */
2632 
2633 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2634 {
2635 	int sect = 0; /* Offset from r10_bio->sector */
2636 	int sectors = r10_bio->sectors, slot = r10_bio->read_slot;
2637 	struct md_rdev *rdev;
2638 	int d = r10_bio->devs[slot].devnum;
2639 
2640 	/* still own a reference to this rdev, so it cannot
2641 	 * have been cleared recently.
2642 	 */
2643 	rdev = conf->mirrors[d].rdev;
2644 
2645 	if (test_bit(Faulty, &rdev->flags))
2646 		/* drive has already been failed, just ignore any
2647 		   more fix_read_error() attempts */
2648 		return;
2649 
2650 	if (exceed_read_errors(mddev, rdev)) {
2651 		r10_bio->devs[slot].bio = IO_BLOCKED;
2652 		return;
2653 	}
2654 
2655 	while(sectors) {
2656 		int s = sectors;
2657 		int sl = slot;
2658 		int success = 0;
2659 		int start;
2660 
2661 		if (s > (PAGE_SIZE>>9))
2662 			s = PAGE_SIZE >> 9;
2663 
2664 		do {
2665 			d = r10_bio->devs[sl].devnum;
2666 			rdev = conf->mirrors[d].rdev;
2667 			if (rdev &&
2668 			    test_bit(In_sync, &rdev->flags) &&
2669 			    !test_bit(Faulty, &rdev->flags) &&
2670 			    rdev_has_badblock(rdev,
2671 					      r10_bio->devs[sl].addr + sect,
2672 					      s) == 0) {
2673 				atomic_inc(&rdev->nr_pending);
2674 				success = sync_page_io(rdev,
2675 						       r10_bio->devs[sl].addr +
2676 						       sect,
2677 						       s<<9,
2678 						       conf->tmppage,
2679 						       REQ_OP_READ, false);
2680 				rdev_dec_pending(rdev, mddev);
2681 				if (success)
2682 					break;
2683 			}
2684 			sl++;
2685 			if (sl == conf->copies)
2686 				sl = 0;
2687 		} while (sl != slot);
2688 
2689 		if (!success) {
2690 			/* Cannot read from anywhere, just mark the block
2691 			 * as bad on the first device to discourage future
2692 			 * reads.
2693 			 */
2694 			int dn = r10_bio->devs[slot].devnum;
2695 			rdev = conf->mirrors[dn].rdev;
2696 
2697 			if (!rdev_set_badblocks(
2698 				    rdev,
2699 				    r10_bio->devs[slot].addr
2700 				    + sect,
2701 				    s, 0)) {
2702 				md_error(mddev, rdev);
2703 				r10_bio->devs[slot].bio
2704 					= IO_BLOCKED;
2705 			}
2706 			break;
2707 		}
2708 
2709 		start = sl;
2710 		/* write it back and re-read */
2711 		while (sl != slot) {
2712 			if (sl==0)
2713 				sl = conf->copies;
2714 			sl--;
2715 			d = r10_bio->devs[sl].devnum;
2716 			rdev = conf->mirrors[d].rdev;
2717 			if (!rdev ||
2718 			    test_bit(Faulty, &rdev->flags) ||
2719 			    !test_bit(In_sync, &rdev->flags))
2720 				continue;
2721 
2722 			atomic_inc(&rdev->nr_pending);
2723 			if (r10_sync_page_io(rdev,
2724 					     r10_bio->devs[sl].addr +
2725 					     sect,
2726 					     s, conf->tmppage, REQ_OP_WRITE)
2727 			    == 0) {
2728 				/* Well, this device is dead */
2729 				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
2730 					  mdname(mddev), s,
2731 					  (unsigned long long)(
2732 						  sect +
2733 						  choose_data_offset(r10_bio,
2734 								     rdev)),
2735 					  rdev->bdev);
2736 				pr_notice("md/raid10:%s: %pg: failing drive\n",
2737 					  mdname(mddev),
2738 					  rdev->bdev);
2739 			}
2740 			rdev_dec_pending(rdev, mddev);
2741 		}
2742 		sl = start;
2743 		while (sl != slot) {
2744 			if (sl==0)
2745 				sl = conf->copies;
2746 			sl--;
2747 			d = r10_bio->devs[sl].devnum;
2748 			rdev = conf->mirrors[d].rdev;
2749 			if (!rdev ||
2750 			    test_bit(Faulty, &rdev->flags) ||
2751 			    !test_bit(In_sync, &rdev->flags))
2752 				continue;
2753 
2754 			atomic_inc(&rdev->nr_pending);
2755 			switch (r10_sync_page_io(rdev,
2756 					     r10_bio->devs[sl].addr +
2757 					     sect,
2758 					     s, conf->tmppage, REQ_OP_READ)) {
2759 			case 0:
2760 				/* Well, this device is dead */
2761 				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
2762 				       mdname(mddev), s,
2763 				       (unsigned long long)(
2764 					       sect +
2765 					       choose_data_offset(r10_bio, rdev)),
2766 				       rdev->bdev);
2767 				pr_notice("md/raid10:%s: %pg: failing drive\n",
2768 				       mdname(mddev),
2769 				       rdev->bdev);
2770 				break;
2771 			case 1:
2772 				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %pg)\n",
2773 				       mdname(mddev), s,
2774 				       (unsigned long long)(
2775 					       sect +
2776 					       choose_data_offset(r10_bio, rdev)),
2777 				       rdev->bdev);
2778 				atomic_add(s, &rdev->corrected_errors);
2779 			}
2780 
2781 			rdev_dec_pending(rdev, mddev);
2782 		}
2783 
2784 		sectors -= s;
2785 		sect += s;
2786 	}
2787 }
2788 
2789 static int narrow_write_error(struct r10bio *r10_bio, int i)
2790 {
2791 	struct bio *bio = r10_bio->master_bio;
2792 	struct mddev *mddev = r10_bio->mddev;
2793 	struct r10conf *conf = mddev->private;
2794 	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2795 	/* bio has the data to be written to slot 'i' where
2796 	 * we just recently had a write error.
2797 	 * We repeatedly clone the bio and trim down to one block,
2798 	 * then try the write.  Where the write fails we record
2799 	 * a bad block.
2800 	 * It is conceivable that the bio doesn't exactly align with
2801 	 * blocks.  We must handle this.
2802 	 *
2803 	 * We currently own a reference to the rdev.
2804 	 */
2805 
2806 	int block_sectors;
2807 	sector_t sector;
2808 	int sectors;
2809 	int sect_to_write = r10_bio->sectors;
2810 	int ok = 1;
2811 
2812 	if (rdev->badblocks.shift < 0)
2813 		return 0;
2814 
2815 	block_sectors = roundup(1 << rdev->badblocks.shift,
2816 				bdev_logical_block_size(rdev->bdev) >> 9);
2817 	sector = r10_bio->sector;
2818 	sectors = ((r10_bio->sector + block_sectors)
2819 		   & ~(sector_t)(block_sectors - 1))
2820 		- sector;
2821 
2822 	while (sect_to_write) {
2823 		struct bio *wbio;
2824 		sector_t wsector;
2825 		if (sectors > sect_to_write)
2826 			sectors = sect_to_write;
2827 		/* Write at 'sector' for 'sectors' */
2828 		wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2829 				       &mddev->bio_set);
2830 		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2831 		wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2832 		wbio->bi_iter.bi_sector = wsector +
2833 				   choose_data_offset(r10_bio, rdev);
2834 		wbio->bi_opf = REQ_OP_WRITE;
2835 
2836 		if (submit_bio_wait(wbio) < 0)
2837 			/* Failure! */
2838 			ok = rdev_set_badblocks(rdev, wsector,
2839 						sectors, 0)
2840 				&& ok;
2841 
2842 		bio_put(wbio);
2843 		sect_to_write -= sectors;
2844 		sector += sectors;
2845 		sectors = block_sectors;
2846 	}
2847 	return ok;
2848 }
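/*
 * Alignment example for the loop above, with assumed values: with
 * badblocks.shift = 3 and a 4096-byte logical block size,
 * block_sectors = roundup(8, 8) = 8.  For r10_bio->sector = 1003 the
 * first write covers ((1003 + 8) & ~7) - 1003 = 5 sectors, and every
 * following write covers a full block_sectors of 8.
 */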
2849 
2850 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2851 {
2852 	int slot = r10_bio->read_slot;
2853 	struct bio *bio;
2854 	struct r10conf *conf = mddev->private;
2855 	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2856 
2857 	/* we got a read error. Maybe the drive is bad.  Maybe just
2858 	 * the block and we can fix it.
2859 	 * We freeze all other IO, and try reading the block from
2860 	 * other devices.  When we find one, we re-write the block
2861 	 * and check whether that fixes the read error.
2862 	 * This is all done synchronously while the array is
2863 	 * frozen.
2864 	 */
2865 	bio = r10_bio->devs[slot].bio;
2866 	bio_put(bio);
2867 	r10_bio->devs[slot].bio = NULL;
2868 
2869 	if (mddev->ro)
2870 		r10_bio->devs[slot].bio = IO_BLOCKED;
2871 	else if (!test_bit(FailFast, &rdev->flags)) {
2872 		freeze_array(conf, 1);
2873 		fix_read_error(conf, mddev, r10_bio);
2874 		unfreeze_array(conf);
2875 	} else
2876 		md_error(mddev, rdev);
2877 
2878 	rdev_dec_pending(rdev, mddev);
2879 	r10_bio->state = 0;
2880 	raid10_read_request(mddev, r10_bio->master_bio, r10_bio, false);
2881 	/*
2882 	 * allow_barrier after re-submit to ensure no sync io
2883 	 * can be issued while regular io is pending.
2884 	 */
2885 	allow_barrier(conf);
2886 }
2887 
2888 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2889 {
2890 	/* Some sort of write request has finished and it
2891 	 * succeeded in writing where we thought there was a
2892 	 * bad block.  So forget the bad block.
2893 	 * Or possibly it failed and we need to record
2894 	 * a bad block.
2895 	 */
2896 	int m;
2897 	struct md_rdev *rdev;
2898 
2899 	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2900 	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2901 		for (m = 0; m < conf->copies; m++) {
2902 			int dev = r10_bio->devs[m].devnum;
2903 			rdev = conf->mirrors[dev].rdev;
2904 			if (r10_bio->devs[m].bio == NULL ||
2905 				r10_bio->devs[m].bio->bi_end_io == NULL)
2906 				continue;
2907 			if (!r10_bio->devs[m].bio->bi_status) {
2908 				rdev_clear_badblocks(
2909 					rdev,
2910 					r10_bio->devs[m].addr,
2911 					r10_bio->sectors, 0);
2912 			} else {
2913 				if (!rdev_set_badblocks(
2914 					    rdev,
2915 					    r10_bio->devs[m].addr,
2916 					    r10_bio->sectors, 0))
2917 					md_error(conf->mddev, rdev);
2918 			}
2919 			rdev = conf->mirrors[dev].replacement;
2920 			if (r10_bio->devs[m].repl_bio == NULL ||
2921 				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2922 				continue;
2923 
2924 			if (!r10_bio->devs[m].repl_bio->bi_status) {
2925 				rdev_clear_badblocks(
2926 					rdev,
2927 					r10_bio->devs[m].addr,
2928 					r10_bio->sectors, 0);
2929 			} else {
2930 				if (!rdev_set_badblocks(
2931 					    rdev,
2932 					    r10_bio->devs[m].addr,
2933 					    r10_bio->sectors, 0))
2934 					md_error(conf->mddev, rdev);
2935 			}
2936 		}
2937 		put_buf(r10_bio);
2938 	} else {
2939 		bool fail = false;
2940 		for (m = 0; m < conf->copies; m++) {
2941 			int dev = r10_bio->devs[m].devnum;
2942 			struct bio *bio = r10_bio->devs[m].bio;
2943 			rdev = conf->mirrors[dev].rdev;
2944 			if (bio == IO_MADE_GOOD) {
2945 				rdev_clear_badblocks(
2946 					rdev,
2947 					r10_bio->devs[m].addr,
2948 					r10_bio->sectors, 0);
2949 				rdev_dec_pending(rdev, conf->mddev);
2950 			} else if (bio != NULL && bio->bi_status) {
2951 				fail = true;
2952 				if (!narrow_write_error(r10_bio, m))
2953 					md_error(conf->mddev, rdev);
2954 				rdev_dec_pending(rdev, conf->mddev);
2955 			}
2956 			bio = r10_bio->devs[m].repl_bio;
2957 			rdev = conf->mirrors[dev].replacement;
2958 			if (rdev && bio == IO_MADE_GOOD) {
2959 				rdev_clear_badblocks(
2960 					rdev,
2961 					r10_bio->devs[m].addr,
2962 					r10_bio->sectors, 0);
2963 				rdev_dec_pending(rdev, conf->mddev);
2964 			}
2965 		}
2966 		if (fail) {
2967 			spin_lock_irq(&conf->device_lock);
2968 			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2969 			conf->nr_queued++;
2970 			spin_unlock_irq(&conf->device_lock);
2971 			/*
2972 			 * In case freeze_array() is waiting for condition
2973 			 * nr_pending == nr_queued + extra to be true.
2974 			 */
2975 			wake_up(&conf->wait_barrier);
2976 			md_wakeup_thread(conf->mddev->thread);
2977 		} else {
2978 			if (test_bit(R10BIO_WriteError,
2979 				     &r10_bio->state))
2980 				close_write(r10_bio);
2981 			raid_end_bio_io(r10_bio);
2982 		}
2983 	}
2984 }
2985 
2986 static void raid10d(struct md_thread *thread)
2987 {
2988 	struct mddev *mddev = thread->mddev;
2989 	struct r10bio *r10_bio;
2990 	unsigned long flags;
2991 	struct r10conf *conf = mddev->private;
2992 	struct list_head *head = &conf->retry_list;
2993 	struct blk_plug plug;
2994 
2995 	md_check_recovery(mddev);
2996 
2997 	if (!list_empty_careful(&conf->bio_end_io_list) &&
2998 	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2999 		LIST_HEAD(tmp);
3000 		spin_lock_irqsave(&conf->device_lock, flags);
3001 		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3002 			while (!list_empty(&conf->bio_end_io_list)) {
3003 				list_move(conf->bio_end_io_list.prev, &tmp);
3004 				conf->nr_queued--;
3005 			}
3006 		}
3007 		spin_unlock_irqrestore(&conf->device_lock, flags);
3008 		while (!list_empty(&tmp)) {
3009 			r10_bio = list_first_entry(&tmp, struct r10bio,
3010 						   retry_list);
3011 			list_del(&r10_bio->retry_list);
3012 
3013 			if (test_bit(R10BIO_WriteError,
3014 				     &r10_bio->state))
3015 				close_write(r10_bio);
3016 			raid_end_bio_io(r10_bio);
3017 		}
3018 	}
3019 
3020 	blk_start_plug(&plug);
3021 	for (;;) {
3022 
3023 		flush_pending_writes(conf);
3024 
3025 		spin_lock_irqsave(&conf->device_lock, flags);
3026 		if (list_empty(head)) {
3027 			spin_unlock_irqrestore(&conf->device_lock, flags);
3028 			break;
3029 		}
3030 		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
3031 		list_del(head->prev);
3032 		conf->nr_queued--;
3033 		spin_unlock_irqrestore(&conf->device_lock, flags);
3034 
3035 		mddev = r10_bio->mddev;
3036 		conf = mddev->private;
3037 		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3038 		    test_bit(R10BIO_WriteError, &r10_bio->state))
3039 			handle_write_completed(conf, r10_bio);
3040 		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3041 			reshape_request_write(mddev, r10_bio);
3042 		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3043 			sync_request_write(mddev, r10_bio);
3044 		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3045 			recovery_request_write(mddev, r10_bio);
3046 		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3047 			handle_read_error(mddev, r10_bio);
3048 		else
3049 			WARN_ON_ONCE(1);
3050 
3051 		cond_resched();
3052 		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
3053 			md_check_recovery(mddev);
3054 	}
3055 	blk_finish_plug(&plug);
3056 }
3057 
3058 static int init_resync(struct r10conf *conf)
3059 {
3060 	int ret, buffs, i;
3061 
3062 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
3063 	BUG_ON(mempool_initialized(&conf->r10buf_pool));
3064 	conf->have_replacement = 0;
3065 	for (i = 0; i < conf->geo.raid_disks; i++)
3066 		if (conf->mirrors[i].replacement)
3067 			conf->have_replacement = 1;
3068 	ret = mempool_init(&conf->r10buf_pool, buffs,
3069 			   r10buf_pool_alloc, r10buf_pool_free, conf);
3070 	if (ret)
3071 		return ret;
3072 	conf->next_resync = 0;
3073 	return 0;
3074 }
3075 
3076 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
3077 {
3078 	struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
3079 	struct resync_pages *rp;
3080 	struct bio *bio;
3081 	int nalloc;
3082 	int i;
3083 
3084 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3085 	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3086 		nalloc = conf->copies; /* resync */
3087 	else
3088 		nalloc = 2; /* recovery */
3089 
3090 	for (i = 0; i < nalloc; i++) {
3091 		bio = r10bio->devs[i].bio;
3092 		rp = bio->bi_private;
3093 		bio_reset(bio, NULL, 0);
3094 		bio->bi_private = rp;
3095 		bio = r10bio->devs[i].repl_bio;
3096 		if (bio) {
3097 			rp = bio->bi_private;
3098 			bio_reset(bio, NULL, 0);
3099 			bio->bi_private = rp;
3100 		}
3101 	}
3102 	return r10bio;
3103 }
3104 
3105 /*
3106  * Set cluster_sync_high since we need other nodes to add the
3107  * range [cluster_sync_low, cluster_sync_high] to the suspend list.
3108  */
3109 static void raid10_set_cluster_sync_high(struct r10conf *conf)
3110 {
3111 	sector_t window_size;
3112 	int extra_chunk, chunks;
3113 
3114 	/*
3115 	 * First, here we define a "stripe" as a unit which crosses
3116 	 * all member devices once, so we get the number of chunks by
3117 	 * using raid_disks / near_copies. Otherwise, if near_copies is
3118 	 * close to raid_disks, the resync window could increase
3119 	 * linearly with the increase of raid_disks, which means
3120 	 * we would suspend a really large IO window when it is not
3121 	 * necessary. If raid_disks is not divisible by near_copies,
3122 	 * an extra chunk is needed to ensure the whole "stripe" is
3123 	 * covered.
3124 	 */
3125 
3126 	chunks = conf->geo.raid_disks / conf->geo.near_copies;
3127 	if (conf->geo.raid_disks % conf->geo.near_copies == 0)
3128 		extra_chunk = 0;
3129 	else
3130 		extra_chunk = 1;
3131 	window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
3132 
3133 	/*
3134 	 * At least use a 32M window to align with raid1's resync window
3135 	 */
3136 	window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
3137 			CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
3138 
3139 	conf->cluster_sync_high = conf->cluster_sync_low + window_size;
3140 }
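/*
 * Window example with assumed numbers: raid_disks = 5 and near_copies = 2
 * give chunks = 2 plus one extra chunk, so window_size = 3 * chunk_sectors
 * (e.g. 3072 sectors for 512 KiB chunks).  That is below
 * CLUSTER_RESYNC_WINDOW_SECTORS (the 32M minimum mentioned above), so the
 * 32M window is used instead.
 */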
3141 
3142 /*
3143  * perform a "sync" on one "block"
3144  *
3145  * We need to make sure that normal I/O requests - particularly write
3146  * requests - do not conflict with active sync requests.
3147  *
3148  * This is achieved by tracking pending requests and a 'barrier' concept
3149  * that can be installed to exclude normal IO requests.
3150  *
3151  * Resync and recovery are handled very differently.
3152  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
3153  *
3154  * For resync, we iterate over virtual addresses, read all copies,
3155  * and update if there are differences.  If only one copy is live,
3156  * skip it.
3157  * For recovery, we iterate over physical addresses, read a good
3158  * value for each non-in_sync drive, and over-write.
3159  *
3160  * So, for recovery we may have several outstanding complex requests for a
3161  * given address, one for each out-of-sync device.  We model this by allocating
3162  * a number of r10_bio structures, one for each out-of-sync device.
3163  * As we setup these structures, we collect all bio's together into a list
3164  * which we then process collectively to add pages, and then process again
3165  * to pass to submit_bio_noacct.
3166  *
3167  * The r10_bio structures are linked using a borrowed master_bio pointer.
3168  * This link is counted in ->remaining.  When the r10_bio that points to NULL
3169  * has its remaining count decremented to 0, the whole complex operation
3170  * is complete.
3171  *
3172  */
3173 
3174 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3175 				    sector_t max_sector, int *skipped)
3176 {
3177 	struct r10conf *conf = mddev->private;
3178 	struct r10bio *r10_bio;
3179 	struct bio *biolist = NULL, *bio;
3180 	sector_t nr_sectors;
3181 	int i;
3182 	int max_sync;
3183 	sector_t sync_blocks;
3184 	sector_t sectors_skipped = 0;
3185 	int chunks_skipped = 0;
3186 	sector_t chunk_mask = conf->geo.chunk_mask;
3187 	int page_idx = 0;
3188 	int error_disk = -1;
3189 
3190 	/*
3191 	 * Allow skipping a full rebuild for incremental assembly
3192 	 * of a clean array, like RAID1 does.
3193 	 */
3194 	if (mddev->bitmap == NULL &&
3195 	    mddev->recovery_cp == MaxSector &&
3196 	    mddev->reshape_position == MaxSector &&
3197 	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
3198 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3199 	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
3200 	    conf->fullsync == 0) {
3201 		*skipped = 1;
3202 		return mddev->dev_sectors - sector_nr;
3203 	}
3204 
3205 	if (!mempool_initialized(&conf->r10buf_pool))
3206 		if (init_resync(conf))
3207 			return 0;
3208 
3209  skipped:
3210 	if (sector_nr >= max_sector) {
3211 		conf->cluster_sync_low = 0;
3212 		conf->cluster_sync_high = 0;
3213 
3214 		/* If we aborted, we need to abort the
3215 		 * sync on the 'current' bitmap chunks (there can
3216 		 * be several when recovering multiple devices),
3217 		 * as we may have started syncing them but not finished.
3218 		 * We can find the current address in
3219 		 * mddev->curr_resync, but for recovery,
3220 		 * we need to convert that to several
3221 		 * virtual addresses.
3222 		 */
3223 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3224 			end_reshape(conf);
3225 			close_sync(conf);
3226 			return 0;
3227 		}
3228 
3229 		if (mddev->curr_resync < max_sector) { /* aborted */
3230 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3231 				mddev->bitmap_ops->end_sync(mddev,
3232 							    mddev->curr_resync,
3233 							    &sync_blocks);
3234 			else for (i = 0; i < conf->geo.raid_disks; i++) {
3235 				sector_t sect =
3236 					raid10_find_virt(conf, mddev->curr_resync, i);
3237 
3238 				mddev->bitmap_ops->end_sync(mddev, sect,
3239 							    &sync_blocks);
3240 			}
3241 		} else {
3242 			/* completed sync */
3243 			if ((!mddev->bitmap || conf->fullsync)
3244 			    && conf->have_replacement
3245 			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3246 				/* Completed a full sync so the replacements
3247 				 * are now fully recovered.
3248 				 */
3249 				for (i = 0; i < conf->geo.raid_disks; i++) {
3250 					struct md_rdev *rdev =
3251 						conf->mirrors[i].replacement;
3252 
3253 					if (rdev)
3254 						rdev->recovery_offset = MaxSector;
3255 				}
3256 			}
3257 			conf->fullsync = 0;
3258 		}
3259 		mddev->bitmap_ops->close_sync(mddev);
3260 		close_sync(conf);
3261 		*skipped = 1;
3262 		return sectors_skipped;
3263 	}
3264 
3265 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3266 		return reshape_request(mddev, sector_nr, skipped);
3267 
3268 	if (chunks_skipped >= conf->geo.raid_disks) {
3269 		pr_err("md/raid10:%s: %s fails\n", mdname(mddev),
3270 			test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?  "resync" : "recovery");
3271 		if (error_disk >= 0 &&
3272 		    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3273 			/*
3274 			 * recovery failed: set mirrors.recovery_disabled so
3275 			 * the device isn't added back there.
3276 			 */
3277 			conf->mirrors[error_disk].recovery_disabled =
3278 						mddev->recovery_disabled;
3279 			return 0;
3280 		}
3281 		/*
3282 		 * if there has been nothing to do on any drive,
3283 		 * then there is nothing to do at all.
3284 		 */
3285 		*skipped = 1;
3286 		return (max_sector - sector_nr) + sectors_skipped;
3287 	}
3288 
3289 	if (max_sector > mddev->resync_max)
3290 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
3291 
3292 	/* make sure whole request will fit in a chunk - if chunks
3293 	 * are meaningful
3294 	 */
3295 	if (conf->geo.near_copies < conf->geo.raid_disks &&
3296 	    max_sector > (sector_nr | chunk_mask))
3297 		max_sector = (sector_nr | chunk_mask) + 1;
3298 
3299 	/*
3300 	 * If there is non-resync activity waiting for a turn, then let it
3301 	 * through before starting on this new sync request.
3302 	 */
3303 	if (conf->nr_waiting)
3304 		schedule_timeout_uninterruptible(1);
3305 
3306 	/* Again, very different code for resync and recovery.
3307 	 * Both must result in an r10bio with a list of bios that
3308 	 * have bi_end_io, bi_sector, bi_bdev set,
3309 	 * and bi_private set to the r10bio.
3310 	 * For recovery, we may actually create several r10bios
3311 	 * with 2 bios in each, that correspond to the bios in the main one.
3312 	 * In this case, the subordinate r10bios link back through a
3313 	 * borrowed master_bio pointer, and the counter in the master
3314 	 * includes a ref from each subordinate.
3315 	 */
3316 	/* First, we decide what to do and set ->bi_end_io
3317 	 * to end_sync_read if we want to read, and
3318 	 * end_sync_write if we will want to write.
3319 	 */
3320 
3321 	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3322 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3323 		/* recovery... the complicated one */
3324 		int j;
3325 		r10_bio = NULL;
3326 
3327 		for (i = 0 ; i < conf->geo.raid_disks; i++) {
3328 			bool still_degraded;
3329 			struct r10bio *rb2;
3330 			sector_t sect;
3331 			bool must_sync;
3332 			int any_working;
3333 			struct raid10_info *mirror = &conf->mirrors[i];
3334 			struct md_rdev *mrdev, *mreplace;
3335 
3336 			mrdev = mirror->rdev;
3337 			mreplace = mirror->replacement;
3338 
3339 			if (mrdev && (test_bit(Faulty, &mrdev->flags) ||
3340 			    test_bit(In_sync, &mrdev->flags)))
3341 				mrdev = NULL;
3342 			if (mreplace && test_bit(Faulty, &mreplace->flags))
3343 				mreplace = NULL;
3344 
3345 			if (!mrdev && !mreplace)
3346 				continue;
3347 
3348 			still_degraded = false;
3349 			/* want to reconstruct this device */
3350 			rb2 = r10_bio;
3351 			sect = raid10_find_virt(conf, sector_nr, i);
3352 			if (sect >= mddev->resync_max_sectors)
3353 				/* last stripe is not complete - don't
3354 				 * try to recover this sector.
3355 				 */
3356 				continue;
3357 			/* Unless we are doing a full sync or a replacement,
3358 			 * we only need to recover the block if it is set in
3359 			 * the bitmap
3360 			 */
3361 			must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
3362 								  &sync_blocks,
3363 								  true);
3364 			if (sync_blocks < max_sync)
3365 				max_sync = sync_blocks;
3366 			if (!must_sync &&
3367 			    mreplace == NULL &&
3368 			    !conf->fullsync) {
3369 				/* yep, skip the sync_blocks here, but don't assume
3370 				 * that there will never be anything to do here
3371 				 */
3372 				chunks_skipped = -1;
3373 				continue;
3374 			}
3375 			if (mrdev)
3376 				atomic_inc(&mrdev->nr_pending);
3377 			if (mreplace)
3378 				atomic_inc(&mreplace->nr_pending);
3379 
3380 			r10_bio = raid10_alloc_init_r10buf(conf);
3381 			r10_bio->state = 0;
3382 			raise_barrier(conf, rb2 != NULL);
3383 			atomic_set(&r10_bio->remaining, 0);
3384 
3385 			r10_bio->master_bio = (struct bio*)rb2;
3386 			if (rb2)
3387 				atomic_inc(&rb2->remaining);
3388 			r10_bio->mddev = mddev;
3389 			set_bit(R10BIO_IsRecover, &r10_bio->state);
3390 			r10_bio->sector = sect;
3391 
3392 			raid10_find_phys(conf, r10_bio);
3393 
3394 			/* Need to check if the array will still be
3395 			 * degraded
3396 			 */
3397 			for (j = 0; j < conf->geo.raid_disks; j++) {
3398 				struct md_rdev *rdev = conf->mirrors[j].rdev;
3399 
3400 				if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3401 					still_degraded = true;
3402 					break;
3403 				}
3404 			}
3405 
3406 			must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
3407 						&sync_blocks, still_degraded);
3408 
3409 			any_working = 0;
3410 			for (j=0; j<conf->copies;j++) {
3411 				int k;
3412 				int d = r10_bio->devs[j].devnum;
3413 				sector_t from_addr, to_addr;
3414 				struct md_rdev *rdev = conf->mirrors[d].rdev;
3415 				sector_t sector, first_bad;
3416 				int bad_sectors;
3417 				if (!rdev ||
3418 				    !test_bit(In_sync, &rdev->flags))
3419 					continue;
3420 				/* This is where we read from */
3421 				any_working = 1;
3422 				sector = r10_bio->devs[j].addr;
3423 
3424 				if (is_badblock(rdev, sector, max_sync,
3425 						&first_bad, &bad_sectors)) {
3426 					if (first_bad > sector)
3427 						max_sync = first_bad - sector;
3428 					else {
3429 						bad_sectors -= (sector
3430 								- first_bad);
3431 						if (max_sync > bad_sectors)
3432 							max_sync = bad_sectors;
3433 						continue;
3434 					}
3435 				}
3436 				bio = r10_bio->devs[0].bio;
3437 				bio->bi_next = biolist;
3438 				biolist = bio;
3439 				bio->bi_end_io = end_sync_read;
3440 				bio->bi_opf = REQ_OP_READ;
3441 				if (test_bit(FailFast, &rdev->flags))
3442 					bio->bi_opf |= MD_FAILFAST;
3443 				from_addr = r10_bio->devs[j].addr;
3444 				bio->bi_iter.bi_sector = from_addr +
3445 					rdev->data_offset;
3446 				bio_set_dev(bio, rdev->bdev);
3447 				atomic_inc(&rdev->nr_pending);
3448 				/* and we write to 'i' (if not in_sync) */
3449 
3450 				for (k=0; k<conf->copies; k++)
3451 					if (r10_bio->devs[k].devnum == i)
3452 						break;
3453 				BUG_ON(k == conf->copies);
3454 				to_addr = r10_bio->devs[k].addr;
3455 				r10_bio->devs[0].devnum = d;
3456 				r10_bio->devs[0].addr = from_addr;
3457 				r10_bio->devs[1].devnum = i;
3458 				r10_bio->devs[1].addr = to_addr;
3459 
3460 				if (mrdev) {
3461 					bio = r10_bio->devs[1].bio;
3462 					bio->bi_next = biolist;
3463 					biolist = bio;
3464 					bio->bi_end_io = end_sync_write;
3465 					bio->bi_opf = REQ_OP_WRITE;
3466 					bio->bi_iter.bi_sector = to_addr
3467 						+ mrdev->data_offset;
3468 					bio_set_dev(bio, mrdev->bdev);
3469 					atomic_inc(&r10_bio->remaining);
3470 				} else
3471 					r10_bio->devs[1].bio->bi_end_io = NULL;
3472 
3473 				/* and maybe write to replacement */
3474 				bio = r10_bio->devs[1].repl_bio;
3475 				if (bio)
3476 					bio->bi_end_io = NULL;
3477 				/* Note: if mreplace is not NULL, then bio
3478 				 * cannot be NULL as r10buf_pool_alloc will
3479 				 * have allocated it.
3480 				 */
3481 				if (!mreplace)
3482 					break;
3483 				bio->bi_next = biolist;
3484 				biolist = bio;
3485 				bio->bi_end_io = end_sync_write;
3486 				bio->bi_opf = REQ_OP_WRITE;
3487 				bio->bi_iter.bi_sector = to_addr +
3488 					mreplace->data_offset;
3489 				bio_set_dev(bio, mreplace->bdev);
3490 				atomic_inc(&r10_bio->remaining);
3491 				break;
3492 			}
3493 			if (j == conf->copies) {
3494 				/* Cannot recover, so abort the recovery or
3495 				 * record a bad block */
3496 				if (any_working) {
3497 					/* problem is that there are bad blocks
3498 					 * on other device(s)
3499 					 */
3500 					int k;
3501 					for (k = 0; k < conf->copies; k++)
3502 						if (r10_bio->devs[k].devnum == i)
3503 							break;
3504 					if (mrdev && !test_bit(In_sync,
3505 						      &mrdev->flags)
3506 					    && !rdev_set_badblocks(
3507 						    mrdev,
3508 						    r10_bio->devs[k].addr,
3509 						    max_sync, 0))
3510 						any_working = 0;
3511 					if (mreplace &&
3512 					    !rdev_set_badblocks(
3513 						    mreplace,
3514 						    r10_bio->devs[k].addr,
3515 						    max_sync, 0))
3516 						any_working = 0;
3517 				}
3518 				if (!any_working)  {
3519 					if (!test_and_set_bit(MD_RECOVERY_INTR,
3520 							      &mddev->recovery))
3521 						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3522 						       mdname(mddev));
3523 					mirror->recovery_disabled
3524 						= mddev->recovery_disabled;
3525 				} else {
3526 					error_disk = i;
3527 				}
3528 				put_buf(r10_bio);
3529 				if (rb2)
3530 					atomic_dec(&rb2->remaining);
3531 				r10_bio = rb2;
3532 				if (mrdev)
3533 					rdev_dec_pending(mrdev, mddev);
3534 				if (mreplace)
3535 					rdev_dec_pending(mreplace, mddev);
3536 				break;
3537 			}
3538 			if (mrdev)
3539 				rdev_dec_pending(mrdev, mddev);
3540 			if (mreplace)
3541 				rdev_dec_pending(mreplace, mddev);
3542 			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3543 				/* Only want this if there is elsewhere to
3544 				 * read from. 'j' is currently the first
3545 				 * readable copy.
3546 				 */
3547 				int targets = 1;
3548 				for (; j < conf->copies; j++) {
3549 					int d = r10_bio->devs[j].devnum;
3550 					if (conf->mirrors[d].rdev &&
3551 					    test_bit(In_sync,
3552 						      &conf->mirrors[d].rdev->flags))
3553 						targets++;
3554 				}
3555 				if (targets == 1)
3556 					r10_bio->devs[0].bio->bi_opf
3557 						&= ~MD_FAILFAST;
3558 			}
3559 		}
3560 		if (biolist == NULL) {
3561 			while (r10_bio) {
3562 				struct r10bio *rb2 = r10_bio;
3563 				r10_bio = (struct r10bio*) rb2->master_bio;
3564 				rb2->master_bio = NULL;
3565 				put_buf(rb2);
3566 			}
3567 			goto giveup;
3568 		}
3569 	} else {
3570 		/* resync. Schedule a read for every block at this virt offset */
3571 		int count = 0;
3572 
3573 		/*
3574 		 * curr_resync_completed may not have been updated in time,
3575 		 * and cluster_sync_low will be set based on it.  Check
3576 		 * against "sector_nr + 2 * RESYNC_SECTORS" for safety,
3577 		 * which ensures curr_resync_completed has been updated in
3578 		 * bitmap_cond_end_sync.
3579 		 */
3580 		mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
3581 					mddev_is_clustered(mddev) &&
3582 					(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3583 
3584 		if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
3585 						   &sync_blocks,
3586 						   mddev->degraded) &&
3587 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3588 						 &mddev->recovery)) {
3589 			/* We can skip this block */
3590 			*skipped = 1;
3591 			return sync_blocks + sectors_skipped;
3592 		}
3593 		if (sync_blocks < max_sync)
3594 			max_sync = sync_blocks;
3595 		r10_bio = raid10_alloc_init_r10buf(conf);
3596 		r10_bio->state = 0;
3597 
3598 		r10_bio->mddev = mddev;
3599 		atomic_set(&r10_bio->remaining, 0);
3600 		raise_barrier(conf, 0);
3601 		conf->next_resync = sector_nr;
3602 
3603 		r10_bio->master_bio = NULL;
3604 		r10_bio->sector = sector_nr;
3605 		set_bit(R10BIO_IsSync, &r10_bio->state);
3606 		raid10_find_phys(conf, r10_bio);
3607 		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3608 
3609 		for (i = 0; i < conf->copies; i++) {
3610 			int d = r10_bio->devs[i].devnum;
3611 			sector_t first_bad, sector;
3612 			int bad_sectors;
3613 			struct md_rdev *rdev;
3614 
3615 			if (r10_bio->devs[i].repl_bio)
3616 				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3617 
3618 			bio = r10_bio->devs[i].bio;
3619 			bio->bi_status = BLK_STS_IOERR;
3620 			rdev = conf->mirrors[d].rdev;
3621 			if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3622 				continue;
3623 
3624 			sector = r10_bio->devs[i].addr;
3625 			if (is_badblock(rdev, sector, max_sync,
3626 					&first_bad, &bad_sectors)) {
3627 				if (first_bad > sector)
3628 					max_sync = first_bad - sector;
3629 				else {
3630 					bad_sectors -= (sector - first_bad);
3631 					if (max_sync > bad_sectors)
3632 						max_sync = bad_sectors;
3633 					continue;
3634 				}
3635 			}
3636 			atomic_inc(&rdev->nr_pending);
3637 			atomic_inc(&r10_bio->remaining);
3638 			bio->bi_next = biolist;
3639 			biolist = bio;
3640 			bio->bi_end_io = end_sync_read;
3641 			bio->bi_opf = REQ_OP_READ;
3642 			if (test_bit(FailFast, &rdev->flags))
3643 				bio->bi_opf |= MD_FAILFAST;
3644 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3645 			bio_set_dev(bio, rdev->bdev);
3646 			count++;
3647 
3648 			rdev = conf->mirrors[d].replacement;
3649 			if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3650 				continue;
3651 
3652 			atomic_inc(&rdev->nr_pending);
3653 
3654 			/* Need to set up for writing to the replacement */
3655 			bio = r10_bio->devs[i].repl_bio;
3656 			bio->bi_status = BLK_STS_IOERR;
3657 
3658 			sector = r10_bio->devs[i].addr;
3659 			bio->bi_next = biolist;
3660 			biolist = bio;
3661 			bio->bi_end_io = end_sync_write;
3662 			bio->bi_opf = REQ_OP_WRITE;
3663 			if (test_bit(FailFast, &rdev->flags))
3664 				bio->bi_opf |= MD_FAILFAST;
3665 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3666 			bio_set_dev(bio, rdev->bdev);
3667 			count++;
3668 		}
3669 
3670 		if (count < 2) {
3671 			for (i=0; i<conf->copies; i++) {
3672 				int d = r10_bio->devs[i].devnum;
3673 				if (r10_bio->devs[i].bio->bi_end_io)
3674 					rdev_dec_pending(conf->mirrors[d].rdev,
3675 							 mddev);
3676 				if (r10_bio->devs[i].repl_bio &&
3677 				    r10_bio->devs[i].repl_bio->bi_end_io)
3678 					rdev_dec_pending(
3679 						conf->mirrors[d].replacement,
3680 						mddev);
3681 			}
3682 			put_buf(r10_bio);
3683 			biolist = NULL;
3684 			goto giveup;
3685 		}
3686 	}
3687 
3688 	nr_sectors = 0;
3689 	if (sector_nr + max_sync < max_sector)
3690 		max_sector = sector_nr + max_sync;
3691 	do {
3692 		struct page *page;
3693 		int len = PAGE_SIZE;
3694 		if (sector_nr + (len>>9) > max_sector)
3695 			len = (max_sector - sector_nr) << 9;
3696 		if (len == 0)
3697 			break;
3698 		for (bio= biolist ; bio ; bio=bio->bi_next) {
3699 			struct resync_pages *rp = get_resync_pages(bio);
3700 			page = resync_fetch_page(rp, page_idx);
3701 			if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
3702 				bio->bi_status = BLK_STS_RESOURCE;
3703 				bio_endio(bio);
3704 				goto giveup;
3705 			}
3706 		}
3707 		nr_sectors += len>>9;
3708 		sector_nr += len>>9;
3709 	} while (++page_idx < RESYNC_PAGES);
3710 	r10_bio->sectors = nr_sectors;
3711 
3712 	if (mddev_is_clustered(mddev) &&
3713 	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3714 		/* It is resync not recovery */
3715 		if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3716 			conf->cluster_sync_low = mddev->curr_resync_completed;
3717 			raid10_set_cluster_sync_high(conf);
3718 			/* Send resync message */
3719 			md_cluster_ops->resync_info_update(mddev,
3720 						conf->cluster_sync_low,
3721 						conf->cluster_sync_high);
3722 		}
3723 	} else if (mddev_is_clustered(mddev)) {
3724 		/* This is recovery not resync */
3725 		sector_t sect_va1, sect_va2;
3726 		bool broadcast_msg = false;
3727 
3728 		for (i = 0; i < conf->geo.raid_disks; i++) {
3729 			/*
3730 			 * sector_nr is a device address for recovery, so we
3731 			 * need translate it to array address before compare
3732 			 * with cluster_sync_high.
3733 			 */
3734 			sect_va1 = raid10_find_virt(conf, sector_nr, i);
3735 
3736 			if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3737 				broadcast_msg = true;
3738 				/*
3739 				 * curr_resync_completed is similar as
3740 				 * sector_nr, so make the translation too.
3741 				 */
3742 				sect_va2 = raid10_find_virt(conf,
3743 					mddev->curr_resync_completed, i);
3744 
3745 				if (conf->cluster_sync_low == 0 ||
3746 				    conf->cluster_sync_low > sect_va2)
3747 					conf->cluster_sync_low = sect_va2;
3748 			}
3749 		}
3750 		if (broadcast_msg) {
3751 			raid10_set_cluster_sync_high(conf);
3752 			md_cluster_ops->resync_info_update(mddev,
3753 						conf->cluster_sync_low,
3754 						conf->cluster_sync_high);
3755 		}
3756 	}
3757 
3758 	while (biolist) {
3759 		bio = biolist;
3760 		biolist = biolist->bi_next;
3761 
3762 		bio->bi_next = NULL;
3763 		r10_bio = get_resync_r10bio(bio);
3764 		r10_bio->sectors = nr_sectors;
3765 
3766 		if (bio->bi_end_io == end_sync_read) {
3767 			md_sync_acct_bio(bio, nr_sectors);
3768 			bio->bi_status = 0;
3769 			submit_bio_noacct(bio);
3770 		}
3771 	}
3772 
3773 	if (sectors_skipped)
3774 		/* pretend they weren't skipped, it makes
3775 		 * no important difference in this case
3776 		 */
3777 		md_done_sync(mddev, sectors_skipped, 1);
3778 
3779 	return sectors_skipped + nr_sectors;
3780  giveup:
3781 	/* There is nowhere to write: all non-sync drives are
3782 	 * failed or in resync, or all drives have a bad block,
3783 	 * so try the next chunk...
3784 	 */
3785 	if (sector_nr + max_sync < max_sector)
3786 		max_sector = sector_nr + max_sync;
3787 
3788 	sectors_skipped += (max_sector - sector_nr);
3789 	chunks_skipped ++;
3790 	sector_nr = max_sector;
3791 	goto skipped;
3792 }
3793 
3794 static sector_t
3795 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3796 {
3797 	sector_t size;
3798 	struct r10conf *conf = mddev->private;
3799 
3800 	if (!raid_disks)
3801 		raid_disks = min(conf->geo.raid_disks,
3802 				 conf->prev.raid_disks);
3803 	if (!sectors)
3804 		sectors = conf->dev_sectors;
3805 
3806 	size = sectors >> conf->geo.chunk_shift;
3807 	sector_div(size, conf->geo.far_copies);
3808 	size = size * raid_disks;
3809 	sector_div(size, conf->geo.near_copies);
3810 
3811 	return size << conf->geo.chunk_shift;
3812 }
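/*
 * Worked example for raid10_size() (illustrative geometry): with 4 disks,
 * near_copies = 2, far_copies = 1, 512-sector chunks (chunk_shift = 9) and
 * 1048576 sectors per device: 1048576 >> 9 = 2048 chunks per device,
 * /1 (far) = 2048, *4 disks = 8192, /2 (near) = 4096 chunks, << 9 =
 * 2097152 sectors exported - half of the raw capacity, as expected when
 * every chunk is stored twice.
 */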
3813 
3814 static void calc_sectors(struct r10conf *conf, sector_t size)
3815 {
3816 	/* Calculate the number of sectors-per-device that will
3817 	 * actually be used, and set conf->dev_sectors and
3818 	 * conf->stride
3819 	 */
3820 
3821 	size = size >> conf->geo.chunk_shift;
3822 	sector_div(size, conf->geo.far_copies);
3823 	size = size * conf->geo.raid_disks;
3824 	sector_div(size, conf->geo.near_copies);
3825 	/* 'size' is now the number of chunks in the array */
3826 	/* calculate "used chunks per device" */
3827 	size = size * conf->copies;
3828 
3829 	/* We need to round up when dividing by raid_disks to
3830 	 * get the stride size.
3831 	 */
3832 	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3833 
3834 	conf->dev_sectors = size << conf->geo.chunk_shift;
3835 
3836 	if (conf->geo.far_offset)
3837 		conf->geo.stride = 1 << conf->geo.chunk_shift;
3838 	else {
3839 		sector_div(size, conf->geo.far_copies);
3840 		conf->geo.stride = size << conf->geo.chunk_shift;
3841 	}
3842 }
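/*
 * Continuing the illustrative geometry above (4 disks, near_copies = 2,
 * far_copies = 1, 512-sector chunks, 1048576 sectors per device):
 * 2048 chunks per device -> 4096 data chunks in the array -> * copies (2)
 * = 8192 chunk copies -> DIV_ROUND_UP by 4 disks = 2048 used chunks per
 * device, so dev_sectors is 1048576 and, with far_copies = 1 and no
 * far_offset, stride is 1048576 as well.
 */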
3843 
3844 enum geo_type {geo_new, geo_old, geo_start};
3845 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3846 {
3847 	int nc, fc, fo;
3848 	int layout, chunk, disks;
3849 	switch (new) {
3850 	case geo_old:
3851 		layout = mddev->layout;
3852 		chunk = mddev->chunk_sectors;
3853 		disks = mddev->raid_disks - mddev->delta_disks;
3854 		break;
3855 	case geo_new:
3856 		layout = mddev->new_layout;
3857 		chunk = mddev->new_chunk_sectors;
3858 		disks = mddev->raid_disks;
3859 		break;
3860 	default: /* avoid 'may be unused' warnings */
3861 	case geo_start: /* new when starting reshape - raid_disks not
3862 			 * updated yet. */
3863 		layout = mddev->new_layout;
3864 		chunk = mddev->new_chunk_sectors;
3865 		disks = mddev->raid_disks + mddev->delta_disks;
3866 		break;
3867 	}
3868 	if (layout >> 19)
3869 		return -1;
3870 	if (chunk < (PAGE_SIZE >> 9) ||
3871 	    !is_power_of_2(chunk))
3872 		return -2;
3873 	nc = layout & 255;
3874 	fc = (layout >> 8) & 255;
3875 	fo = layout & (1<<16);
3876 	geo->raid_disks = disks;
3877 	geo->near_copies = nc;
3878 	geo->far_copies = fc;
3879 	geo->far_offset = fo;
3880 	switch (layout >> 17) {
3881 	case 0:	/* original layout.  simple but not always optimal */
3882 		geo->far_set_size = disks;
3883 		break;
3884 	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
3885 		 * actually using this, but leave code here just in case.*/
3886 		geo->far_set_size = disks/fc;
3887 		WARN(geo->far_set_size < fc,
3888 		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
3889 		break;
3890 	case 2: /* "improved" layout fixed to match documentation */
3891 		geo->far_set_size = fc * nc;
3892 		break;
3893 	default: /* Not a valid layout */
3894 		return -1;
3895 	}
3896 	geo->chunk_mask = chunk - 1;
3897 	geo->chunk_shift = ffz(~chunk);
3898 	return nc*fc;
3899 }
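/*
 * Layout decoding example (illustrative): the common "near=2" layout word
 * is 0x102, so near_copies = 0x102 & 255 = 2, far_copies = (0x102 >> 8) &
 * 255 = 1, far_offset = 0, and (layout >> 17) == 0 selects the original
 * far_set_size = disks case; setup_geo() then returns nc * fc = 2 copies.
 */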
3900 
3901 static void raid10_free_conf(struct r10conf *conf)
3902 {
3903 	if (!conf)
3904 		return;
3905 
3906 	mempool_exit(&conf->r10bio_pool);
3907 	kfree(conf->mirrors);
3908 	kfree(conf->mirrors_old);
3909 	kfree(conf->mirrors_new);
3910 	safe_put_page(conf->tmppage);
3911 	bioset_exit(&conf->bio_split);
3912 	kfree(conf);
3913 }
3914 
3915 static struct r10conf *setup_conf(struct mddev *mddev)
3916 {
3917 	struct r10conf *conf = NULL;
3918 	int err = -EINVAL;
3919 	struct geom geo;
3920 	int copies;
3921 
3922 	copies = setup_geo(&geo, mddev, geo_new);
3923 
3924 	if (copies == -2) {
3925 		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3926 			mdname(mddev), PAGE_SIZE);
3927 		goto out;
3928 	}
3929 
3930 	if (copies < 2 || copies > mddev->raid_disks) {
3931 		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3932 			mdname(mddev), mddev->new_layout);
3933 		goto out;
3934 	}
3935 
3936 	err = -ENOMEM;
3937 	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3938 	if (!conf)
3939 		goto out;
3940 
3941 	/* FIXME calc properly */
3942 	conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
3943 				sizeof(struct raid10_info),
3944 				GFP_KERNEL);
3945 	if (!conf->mirrors)
3946 		goto out;
3947 
3948 	conf->tmppage = alloc_page(GFP_KERNEL);
3949 	if (!conf->tmppage)
3950 		goto out;
3951 
3952 	conf->geo = geo;
3953 	conf->copies = copies;
3954 	err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
3955 			   rbio_pool_free, conf);
3956 	if (err)
3957 		goto out;
3958 
3959 	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3960 	if (err)
3961 		goto out;
3962 
3963 	calc_sectors(conf, mddev->dev_sectors);
3964 	if (mddev->reshape_position == MaxSector) {
3965 		conf->prev = conf->geo;
3966 		conf->reshape_progress = MaxSector;
3967 	} else {
3968 		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3969 			err = -EINVAL;
3970 			goto out;
3971 		}
3972 		conf->reshape_progress = mddev->reshape_position;
3973 		if (conf->prev.far_offset)
3974 			conf->prev.stride = 1 << conf->prev.chunk_shift;
3975 		else
3976 			/* far_copies must be 1 */
3977 			conf->prev.stride = conf->dev_sectors;
3978 	}
3979 	conf->reshape_safe = conf->reshape_progress;
3980 	spin_lock_init(&conf->device_lock);
3981 	INIT_LIST_HEAD(&conf->retry_list);
3982 	INIT_LIST_HEAD(&conf->bio_end_io_list);
3983 
3984 	seqlock_init(&conf->resync_lock);
3985 	init_waitqueue_head(&conf->wait_barrier);
3986 	atomic_set(&conf->nr_pending, 0);
3987 
3988 	err = -ENOMEM;
3989 	rcu_assign_pointer(conf->thread,
3990 			   md_register_thread(raid10d, mddev, "raid10"));
3991 	if (!conf->thread)
3992 		goto out;
3993 
3994 	conf->mddev = mddev;
3995 	return conf;
3996 
3997  out:
3998 	raid10_free_conf(conf);
3999 	return ERR_PTR(err);
4000 }
4001 
4002 static unsigned int raid10_nr_stripes(struct r10conf *conf)
4003 {
4004 	unsigned int raid_disks = conf->geo.raid_disks;
4005 
4006 	if (conf->geo.raid_disks % conf->geo.near_copies)
4007 		return raid_disks;
4008 	return raid_disks / conf->geo.near_copies;
4009 }
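/*
 * Example: with 4 disks and near_copies = 2, this returns 4 / 2 = 2
 * stripes, which raid10_set_queue_limits() below uses to size io_opt at
 * two chunks.
 */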
4010 
4011 static int raid10_set_queue_limits(struct mddev *mddev)
4012 {
4013 	struct r10conf *conf = mddev->private;
4014 	struct queue_limits lim;
4015 	int err;
4016 
4017 	md_init_stacking_limits(&lim);
4018 	lim.max_write_zeroes_sectors = 0;
4019 	lim.io_min = mddev->chunk_sectors << 9;
4020 	lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
4021 	lim.features |= BLK_FEAT_ATOMIC_WRITES;
4022 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
4023 	if (err)
4024 		return err;
4025 	return queue_limits_set(mddev->gendisk->queue, &lim);
4026 }
4027 
4028 static int raid10_run(struct mddev *mddev)
4029 {
4030 	struct r10conf *conf;
4031 	int i, disk_idx;
4032 	struct raid10_info *disk;
4033 	struct md_rdev *rdev;
4034 	sector_t size;
4035 	sector_t min_offset_diff = 0;
4036 	int first = 1;
4037 	int ret = -EIO;
4038 
4039 	if (mddev->private == NULL) {
4040 		conf = setup_conf(mddev);
4041 		if (IS_ERR(conf))
4042 			return PTR_ERR(conf);
4043 		mddev->private = conf;
4044 	}
4045 	conf = mddev->private;
4046 	if (!conf)
4047 		goto out;
4048 
4049 	rcu_assign_pointer(mddev->thread, conf->thread);
4050 	rcu_assign_pointer(conf->thread, NULL);
4051 
4052 	if (mddev_is_clustered(conf->mddev)) {
4053 		int fc, fo;
4054 
4055 		fc = (mddev->layout >> 8) & 255;
4056 		fo = mddev->layout & (1<<16);
4057 		if (fc > 1 || fo > 0) {
4058 			pr_err("only near layout is supported by clustered"
4059 				" raid10\n");
4060 			goto out_free_conf;
4061 		}
4062 	}
4063 
4064 	rdev_for_each(rdev, mddev) {
4065 		long long diff;
4066 
4067 		disk_idx = rdev->raid_disk;
4068 		if (disk_idx < 0)
4069 			continue;
4070 		if (disk_idx >= conf->geo.raid_disks &&
4071 		    disk_idx >= conf->prev.raid_disks)
4072 			continue;
4073 		disk = conf->mirrors + disk_idx;
4074 
4075 		if (test_bit(Replacement, &rdev->flags)) {
4076 			if (disk->replacement)
4077 				goto out_free_conf;
4078 			disk->replacement = rdev;
4079 		} else {
4080 			if (disk->rdev)
4081 				goto out_free_conf;
4082 			disk->rdev = rdev;
4083 		}
4084 		diff = (rdev->new_data_offset - rdev->data_offset);
4085 		if (!mddev->reshape_backwards)
4086 			diff = -diff;
4087 		if (diff < 0)
4088 			diff = 0;
4089 		if (first || diff < min_offset_diff)
4090 			min_offset_diff = diff;
4091 
4092 		disk->head_position = 0;
4093 		first = 0;
4094 	}
4095 
4096 	if (!mddev_is_dm(conf->mddev)) {
4097 		int err = raid10_set_queue_limits(mddev);
4098 
4099 		if (err) {
4100 			ret = err;
4101 			goto out_free_conf;
4102 		}
4103 	}
4104 
4105 	/* need to check that every block has at least one working mirror */
4106 	if (!enough(conf, -1)) {
4107 		pr_err("md/raid10:%s: not enough operational mirrors.\n",
4108 		       mdname(mddev));
4109 		goto out_free_conf;
4110 	}
4111 
4112 	if (conf->reshape_progress != MaxSector) {
4113 		/* must ensure that shape change is supported */
4114 		if (conf->geo.far_copies != 1 &&
4115 		    conf->geo.far_offset == 0)
4116 			goto out_free_conf;
4117 		if (conf->prev.far_copies != 1 &&
4118 		    conf->prev.far_offset == 0)
4119 			goto out_free_conf;
4120 	}
4121 
4122 	mddev->degraded = 0;
4123 	for (i = 0;
4124 	     i < conf->geo.raid_disks
4125 		     || i < conf->prev.raid_disks;
4126 	     i++) {
4127 
4128 		disk = conf->mirrors + i;
4129 
4130 		if (!disk->rdev && disk->replacement) {
4131 			/* The replacement is all we have - use it */
4132 			disk->rdev = disk->replacement;
4133 			disk->replacement = NULL;
4134 			clear_bit(Replacement, &disk->rdev->flags);
4135 		}
4136 
4137 		if (!disk->rdev ||
4138 		    !test_bit(In_sync, &disk->rdev->flags)) {
4139 			disk->head_position = 0;
4140 			mddev->degraded++;
4141 			if (disk->rdev &&
4142 			    disk->rdev->saved_raid_disk < 0)
4143 				conf->fullsync = 1;
4144 		}
4145 
4146 		if (disk->replacement &&
4147 		    !test_bit(In_sync, &disk->replacement->flags) &&
4148 		    disk->replacement->saved_raid_disk < 0) {
4149 			conf->fullsync = 1;
4150 		}
4151 
4152 		disk->recovery_disabled = mddev->recovery_disabled - 1;
4153 	}
4154 
4155 	if (mddev->recovery_cp != MaxSector)
4156 		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
4157 			  mdname(mddev));
4158 	pr_info("md/raid10:%s: active with %d out of %d devices\n",
4159 		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
4160 		conf->geo.raid_disks);
4161 	/*
4162 	 * Ok, everything is just fine now
4163 	 */
4164 	mddev->dev_sectors = conf->dev_sectors;
4165 	size = raid10_size(mddev, 0, 0);
4166 	md_set_array_sectors(mddev, size);
4167 	mddev->resync_max_sectors = size;
4168 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
4169 
4170 	if (md_integrity_register(mddev))
4171 		goto out_free_conf;
4172 
4173 	if (conf->reshape_progress != MaxSector) {
4174 		unsigned long before_length, after_length;
4175 
4176 		before_length = ((1 << conf->prev.chunk_shift) *
4177 				 conf->prev.far_copies);
4178 		after_length = ((1 << conf->geo.chunk_shift) *
4179 				conf->geo.far_copies);
4180 
4181 		if (max(before_length, after_length) > min_offset_diff) {
4182 			/* This cannot work */
4183 			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
4184 			goto out_free_conf;
4185 		}
4186 		conf->offset_diff = min_offset_diff;
4187 
4188 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4189 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4190 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4191 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4192 	}
4193 
4194 	return 0;
4195 
4196 out_free_conf:
4197 	md_unregister_thread(mddev, &mddev->thread);
4198 	raid10_free_conf(conf);
4199 	mddev->private = NULL;
4200 out:
4201 	return ret;
4202 }
4203 
4204 static void raid10_free(struct mddev *mddev, void *priv)
4205 {
4206 	raid10_free_conf(priv);
4207 }
4208 
4209 static void raid10_quiesce(struct mddev *mddev, int quiesce)
4210 {
4211 	struct r10conf *conf = mddev->private;
4212 
4213 	if (quiesce)
4214 		raise_barrier(conf, 0);
4215 	else
4216 		lower_barrier(conf);
4217 }
4218 
4219 static int raid10_resize(struct mddev *mddev, sector_t sectors)
4220 {
4221 	/* Resize of 'far' arrays is not supported.
4222 	 * For 'near' and 'offset' arrays we can set the
4223 	 * number of sectors used to be an appropriate multiple
4224 	 * of the chunk size.
4225 	 * For 'offset', this is far_copies*chunksize.
4226 	 * For 'near' the multiplier is the LCM of
4227 	 * near_copies and raid_disks.
4228 	 * So if far_copies > 1 && !far_offset, fail.
4229 	 * Else find LCM(raid_disks, near_copies)*far_copies and
4230 	 * multiply by chunk_size.  Then round to this number.
4231 	 * This is mostly done by raid10_size()
4232 	 */
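	/*
	 * Rounding example (illustrative values): a 'near' array with 4 disks,
	 * near_copies = 2, far_copies = 1 and 512-sector chunks rounds the
	 * usable size to a multiple of lcm(4, 2) * 1 * 512 = 2048 sectors,
	 * i.e. one full stripe across the array.
	 */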
4233 	struct r10conf *conf = mddev->private;
4234 	sector_t oldsize, size;
4235 	int ret;
4236 
4237 	if (mddev->reshape_position != MaxSector)
4238 		return -EBUSY;
4239 
4240 	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
4241 		return -EINVAL;
4242 
4243 	oldsize = raid10_size(mddev, 0, 0);
4244 	size = raid10_size(mddev, sectors, 0);
4245 	if (mddev->external_size &&
4246 	    mddev->array_sectors > size)
4247 		return -EINVAL;
4248 
4249 	ret = mddev->bitmap_ops->resize(mddev, size, 0, false);
4250 	if (ret)
4251 		return ret;
4252 
4253 	md_set_array_sectors(mddev, size);
4254 	if (sectors > mddev->dev_sectors &&
4255 	    mddev->recovery_cp > oldsize) {
4256 		mddev->recovery_cp = oldsize;
4257 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4258 	}
4259 	calc_sectors(conf, sectors);
4260 	mddev->dev_sectors = conf->dev_sectors;
4261 	mddev->resync_max_sectors = size;
4262 	return 0;
4263 }
4264 
4265 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4266 {
4267 	struct md_rdev *rdev;
4268 	struct r10conf *conf;
4269 
4270 	if (mddev->degraded > 0) {
4271 		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4272 			mdname(mddev));
4273 		return ERR_PTR(-EINVAL);
4274 	}
4275 	sector_div(size, devs);
4276 
4277 	/* Set new parameters */
4278 	mddev->new_level = 10;
4279 	/* new layout: far_copies = 1, near_copies = 2 */
4280 	mddev->new_layout = (1<<8) + 2;
4281 	mddev->new_chunk_sectors = mddev->chunk_sectors;
4282 	mddev->delta_disks = mddev->raid_disks;
4283 	mddev->raid_disks *= 2;
4284 	/* make sure it will be not marked as dirty */
4285 	mddev->recovery_cp = MaxSector;
4286 	mddev->dev_sectors = size;
4287 
4288 	conf = setup_conf(mddev);
4289 	if (!IS_ERR(conf)) {
4290 		rdev_for_each(rdev, mddev)
4291 			if (rdev->raid_disk >= 0) {
4292 				rdev->new_raid_disk = rdev->raid_disk * 2;
4293 				rdev->sectors = size;
4294 			}
4295 	}
4296 
4297 	return conf;
4298 }
4299 
4300 static void *raid10_takeover(struct mddev *mddev)
4301 {
4302 	struct r0conf *raid0_conf;
4303 
4304 	/* raid10 can take over:
4305 	 *  raid0 - providing it has only two drives
4306 	 */
4307 	if (mddev->level == 0) {
4308 		/* for raid0 takeover only one zone is supported */
4309 		raid0_conf = mddev->private;
4310 		if (raid0_conf->nr_strip_zones > 1) {
4311 			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4312 				mdname(mddev));
4313 			return ERR_PTR(-EINVAL);
4314 		}
4315 		return raid10_takeover_raid0(mddev,
4316 			raid0_conf->strip_zone->zone_end,
4317 			raid0_conf->strip_zone->nb_dev);
4318 	}
4319 	return ERR_PTR(-EINVAL);
4320 }
4321 
4322 static int raid10_check_reshape(struct mddev *mddev)
4323 {
4324 	/* Called when there is a request to change
4325 	 * - layout (to ->new_layout)
4326 	 * - chunk size (to ->new_chunk_sectors)
4327 	 * - raid_disks (by delta_disks)
4328 	 * or when trying to restart a reshape that was ongoing.
4329 	 *
4330 	 * We need to validate the request and possibly allocate
4331 	 * space if that might be an issue later.
4332 	 *
4333 	 * Currently we reject any reshape of a 'far' mode array,
4334 	 * allow chunk size to change if new is generally acceptable,
4335 	 * allow raid_disks to increase, and allow
4336 	 * a switch between 'near' mode and 'offset' mode.
4337 	 */
4338 	struct r10conf *conf = mddev->private;
4339 	struct geom geo;
4340 
4341 	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4342 		return -EINVAL;
4343 
4344 	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4345 		/* mustn't change number of copies */
4346 		return -EINVAL;
4347 	if (geo.far_copies > 1 && !geo.far_offset)
4348 		/* Cannot switch to 'far' mode */
4349 		return -EINVAL;
4350 
4351 	if (mddev->array_sectors & geo.chunk_mask)
4352 		/* not factor of array size */
4353 		return -EINVAL;
4354 
4355 	if (!enough(conf, -1))
4356 		return -EINVAL;
4357 
4358 	kfree(conf->mirrors_new);
4359 	conf->mirrors_new = NULL;
4360 	if (mddev->delta_disks > 0) {
4361 		/* allocate new 'mirrors' list */
4362 		conf->mirrors_new =
4363 			kcalloc(mddev->raid_disks + mddev->delta_disks,
4364 				sizeof(struct raid10_info),
4365 				GFP_KERNEL);
4366 		if (!conf->mirrors_new)
4367 			return -ENOMEM;
4368 	}
4369 	return 0;
4370 }
4371 
4372 /*
4373  * Need to check if array has failed when deciding whether to:
4374  *  - start an array
4375  *  - remove non-faulty devices
4376  *  - add a spare
4377  *  - allow a reshape
4378  * This determination is simple when no reshape is happening.
4379  * However if there is a reshape, we need to carefully check
4380  * both the before and after sections.
4381  * This is because some failed devices may only affect one
4382  * of the two sections, and some non-in_sync devices may
4383  * be insync in the section most affected by failed devices.
4384  */
4385 static int calc_degraded(struct r10conf *conf)
4386 {
4387 	int degraded, degraded2;
4388 	int i;
4389 
4390 	degraded = 0;
4391 	/* 'prev' section first */
4392 	for (i = 0; i < conf->prev.raid_disks; i++) {
4393 		struct md_rdev *rdev = conf->mirrors[i].rdev;
4394 
4395 		if (!rdev || test_bit(Faulty, &rdev->flags))
4396 			degraded++;
4397 		else if (!test_bit(In_sync, &rdev->flags))
4398 			/* When we can reduce the number of devices in
4399 			 * an array, this might not contribute to
4400 			 * 'degraded'.  It does now.
4401 			 */
4402 			degraded++;
4403 	}
4404 	if (conf->geo.raid_disks == conf->prev.raid_disks)
4405 		return degraded;
4406 	degraded2 = 0;
4407 	for (i = 0; i < conf->geo.raid_disks; i++) {
4408 		struct md_rdev *rdev = conf->mirrors[i].rdev;
4409 
4410 		if (!rdev || test_bit(Faulty, &rdev->flags))
4411 			degraded2++;
4412 		else if (!test_bit(In_sync, &rdev->flags)) {
4413 			/* If reshape is increasing the number of devices,
4414 			 * this section has already been recovered, so
4415 			 * it doesn't contribute to degraded.
4416 			 * else it does.
4417 			 */
4418 			if (conf->geo.raid_disks <= conf->prev.raid_disks)
4419 				degraded2++;
4420 		}
4421 	}
4422 	if (degraded2 > degraded)
4423 		return degraded2;
4424 	return degraded;
4425 }
4426 
4427 static int raid10_start_reshape(struct mddev *mddev)
4428 {
4429 	/* A 'reshape' has been requested. This commits
4430 	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
4431 	 * This also checks if there are enough spares and adds them
4432 	 * to the array.
4433 	 * We currently require enough spares to make the final
4434 	 * array non-degraded.  We also require that the difference
4435 	 * between old and new data_offset - on each device - is
4436 	 * enough that we never risk over-writing.
4437 	 */
4438 
4439 	unsigned long before_length, after_length;
4440 	sector_t min_offset_diff = 0;
4441 	int first = 1;
4442 	struct geom new;
4443 	struct r10conf *conf = mddev->private;
4444 	struct md_rdev *rdev;
4445 	int spares = 0;
4446 	int ret;
4447 
4448 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4449 		return -EBUSY;
4450 
4451 	if (setup_geo(&new, mddev, geo_start) != conf->copies)
4452 		return -EINVAL;
4453 
4454 	before_length = ((1 << conf->prev.chunk_shift) *
4455 			 conf->prev.far_copies);
4456 	after_length = ((1 << conf->geo.chunk_shift) *
4457 			conf->geo.far_copies);
4458 
4459 	rdev_for_each(rdev, mddev) {
4460 		if (!test_bit(In_sync, &rdev->flags)
4461 		    && !test_bit(Faulty, &rdev->flags))
4462 			spares++;
4463 		if (rdev->raid_disk >= 0) {
4464 			long long diff = (rdev->new_data_offset
4465 					  - rdev->data_offset);
4466 			if (!mddev->reshape_backwards)
4467 				diff = -diff;
4468 			if (diff < 0)
4469 				diff = 0;
4470 			if (first || diff < min_offset_diff)
4471 				min_offset_diff = diff;
4472 			first = 0;
4473 		}
4474 	}
4475 
4476 	if (max(before_length, after_length) > min_offset_diff)
4477 		return -EINVAL;
4478 
4479 	if (spares < mddev->delta_disks)
4480 		return -EINVAL;
4481 
4482 	conf->offset_diff = min_offset_diff;
4483 	spin_lock_irq(&conf->device_lock);
4484 	if (conf->mirrors_new) {
4485 		memcpy(conf->mirrors_new, conf->mirrors,
4486 		       sizeof(struct raid10_info)*conf->prev.raid_disks);
4487 		smp_mb();
4488 		kfree(conf->mirrors_old);
4489 		conf->mirrors_old = conf->mirrors;
4490 		conf->mirrors = conf->mirrors_new;
4491 		conf->mirrors_new = NULL;
4492 	}
4493 	setup_geo(&conf->geo, mddev, geo_start);
4494 	smp_mb();
4495 	if (mddev->reshape_backwards) {
4496 		sector_t size = raid10_size(mddev, 0, 0);
4497 		if (size < mddev->array_sectors) {
4498 			spin_unlock_irq(&conf->device_lock);
4499 			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4500 				mdname(mddev));
4501 			return -EINVAL;
4502 		}
4503 		mddev->resync_max_sectors = size;
4504 		conf->reshape_progress = size;
4505 	} else
4506 		conf->reshape_progress = 0;
4507 	conf->reshape_safe = conf->reshape_progress;
4508 	spin_unlock_irq(&conf->device_lock);
4509 
4510 	if (mddev->delta_disks && mddev->bitmap) {
4511 		struct mdp_superblock_1 *sb = NULL;
4512 		sector_t oldsize, newsize;
4513 
4514 		oldsize = raid10_size(mddev, 0, 0);
4515 		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4516 
4517 		if (!mddev_is_clustered(mddev)) {
4518 			ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
4519 			if (ret)
4520 				goto abort;
4521 			else
4522 				goto out;
4523 		}
4524 
4525 		rdev_for_each(rdev, mddev) {
4526 			if (rdev->raid_disk > -1 &&
4527 			    !test_bit(Faulty, &rdev->flags))
4528 				sb = page_address(rdev->sb_page);
4529 		}
4530 
4531 		/*
4532 		 * Some node is already performing the reshape; no need to
4533 		 * call bitmap_ops->resize again here since it will be called
4534 		 * on receipt of the BITMAP_RESIZE msg.
4535 		 */
4536 		if ((sb && (le32_to_cpu(sb->feature_map) &
4537 			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4538 			goto out;
4539 
4540 		ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
4541 		if (ret)
4542 			goto abort;
4543 
4544 		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4545 		if (ret) {
4546 			mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
4547 			goto abort;
4548 		}
4549 	}
4550 out:
4551 	if (mddev->delta_disks > 0) {
4552 		rdev_for_each(rdev, mddev)
4553 			if (rdev->raid_disk < 0 &&
4554 			    !test_bit(Faulty, &rdev->flags)) {
4555 				if (raid10_add_disk(mddev, rdev) == 0) {
4556 					if (rdev->raid_disk >=
4557 					    conf->prev.raid_disks)
4558 						set_bit(In_sync, &rdev->flags);
4559 					else
4560 						rdev->recovery_offset = 0;
4561 
4562 					/* Failure here is OK */
4563 					sysfs_link_rdev(mddev, rdev);
4564 				}
4565 			} else if (rdev->raid_disk >= conf->prev.raid_disks
4566 				   && !test_bit(Faulty, &rdev->flags)) {
4567 				/* This is a spare that was manually added */
4568 				set_bit(In_sync, &rdev->flags);
4569 			}
4570 	}
4571 	/* When a reshape changes the number of devices,
4572 	 * ->degraded is measured against the larger of the
4573 	 * pre and post numbers.
4574 	 */
4575 	spin_lock_irq(&conf->device_lock);
4576 	mddev->degraded = calc_degraded(conf);
4577 	spin_unlock_irq(&conf->device_lock);
4578 	mddev->raid_disks = conf->geo.raid_disks;
4579 	mddev->reshape_position = conf->reshape_progress;
4580 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4581 
4582 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4583 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4584 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4585 	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4586 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4587 	conf->reshape_checkpoint = jiffies;
4588 	md_new_event();
4589 	return 0;
4590 
4591 abort:
4592 	mddev->recovery = 0;
4593 	spin_lock_irq(&conf->device_lock);
4594 	conf->geo = conf->prev;
4595 	mddev->raid_disks = conf->geo.raid_disks;
4596 	rdev_for_each(rdev, mddev)
4597 		rdev->new_data_offset = rdev->data_offset;
4598 	smp_wmb();
4599 	conf->reshape_progress = MaxSector;
4600 	conf->reshape_safe = MaxSector;
4601 	mddev->reshape_position = MaxSector;
4602 	spin_unlock_irq(&conf->device_lock);
4603 	return ret;
4604 }
4605 
4606 /* Calculate the last device-address that could contain
4607  * any block from the chunk that includes the array-address 's'
4608  * and report the next address.
4609  * i.e. the address returned will be chunk-aligned and after
4610  * any data that is in the chunk containing 's'.
4611  */
4612 static sector_t last_dev_address(sector_t s, struct geom *geo)
4613 {
4614 	s = (s | geo->chunk_mask) + 1;
4615 	s >>= geo->chunk_shift;
4616 	s *= geo->near_copies;
4617 	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4618 	s *= geo->far_copies;
4619 	s <<= geo->chunk_shift;
4620 	return s;
4621 }
4622 
4623 /* Calculate the first device-address that could contain
4624  * any block from the chunk that includes the array-address 's'.
4625  * This too will be the start of a chunk
4626  */
4627 static sector_t first_dev_address(sector_t s, struct geom *geo)
4628 {
4629 	s >>= geo->chunk_shift;
4630 	s *= geo->near_copies;
4631 	sector_div(s, geo->raid_disks);
4632 	s *= geo->far_copies;
4633 	s <<= geo->chunk_shift;
4634 	return s;
4635 }
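/*
 * Worked example (illustrative geometry: 4 disks, near_copies = 2,
 * far_copies = 1, 512-sector chunks): array address s = 3000 lies in array
 * chunk 5.  first_dev_address() yields (5 * 2) / 4 = 2 device chunks =
 * sector 1024, and last_dev_address() yields DIV_ROUND_UP(6 * 2, 4) = 3
 * device chunks = sector 1536, so all data from that chunk sits in device
 * sectors [1024, 1536).
 */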
4636 
4637 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4638 				int *skipped)
4639 {
4640 	/* We simply copy at most one chunk (smallest of old and new)
4641 	 * at a time, possibly less if that exceeds RESYNC_PAGES,
4642 	 * or we hit a bad block or something.
4643 	 * This might mean we pause for normal IO in the middle of
4644 	 * a chunk, but that is not a problem as mddev->reshape_position
4645 	 * can record any location.
4646 	 *
4647 	 * If we will want to write to a location that isn't
4648 	 * yet recorded as 'safe' (i.e. in metadata on disk) then
4649 	 * we need to flush all reshape requests and update the metadata.
4650 	 *
4651 	 * When reshaping forwards (e.g. to more devices), we interpret
4652 	 * 'safe' as the earliest block which might not have been copied
4653 	 * down yet.  We divide this by previous stripe size and multiply
4654 	 * by previous stripe length to get lowest device offset that we
4655 	 * cannot write to yet.
4656 	 * We interpret 'sector_nr' as an address that we want to write to.
4657 	 * From this we use last_dev_address() to find where we might
4658 	 * write to, and first_dev_address() on the 'safe' position.
4659 	 * If this 'next' write position is after the 'safe' position,
4660 	 * we must update the metadata to increase the 'safe' position.
4661 	 *
4662 	 * When reshaping backwards, we round in the opposite direction
4663 	 * and perform the reverse test:  next write position must not be
4664 	 * less than current safe position.
4665 	 *
4666 	 * In all this the minimum difference in data offsets
4667 	 * (conf->offset_diff - always positive) allows a bit of slack,
4668 	 * so next can be after 'safe', but not by more than offset_diff
4669 	 *
4670 	 * We need to prepare all the bios here before we start any IO
4671 	 * to ensure the size we choose is acceptable to all devices.
4672 	 * That means one for each copy for write-out and an extra one for
4673 	 * read-in.
4674 	 * We store the read-in bio in ->master_bio and the others in
4675 	 * ->devs[x].bio and ->devs[x].repl_bio.
4676 	 */
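	/*
	 * Illustrative 'safe' check (assumed numbers): reshaping forwards with
	 * offset_diff = 2048, if 'next' works out to 10240 and 'safe' to 7168,
	 * then next > safe + offset_diff (10240 > 9216), so the metadata must
	 * be flushed before these writes are issued.
	 */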
4677 	struct r10conf *conf = mddev->private;
4678 	struct r10bio *r10_bio;
4679 	sector_t next, safe, last;
4680 	int max_sectors;
4681 	int nr_sectors;
4682 	int s;
4683 	struct md_rdev *rdev;
4684 	int need_flush = 0;
4685 	struct bio *blist;
4686 	struct bio *bio, *read_bio;
4687 	int sectors_done = 0;
4688 	struct page **pages;
4689 
4690 	if (sector_nr == 0) {
4691 		/* If restarting in the middle, skip the initial sectors */
4692 		if (mddev->reshape_backwards &&
4693 		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4694 			sector_nr = (raid10_size(mddev, 0, 0)
4695 				     - conf->reshape_progress);
4696 		} else if (!mddev->reshape_backwards &&
4697 			   conf->reshape_progress > 0)
4698 			sector_nr = conf->reshape_progress;
4699 		if (sector_nr) {
4700 			mddev->curr_resync_completed = sector_nr;
4701 			sysfs_notify_dirent_safe(mddev->sysfs_completed);
4702 			*skipped = 1;
4703 			return sector_nr;
4704 		}
4705 	}
4706 
4707 	/* We don't use sector_nr to track where we are up to
4708 	 * as that doesn't work well for ->reshape_backwards.
4709 	 * So just use ->reshape_progress.
4710 	 */
4711 	if (mddev->reshape_backwards) {
4712 		/* 'next' is the earliest device address that we might
4713 		 * write to for this chunk in the new layout
4714 		 */
4715 		next = first_dev_address(conf->reshape_progress - 1,
4716 					 &conf->geo);
4717 
4718 		/* 'safe' is the last device address that we might read from
4719 		 * in the old layout after a restart
4720 		 */
4721 		safe = last_dev_address(conf->reshape_safe - 1,
4722 					&conf->prev);
4723 
4724 		if (next + conf->offset_diff < safe)
4725 			need_flush = 1;
4726 
4727 		last = conf->reshape_progress - 1;
4728 		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4729 					       & conf->prev.chunk_mask);
4730 		if (sector_nr + RESYNC_SECTORS < last)
4731 			sector_nr = last + 1 - RESYNC_SECTORS;
4732 	} else {
4733 		/* 'next' is after the last device address that we
4734 		 * might write to for this chunk in the new layout
4735 		 */
4736 		next = last_dev_address(conf->reshape_progress, &conf->geo);
4737 
4738 		/* 'safe' is the earliest device address that we might
4739 		 * read from in the old layout after a restart
4740 		 */
4741 		safe = first_dev_address(conf->reshape_safe, &conf->prev);
4742 
4743 		/* Need to update metadata if 'next' might be beyond 'safe'
4744 		 * as that would possibly corrupt data
4745 		 */
4746 		if (next > safe + conf->offset_diff)
4747 			need_flush = 1;
4748 
4749 		sector_nr = conf->reshape_progress;
4750 		last  = sector_nr | (conf->geo.chunk_mask
4751 				     & conf->prev.chunk_mask);
4752 
4753 		if (sector_nr + RESYNC_SECTORS <= last)
4754 			last = sector_nr + RESYNC_SECTORS - 1;
4755 	}
4756 
4757 	if (need_flush ||
4758 	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4759 		/* Need to update reshape_position in metadata */
4760 		wait_barrier(conf, false);
4761 		mddev->reshape_position = conf->reshape_progress;
4762 		if (mddev->reshape_backwards)
4763 			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4764 				- conf->reshape_progress;
4765 		else
4766 			mddev->curr_resync_completed = conf->reshape_progress;
4767 		conf->reshape_checkpoint = jiffies;
4768 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4769 		md_wakeup_thread(mddev->thread);
4770 		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4771 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4772 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4773 			allow_barrier(conf);
4774 			return sectors_done;
4775 		}
4776 		conf->reshape_safe = mddev->reshape_position;
4777 		allow_barrier(conf);
4778 	}
4779 
4780 	raise_barrier(conf, 0);
4781 read_more:
4782 	/* Now schedule reads for blocks from sector_nr to last */
4783 	r10_bio = raid10_alloc_init_r10buf(conf);
4784 	r10_bio->state = 0;
4785 	raise_barrier(conf, 1);
4786 	atomic_set(&r10_bio->remaining, 0);
4787 	r10_bio->mddev = mddev;
4788 	r10_bio->sector = sector_nr;
4789 	set_bit(R10BIO_IsReshape, &r10_bio->state);
4790 	r10_bio->sectors = last - sector_nr + 1;
4791 	rdev = read_balance(conf, r10_bio, &max_sectors);
4792 	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4793 
4794 	if (!rdev) {
4795 		/* Cannot read from here, so need to record bad blocks
4796 		 * on all the target devices.
4797 		 */
4798 		// FIXME
4799 		mempool_free(r10_bio, &conf->r10buf_pool);
4800 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4801 		return sectors_done;
4802 	}
4803 
4804 	read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
4805 				    GFP_KERNEL, &mddev->bio_set);
4806 	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4807 			       + rdev->data_offset);
4808 	read_bio->bi_private = r10_bio;
4809 	read_bio->bi_end_io = end_reshape_read;
4810 	r10_bio->master_bio = read_bio;
4811 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4812 
4813 	/*
4814 	 * Broadcast a RESYNC message to the other nodes so that they do not
4815 	 * write to this region, avoiding conflicts.
4816 	 */
4817 	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4818 		struct mdp_superblock_1 *sb = NULL;
4819 		int sb_reshape_pos = 0;
4820 
4821 		conf->cluster_sync_low = sector_nr;
4822 		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4823 		sb = page_address(rdev->sb_page);
4824 		if (sb) {
4825 			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4826 			/*
4827 			 * Set cluster_sync_low again if the next address for the
4828 			 * array reshape is less than cluster_sync_low, since we
4829 			 * can't update cluster_sync_low until the reshape finishes.
4830 			 */
4831 			if (sb_reshape_pos < conf->cluster_sync_low)
4832 				conf->cluster_sync_low = sb_reshape_pos;
4833 		}
4834 
4835 		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4836 							  conf->cluster_sync_high);
4837 	}
4838 
4839 	/* Now find the locations in the new layout */
4840 	__raid10_find_phys(&conf->geo, r10_bio);
4841 
4842 	blist = read_bio;
4843 	read_bio->bi_next = NULL;
4844 
4845 	for (s = 0; s < conf->copies*2; s++) {
4846 		struct bio *b;
4847 		int d = r10_bio->devs[s/2].devnum;
4848 		struct md_rdev *rdev2;
4849 		if (s&1) {
4850 			rdev2 = conf->mirrors[d].replacement;
4851 			b = r10_bio->devs[s/2].repl_bio;
4852 		} else {
4853 			rdev2 = conf->mirrors[d].rdev;
4854 			b = r10_bio->devs[s/2].bio;
4855 		}
4856 		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4857 			continue;
4858 
4859 		bio_set_dev(b, rdev2->bdev);
4860 		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4861 			rdev2->new_data_offset;
4862 		b->bi_end_io = end_reshape_write;
4863 		b->bi_opf = REQ_OP_WRITE;
4864 		b->bi_next = blist;
4865 		blist = b;
4866 	}
4867 
4868 	/* Now add as many pages as possible to all of these bios. */
4869 
4870 	nr_sectors = 0;
4871 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4872 	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4873 		struct page *page = pages[s / (PAGE_SIZE >> 9)];
4874 		int len = (max_sectors - s) << 9;
4875 		if (len > PAGE_SIZE)
4876 			len = PAGE_SIZE;
4877 		for (bio = blist; bio ; bio = bio->bi_next) {
4878 			if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
4879 				bio->bi_status = BLK_STS_RESOURCE;
4880 				bio_endio(bio);
4881 				return sectors_done;
4882 			}
4883 		}
4884 		sector_nr += len >> 9;
4885 		nr_sectors += len >> 9;
4886 	}
4887 	r10_bio->sectors = nr_sectors;
4888 
4889 	/* Now submit the read */
4890 	md_sync_acct_bio(read_bio, r10_bio->sectors);
4891 	atomic_inc(&r10_bio->remaining);
4892 	read_bio->bi_next = NULL;
4893 	submit_bio_noacct(read_bio);
4894 	sectors_done += nr_sectors;
4895 	if (sector_nr <= last)
4896 		goto read_more;
4897 
4898 	lower_barrier(conf);
4899 
4900 	/* Now that we have done the whole section we can
4901 	 * update reshape_progress
4902 	 */
4903 	if (mddev->reshape_backwards)
4904 		conf->reshape_progress -= sectors_done;
4905 	else
4906 		conf->reshape_progress += sectors_done;
4907 
4908 	return sectors_done;
4909 }
4910 
4911 static void end_reshape_request(struct r10bio *r10_bio);
4912 static int handle_reshape_read_error(struct mddev *mddev,
4913 				     struct r10bio *r10_bio);
4914 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4915 {
4916 	/* Reshape read completed.  Hopefully we have a block
4917 	 * to write out.
4918 	 * If we got a read error then we do sync 1-page reads from
4919 	 * elsewhere until we find the data - or give up.
4920 	 */
4921 	struct r10conf *conf = mddev->private;
4922 	int s;
4923 
4924 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4925 		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4926 			/* Reshape has been aborted */
4927 			md_done_sync(mddev, r10_bio->sectors, 0);
4928 			return;
4929 		}
4930 
4931 	/* We definitely have the data in the pages, schedule the
4932 	 * writes.
4933 	 */
4934 	atomic_set(&r10_bio->remaining, 1);
4935 	for (s = 0; s < conf->copies*2; s++) {
4936 		struct bio *b;
4937 		int d = r10_bio->devs[s/2].devnum;
4938 		struct md_rdev *rdev;
4939 		if (s&1) {
4940 			rdev = conf->mirrors[d].replacement;
4941 			b = r10_bio->devs[s/2].repl_bio;
4942 		} else {
4943 			rdev = conf->mirrors[d].rdev;
4944 			b = r10_bio->devs[s/2].bio;
4945 		}
4946 		if (!rdev || test_bit(Faulty, &rdev->flags))
4947 			continue;
4948 
4949 		atomic_inc(&rdev->nr_pending);
4950 		md_sync_acct_bio(b, r10_bio->sectors);
4951 		atomic_inc(&r10_bio->remaining);
4952 		b->bi_next = NULL;
4953 		submit_bio_noacct(b);
4954 	}
4955 	end_reshape_request(r10_bio);
4956 }
4957 
4958 static void end_reshape(struct r10conf *conf)
4959 {
4960 	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4961 		return;
4962 
4963 	spin_lock_irq(&conf->device_lock);
4964 	conf->prev = conf->geo;
4965 	md_finish_reshape(conf->mddev);
4966 	smp_wmb();
4967 	conf->reshape_progress = MaxSector;
4968 	conf->reshape_safe = MaxSector;
4969 	spin_unlock_irq(&conf->device_lock);
4970 
4971 	mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf));
4972 	conf->fullsync = 0;
4973 }
4974 
4975 static void raid10_update_reshape_pos(struct mddev *mddev)
4976 {
4977 	struct r10conf *conf = mddev->private;
4978 	sector_t lo, hi;
4979 
4980 	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
4981 	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
4982 	    || mddev->reshape_position == MaxSector)
4983 		conf->reshape_progress = mddev->reshape_position;
4984 	else
4985 		WARN_ON_ONCE(1);
4986 }
4987 
4988 static int handle_reshape_read_error(struct mddev *mddev,
4989 				     struct r10bio *r10_bio)
4990 {
4991 	/* Use sync reads to get the blocks from somewhere else */
4992 	int sectors = r10_bio->sectors;
4993 	struct r10conf *conf = mddev->private;
4994 	struct r10bio *r10b;
4995 	int slot = 0;
4996 	int idx = 0;
4997 	struct page **pages;
4998 
4999 	r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
5000 	if (!r10b) {
5001 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5002 		return -ENOMEM;
5003 	}
5004 
5005 	/* reshape IOs share pages from .devs[0].bio */
5006 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5007 
5008 	r10b->sector = r10_bio->sector;
5009 	__raid10_find_phys(&conf->prev, r10b);
5010 
5011 	while (sectors) {
5012 		int s = sectors;
5013 		int success = 0;
5014 		int first_slot = slot;
5015 
5016 		if (s > (PAGE_SIZE >> 9))
5017 			s = PAGE_SIZE >> 9;
5018 
5019 		while (!success) {
5020 			int d = r10b->devs[slot].devnum;
5021 			struct md_rdev *rdev = conf->mirrors[d].rdev;
5022 			sector_t addr;
5023 			if (rdev == NULL ||
5024 			    test_bit(Faulty, &rdev->flags) ||
5025 			    !test_bit(In_sync, &rdev->flags))
5026 				goto failed;
5027 
5028 			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
5029 			atomic_inc(&rdev->nr_pending);
5030 			success = sync_page_io(rdev,
5031 					       addr,
5032 					       s << 9,
5033 					       pages[idx],
5034 					       REQ_OP_READ, false);
5035 			rdev_dec_pending(rdev, mddev);
5036 			if (success)
5037 				break;
5038 		failed:
5039 			slot++;
5040 			if (slot >= conf->copies)
5041 				slot = 0;
5042 			if (slot == first_slot)
5043 				break;
5044 		}
5045 		if (!success) {
5046 			/* couldn't read this block, must give up */
5047 			set_bit(MD_RECOVERY_INTR,
5048 				&mddev->recovery);
5049 			kfree(r10b);
5050 			return -EIO;
5051 		}
5052 		sectors -= s;
5053 		idx++;
5054 	}
5055 	kfree(r10b);
5056 	return 0;
5057 }
5058 
5059 static void end_reshape_write(struct bio *bio)
5060 {
5061 	struct r10bio *r10_bio = get_resync_r10bio(bio);
5062 	struct mddev *mddev = r10_bio->mddev;
5063 	struct r10conf *conf = mddev->private;
5064 	int d;
5065 	int slot;
5066 	int repl;
5067 	struct md_rdev *rdev = NULL;
5068 
5069 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
5070 	rdev = repl ? conf->mirrors[d].replacement :
5071 		      conf->mirrors[d].rdev;
5072 
5073 	if (bio->bi_status) {
5074 		/* FIXME should record badblock */
5075 		md_error(mddev, rdev);
5076 	}
5077 
5078 	rdev_dec_pending(rdev, mddev);
5079 	end_reshape_request(r10_bio);
5080 }
5081 
5082 static void end_reshape_request(struct r10bio *r10_bio)
5083 {
5084 	if (!atomic_dec_and_test(&r10_bio->remaining))
5085 		return;
5086 	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
5087 	bio_put(r10_bio->master_bio);
5088 	put_buf(r10_bio);
5089 }
5090 
5091 static void raid10_finish_reshape(struct mddev *mddev)
5092 {
5093 	struct r10conf *conf = mddev->private;
5094 
5095 	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5096 		return;
5097 
5098 	if (mddev->delta_disks > 0) {
5099 		if (mddev->recovery_cp > mddev->resync_max_sectors) {
5100 			mddev->recovery_cp = mddev->resync_max_sectors;
5101 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5102 		}
5103 		mddev->resync_max_sectors = mddev->array_sectors;
5104 	} else {
5105 		int d;
5106 		for (d = conf->geo.raid_disks ;
5107 		     d < conf->geo.raid_disks - mddev->delta_disks;
5108 		     d++) {
5109 			struct md_rdev *rdev = conf->mirrors[d].rdev;
5110 			if (rdev)
5111 				clear_bit(In_sync, &rdev->flags);
5112 			rdev = conf->mirrors[d].replacement;
5113 			if (rdev)
5114 				clear_bit(In_sync, &rdev->flags);
5115 		}
5116 	}
5117 	mddev->layout = mddev->new_layout;
5118 	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
5119 	mddev->reshape_position = MaxSector;
5120 	mddev->delta_disks = 0;
5121 	mddev->reshape_backwards = 0;
5122 }
5123 
5124 static struct md_personality raid10_personality =
5125 {
5126 	.name		= "raid10",
5127 	.level		= 10,
5128 	.owner		= THIS_MODULE,
5129 	.make_request	= raid10_make_request,
5130 	.run		= raid10_run,
5131 	.free		= raid10_free,
5132 	.status		= raid10_status,
5133 	.error_handler	= raid10_error,
5134 	.hot_add_disk	= raid10_add_disk,
5135 	.hot_remove_disk= raid10_remove_disk,
5136 	.spare_active	= raid10_spare_active,
5137 	.sync_request	= raid10_sync_request,
5138 	.quiesce	= raid10_quiesce,
5139 	.size		= raid10_size,
5140 	.resize		= raid10_resize,
5141 	.takeover	= raid10_takeover,
5142 	.check_reshape	= raid10_check_reshape,
5143 	.start_reshape	= raid10_start_reshape,
5144 	.finish_reshape	= raid10_finish_reshape,
5145 	.update_reshape_pos = raid10_update_reshape_pos,
5146 };
5147 
5148 static int __init raid_init(void)
5149 {
5150 	return register_md_personality(&raid10_personality);
5151 }
5152 
5153 static void raid_exit(void)
5154 {
5155 	unregister_md_personality(&raid10_personality);
5156 }
5157 
5158 module_init(raid_init);
5159 module_exit(raid_exit);
5160 MODULE_LICENSE("GPL");
5161 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
5162 MODULE_ALIAS("md-personality-9"); /* RAID10 */
5163 MODULE_ALIAS("md-raid10");
5164 MODULE_ALIAS("md-level-10");
5165