xref: /linux/drivers/md/raid1.c (revision c39b9fd728d8173ecda993524089fbc38211a17f)
1 /*
2  * raid1.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5  *
6  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7  *
8  * RAID-1 management functions.
9  *
10  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
11  *
12  * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk>
13  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
14  *
15  * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16  * bitmapped intelligence in resync:
17  *
18  *      - bitmap marked during normal i/o
19  *      - bitmap used to skip nondirty blocks during sync
20  *
21  * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22  * - persistent bitmap code
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License as published by
26  * the Free Software Foundation; either version 2, or (at your option)
27  * any later version.
28  *
29  * You should have received a copy of the GNU General Public License
30  * (for example /usr/src/linux/COPYING); if not, write to the Free
31  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32  */
33 
34 #include <linux/slab.h>
35 #include <linux/delay.h>
36 #include <linux/blkdev.h>
37 #include <linux/module.h>
38 #include <linux/seq_file.h>
39 #include <linux/ratelimit.h>
40 #include "md.h"
41 #include "raid1.h"
42 #include "bitmap.h"
43 
44 /*
45  * Number of guaranteed r1bios in case of extreme VM load:
46  */
47 #define	NR_RAID1_BIOS 256
48 
49 /* when we get a read error on a read-only array, we redirect to another
50  * device without failing the first device, or trying to over-write to
51  * correct the read error.  To keep track of bad blocks on a per-bio
52  * level, we store IO_BLOCKED in the appropriate 'bios' pointer
53  */
54 #define IO_BLOCKED ((struct bio *)1)
55 /* When we successfully write to a known bad-block, we need to remove the
56  * bad-block marking which must be done from process context.  So we record
57  * the success by setting devs[n].bio to IO_MADE_GOOD
58  */
59 #define IO_MADE_GOOD ((struct bio *)2)
60 
61 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
62 
63 /* When there are this many requests queue to be written by
64  * the raid1 thread, we become 'congested' to provide back-pressure
65  * for writeback.
66  */
67 static int max_queued_requests = 1024;
68 
69 static void allow_barrier(struct r1conf *conf);
70 static void lower_barrier(struct r1conf *conf);
71 
72 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
73 {
74 	struct pool_info *pi = data;
75 	int size = offsetof(struct r1bio, bios[pi->raid_disks]);
76 
77 	/* allocate a r1bio with room for raid_disks entries in the bios array */
78 	return kzalloc(size, gfp_flags);
79 }
80 
81 static void r1bio_pool_free(void *r1_bio, void *data)
82 {
83 	kfree(r1_bio);
84 }
85 
86 #define RESYNC_BLOCK_SIZE (64*1024)
87 //#define RESYNC_BLOCK_SIZE PAGE_SIZE
88 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
89 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
90 #define RESYNC_WINDOW (2048*1024)
91 
92 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
93 {
94 	struct pool_info *pi = data;
95 	struct page *page;
96 	struct r1bio *r1_bio;
97 	struct bio *bio;
98 	int i, j;
99 
100 	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
101 	if (!r1_bio)
102 		return NULL;
103 
104 	/*
105 	 * Allocate bios : 1 for reading, n-1 for writing
106 	 */
107 	for (j = pi->raid_disks ; j-- ; ) {
108 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
109 		if (!bio)
110 			goto out_free_bio;
111 		r1_bio->bios[j] = bio;
112 	}
113 	/*
114 	 * Allocate RESYNC_PAGES data pages and attach them to
115 	 * the first bio.
116 	 * If this is a user-requested check/repair, allocate
117 	 * RESYNC_PAGES for each bio.
118 	 */
119 	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
120 		j = pi->raid_disks;
121 	else
122 		j = 1;
123 	while(j--) {
124 		bio = r1_bio->bios[j];
125 		for (i = 0; i < RESYNC_PAGES; i++) {
126 			page = alloc_page(gfp_flags);
127 			if (unlikely(!page))
128 				goto out_free_pages;
129 
130 			bio->bi_io_vec[i].bv_page = page;
131 			bio->bi_vcnt = i+1;
132 		}
133 	}
134 	/* If not user-requests, copy the page pointers to all bios */
135 	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
136 		for (i=0; i<RESYNC_PAGES ; i++)
137 			for (j=1; j<pi->raid_disks; j++)
138 				r1_bio->bios[j]->bi_io_vec[i].bv_page =
139 					r1_bio->bios[0]->bi_io_vec[i].bv_page;
140 	}
141 
142 	r1_bio->master_bio = NULL;
143 
144 	return r1_bio;
145 
146 out_free_pages:
147 	for (j=0 ; j < pi->raid_disks; j++)
148 		for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
149 			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
150 	j = -1;
151 out_free_bio:
152 	while (++j < pi->raid_disks)
153 		bio_put(r1_bio->bios[j]);
154 	r1bio_pool_free(r1_bio, data);
155 	return NULL;
156 }
157 
158 static void r1buf_pool_free(void *__r1_bio, void *data)
159 {
160 	struct pool_info *pi = data;
161 	int i,j;
162 	struct r1bio *r1bio = __r1_bio;
163 
164 	for (i = 0; i < RESYNC_PAGES; i++)
165 		for (j = pi->raid_disks; j-- ;) {
166 			if (j == 0 ||
167 			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
168 			    r1bio->bios[0]->bi_io_vec[i].bv_page)
169 				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
170 		}
171 	for (i=0 ; i < pi->raid_disks; i++)
172 		bio_put(r1bio->bios[i]);
173 
174 	r1bio_pool_free(r1bio, data);
175 }
176 
177 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
178 {
179 	int i;
180 
181 	for (i = 0; i < conf->raid_disks * 2; i++) {
182 		struct bio **bio = r1_bio->bios + i;
183 		if (!BIO_SPECIAL(*bio))
184 			bio_put(*bio);
185 		*bio = NULL;
186 	}
187 }
188 
189 static void free_r1bio(struct r1bio *r1_bio)
190 {
191 	struct r1conf *conf = r1_bio->mddev->private;
192 
193 	put_all_bios(conf, r1_bio);
194 	mempool_free(r1_bio, conf->r1bio_pool);
195 }
196 
197 static void put_buf(struct r1bio *r1_bio)
198 {
199 	struct r1conf *conf = r1_bio->mddev->private;
200 	int i;
201 
202 	for (i = 0; i < conf->raid_disks * 2; i++) {
203 		struct bio *bio = r1_bio->bios[i];
204 		if (bio->bi_end_io)
205 			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
206 	}
207 
208 	mempool_free(r1_bio, conf->r1buf_pool);
209 
210 	lower_barrier(conf);
211 }
212 
213 static void reschedule_retry(struct r1bio *r1_bio)
214 {
215 	unsigned long flags;
216 	struct mddev *mddev = r1_bio->mddev;
217 	struct r1conf *conf = mddev->private;
218 
219 	spin_lock_irqsave(&conf->device_lock, flags);
220 	list_add(&r1_bio->retry_list, &conf->retry_list);
221 	conf->nr_queued ++;
222 	spin_unlock_irqrestore(&conf->device_lock, flags);
223 
224 	wake_up(&conf->wait_barrier);
225 	md_wakeup_thread(mddev->thread);
226 }
227 
228 /*
229  * raid_end_bio_io() is called when we have finished servicing a mirrored
230  * operation and are ready to return a success/failure code to the buffer
231  * cache layer.
232  */
233 static void call_bio_endio(struct r1bio *r1_bio)
234 {
235 	struct bio *bio = r1_bio->master_bio;
236 	int done;
237 	struct r1conf *conf = r1_bio->mddev->private;
238 
239 	if (bio->bi_phys_segments) {
240 		unsigned long flags;
241 		spin_lock_irqsave(&conf->device_lock, flags);
242 		bio->bi_phys_segments--;
243 		done = (bio->bi_phys_segments == 0);
244 		spin_unlock_irqrestore(&conf->device_lock, flags);
245 	} else
246 		done = 1;
247 
248 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
249 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
250 	if (done) {
251 		bio_endio(bio, 0);
252 		/*
253 		 * Wake up any possible resync thread that waits for the device
254 		 * to go idle.
255 		 */
256 		allow_barrier(conf);
257 	}
258 }
259 
260 static void raid_end_bio_io(struct r1bio *r1_bio)
261 {
262 	struct bio *bio = r1_bio->master_bio;
263 
264 	/* if nobody has done the final endio yet, do it now */
265 	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
266 		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
267 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
268 			 (unsigned long long) bio->bi_sector,
269 			 (unsigned long long) bio->bi_sector +
270 			 (bio->bi_size >> 9) - 1);
271 
272 		call_bio_endio(r1_bio);
273 	}
274 	free_r1bio(r1_bio);
275 }
276 
277 /*
278  * Update disk head position estimator based on IRQ completion info.
279  */
280 static inline void update_head_pos(int disk, struct r1bio *r1_bio)
281 {
282 	struct r1conf *conf = r1_bio->mddev->private;
283 
284 	conf->mirrors[disk].head_position =
285 		r1_bio->sector + (r1_bio->sectors);
286 }
287 
288 /*
289  * Find the disk number which triggered given bio
290  */
291 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
292 {
293 	int mirror;
294 	struct r1conf *conf = r1_bio->mddev->private;
295 	int raid_disks = conf->raid_disks;
296 
297 	for (mirror = 0; mirror < raid_disks * 2; mirror++)
298 		if (r1_bio->bios[mirror] == bio)
299 			break;
300 
301 	BUG_ON(mirror == raid_disks * 2);
302 	update_head_pos(mirror, r1_bio);
303 
304 	return mirror;
305 }
306 
307 static void raid1_end_read_request(struct bio *bio, int error)
308 {
309 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
310 	struct r1bio *r1_bio = bio->bi_private;
311 	int mirror;
312 	struct r1conf *conf = r1_bio->mddev->private;
313 
314 	mirror = r1_bio->read_disk;
315 	/*
316 	 * this branch is our 'one mirror IO has finished' event handler:
317 	 */
318 	update_head_pos(mirror, r1_bio);
319 
320 	if (uptodate)
321 		set_bit(R1BIO_Uptodate, &r1_bio->state);
322 	else {
323 		/* If all other devices have failed, we want to return
324 		 * the error upwards rather than fail the last device.
325 		 * Here we redefine "uptodate" to mean "Don't want to retry"
326 		 */
327 		unsigned long flags;
328 		spin_lock_irqsave(&conf->device_lock, flags);
329 		if (r1_bio->mddev->degraded == conf->raid_disks ||
330 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
331 		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
332 			uptodate = 1;
333 		spin_unlock_irqrestore(&conf->device_lock, flags);
334 	}
335 
336 	if (uptodate) {
337 		raid_end_bio_io(r1_bio);
338 		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
339 	} else {
340 		/*
341 		 * oops, read error:
342 		 */
343 		char b[BDEVNAME_SIZE];
344 		printk_ratelimited(
345 			KERN_ERR "md/raid1:%s: %s: "
346 			"rescheduling sector %llu\n",
347 			mdname(conf->mddev),
348 			bdevname(conf->mirrors[mirror].rdev->bdev,
349 				 b),
350 			(unsigned long long)r1_bio->sector);
351 		set_bit(R1BIO_ReadError, &r1_bio->state);
352 		reschedule_retry(r1_bio);
353 		/* don't drop the reference on read_disk yet */
354 	}
355 }
356 
357 static void close_write(struct r1bio *r1_bio)
358 {
359 	/* it really is the end of this request */
360 	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
361 		/* free extra copy of the data pages */
362 		int i = r1_bio->behind_page_count;
363 		while (i--)
364 			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
365 		kfree(r1_bio->behind_bvecs);
366 		r1_bio->behind_bvecs = NULL;
367 	}
368 	/* clear the bitmap if all writes complete successfully */
369 	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
370 			r1_bio->sectors,
371 			!test_bit(R1BIO_Degraded, &r1_bio->state),
372 			test_bit(R1BIO_BehindIO, &r1_bio->state));
373 	md_write_end(r1_bio->mddev);
374 }
375 
376 static void r1_bio_write_done(struct r1bio *r1_bio)
377 {
378 	if (!atomic_dec_and_test(&r1_bio->remaining))
379 		return;
380 
381 	if (test_bit(R1BIO_WriteError, &r1_bio->state))
382 		reschedule_retry(r1_bio);
383 	else {
384 		close_write(r1_bio);
385 		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
386 			reschedule_retry(r1_bio);
387 		else
388 			raid_end_bio_io(r1_bio);
389 	}
390 }
391 
392 static void raid1_end_write_request(struct bio *bio, int error)
393 {
394 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
395 	struct r1bio *r1_bio = bio->bi_private;
396 	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
397 	struct r1conf *conf = r1_bio->mddev->private;
398 	struct bio *to_put = NULL;
399 
400 	mirror = find_bio_disk(r1_bio, bio);
401 
402 	/*
403 	 * 'one mirror IO has finished' event handler:
404 	 */
405 	if (!uptodate) {
406 		set_bit(WriteErrorSeen,
407 			&conf->mirrors[mirror].rdev->flags);
408 		if (!test_and_set_bit(WantReplacement,
409 				      &conf->mirrors[mirror].rdev->flags))
410 			set_bit(MD_RECOVERY_NEEDED, &
411 				conf->mddev->recovery);
412 
413 		set_bit(R1BIO_WriteError, &r1_bio->state);
414 	} else {
415 		/*
416 		 * Set R1BIO_Uptodate in our master bio, so that we
417 		 * will return a good error code for to the higher
418 		 * levels even if IO on some other mirrored buffer
419 		 * fails.
420 		 *
421 		 * The 'master' represents the composite IO operation
422 		 * to user-side. So if something waits for IO, then it
423 		 * will wait for the 'master' bio.
424 		 */
425 		sector_t first_bad;
426 		int bad_sectors;
427 
428 		r1_bio->bios[mirror] = NULL;
429 		to_put = bio;
430 		set_bit(R1BIO_Uptodate, &r1_bio->state);
431 
432 		/* Maybe we can clear some bad blocks. */
433 		if (is_badblock(conf->mirrors[mirror].rdev,
434 				r1_bio->sector, r1_bio->sectors,
435 				&first_bad, &bad_sectors)) {
436 			r1_bio->bios[mirror] = IO_MADE_GOOD;
437 			set_bit(R1BIO_MadeGood, &r1_bio->state);
438 		}
439 	}
440 
441 	if (behind) {
442 		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
443 			atomic_dec(&r1_bio->behind_remaining);
444 
445 		/*
446 		 * In behind mode, we ACK the master bio once the I/O
447 		 * has safely reached all non-writemostly
448 		 * disks. Setting the Returned bit ensures that this
449 		 * gets done only once -- we don't ever want to return
450 		 * -EIO here, instead we'll wait
451 		 */
452 		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
453 		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
454 			/* Maybe we can return now */
455 			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
456 				struct bio *mbio = r1_bio->master_bio;
457 				pr_debug("raid1: behind end write sectors"
458 					 " %llu-%llu\n",
459 					 (unsigned long long) mbio->bi_sector,
460 					 (unsigned long long) mbio->bi_sector +
461 					 (mbio->bi_size >> 9) - 1);
462 				call_bio_endio(r1_bio);
463 			}
464 		}
465 	}
466 	if (r1_bio->bios[mirror] == NULL)
467 		rdev_dec_pending(conf->mirrors[mirror].rdev,
468 				 conf->mddev);
469 
470 	/*
471 	 * Let's see if all mirrored write operations have finished
472 	 * already.
473 	 */
474 	r1_bio_write_done(r1_bio);
475 
476 	if (to_put)
477 		bio_put(to_put);
478 }
479 
480 
481 /*
482  * This routine returns the disk from which the requested read should
483  * be done. There is a per-array 'next expected sequential IO' sector
484  * number - if this matches on the next IO then we use the last disk.
485  * There is also a per-disk 'last know head position' sector that is
486  * maintained from IRQ contexts, both the normal and the resync IO
487  * completion handlers update this position correctly. If there is no
488  * perfect sequential match then we pick the disk whose head is closest.
489  *
490  * If there are 2 mirrors in the same 2 devices, performance degrades
491  * because position is mirror, not device based.
492  *
493  * The rdev for the device selected will have nr_pending incremented.
494  */
495 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
496 {
497 	const sector_t this_sector = r1_bio->sector;
498 	int sectors;
499 	int best_good_sectors;
500 	int best_disk, best_dist_disk, best_pending_disk;
501 	int has_nonrot_disk;
502 	int disk;
503 	sector_t best_dist;
504 	unsigned int min_pending;
505 	struct md_rdev *rdev;
506 	int choose_first;
507 	int choose_next_idle;
508 
509 	rcu_read_lock();
510 	/*
511 	 * Check if we can balance. We can balance on the whole
512 	 * device if no resync is going on, or below the resync window.
513 	 * We take the first readable disk when above the resync window.
514 	 */
515  retry:
516 	sectors = r1_bio->sectors;
517 	best_disk = -1;
518 	best_dist_disk = -1;
519 	best_dist = MaxSector;
520 	best_pending_disk = -1;
521 	min_pending = UINT_MAX;
522 	best_good_sectors = 0;
523 	has_nonrot_disk = 0;
524 	choose_next_idle = 0;
525 
526 	if (conf->mddev->recovery_cp < MaxSector &&
527 	    (this_sector + sectors >= conf->next_resync))
528 		choose_first = 1;
529 	else
530 		choose_first = 0;
531 
532 	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
533 		sector_t dist;
534 		sector_t first_bad;
535 		int bad_sectors;
536 		unsigned int pending;
537 		bool nonrot;
538 
539 		rdev = rcu_dereference(conf->mirrors[disk].rdev);
540 		if (r1_bio->bios[disk] == IO_BLOCKED
541 		    || rdev == NULL
542 		    || test_bit(Unmerged, &rdev->flags)
543 		    || test_bit(Faulty, &rdev->flags))
544 			continue;
545 		if (!test_bit(In_sync, &rdev->flags) &&
546 		    rdev->recovery_offset < this_sector + sectors)
547 			continue;
548 		if (test_bit(WriteMostly, &rdev->flags)) {
549 			/* Don't balance among write-mostly, just
550 			 * use the first as a last resort */
551 			if (best_disk < 0) {
552 				if (is_badblock(rdev, this_sector, sectors,
553 						&first_bad, &bad_sectors)) {
554 					if (first_bad < this_sector)
555 						/* Cannot use this */
556 						continue;
557 					best_good_sectors = first_bad - this_sector;
558 				} else
559 					best_good_sectors = sectors;
560 				best_disk = disk;
561 			}
562 			continue;
563 		}
564 		/* This is a reasonable device to use.  It might
565 		 * even be best.
566 		 */
567 		if (is_badblock(rdev, this_sector, sectors,
568 				&first_bad, &bad_sectors)) {
569 			if (best_dist < MaxSector)
570 				/* already have a better device */
571 				continue;
572 			if (first_bad <= this_sector) {
573 				/* cannot read here. If this is the 'primary'
574 				 * device, then we must not read beyond
575 				 * bad_sectors from another device..
576 				 */
577 				bad_sectors -= (this_sector - first_bad);
578 				if (choose_first && sectors > bad_sectors)
579 					sectors = bad_sectors;
580 				if (best_good_sectors > sectors)
581 					best_good_sectors = sectors;
582 
583 			} else {
584 				sector_t good_sectors = first_bad - this_sector;
585 				if (good_sectors > best_good_sectors) {
586 					best_good_sectors = good_sectors;
587 					best_disk = disk;
588 				}
589 				if (choose_first)
590 					break;
591 			}
592 			continue;
593 		} else
594 			best_good_sectors = sectors;
595 
596 		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
597 		has_nonrot_disk |= nonrot;
598 		pending = atomic_read(&rdev->nr_pending);
599 		dist = abs(this_sector - conf->mirrors[disk].head_position);
600 		if (choose_first) {
601 			best_disk = disk;
602 			break;
603 		}
604 		/* Don't change to another disk for sequential reads */
605 		if (conf->mirrors[disk].next_seq_sect == this_sector
606 		    || dist == 0) {
607 			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
608 			struct raid1_info *mirror = &conf->mirrors[disk];
609 
610 			best_disk = disk;
611 			/*
612 			 * If buffered sequential IO size exceeds optimal
613 			 * iosize, check if there is idle disk. If yes, choose
614 			 * the idle disk. read_balance could already choose an
615 			 * idle disk before noticing it's a sequential IO in
616 			 * this disk. This doesn't matter because this disk
617 			 * will idle, next time it will be utilized after the
618 			 * first disk has IO size exceeds optimal iosize. In
619 			 * this way, iosize of the first disk will be optimal
620 			 * iosize at least. iosize of the second disk might be
621 			 * small, but not a big deal since when the second disk
622 			 * starts IO, the first disk is likely still busy.
623 			 */
624 			if (nonrot && opt_iosize > 0 &&
625 			    mirror->seq_start != MaxSector &&
626 			    mirror->next_seq_sect > opt_iosize &&
627 			    mirror->next_seq_sect - opt_iosize >=
628 			    mirror->seq_start) {
629 				choose_next_idle = 1;
630 				continue;
631 			}
632 			break;
633 		}
634 		/* If device is idle, use it */
635 		if (pending == 0) {
636 			best_disk = disk;
637 			break;
638 		}
639 
640 		if (choose_next_idle)
641 			continue;
642 
643 		if (min_pending > pending) {
644 			min_pending = pending;
645 			best_pending_disk = disk;
646 		}
647 
648 		if (dist < best_dist) {
649 			best_dist = dist;
650 			best_dist_disk = disk;
651 		}
652 	}
653 
654 	/*
655 	 * If all disks are rotational, choose the closest disk. If any disk is
656 	 * non-rotational, choose the disk with less pending request even the
657 	 * disk is rotational, which might/might not be optimal for raids with
658 	 * mixed ratation/non-rotational disks depending on workload.
659 	 */
660 	if (best_disk == -1) {
661 		if (has_nonrot_disk)
662 			best_disk = best_pending_disk;
663 		else
664 			best_disk = best_dist_disk;
665 	}
666 
667 	if (best_disk >= 0) {
668 		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
669 		if (!rdev)
670 			goto retry;
671 		atomic_inc(&rdev->nr_pending);
672 		if (test_bit(Faulty, &rdev->flags)) {
673 			/* cannot risk returning a device that failed
674 			 * before we inc'ed nr_pending
675 			 */
676 			rdev_dec_pending(rdev, conf->mddev);
677 			goto retry;
678 		}
679 		sectors = best_good_sectors;
680 
681 		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
682 			conf->mirrors[best_disk].seq_start = this_sector;
683 
684 		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
685 	}
686 	rcu_read_unlock();
687 	*max_sectors = sectors;
688 
689 	return best_disk;
690 }
691 
692 static int raid1_mergeable_bvec(struct request_queue *q,
693 				struct bvec_merge_data *bvm,
694 				struct bio_vec *biovec)
695 {
696 	struct mddev *mddev = q->queuedata;
697 	struct r1conf *conf = mddev->private;
698 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
699 	int max = biovec->bv_len;
700 
701 	if (mddev->merge_check_needed) {
702 		int disk;
703 		rcu_read_lock();
704 		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
705 			struct md_rdev *rdev = rcu_dereference(
706 				conf->mirrors[disk].rdev);
707 			if (rdev && !test_bit(Faulty, &rdev->flags)) {
708 				struct request_queue *q =
709 					bdev_get_queue(rdev->bdev);
710 				if (q->merge_bvec_fn) {
711 					bvm->bi_sector = sector +
712 						rdev->data_offset;
713 					bvm->bi_bdev = rdev->bdev;
714 					max = min(max, q->merge_bvec_fn(
715 							  q, bvm, biovec));
716 				}
717 			}
718 		}
719 		rcu_read_unlock();
720 	}
721 	return max;
722 
723 }
724 
725 int md_raid1_congested(struct mddev *mddev, int bits)
726 {
727 	struct r1conf *conf = mddev->private;
728 	int i, ret = 0;
729 
730 	if ((bits & (1 << BDI_async_congested)) &&
731 	    conf->pending_count >= max_queued_requests)
732 		return 1;
733 
734 	rcu_read_lock();
735 	for (i = 0; i < conf->raid_disks * 2; i++) {
736 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
737 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
738 			struct request_queue *q = bdev_get_queue(rdev->bdev);
739 
740 			BUG_ON(!q);
741 
742 			/* Note the '|| 1' - when read_balance prefers
743 			 * non-congested targets, it can be removed
744 			 */
745 			if ((bits & (1<<BDI_async_congested)) || 1)
746 				ret |= bdi_congested(&q->backing_dev_info, bits);
747 			else
748 				ret &= bdi_congested(&q->backing_dev_info, bits);
749 		}
750 	}
751 	rcu_read_unlock();
752 	return ret;
753 }
754 EXPORT_SYMBOL_GPL(md_raid1_congested);
755 
756 static int raid1_congested(void *data, int bits)
757 {
758 	struct mddev *mddev = data;
759 
760 	return mddev_congested(mddev, bits) ||
761 		md_raid1_congested(mddev, bits);
762 }
763 
764 static void flush_pending_writes(struct r1conf *conf)
765 {
766 	/* Any writes that have been queued but are awaiting
767 	 * bitmap updates get flushed here.
768 	 */
769 	spin_lock_irq(&conf->device_lock);
770 
771 	if (conf->pending_bio_list.head) {
772 		struct bio *bio;
773 		bio = bio_list_get(&conf->pending_bio_list);
774 		conf->pending_count = 0;
775 		spin_unlock_irq(&conf->device_lock);
776 		/* flush any pending bitmap writes to
777 		 * disk before proceeding w/ I/O */
778 		bitmap_unplug(conf->mddev->bitmap);
779 		wake_up(&conf->wait_barrier);
780 
781 		while (bio) { /* submit pending writes */
782 			struct bio *next = bio->bi_next;
783 			bio->bi_next = NULL;
784 			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
785 			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
786 				/* Just ignore it */
787 				bio_endio(bio, 0);
788 			else
789 				generic_make_request(bio);
790 			bio = next;
791 		}
792 	} else
793 		spin_unlock_irq(&conf->device_lock);
794 }
795 
796 /* Barriers....
797  * Sometimes we need to suspend IO while we do something else,
798  * either some resync/recovery, or reconfigure the array.
799  * To do this we raise a 'barrier'.
800  * The 'barrier' is a counter that can be raised multiple times
801  * to count how many activities are happening which preclude
802  * normal IO.
803  * We can only raise the barrier if there is no pending IO.
804  * i.e. if nr_pending == 0.
805  * We choose only to raise the barrier if no-one is waiting for the
806  * barrier to go down.  This means that as soon as an IO request
807  * is ready, no other operations which require a barrier will start
808  * until the IO request has had a chance.
809  *
810  * So: regular IO calls 'wait_barrier'.  When that returns there
811  *    is no backgroup IO happening,  It must arrange to call
812  *    allow_barrier when it has finished its IO.
813  * backgroup IO calls must call raise_barrier.  Once that returns
814  *    there is no normal IO happeing.  It must arrange to call
815  *    lower_barrier when the particular background IO completes.
816  */
817 #define RESYNC_DEPTH 32
818 
819 static void raise_barrier(struct r1conf *conf)
820 {
821 	spin_lock_irq(&conf->resync_lock);
822 
823 	/* Wait until no block IO is waiting */
824 	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
825 			    conf->resync_lock);
826 
827 	/* block any new IO from starting */
828 	conf->barrier++;
829 
830 	/* Now wait for all pending IO to complete */
831 	wait_event_lock_irq(conf->wait_barrier,
832 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
833 			    conf->resync_lock);
834 
835 	spin_unlock_irq(&conf->resync_lock);
836 }
837 
838 static void lower_barrier(struct r1conf *conf)
839 {
840 	unsigned long flags;
841 	BUG_ON(conf->barrier <= 0);
842 	spin_lock_irqsave(&conf->resync_lock, flags);
843 	conf->barrier--;
844 	spin_unlock_irqrestore(&conf->resync_lock, flags);
845 	wake_up(&conf->wait_barrier);
846 }
847 
848 static void wait_barrier(struct r1conf *conf)
849 {
850 	spin_lock_irq(&conf->resync_lock);
851 	if (conf->barrier) {
852 		conf->nr_waiting++;
853 		/* Wait for the barrier to drop.
854 		 * However if there are already pending
855 		 * requests (preventing the barrier from
856 		 * rising completely), and the
857 		 * pre-process bio queue isn't empty,
858 		 * then don't wait, as we need to empty
859 		 * that queue to get the nr_pending
860 		 * count down.
861 		 */
862 		wait_event_lock_irq(conf->wait_barrier,
863 				    !conf->barrier ||
864 				    (conf->nr_pending &&
865 				     current->bio_list &&
866 				     !bio_list_empty(current->bio_list)),
867 				    conf->resync_lock);
868 		conf->nr_waiting--;
869 	}
870 	conf->nr_pending++;
871 	spin_unlock_irq(&conf->resync_lock);
872 }
873 
874 static void allow_barrier(struct r1conf *conf)
875 {
876 	unsigned long flags;
877 	spin_lock_irqsave(&conf->resync_lock, flags);
878 	conf->nr_pending--;
879 	spin_unlock_irqrestore(&conf->resync_lock, flags);
880 	wake_up(&conf->wait_barrier);
881 }
882 
883 static void freeze_array(struct r1conf *conf)
884 {
885 	/* stop syncio and normal IO and wait for everything to
886 	 * go quite.
887 	 * We increment barrier and nr_waiting, and then
888 	 * wait until nr_pending match nr_queued+1
889 	 * This is called in the context of one normal IO request
890 	 * that has failed. Thus any sync request that might be pending
891 	 * will be blocked by nr_pending, and we need to wait for
892 	 * pending IO requests to complete or be queued for re-try.
893 	 * Thus the number queued (nr_queued) plus this request (1)
894 	 * must match the number of pending IOs (nr_pending) before
895 	 * we continue.
896 	 */
897 	spin_lock_irq(&conf->resync_lock);
898 	conf->barrier++;
899 	conf->nr_waiting++;
900 	wait_event_lock_irq_cmd(conf->wait_barrier,
901 				conf->nr_pending == conf->nr_queued+1,
902 				conf->resync_lock,
903 				flush_pending_writes(conf));
904 	spin_unlock_irq(&conf->resync_lock);
905 }
906 static void unfreeze_array(struct r1conf *conf)
907 {
908 	/* reverse the effect of the freeze */
909 	spin_lock_irq(&conf->resync_lock);
910 	conf->barrier--;
911 	conf->nr_waiting--;
912 	wake_up(&conf->wait_barrier);
913 	spin_unlock_irq(&conf->resync_lock);
914 }
915 
916 
917 /* duplicate the data pages for behind I/O
918  */
919 static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
920 {
921 	int i;
922 	struct bio_vec *bvec;
923 	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
924 					GFP_NOIO);
925 	if (unlikely(!bvecs))
926 		return;
927 
928 	bio_for_each_segment(bvec, bio, i) {
929 		bvecs[i] = *bvec;
930 		bvecs[i].bv_page = alloc_page(GFP_NOIO);
931 		if (unlikely(!bvecs[i].bv_page))
932 			goto do_sync_io;
933 		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
934 		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
935 		kunmap(bvecs[i].bv_page);
936 		kunmap(bvec->bv_page);
937 	}
938 	r1_bio->behind_bvecs = bvecs;
939 	r1_bio->behind_page_count = bio->bi_vcnt;
940 	set_bit(R1BIO_BehindIO, &r1_bio->state);
941 	return;
942 
943 do_sync_io:
944 	for (i = 0; i < bio->bi_vcnt; i++)
945 		if (bvecs[i].bv_page)
946 			put_page(bvecs[i].bv_page);
947 	kfree(bvecs);
948 	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
949 }
950 
951 struct raid1_plug_cb {
952 	struct blk_plug_cb	cb;
953 	struct bio_list		pending;
954 	int			pending_cnt;
955 };
956 
957 static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
958 {
959 	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
960 						  cb);
961 	struct mddev *mddev = plug->cb.data;
962 	struct r1conf *conf = mddev->private;
963 	struct bio *bio;
964 
965 	if (from_schedule || current->bio_list) {
966 		spin_lock_irq(&conf->device_lock);
967 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
968 		conf->pending_count += plug->pending_cnt;
969 		spin_unlock_irq(&conf->device_lock);
970 		wake_up(&conf->wait_barrier);
971 		md_wakeup_thread(mddev->thread);
972 		kfree(plug);
973 		return;
974 	}
975 
976 	/* we aren't scheduling, so we can do the write-out directly. */
977 	bio = bio_list_get(&plug->pending);
978 	bitmap_unplug(mddev->bitmap);
979 	wake_up(&conf->wait_barrier);
980 
981 	while (bio) { /* submit pending writes */
982 		struct bio *next = bio->bi_next;
983 		bio->bi_next = NULL;
984 		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
985 		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
986 			/* Just ignore it */
987 			bio_endio(bio, 0);
988 		else
989 			generic_make_request(bio);
990 		bio = next;
991 	}
992 	kfree(plug);
993 }
994 
995 static void make_request(struct mddev *mddev, struct bio * bio)
996 {
997 	struct r1conf *conf = mddev->private;
998 	struct raid1_info *mirror;
999 	struct r1bio *r1_bio;
1000 	struct bio *read_bio;
1001 	int i, disks;
1002 	struct bitmap *bitmap;
1003 	unsigned long flags;
1004 	const int rw = bio_data_dir(bio);
1005 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1006 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
1007 	const unsigned long do_discard = (bio->bi_rw
1008 					  & (REQ_DISCARD | REQ_SECURE));
1009 	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
1010 	struct md_rdev *blocked_rdev;
1011 	struct blk_plug_cb *cb;
1012 	struct raid1_plug_cb *plug = NULL;
1013 	int first_clone;
1014 	int sectors_handled;
1015 	int max_sectors;
1016 
1017 	/*
1018 	 * Register the new request and wait if the reconstruction
1019 	 * thread has put up a bar for new requests.
1020 	 * Continue immediately if no resync is active currently.
1021 	 */
1022 
1023 	md_write_start(mddev, bio); /* wait on superblock update early */
1024 
1025 	if (bio_data_dir(bio) == WRITE &&
1026 	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
1027 	    bio->bi_sector < mddev->suspend_hi) {
1028 		/* As the suspend_* range is controlled by
1029 		 * userspace, we want an interruptible
1030 		 * wait.
1031 		 */
1032 		DEFINE_WAIT(w);
1033 		for (;;) {
1034 			flush_signals(current);
1035 			prepare_to_wait(&conf->wait_barrier,
1036 					&w, TASK_INTERRUPTIBLE);
1037 			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
1038 			    bio->bi_sector >= mddev->suspend_hi)
1039 				break;
1040 			schedule();
1041 		}
1042 		finish_wait(&conf->wait_barrier, &w);
1043 	}
1044 
1045 	wait_barrier(conf);
1046 
1047 	bitmap = mddev->bitmap;
1048 
1049 	/*
1050 	 * make_request() can abort the operation when READA is being
1051 	 * used and no empty request is available.
1052 	 *
1053 	 */
1054 	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1055 
1056 	r1_bio->master_bio = bio;
1057 	r1_bio->sectors = bio->bi_size >> 9;
1058 	r1_bio->state = 0;
1059 	r1_bio->mddev = mddev;
1060 	r1_bio->sector = bio->bi_sector;
1061 
1062 	/* We might need to issue multiple reads to different
1063 	 * devices if there are bad blocks around, so we keep
1064 	 * track of the number of reads in bio->bi_phys_segments.
1065 	 * If this is 0, there is only one r1_bio and no locking
1066 	 * will be needed when requests complete.  If it is
1067 	 * non-zero, then it is the number of not-completed requests.
1068 	 */
1069 	bio->bi_phys_segments = 0;
1070 	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1071 
1072 	if (rw == READ) {
1073 		/*
1074 		 * read balancing logic:
1075 		 */
1076 		int rdisk;
1077 
1078 read_again:
1079 		rdisk = read_balance(conf, r1_bio, &max_sectors);
1080 
1081 		if (rdisk < 0) {
1082 			/* couldn't find anywhere to read from */
1083 			raid_end_bio_io(r1_bio);
1084 			return;
1085 		}
1086 		mirror = conf->mirrors + rdisk;
1087 
1088 		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1089 		    bitmap) {
1090 			/* Reading from a write-mostly device must
1091 			 * take care not to over-take any writes
1092 			 * that are 'behind'
1093 			 */
1094 			wait_event(bitmap->behind_wait,
1095 				   atomic_read(&bitmap->behind_writes) == 0);
1096 		}
1097 		r1_bio->read_disk = rdisk;
1098 
1099 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1100 		md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
1101 			    max_sectors);
1102 
1103 		r1_bio->bios[rdisk] = read_bio;
1104 
1105 		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
1106 		read_bio->bi_bdev = mirror->rdev->bdev;
1107 		read_bio->bi_end_io = raid1_end_read_request;
1108 		read_bio->bi_rw = READ | do_sync;
1109 		read_bio->bi_private = r1_bio;
1110 
1111 		if (max_sectors < r1_bio->sectors) {
1112 			/* could not read all from this device, so we will
1113 			 * need another r1_bio.
1114 			 */
1115 
1116 			sectors_handled = (r1_bio->sector + max_sectors
1117 					   - bio->bi_sector);
1118 			r1_bio->sectors = max_sectors;
1119 			spin_lock_irq(&conf->device_lock);
1120 			if (bio->bi_phys_segments == 0)
1121 				bio->bi_phys_segments = 2;
1122 			else
1123 				bio->bi_phys_segments++;
1124 			spin_unlock_irq(&conf->device_lock);
1125 			/* Cannot call generic_make_request directly
1126 			 * as that will be queued in __make_request
1127 			 * and subsequent mempool_alloc might block waiting
1128 			 * for it.  So hand bio over to raid1d.
1129 			 */
1130 			reschedule_retry(r1_bio);
1131 
1132 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1133 
1134 			r1_bio->master_bio = bio;
1135 			r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1136 			r1_bio->state = 0;
1137 			r1_bio->mddev = mddev;
1138 			r1_bio->sector = bio->bi_sector + sectors_handled;
1139 			goto read_again;
1140 		} else
1141 			generic_make_request(read_bio);
1142 		return;
1143 	}
1144 
1145 	/*
1146 	 * WRITE:
1147 	 */
1148 	if (conf->pending_count >= max_queued_requests) {
1149 		md_wakeup_thread(mddev->thread);
1150 		wait_event(conf->wait_barrier,
1151 			   conf->pending_count < max_queued_requests);
1152 	}
1153 	/* first select target devices under rcu_lock and
1154 	 * inc refcount on their rdev.  Record them by setting
1155 	 * bios[x] to bio
1156 	 * If there are known/acknowledged bad blocks on any device on
1157 	 * which we have seen a write error, we want to avoid writing those
1158 	 * blocks.
1159 	 * This potentially requires several writes to write around
1160 	 * the bad blocks.  Each set of writes gets it's own r1bio
1161 	 * with a set of bios attached.
1162 	 */
1163 
1164 	disks = conf->raid_disks * 2;
1165  retry_write:
1166 	blocked_rdev = NULL;
1167 	rcu_read_lock();
1168 	max_sectors = r1_bio->sectors;
1169 	for (i = 0;  i < disks; i++) {
1170 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1171 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1172 			atomic_inc(&rdev->nr_pending);
1173 			blocked_rdev = rdev;
1174 			break;
1175 		}
1176 		r1_bio->bios[i] = NULL;
1177 		if (!rdev || test_bit(Faulty, &rdev->flags)
1178 		    || test_bit(Unmerged, &rdev->flags)) {
1179 			if (i < conf->raid_disks)
1180 				set_bit(R1BIO_Degraded, &r1_bio->state);
1181 			continue;
1182 		}
1183 
1184 		atomic_inc(&rdev->nr_pending);
1185 		if (test_bit(WriteErrorSeen, &rdev->flags)) {
1186 			sector_t first_bad;
1187 			int bad_sectors;
1188 			int is_bad;
1189 
1190 			is_bad = is_badblock(rdev, r1_bio->sector,
1191 					     max_sectors,
1192 					     &first_bad, &bad_sectors);
1193 			if (is_bad < 0) {
1194 				/* mustn't write here until the bad block is
1195 				 * acknowledged*/
1196 				set_bit(BlockedBadBlocks, &rdev->flags);
1197 				blocked_rdev = rdev;
1198 				break;
1199 			}
1200 			if (is_bad && first_bad <= r1_bio->sector) {
1201 				/* Cannot write here at all */
1202 				bad_sectors -= (r1_bio->sector - first_bad);
1203 				if (bad_sectors < max_sectors)
1204 					/* mustn't write more than bad_sectors
1205 					 * to other devices yet
1206 					 */
1207 					max_sectors = bad_sectors;
1208 				rdev_dec_pending(rdev, mddev);
1209 				/* We don't set R1BIO_Degraded as that
1210 				 * only applies if the disk is
1211 				 * missing, so it might be re-added,
1212 				 * and we want to know to recover this
1213 				 * chunk.
1214 				 * In this case the device is here,
1215 				 * and the fact that this chunk is not
1216 				 * in-sync is recorded in the bad
1217 				 * block log
1218 				 */
1219 				continue;
1220 			}
1221 			if (is_bad) {
1222 				int good_sectors = first_bad - r1_bio->sector;
1223 				if (good_sectors < max_sectors)
1224 					max_sectors = good_sectors;
1225 			}
1226 		}
1227 		r1_bio->bios[i] = bio;
1228 	}
1229 	rcu_read_unlock();
1230 
1231 	if (unlikely(blocked_rdev)) {
1232 		/* Wait for this device to become unblocked */
1233 		int j;
1234 
1235 		for (j = 0; j < i; j++)
1236 			if (r1_bio->bios[j])
1237 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1238 		r1_bio->state = 0;
1239 		allow_barrier(conf);
1240 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1241 		wait_barrier(conf);
1242 		goto retry_write;
1243 	}
1244 
1245 	if (max_sectors < r1_bio->sectors) {
1246 		/* We are splitting this write into multiple parts, so
1247 		 * we need to prepare for allocating another r1_bio.
1248 		 */
1249 		r1_bio->sectors = max_sectors;
1250 		spin_lock_irq(&conf->device_lock);
1251 		if (bio->bi_phys_segments == 0)
1252 			bio->bi_phys_segments = 2;
1253 		else
1254 			bio->bi_phys_segments++;
1255 		spin_unlock_irq(&conf->device_lock);
1256 	}
1257 	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
1258 
1259 	atomic_set(&r1_bio->remaining, 1);
1260 	atomic_set(&r1_bio->behind_remaining, 0);
1261 
1262 	first_clone = 1;
1263 	for (i = 0; i < disks; i++) {
1264 		struct bio *mbio;
1265 		if (!r1_bio->bios[i])
1266 			continue;
1267 
1268 		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1269 		md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
1270 
1271 		if (first_clone) {
1272 			/* do behind I/O ?
1273 			 * Not if there are too many, or cannot
1274 			 * allocate memory, or a reader on WriteMostly
1275 			 * is waiting for behind writes to flush */
1276 			if (bitmap &&
1277 			    (atomic_read(&bitmap->behind_writes)
1278 			     < mddev->bitmap_info.max_write_behind) &&
1279 			    !waitqueue_active(&bitmap->behind_wait))
1280 				alloc_behind_pages(mbio, r1_bio);
1281 
1282 			bitmap_startwrite(bitmap, r1_bio->sector,
1283 					  r1_bio->sectors,
1284 					  test_bit(R1BIO_BehindIO,
1285 						   &r1_bio->state));
1286 			first_clone = 0;
1287 		}
1288 		if (r1_bio->behind_bvecs) {
1289 			struct bio_vec *bvec;
1290 			int j;
1291 
1292 			/* Yes, I really want the '__' version so that
1293 			 * we clear any unused pointer in the io_vec, rather
1294 			 * than leave them unchanged.  This is important
1295 			 * because when we come to free the pages, we won't
1296 			 * know the original bi_idx, so we just free
1297 			 * them all
1298 			 */
1299 			__bio_for_each_segment(bvec, mbio, j, 0)
1300 				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
1301 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1302 				atomic_inc(&r1_bio->behind_remaining);
1303 		}
1304 
1305 		r1_bio->bios[i] = mbio;
1306 
1307 		mbio->bi_sector	= (r1_bio->sector +
1308 				   conf->mirrors[i].rdev->data_offset);
1309 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1310 		mbio->bi_end_io	= raid1_end_write_request;
1311 		mbio->bi_rw =
1312 			WRITE | do_flush_fua | do_sync | do_discard | do_same;
1313 		mbio->bi_private = r1_bio;
1314 
1315 		atomic_inc(&r1_bio->remaining);
1316 
1317 		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1318 		if (cb)
1319 			plug = container_of(cb, struct raid1_plug_cb, cb);
1320 		else
1321 			plug = NULL;
1322 		spin_lock_irqsave(&conf->device_lock, flags);
1323 		if (plug) {
1324 			bio_list_add(&plug->pending, mbio);
1325 			plug->pending_cnt++;
1326 		} else {
1327 			bio_list_add(&conf->pending_bio_list, mbio);
1328 			conf->pending_count++;
1329 		}
1330 		spin_unlock_irqrestore(&conf->device_lock, flags);
1331 		if (!plug)
1332 			md_wakeup_thread(mddev->thread);
1333 	}
1334 	/* Mustn't call r1_bio_write_done before this next test,
1335 	 * as it could result in the bio being freed.
1336 	 */
1337 	if (sectors_handled < (bio->bi_size >> 9)) {
1338 		r1_bio_write_done(r1_bio);
1339 		/* We need another r1_bio.  It has already been counted
1340 		 * in bio->bi_phys_segments
1341 		 */
1342 		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1343 		r1_bio->master_bio = bio;
1344 		r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1345 		r1_bio->state = 0;
1346 		r1_bio->mddev = mddev;
1347 		r1_bio->sector = bio->bi_sector + sectors_handled;
1348 		goto retry_write;
1349 	}
1350 
1351 	r1_bio_write_done(r1_bio);
1352 
1353 	/* In case raid1d snuck in to freeze_array */
1354 	wake_up(&conf->wait_barrier);
1355 }
1356 
1357 static void status(struct seq_file *seq, struct mddev *mddev)
1358 {
1359 	struct r1conf *conf = mddev->private;
1360 	int i;
1361 
1362 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1363 		   conf->raid_disks - mddev->degraded);
1364 	rcu_read_lock();
1365 	for (i = 0; i < conf->raid_disks; i++) {
1366 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1367 		seq_printf(seq, "%s",
1368 			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1369 	}
1370 	rcu_read_unlock();
1371 	seq_printf(seq, "]");
1372 }
1373 
1374 
1375 static void error(struct mddev *mddev, struct md_rdev *rdev)
1376 {
1377 	char b[BDEVNAME_SIZE];
1378 	struct r1conf *conf = mddev->private;
1379 
1380 	/*
1381 	 * If it is not operational, then we have already marked it as dead
1382 	 * else if it is the last working disks, ignore the error, let the
1383 	 * next level up know.
1384 	 * else mark the drive as failed
1385 	 */
1386 	if (test_bit(In_sync, &rdev->flags)
1387 	    && (conf->raid_disks - mddev->degraded) == 1) {
1388 		/*
1389 		 * Don't fail the drive, act as though we were just a
1390 		 * normal single drive.
1391 		 * However don't try a recovery from this drive as
1392 		 * it is very likely to fail.
1393 		 */
1394 		conf->recovery_disabled = mddev->recovery_disabled;
1395 		return;
1396 	}
1397 	set_bit(Blocked, &rdev->flags);
1398 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
1399 		unsigned long flags;
1400 		spin_lock_irqsave(&conf->device_lock, flags);
1401 		mddev->degraded++;
1402 		set_bit(Faulty, &rdev->flags);
1403 		spin_unlock_irqrestore(&conf->device_lock, flags);
1404 		/*
1405 		 * if recovery is running, make sure it aborts.
1406 		 */
1407 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1408 	} else
1409 		set_bit(Faulty, &rdev->flags);
1410 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1411 	printk(KERN_ALERT
1412 	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
1413 	       "md/raid1:%s: Operation continuing on %d devices.\n",
1414 	       mdname(mddev), bdevname(rdev->bdev, b),
1415 	       mdname(mddev), conf->raid_disks - mddev->degraded);
1416 }
1417 
1418 static void print_conf(struct r1conf *conf)
1419 {
1420 	int i;
1421 
1422 	printk(KERN_DEBUG "RAID1 conf printout:\n");
1423 	if (!conf) {
1424 		printk(KERN_DEBUG "(!conf)\n");
1425 		return;
1426 	}
1427 	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1428 		conf->raid_disks);
1429 
1430 	rcu_read_lock();
1431 	for (i = 0; i < conf->raid_disks; i++) {
1432 		char b[BDEVNAME_SIZE];
1433 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1434 		if (rdev)
1435 			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1436 			       i, !test_bit(In_sync, &rdev->flags),
1437 			       !test_bit(Faulty, &rdev->flags),
1438 			       bdevname(rdev->bdev,b));
1439 	}
1440 	rcu_read_unlock();
1441 }
1442 
1443 static void close_sync(struct r1conf *conf)
1444 {
1445 	wait_barrier(conf);
1446 	allow_barrier(conf);
1447 
1448 	mempool_destroy(conf->r1buf_pool);
1449 	conf->r1buf_pool = NULL;
1450 }
1451 
1452 static int raid1_spare_active(struct mddev *mddev)
1453 {
1454 	int i;
1455 	struct r1conf *conf = mddev->private;
1456 	int count = 0;
1457 	unsigned long flags;
1458 
1459 	/*
1460 	 * Find all failed disks within the RAID1 configuration
1461 	 * and mark them readable.
1462 	 * Called under mddev lock, so rcu protection not needed.
1463 	 */
1464 	for (i = 0; i < conf->raid_disks; i++) {
1465 		struct md_rdev *rdev = conf->mirrors[i].rdev;
1466 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1467 		if (repl
1468 		    && repl->recovery_offset == MaxSector
1469 		    && !test_bit(Faulty, &repl->flags)
1470 		    && !test_and_set_bit(In_sync, &repl->flags)) {
1471 			/* replacement has just become active */
1472 			if (!rdev ||
1473 			    !test_and_clear_bit(In_sync, &rdev->flags))
1474 				count++;
1475 			if (rdev) {
1476 				/* Replaced device not technically
1477 				 * faulty, but we need to be sure
1478 				 * it gets removed and never re-added
1479 				 */
1480 				set_bit(Faulty, &rdev->flags);
1481 				sysfs_notify_dirent_safe(
1482 					rdev->sysfs_state);
1483 			}
1484 		}
1485 		if (rdev
1486 		    && !test_bit(Faulty, &rdev->flags)
1487 		    && !test_and_set_bit(In_sync, &rdev->flags)) {
1488 			count++;
1489 			sysfs_notify_dirent_safe(rdev->sysfs_state);
1490 		}
1491 	}
1492 	spin_lock_irqsave(&conf->device_lock, flags);
1493 	mddev->degraded -= count;
1494 	spin_unlock_irqrestore(&conf->device_lock, flags);
1495 
1496 	print_conf(conf);
1497 	return count;
1498 }
1499 
1500 
1501 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1502 {
1503 	struct r1conf *conf = mddev->private;
1504 	int err = -EEXIST;
1505 	int mirror = 0;
1506 	struct raid1_info *p;
1507 	int first = 0;
1508 	int last = conf->raid_disks - 1;
1509 	struct request_queue *q = bdev_get_queue(rdev->bdev);
1510 
1511 	if (mddev->recovery_disabled == conf->recovery_disabled)
1512 		return -EBUSY;
1513 
1514 	if (rdev->raid_disk >= 0)
1515 		first = last = rdev->raid_disk;
1516 
1517 	if (q->merge_bvec_fn) {
1518 		set_bit(Unmerged, &rdev->flags);
1519 		mddev->merge_check_needed = 1;
1520 	}
1521 
1522 	for (mirror = first; mirror <= last; mirror++) {
1523 		p = conf->mirrors+mirror;
1524 		if (!p->rdev) {
1525 
1526 			disk_stack_limits(mddev->gendisk, rdev->bdev,
1527 					  rdev->data_offset << 9);
1528 
1529 			p->head_position = 0;
1530 			rdev->raid_disk = mirror;
1531 			err = 0;
1532 			/* As all devices are equivalent, we don't need a full recovery
1533 			 * if this was recently any drive of the array
1534 			 */
1535 			if (rdev->saved_raid_disk < 0)
1536 				conf->fullsync = 1;
1537 			rcu_assign_pointer(p->rdev, rdev);
1538 			break;
1539 		}
1540 		if (test_bit(WantReplacement, &p->rdev->flags) &&
1541 		    p[conf->raid_disks].rdev == NULL) {
1542 			/* Add this device as a replacement */
1543 			clear_bit(In_sync, &rdev->flags);
1544 			set_bit(Replacement, &rdev->flags);
1545 			rdev->raid_disk = mirror;
1546 			err = 0;
1547 			conf->fullsync = 1;
1548 			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1549 			break;
1550 		}
1551 	}
1552 	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1553 		/* Some requests might not have seen this new
1554 		 * merge_bvec_fn.  We must wait for them to complete
1555 		 * before merging the device fully.
1556 		 * First we make sure any code which has tested
1557 		 * our function has submitted the request, then
1558 		 * we wait for all outstanding requests to complete.
1559 		 */
1560 		synchronize_sched();
1561 		raise_barrier(conf);
1562 		lower_barrier(conf);
1563 		clear_bit(Unmerged, &rdev->flags);
1564 	}
1565 	md_integrity_add_rdev(rdev, mddev);
1566 	if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
1567 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1568 	print_conf(conf);
1569 	return err;
1570 }
1571 
1572 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1573 {
1574 	struct r1conf *conf = mddev->private;
1575 	int err = 0;
1576 	int number = rdev->raid_disk;
1577 	struct raid1_info *p = conf->mirrors + number;
1578 
1579 	if (rdev != p->rdev)
1580 		p = conf->mirrors + conf->raid_disks + number;
1581 
1582 	print_conf(conf);
1583 	if (rdev == p->rdev) {
1584 		if (test_bit(In_sync, &rdev->flags) ||
1585 		    atomic_read(&rdev->nr_pending)) {
1586 			err = -EBUSY;
1587 			goto abort;
1588 		}
1589 		/* Only remove non-faulty devices if recovery
1590 		 * is not possible.
1591 		 */
1592 		if (!test_bit(Faulty, &rdev->flags) &&
1593 		    mddev->recovery_disabled != conf->recovery_disabled &&
1594 		    mddev->degraded < conf->raid_disks) {
1595 			err = -EBUSY;
1596 			goto abort;
1597 		}
1598 		p->rdev = NULL;
1599 		synchronize_rcu();
1600 		if (atomic_read(&rdev->nr_pending)) {
1601 			/* lost the race, try later */
1602 			err = -EBUSY;
1603 			p->rdev = rdev;
1604 			goto abort;
1605 		} else if (conf->mirrors[conf->raid_disks + number].rdev) {
1606 			/* We just removed a device that is being replaced.
1607 			 * Move down the replacement.  We drain all IO before
1608 			 * doing this to avoid confusion.
1609 			 */
1610 			struct md_rdev *repl =
1611 				conf->mirrors[conf->raid_disks + number].rdev;
1612 			raise_barrier(conf);
1613 			clear_bit(Replacement, &repl->flags);
1614 			p->rdev = repl;
1615 			conf->mirrors[conf->raid_disks + number].rdev = NULL;
1616 			lower_barrier(conf);
1617 			clear_bit(WantReplacement, &rdev->flags);
1618 		} else
1619 			clear_bit(WantReplacement, &rdev->flags);
1620 		err = md_integrity_register(mddev);
1621 	}
1622 abort:
1623 
1624 	print_conf(conf);
1625 	return err;
1626 }
1627 
1628 
1629 static void end_sync_read(struct bio *bio, int error)
1630 {
1631 	struct r1bio *r1_bio = bio->bi_private;
1632 
1633 	update_head_pos(r1_bio->read_disk, r1_bio);
1634 
1635 	/*
1636 	 * we have read a block, now it needs to be re-written,
1637 	 * or re-read if the read failed.
1638 	 * We don't do much here, just schedule handling by raid1d
1639 	 */
1640 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1641 		set_bit(R1BIO_Uptodate, &r1_bio->state);
1642 
1643 	if (atomic_dec_and_test(&r1_bio->remaining))
1644 		reschedule_retry(r1_bio);
1645 }
1646 
1647 static void end_sync_write(struct bio *bio, int error)
1648 {
1649 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1650 	struct r1bio *r1_bio = bio->bi_private;
1651 	struct mddev *mddev = r1_bio->mddev;
1652 	struct r1conf *conf = mddev->private;
1653 	int mirror=0;
1654 	sector_t first_bad;
1655 	int bad_sectors;
1656 
1657 	mirror = find_bio_disk(r1_bio, bio);
1658 
1659 	if (!uptodate) {
1660 		sector_t sync_blocks = 0;
1661 		sector_t s = r1_bio->sector;
1662 		long sectors_to_go = r1_bio->sectors;
1663 		/* make sure these bits doesn't get cleared. */
1664 		do {
1665 			bitmap_end_sync(mddev->bitmap, s,
1666 					&sync_blocks, 1);
1667 			s += sync_blocks;
1668 			sectors_to_go -= sync_blocks;
1669 		} while (sectors_to_go > 0);
1670 		set_bit(WriteErrorSeen,
1671 			&conf->mirrors[mirror].rdev->flags);
1672 		if (!test_and_set_bit(WantReplacement,
1673 				      &conf->mirrors[mirror].rdev->flags))
1674 			set_bit(MD_RECOVERY_NEEDED, &
1675 				mddev->recovery);
1676 		set_bit(R1BIO_WriteError, &r1_bio->state);
1677 	} else if (is_badblock(conf->mirrors[mirror].rdev,
1678 			       r1_bio->sector,
1679 			       r1_bio->sectors,
1680 			       &first_bad, &bad_sectors) &&
1681 		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1682 				r1_bio->sector,
1683 				r1_bio->sectors,
1684 				&first_bad, &bad_sectors)
1685 		)
1686 		set_bit(R1BIO_MadeGood, &r1_bio->state);
1687 
1688 	if (atomic_dec_and_test(&r1_bio->remaining)) {
1689 		int s = r1_bio->sectors;
1690 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1691 		    test_bit(R1BIO_WriteError, &r1_bio->state))
1692 			reschedule_retry(r1_bio);
1693 		else {
1694 			put_buf(r1_bio);
1695 			md_done_sync(mddev, s, uptodate);
1696 		}
1697 	}
1698 }
1699 
1700 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1701 			    int sectors, struct page *page, int rw)
1702 {
1703 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1704 		/* success */
1705 		return 1;
1706 	if (rw == WRITE) {
1707 		set_bit(WriteErrorSeen, &rdev->flags);
1708 		if (!test_and_set_bit(WantReplacement,
1709 				      &rdev->flags))
1710 			set_bit(MD_RECOVERY_NEEDED, &
1711 				rdev->mddev->recovery);
1712 	}
1713 	/* need to record an error - either for the block or the device */
1714 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1715 		md_error(rdev->mddev, rdev);
1716 	return 0;
1717 }
1718 
1719 static int fix_sync_read_error(struct r1bio *r1_bio)
1720 {
1721 	/* Try some synchronous reads of other devices to get
1722 	 * good data, much like with normal read errors.  Only
1723 	 * read into the pages we already have so we don't
1724 	 * need to re-issue the read request.
1725 	 * We don't need to freeze the array, because being in an
1726 	 * active sync request, there is no normal IO, and
1727 	 * no overlapping syncs.
1728 	 * We don't need to check is_badblock() again as we
1729 	 * made sure that anything with a bad block in range
1730 	 * will have bi_end_io clear.
1731 	 */
1732 	struct mddev *mddev = r1_bio->mddev;
1733 	struct r1conf *conf = mddev->private;
1734 	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1735 	sector_t sect = r1_bio->sector;
1736 	int sectors = r1_bio->sectors;
1737 	int idx = 0;
1738 
1739 	while(sectors) {
1740 		int s = sectors;
1741 		int d = r1_bio->read_disk;
1742 		int success = 0;
1743 		struct md_rdev *rdev;
1744 		int start;
1745 
1746 		if (s > (PAGE_SIZE>>9))
1747 			s = PAGE_SIZE >> 9;
1748 		do {
1749 			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1750 				/* No rcu protection needed here devices
1751 				 * can only be removed when no resync is
1752 				 * active, and resync is currently active
1753 				 */
1754 				rdev = conf->mirrors[d].rdev;
1755 				if (sync_page_io(rdev, sect, s<<9,
1756 						 bio->bi_io_vec[idx].bv_page,
1757 						 READ, false)) {
1758 					success = 1;
1759 					break;
1760 				}
1761 			}
1762 			d++;
1763 			if (d == conf->raid_disks * 2)
1764 				d = 0;
1765 		} while (!success && d != r1_bio->read_disk);
1766 
1767 		if (!success) {
1768 			char b[BDEVNAME_SIZE];
1769 			int abort = 0;
1770 			/* Cannot read from anywhere, this block is lost.
1771 			 * Record a bad block on each device.  If that doesn't
1772 			 * work just disable and interrupt the recovery.
1773 			 * Don't fail devices as that won't really help.
1774 			 */
1775 			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
1776 			       " for block %llu\n",
1777 			       mdname(mddev),
1778 			       bdevname(bio->bi_bdev, b),
1779 			       (unsigned long long)r1_bio->sector);
1780 			for (d = 0; d < conf->raid_disks * 2; d++) {
1781 				rdev = conf->mirrors[d].rdev;
1782 				if (!rdev || test_bit(Faulty, &rdev->flags))
1783 					continue;
1784 				if (!rdev_set_badblocks(rdev, sect, s, 0))
1785 					abort = 1;
1786 			}
1787 			if (abort) {
1788 				conf->recovery_disabled =
1789 					mddev->recovery_disabled;
1790 				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1791 				md_done_sync(mddev, r1_bio->sectors, 0);
1792 				put_buf(r1_bio);
1793 				return 0;
1794 			}
1795 			/* Try next page */
1796 			sectors -= s;
1797 			sect += s;
1798 			idx++;
1799 			continue;
1800 		}
1801 
1802 		start = d;
1803 		/* write it back and re-read */
1804 		while (d != r1_bio->read_disk) {
1805 			if (d == 0)
1806 				d = conf->raid_disks * 2;
1807 			d--;
1808 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1809 				continue;
1810 			rdev = conf->mirrors[d].rdev;
1811 			if (r1_sync_page_io(rdev, sect, s,
1812 					    bio->bi_io_vec[idx].bv_page,
1813 					    WRITE) == 0) {
1814 				r1_bio->bios[d]->bi_end_io = NULL;
1815 				rdev_dec_pending(rdev, mddev);
1816 			}
1817 		}
1818 		d = start;
1819 		while (d != r1_bio->read_disk) {
1820 			if (d == 0)
1821 				d = conf->raid_disks * 2;
1822 			d--;
1823 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1824 				continue;
1825 			rdev = conf->mirrors[d].rdev;
1826 			if (r1_sync_page_io(rdev, sect, s,
1827 					    bio->bi_io_vec[idx].bv_page,
1828 					    READ) != 0)
1829 				atomic_add(s, &rdev->corrected_errors);
1830 		}
1831 		sectors -= s;
1832 		sect += s;
1833 		idx ++;
1834 	}
1835 	set_bit(R1BIO_Uptodate, &r1_bio->state);
1836 	set_bit(BIO_UPTODATE, &bio->bi_flags);
1837 	return 1;
1838 }
1839 
1840 static int process_checks(struct r1bio *r1_bio)
1841 {
1842 	/* We have read all readable devices.  If we haven't
1843 	 * got the block, then there is no hope left.
1844 	 * If we have, then we want to do a comparison
1845 	 * and skip the write if everything is the same.
1846 	 * If any blocks failed to read, then we need to
1847 	 * attempt an over-write
1848 	 */
1849 	struct mddev *mddev = r1_bio->mddev;
1850 	struct r1conf *conf = mddev->private;
1851 	int primary;
1852 	int i;
1853 	int vcnt;
1854 
1855 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
1856 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1857 		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1858 			r1_bio->bios[primary]->bi_end_io = NULL;
1859 			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1860 			break;
1861 		}
1862 	r1_bio->read_disk = primary;
1863 	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
1864 	for (i = 0; i < conf->raid_disks * 2; i++) {
1865 		int j;
1866 		struct bio *pbio = r1_bio->bios[primary];
1867 		struct bio *sbio = r1_bio->bios[i];
1868 		int size;
1869 
1870 		if (r1_bio->bios[i]->bi_end_io != end_sync_read)
1871 			continue;
1872 
1873 		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
1874 			for (j = vcnt; j-- ; ) {
1875 				struct page *p, *s;
1876 				p = pbio->bi_io_vec[j].bv_page;
1877 				s = sbio->bi_io_vec[j].bv_page;
1878 				if (memcmp(page_address(p),
1879 					   page_address(s),
1880 					   sbio->bi_io_vec[j].bv_len))
1881 					break;
1882 			}
1883 		} else
1884 			j = 0;
1885 		if (j >= 0)
1886 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
1887 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
1888 			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
1889 			/* No need to write to this device. */
1890 			sbio->bi_end_io = NULL;
1891 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1892 			continue;
1893 		}
1894 		/* fixup the bio for reuse */
1895 		sbio->bi_vcnt = vcnt;
1896 		sbio->bi_size = r1_bio->sectors << 9;
1897 		sbio->bi_idx = 0;
1898 		sbio->bi_phys_segments = 0;
1899 		sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1900 		sbio->bi_flags |= 1 << BIO_UPTODATE;
1901 		sbio->bi_next = NULL;
1902 		sbio->bi_sector = r1_bio->sector +
1903 			conf->mirrors[i].rdev->data_offset;
1904 		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1905 		size = sbio->bi_size;
1906 		for (j = 0; j < vcnt ; j++) {
1907 			struct bio_vec *bi;
1908 			bi = &sbio->bi_io_vec[j];
1909 			bi->bv_offset = 0;
1910 			if (size > PAGE_SIZE)
1911 				bi->bv_len = PAGE_SIZE;
1912 			else
1913 				bi->bv_len = size;
1914 			size -= PAGE_SIZE;
1915 			memcpy(page_address(bi->bv_page),
1916 			       page_address(pbio->bi_io_vec[j].bv_page),
1917 			       PAGE_SIZE);
1918 		}
1919 	}
1920 	return 0;
1921 }
1922 
1923 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
1924 {
1925 	struct r1conf *conf = mddev->private;
1926 	int i;
1927 	int disks = conf->raid_disks * 2;
1928 	struct bio *bio, *wbio;
1929 
1930 	bio = r1_bio->bios[r1_bio->read_disk];
1931 
1932 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
1933 		/* ouch - failed to read all of that. */
1934 		if (!fix_sync_read_error(r1_bio))
1935 			return;
1936 
1937 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1938 		if (process_checks(r1_bio) < 0)
1939 			return;
1940 	/*
1941 	 * schedule writes
1942 	 */
1943 	atomic_set(&r1_bio->remaining, 1);
1944 	for (i = 0; i < disks ; i++) {
1945 		wbio = r1_bio->bios[i];
1946 		if (wbio->bi_end_io == NULL ||
1947 		    (wbio->bi_end_io == end_sync_read &&
1948 		     (i == r1_bio->read_disk ||
1949 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1950 			continue;
1951 
1952 		wbio->bi_rw = WRITE;
1953 		wbio->bi_end_io = end_sync_write;
1954 		atomic_inc(&r1_bio->remaining);
1955 		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
1956 
1957 		generic_make_request(wbio);
1958 	}
1959 
1960 	if (atomic_dec_and_test(&r1_bio->remaining)) {
1961 		/* if we're here, all write(s) have completed, so clean up */
1962 		int s = r1_bio->sectors;
1963 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1964 		    test_bit(R1BIO_WriteError, &r1_bio->state))
1965 			reschedule_retry(r1_bio);
1966 		else {
1967 			put_buf(r1_bio);
1968 			md_done_sync(mddev, s, 1);
1969 		}
1970 	}
1971 }
1972 
1973 /*
1974  * This is a kernel thread which:
1975  *
1976  *	1.	Retries failed read operations on working mirrors.
1977  *	2.	Updates the raid superblock when problems encounter.
1978  *	3.	Performs writes following reads for array synchronising.
1979  */
1980 
1981 static void fix_read_error(struct r1conf *conf, int read_disk,
1982 			   sector_t sect, int sectors)
1983 {
1984 	struct mddev *mddev = conf->mddev;
1985 	while(sectors) {
1986 		int s = sectors;
1987 		int d = read_disk;
1988 		int success = 0;
1989 		int start;
1990 		struct md_rdev *rdev;
1991 
1992 		if (s > (PAGE_SIZE>>9))
1993 			s = PAGE_SIZE >> 9;
1994 
1995 		do {
1996 			/* Note: no rcu protection needed here
1997 			 * as this is synchronous in the raid1d thread
1998 			 * which is the thread that might remove
1999 			 * a device.  If raid1d ever becomes multi-threaded....
2000 			 */
2001 			sector_t first_bad;
2002 			int bad_sectors;
2003 
2004 			rdev = conf->mirrors[d].rdev;
2005 			if (rdev &&
2006 			    (test_bit(In_sync, &rdev->flags) ||
2007 			     (!test_bit(Faulty, &rdev->flags) &&
2008 			      rdev->recovery_offset >= sect + s)) &&
2009 			    is_badblock(rdev, sect, s,
2010 					&first_bad, &bad_sectors) == 0 &&
2011 			    sync_page_io(rdev, sect, s<<9,
2012 					 conf->tmppage, READ, false))
2013 				success = 1;
2014 			else {
2015 				d++;
2016 				if (d == conf->raid_disks * 2)
2017 					d = 0;
2018 			}
2019 		} while (!success && d != read_disk);
2020 
2021 		if (!success) {
2022 			/* Cannot read from anywhere - mark it bad */
2023 			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2024 			if (!rdev_set_badblocks(rdev, sect, s, 0))
2025 				md_error(mddev, rdev);
2026 			break;
2027 		}
2028 		/* write it back and re-read */
2029 		start = d;
2030 		while (d != read_disk) {
2031 			if (d==0)
2032 				d = conf->raid_disks * 2;
2033 			d--;
2034 			rdev = conf->mirrors[d].rdev;
2035 			if (rdev &&
2036 			    test_bit(In_sync, &rdev->flags))
2037 				r1_sync_page_io(rdev, sect, s,
2038 						conf->tmppage, WRITE);
2039 		}
2040 		d = start;
2041 		while (d != read_disk) {
2042 			char b[BDEVNAME_SIZE];
2043 			if (d==0)
2044 				d = conf->raid_disks * 2;
2045 			d--;
2046 			rdev = conf->mirrors[d].rdev;
2047 			if (rdev &&
2048 			    test_bit(In_sync, &rdev->flags)) {
2049 				if (r1_sync_page_io(rdev, sect, s,
2050 						    conf->tmppage, READ)) {
2051 					atomic_add(s, &rdev->corrected_errors);
2052 					printk(KERN_INFO
2053 					       "md/raid1:%s: read error corrected "
2054 					       "(%d sectors at %llu on %s)\n",
2055 					       mdname(mddev), s,
2056 					       (unsigned long long)(sect +
2057 					           rdev->data_offset),
2058 					       bdevname(rdev->bdev, b));
2059 				}
2060 			}
2061 		}
2062 		sectors -= s;
2063 		sect += s;
2064 	}
2065 }
2066 
2067 static void bi_complete(struct bio *bio, int error)
2068 {
2069 	complete((struct completion *)bio->bi_private);
2070 }
2071 
2072 static int submit_bio_wait(int rw, struct bio *bio)
2073 {
2074 	struct completion event;
2075 	rw |= REQ_SYNC;
2076 
2077 	init_completion(&event);
2078 	bio->bi_private = &event;
2079 	bio->bi_end_io = bi_complete;
2080 	submit_bio(rw, bio);
2081 	wait_for_completion(&event);
2082 
2083 	return test_bit(BIO_UPTODATE, &bio->bi_flags);
2084 }
2085 
2086 static int narrow_write_error(struct r1bio *r1_bio, int i)
2087 {
2088 	struct mddev *mddev = r1_bio->mddev;
2089 	struct r1conf *conf = mddev->private;
2090 	struct md_rdev *rdev = conf->mirrors[i].rdev;
2091 	int vcnt, idx;
2092 	struct bio_vec *vec;
2093 
2094 	/* bio has the data to be written to device 'i' where
2095 	 * we just recently had a write error.
2096 	 * We repeatedly clone the bio and trim down to one block,
2097 	 * then try the write.  Where the write fails we record
2098 	 * a bad block.
2099 	 * It is conceivable that the bio doesn't exactly align with
2100 	 * blocks.  We must handle this somehow.
2101 	 *
2102 	 * We currently own a reference on the rdev.
2103 	 */
2104 
2105 	int block_sectors;
2106 	sector_t sector;
2107 	int sectors;
2108 	int sect_to_write = r1_bio->sectors;
2109 	int ok = 1;
2110 
2111 	if (rdev->badblocks.shift < 0)
2112 		return 0;
2113 
2114 	block_sectors = 1 << rdev->badblocks.shift;
2115 	sector = r1_bio->sector;
2116 	sectors = ((sector + block_sectors)
2117 		   & ~(sector_t)(block_sectors - 1))
2118 		- sector;
2119 
2120 	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2121 		vcnt = r1_bio->behind_page_count;
2122 		vec = r1_bio->behind_bvecs;
2123 		idx = 0;
2124 		while (vec[idx].bv_page == NULL)
2125 			idx++;
2126 	} else {
2127 		vcnt = r1_bio->master_bio->bi_vcnt;
2128 		vec = r1_bio->master_bio->bi_io_vec;
2129 		idx = r1_bio->master_bio->bi_idx;
2130 	}
2131 	while (sect_to_write) {
2132 		struct bio *wbio;
2133 		if (sectors > sect_to_write)
2134 			sectors = sect_to_write;
2135 		/* Write at 'sector' for 'sectors'*/
2136 
2137 		wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
2138 		memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
2139 		wbio->bi_sector = r1_bio->sector;
2140 		wbio->bi_rw = WRITE;
2141 		wbio->bi_vcnt = vcnt;
2142 		wbio->bi_size = r1_bio->sectors << 9;
2143 		wbio->bi_idx = idx;
2144 
2145 		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
2146 		wbio->bi_sector += rdev->data_offset;
2147 		wbio->bi_bdev = rdev->bdev;
2148 		if (submit_bio_wait(WRITE, wbio) == 0)
2149 			/* failure! */
2150 			ok = rdev_set_badblocks(rdev, sector,
2151 						sectors, 0)
2152 				&& ok;
2153 
2154 		bio_put(wbio);
2155 		sect_to_write -= sectors;
2156 		sector += sectors;
2157 		sectors = block_sectors;
2158 	}
2159 	return ok;
2160 }
2161 
2162 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2163 {
2164 	int m;
2165 	int s = r1_bio->sectors;
2166 	for (m = 0; m < conf->raid_disks * 2 ; m++) {
2167 		struct md_rdev *rdev = conf->mirrors[m].rdev;
2168 		struct bio *bio = r1_bio->bios[m];
2169 		if (bio->bi_end_io == NULL)
2170 			continue;
2171 		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
2172 		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2173 			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2174 		}
2175 		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
2176 		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
2177 			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2178 				md_error(conf->mddev, rdev);
2179 		}
2180 	}
2181 	put_buf(r1_bio);
2182 	md_done_sync(conf->mddev, s, 1);
2183 }
2184 
2185 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2186 {
2187 	int m;
2188 	for (m = 0; m < conf->raid_disks * 2 ; m++)
2189 		if (r1_bio->bios[m] == IO_MADE_GOOD) {
2190 			struct md_rdev *rdev = conf->mirrors[m].rdev;
2191 			rdev_clear_badblocks(rdev,
2192 					     r1_bio->sector,
2193 					     r1_bio->sectors, 0);
2194 			rdev_dec_pending(rdev, conf->mddev);
2195 		} else if (r1_bio->bios[m] != NULL) {
2196 			/* This drive got a write error.  We need to
2197 			 * narrow down and record precise write
2198 			 * errors.
2199 			 */
2200 			if (!narrow_write_error(r1_bio, m)) {
2201 				md_error(conf->mddev,
2202 					 conf->mirrors[m].rdev);
2203 				/* an I/O failed, we can't clear the bitmap */
2204 				set_bit(R1BIO_Degraded, &r1_bio->state);
2205 			}
2206 			rdev_dec_pending(conf->mirrors[m].rdev,
2207 					 conf->mddev);
2208 		}
2209 	if (test_bit(R1BIO_WriteError, &r1_bio->state))
2210 		close_write(r1_bio);
2211 	raid_end_bio_io(r1_bio);
2212 }
2213 
2214 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2215 {
2216 	int disk;
2217 	int max_sectors;
2218 	struct mddev *mddev = conf->mddev;
2219 	struct bio *bio;
2220 	char b[BDEVNAME_SIZE];
2221 	struct md_rdev *rdev;
2222 
2223 	clear_bit(R1BIO_ReadError, &r1_bio->state);
2224 	/* we got a read error. Maybe the drive is bad.  Maybe just
2225 	 * the block and we can fix it.
2226 	 * We freeze all other IO, and try reading the block from
2227 	 * other devices.  When we find one, we re-write
2228 	 * and check it that fixes the read error.
2229 	 * This is all done synchronously while the array is
2230 	 * frozen
2231 	 */
2232 	if (mddev->ro == 0) {
2233 		freeze_array(conf);
2234 		fix_read_error(conf, r1_bio->read_disk,
2235 			       r1_bio->sector, r1_bio->sectors);
2236 		unfreeze_array(conf);
2237 	} else
2238 		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
2239 	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
2240 
2241 	bio = r1_bio->bios[r1_bio->read_disk];
2242 	bdevname(bio->bi_bdev, b);
2243 read_more:
2244 	disk = read_balance(conf, r1_bio, &max_sectors);
2245 	if (disk == -1) {
2246 		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
2247 		       " read error for block %llu\n",
2248 		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
2249 		raid_end_bio_io(r1_bio);
2250 	} else {
2251 		const unsigned long do_sync
2252 			= r1_bio->master_bio->bi_rw & REQ_SYNC;
2253 		if (bio) {
2254 			r1_bio->bios[r1_bio->read_disk] =
2255 				mddev->ro ? IO_BLOCKED : NULL;
2256 			bio_put(bio);
2257 		}
2258 		r1_bio->read_disk = disk;
2259 		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2260 		md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
2261 		r1_bio->bios[r1_bio->read_disk] = bio;
2262 		rdev = conf->mirrors[disk].rdev;
2263 		printk_ratelimited(KERN_ERR
2264 				   "md/raid1:%s: redirecting sector %llu"
2265 				   " to other mirror: %s\n",
2266 				   mdname(mddev),
2267 				   (unsigned long long)r1_bio->sector,
2268 				   bdevname(rdev->bdev, b));
2269 		bio->bi_sector = r1_bio->sector + rdev->data_offset;
2270 		bio->bi_bdev = rdev->bdev;
2271 		bio->bi_end_io = raid1_end_read_request;
2272 		bio->bi_rw = READ | do_sync;
2273 		bio->bi_private = r1_bio;
2274 		if (max_sectors < r1_bio->sectors) {
2275 			/* Drat - have to split this up more */
2276 			struct bio *mbio = r1_bio->master_bio;
2277 			int sectors_handled = (r1_bio->sector + max_sectors
2278 					       - mbio->bi_sector);
2279 			r1_bio->sectors = max_sectors;
2280 			spin_lock_irq(&conf->device_lock);
2281 			if (mbio->bi_phys_segments == 0)
2282 				mbio->bi_phys_segments = 2;
2283 			else
2284 				mbio->bi_phys_segments++;
2285 			spin_unlock_irq(&conf->device_lock);
2286 			generic_make_request(bio);
2287 			bio = NULL;
2288 
2289 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2290 
2291 			r1_bio->master_bio = mbio;
2292 			r1_bio->sectors = (mbio->bi_size >> 9)
2293 					  - sectors_handled;
2294 			r1_bio->state = 0;
2295 			set_bit(R1BIO_ReadError, &r1_bio->state);
2296 			r1_bio->mddev = mddev;
2297 			r1_bio->sector = mbio->bi_sector + sectors_handled;
2298 
2299 			goto read_more;
2300 		} else
2301 			generic_make_request(bio);
2302 	}
2303 }
2304 
2305 static void raid1d(struct md_thread *thread)
2306 {
2307 	struct mddev *mddev = thread->mddev;
2308 	struct r1bio *r1_bio;
2309 	unsigned long flags;
2310 	struct r1conf *conf = mddev->private;
2311 	struct list_head *head = &conf->retry_list;
2312 	struct blk_plug plug;
2313 
2314 	md_check_recovery(mddev);
2315 
2316 	blk_start_plug(&plug);
2317 	for (;;) {
2318 
2319 		flush_pending_writes(conf);
2320 
2321 		spin_lock_irqsave(&conf->device_lock, flags);
2322 		if (list_empty(head)) {
2323 			spin_unlock_irqrestore(&conf->device_lock, flags);
2324 			break;
2325 		}
2326 		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2327 		list_del(head->prev);
2328 		conf->nr_queued--;
2329 		spin_unlock_irqrestore(&conf->device_lock, flags);
2330 
2331 		mddev = r1_bio->mddev;
2332 		conf = mddev->private;
2333 		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2334 			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2335 			    test_bit(R1BIO_WriteError, &r1_bio->state))
2336 				handle_sync_write_finished(conf, r1_bio);
2337 			else
2338 				sync_request_write(mddev, r1_bio);
2339 		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2340 			   test_bit(R1BIO_WriteError, &r1_bio->state))
2341 			handle_write_finished(conf, r1_bio);
2342 		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2343 			handle_read_error(conf, r1_bio);
2344 		else
2345 			/* just a partial read to be scheduled from separate
2346 			 * context
2347 			 */
2348 			generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2349 
2350 		cond_resched();
2351 		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2352 			md_check_recovery(mddev);
2353 	}
2354 	blk_finish_plug(&plug);
2355 }
2356 
2357 
2358 static int init_resync(struct r1conf *conf)
2359 {
2360 	int buffs;
2361 
2362 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2363 	BUG_ON(conf->r1buf_pool);
2364 	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2365 					  conf->poolinfo);
2366 	if (!conf->r1buf_pool)
2367 		return -ENOMEM;
2368 	conf->next_resync = 0;
2369 	return 0;
2370 }
2371 
2372 /*
2373  * perform a "sync" on one "block"
2374  *
2375  * We need to make sure that no normal I/O request - particularly write
2376  * requests - conflict with active sync requests.
2377  *
2378  * This is achieved by tracking pending requests and a 'barrier' concept
2379  * that can be installed to exclude normal IO requests.
2380  */
2381 
2382 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
2383 {
2384 	struct r1conf *conf = mddev->private;
2385 	struct r1bio *r1_bio;
2386 	struct bio *bio;
2387 	sector_t max_sector, nr_sectors;
2388 	int disk = -1;
2389 	int i;
2390 	int wonly = -1;
2391 	int write_targets = 0, read_targets = 0;
2392 	sector_t sync_blocks;
2393 	int still_degraded = 0;
2394 	int good_sectors = RESYNC_SECTORS;
2395 	int min_bad = 0; /* number of sectors that are bad in all devices */
2396 
2397 	if (!conf->r1buf_pool)
2398 		if (init_resync(conf))
2399 			return 0;
2400 
2401 	max_sector = mddev->dev_sectors;
2402 	if (sector_nr >= max_sector) {
2403 		/* If we aborted, we need to abort the
2404 		 * sync on the 'current' bitmap chunk (there will
2405 		 * only be one in raid1 resync.
2406 		 * We can find the current addess in mddev->curr_resync
2407 		 */
2408 		if (mddev->curr_resync < max_sector) /* aborted */
2409 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2410 						&sync_blocks, 1);
2411 		else /* completed sync */
2412 			conf->fullsync = 0;
2413 
2414 		bitmap_close_sync(mddev->bitmap);
2415 		close_sync(conf);
2416 		return 0;
2417 	}
2418 
2419 	if (mddev->bitmap == NULL &&
2420 	    mddev->recovery_cp == MaxSector &&
2421 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2422 	    conf->fullsync == 0) {
2423 		*skipped = 1;
2424 		return max_sector - sector_nr;
2425 	}
2426 	/* before building a request, check if we can skip these blocks..
2427 	 * This call the bitmap_start_sync doesn't actually record anything
2428 	 */
2429 	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2430 	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2431 		/* We can skip this block, and probably several more */
2432 		*skipped = 1;
2433 		return sync_blocks;
2434 	}
2435 	/*
2436 	 * If there is non-resync activity waiting for a turn,
2437 	 * and resync is going fast enough,
2438 	 * then let it though before starting on this new sync request.
2439 	 */
2440 	if (!go_faster && conf->nr_waiting)
2441 		msleep_interruptible(1000);
2442 
2443 	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2444 	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2445 	raise_barrier(conf);
2446 
2447 	conf->next_resync = sector_nr;
2448 
2449 	rcu_read_lock();
2450 	/*
2451 	 * If we get a correctably read error during resync or recovery,
2452 	 * we might want to read from a different device.  So we
2453 	 * flag all drives that could conceivably be read from for READ,
2454 	 * and any others (which will be non-In_sync devices) for WRITE.
2455 	 * If a read fails, we try reading from something else for which READ
2456 	 * is OK.
2457 	 */
2458 
2459 	r1_bio->mddev = mddev;
2460 	r1_bio->sector = sector_nr;
2461 	r1_bio->state = 0;
2462 	set_bit(R1BIO_IsSync, &r1_bio->state);
2463 
2464 	for (i = 0; i < conf->raid_disks * 2; i++) {
2465 		struct md_rdev *rdev;
2466 		bio = r1_bio->bios[i];
2467 
2468 		/* take from bio_init */
2469 		bio->bi_next = NULL;
2470 		bio->bi_flags &= ~(BIO_POOL_MASK-1);
2471 		bio->bi_flags |= 1 << BIO_UPTODATE;
2472 		bio->bi_rw = READ;
2473 		bio->bi_vcnt = 0;
2474 		bio->bi_idx = 0;
2475 		bio->bi_phys_segments = 0;
2476 		bio->bi_size = 0;
2477 		bio->bi_end_io = NULL;
2478 		bio->bi_private = NULL;
2479 
2480 		rdev = rcu_dereference(conf->mirrors[i].rdev);
2481 		if (rdev == NULL ||
2482 		    test_bit(Faulty, &rdev->flags)) {
2483 			if (i < conf->raid_disks)
2484 				still_degraded = 1;
2485 		} else if (!test_bit(In_sync, &rdev->flags)) {
2486 			bio->bi_rw = WRITE;
2487 			bio->bi_end_io = end_sync_write;
2488 			write_targets ++;
2489 		} else {
2490 			/* may need to read from here */
2491 			sector_t first_bad = MaxSector;
2492 			int bad_sectors;
2493 
2494 			if (is_badblock(rdev, sector_nr, good_sectors,
2495 					&first_bad, &bad_sectors)) {
2496 				if (first_bad > sector_nr)
2497 					good_sectors = first_bad - sector_nr;
2498 				else {
2499 					bad_sectors -= (sector_nr - first_bad);
2500 					if (min_bad == 0 ||
2501 					    min_bad > bad_sectors)
2502 						min_bad = bad_sectors;
2503 				}
2504 			}
2505 			if (sector_nr < first_bad) {
2506 				if (test_bit(WriteMostly, &rdev->flags)) {
2507 					if (wonly < 0)
2508 						wonly = i;
2509 				} else {
2510 					if (disk < 0)
2511 						disk = i;
2512 				}
2513 				bio->bi_rw = READ;
2514 				bio->bi_end_io = end_sync_read;
2515 				read_targets++;
2516 			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2517 				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2518 				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2519 				/*
2520 				 * The device is suitable for reading (InSync),
2521 				 * but has bad block(s) here. Let's try to correct them,
2522 				 * if we are doing resync or repair. Otherwise, leave
2523 				 * this device alone for this sync request.
2524 				 */
2525 				bio->bi_rw = WRITE;
2526 				bio->bi_end_io = end_sync_write;
2527 				write_targets++;
2528 			}
2529 		}
2530 		if (bio->bi_end_io) {
2531 			atomic_inc(&rdev->nr_pending);
2532 			bio->bi_sector = sector_nr + rdev->data_offset;
2533 			bio->bi_bdev = rdev->bdev;
2534 			bio->bi_private = r1_bio;
2535 		}
2536 	}
2537 	rcu_read_unlock();
2538 	if (disk < 0)
2539 		disk = wonly;
2540 	r1_bio->read_disk = disk;
2541 
2542 	if (read_targets == 0 && min_bad > 0) {
2543 		/* These sectors are bad on all InSync devices, so we
2544 		 * need to mark them bad on all write targets
2545 		 */
2546 		int ok = 1;
2547 		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2548 			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2549 				struct md_rdev *rdev = conf->mirrors[i].rdev;
2550 				ok = rdev_set_badblocks(rdev, sector_nr,
2551 							min_bad, 0
2552 					) && ok;
2553 			}
2554 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
2555 		*skipped = 1;
2556 		put_buf(r1_bio);
2557 
2558 		if (!ok) {
2559 			/* Cannot record the badblocks, so need to
2560 			 * abort the resync.
2561 			 * If there are multiple read targets, could just
2562 			 * fail the really bad ones ???
2563 			 */
2564 			conf->recovery_disabled = mddev->recovery_disabled;
2565 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2566 			return 0;
2567 		} else
2568 			return min_bad;
2569 
2570 	}
2571 	if (min_bad > 0 && min_bad < good_sectors) {
2572 		/* only resync enough to reach the next bad->good
2573 		 * transition */
2574 		good_sectors = min_bad;
2575 	}
2576 
2577 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2578 		/* extra read targets are also write targets */
2579 		write_targets += read_targets-1;
2580 
2581 	if (write_targets == 0 || read_targets == 0) {
2582 		/* There is nowhere to write, so all non-sync
2583 		 * drives must be failed - so we are finished
2584 		 */
2585 		sector_t rv;
2586 		if (min_bad > 0)
2587 			max_sector = sector_nr + min_bad;
2588 		rv = max_sector - sector_nr;
2589 		*skipped = 1;
2590 		put_buf(r1_bio);
2591 		return rv;
2592 	}
2593 
2594 	if (max_sector > mddev->resync_max)
2595 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2596 	if (max_sector > sector_nr + good_sectors)
2597 		max_sector = sector_nr + good_sectors;
2598 	nr_sectors = 0;
2599 	sync_blocks = 0;
2600 	do {
2601 		struct page *page;
2602 		int len = PAGE_SIZE;
2603 		if (sector_nr + (len>>9) > max_sector)
2604 			len = (max_sector - sector_nr) << 9;
2605 		if (len == 0)
2606 			break;
2607 		if (sync_blocks == 0) {
2608 			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2609 					       &sync_blocks, still_degraded) &&
2610 			    !conf->fullsync &&
2611 			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2612 				break;
2613 			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
2614 			if ((len >> 9) > sync_blocks)
2615 				len = sync_blocks<<9;
2616 		}
2617 
2618 		for (i = 0 ; i < conf->raid_disks * 2; i++) {
2619 			bio = r1_bio->bios[i];
2620 			if (bio->bi_end_io) {
2621 				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2622 				if (bio_add_page(bio, page, len, 0) == 0) {
2623 					/* stop here */
2624 					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2625 					while (i > 0) {
2626 						i--;
2627 						bio = r1_bio->bios[i];
2628 						if (bio->bi_end_io==NULL)
2629 							continue;
2630 						/* remove last page from this bio */
2631 						bio->bi_vcnt--;
2632 						bio->bi_size -= len;
2633 						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
2634 					}
2635 					goto bio_full;
2636 				}
2637 			}
2638 		}
2639 		nr_sectors += len>>9;
2640 		sector_nr += len>>9;
2641 		sync_blocks -= (len>>9);
2642 	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2643  bio_full:
2644 	r1_bio->sectors = nr_sectors;
2645 
2646 	/* For a user-requested sync, we read all readable devices and do a
2647 	 * compare
2648 	 */
2649 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2650 		atomic_set(&r1_bio->remaining, read_targets);
2651 		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2652 			bio = r1_bio->bios[i];
2653 			if (bio->bi_end_io == end_sync_read) {
2654 				read_targets--;
2655 				md_sync_acct(bio->bi_bdev, nr_sectors);
2656 				generic_make_request(bio);
2657 			}
2658 		}
2659 	} else {
2660 		atomic_set(&r1_bio->remaining, 1);
2661 		bio = r1_bio->bios[r1_bio->read_disk];
2662 		md_sync_acct(bio->bi_bdev, nr_sectors);
2663 		generic_make_request(bio);
2664 
2665 	}
2666 	return nr_sectors;
2667 }
2668 
2669 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2670 {
2671 	if (sectors)
2672 		return sectors;
2673 
2674 	return mddev->dev_sectors;
2675 }
2676 
2677 static struct r1conf *setup_conf(struct mddev *mddev)
2678 {
2679 	struct r1conf *conf;
2680 	int i;
2681 	struct raid1_info *disk;
2682 	struct md_rdev *rdev;
2683 	int err = -ENOMEM;
2684 
2685 	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2686 	if (!conf)
2687 		goto abort;
2688 
2689 	conf->mirrors = kzalloc(sizeof(struct raid1_info)
2690 				* mddev->raid_disks * 2,
2691 				 GFP_KERNEL);
2692 	if (!conf->mirrors)
2693 		goto abort;
2694 
2695 	conf->tmppage = alloc_page(GFP_KERNEL);
2696 	if (!conf->tmppage)
2697 		goto abort;
2698 
2699 	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2700 	if (!conf->poolinfo)
2701 		goto abort;
2702 	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2703 	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2704 					  r1bio_pool_free,
2705 					  conf->poolinfo);
2706 	if (!conf->r1bio_pool)
2707 		goto abort;
2708 
2709 	conf->poolinfo->mddev = mddev;
2710 
2711 	err = -EINVAL;
2712 	spin_lock_init(&conf->device_lock);
2713 	rdev_for_each(rdev, mddev) {
2714 		struct request_queue *q;
2715 		int disk_idx = rdev->raid_disk;
2716 		if (disk_idx >= mddev->raid_disks
2717 		    || disk_idx < 0)
2718 			continue;
2719 		if (test_bit(Replacement, &rdev->flags))
2720 			disk = conf->mirrors + mddev->raid_disks + disk_idx;
2721 		else
2722 			disk = conf->mirrors + disk_idx;
2723 
2724 		if (disk->rdev)
2725 			goto abort;
2726 		disk->rdev = rdev;
2727 		q = bdev_get_queue(rdev->bdev);
2728 		if (q->merge_bvec_fn)
2729 			mddev->merge_check_needed = 1;
2730 
2731 		disk->head_position = 0;
2732 		disk->seq_start = MaxSector;
2733 	}
2734 	conf->raid_disks = mddev->raid_disks;
2735 	conf->mddev = mddev;
2736 	INIT_LIST_HEAD(&conf->retry_list);
2737 
2738 	spin_lock_init(&conf->resync_lock);
2739 	init_waitqueue_head(&conf->wait_barrier);
2740 
2741 	bio_list_init(&conf->pending_bio_list);
2742 	conf->pending_count = 0;
2743 	conf->recovery_disabled = mddev->recovery_disabled - 1;
2744 
2745 	err = -EIO;
2746 	for (i = 0; i < conf->raid_disks * 2; i++) {
2747 
2748 		disk = conf->mirrors + i;
2749 
2750 		if (i < conf->raid_disks &&
2751 		    disk[conf->raid_disks].rdev) {
2752 			/* This slot has a replacement. */
2753 			if (!disk->rdev) {
2754 				/* No original, just make the replacement
2755 				 * a recovering spare
2756 				 */
2757 				disk->rdev =
2758 					disk[conf->raid_disks].rdev;
2759 				disk[conf->raid_disks].rdev = NULL;
2760 			} else if (!test_bit(In_sync, &disk->rdev->flags))
2761 				/* Original is not in_sync - bad */
2762 				goto abort;
2763 		}
2764 
2765 		if (!disk->rdev ||
2766 		    !test_bit(In_sync, &disk->rdev->flags)) {
2767 			disk->head_position = 0;
2768 			if (disk->rdev &&
2769 			    (disk->rdev->saved_raid_disk < 0))
2770 				conf->fullsync = 1;
2771 		}
2772 	}
2773 
2774 	err = -ENOMEM;
2775 	conf->thread = md_register_thread(raid1d, mddev, "raid1");
2776 	if (!conf->thread) {
2777 		printk(KERN_ERR
2778 		       "md/raid1:%s: couldn't allocate thread\n",
2779 		       mdname(mddev));
2780 		goto abort;
2781 	}
2782 
2783 	return conf;
2784 
2785  abort:
2786 	if (conf) {
2787 		if (conf->r1bio_pool)
2788 			mempool_destroy(conf->r1bio_pool);
2789 		kfree(conf->mirrors);
2790 		safe_put_page(conf->tmppage);
2791 		kfree(conf->poolinfo);
2792 		kfree(conf);
2793 	}
2794 	return ERR_PTR(err);
2795 }
2796 
2797 static int stop(struct mddev *mddev);
2798 static int run(struct mddev *mddev)
2799 {
2800 	struct r1conf *conf;
2801 	int i;
2802 	struct md_rdev *rdev;
2803 	int ret;
2804 	bool discard_supported = false;
2805 
2806 	if (mddev->level != 1) {
2807 		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2808 		       mdname(mddev), mddev->level);
2809 		return -EIO;
2810 	}
2811 	if (mddev->reshape_position != MaxSector) {
2812 		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2813 		       mdname(mddev));
2814 		return -EIO;
2815 	}
2816 	/*
2817 	 * copy the already verified devices into our private RAID1
2818 	 * bookkeeping area. [whatever we allocate in run(),
2819 	 * should be freed in stop()]
2820 	 */
2821 	if (mddev->private == NULL)
2822 		conf = setup_conf(mddev);
2823 	else
2824 		conf = mddev->private;
2825 
2826 	if (IS_ERR(conf))
2827 		return PTR_ERR(conf);
2828 
2829 	if (mddev->queue)
2830 		blk_queue_max_write_same_sectors(mddev->queue,
2831 						 mddev->chunk_sectors);
2832 	rdev_for_each(rdev, mddev) {
2833 		if (!mddev->gendisk)
2834 			continue;
2835 		disk_stack_limits(mddev->gendisk, rdev->bdev,
2836 				  rdev->data_offset << 9);
2837 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
2838 			discard_supported = true;
2839 	}
2840 
2841 	mddev->degraded = 0;
2842 	for (i=0; i < conf->raid_disks; i++)
2843 		if (conf->mirrors[i].rdev == NULL ||
2844 		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2845 		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2846 			mddev->degraded++;
2847 
2848 	if (conf->raid_disks - mddev->degraded == 1)
2849 		mddev->recovery_cp = MaxSector;
2850 
2851 	if (mddev->recovery_cp != MaxSector)
2852 		printk(KERN_NOTICE "md/raid1:%s: not clean"
2853 		       " -- starting background reconstruction\n",
2854 		       mdname(mddev));
2855 	printk(KERN_INFO
2856 		"md/raid1:%s: active with %d out of %d mirrors\n",
2857 		mdname(mddev), mddev->raid_disks - mddev->degraded,
2858 		mddev->raid_disks);
2859 
2860 	/*
2861 	 * Ok, everything is just fine now
2862 	 */
2863 	mddev->thread = conf->thread;
2864 	conf->thread = NULL;
2865 	mddev->private = conf;
2866 
2867 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2868 
2869 	if (mddev->queue) {
2870 		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2871 		mddev->queue->backing_dev_info.congested_data = mddev;
2872 		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
2873 
2874 		if (discard_supported)
2875 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
2876 						mddev->queue);
2877 		else
2878 			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
2879 						  mddev->queue);
2880 	}
2881 
2882 	ret =  md_integrity_register(mddev);
2883 	if (ret)
2884 		stop(mddev);
2885 	return ret;
2886 }
2887 
2888 static int stop(struct mddev *mddev)
2889 {
2890 	struct r1conf *conf = mddev->private;
2891 	struct bitmap *bitmap = mddev->bitmap;
2892 
2893 	/* wait for behind writes to complete */
2894 	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2895 		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
2896 		       mdname(mddev));
2897 		/* need to kick something here to make sure I/O goes? */
2898 		wait_event(bitmap->behind_wait,
2899 			   atomic_read(&bitmap->behind_writes) == 0);
2900 	}
2901 
2902 	raise_barrier(conf);
2903 	lower_barrier(conf);
2904 
2905 	md_unregister_thread(&mddev->thread);
2906 	if (conf->r1bio_pool)
2907 		mempool_destroy(conf->r1bio_pool);
2908 	kfree(conf->mirrors);
2909 	safe_put_page(conf->tmppage);
2910 	kfree(conf->poolinfo);
2911 	kfree(conf);
2912 	mddev->private = NULL;
2913 	return 0;
2914 }
2915 
2916 static int raid1_resize(struct mddev *mddev, sector_t sectors)
2917 {
2918 	/* no resync is happening, and there is enough space
2919 	 * on all devices, so we can resize.
2920 	 * We need to make sure resync covers any new space.
2921 	 * If the array is shrinking we should possibly wait until
2922 	 * any io in the removed space completes, but it hardly seems
2923 	 * worth it.
2924 	 */
2925 	sector_t newsize = raid1_size(mddev, sectors, 0);
2926 	if (mddev->external_size &&
2927 	    mddev->array_sectors > newsize)
2928 		return -EINVAL;
2929 	if (mddev->bitmap) {
2930 		int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
2931 		if (ret)
2932 			return ret;
2933 	}
2934 	md_set_array_sectors(mddev, newsize);
2935 	set_capacity(mddev->gendisk, mddev->array_sectors);
2936 	revalidate_disk(mddev->gendisk);
2937 	if (sectors > mddev->dev_sectors &&
2938 	    mddev->recovery_cp > mddev->dev_sectors) {
2939 		mddev->recovery_cp = mddev->dev_sectors;
2940 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2941 	}
2942 	mddev->dev_sectors = sectors;
2943 	mddev->resync_max_sectors = sectors;
2944 	return 0;
2945 }
2946 
2947 static int raid1_reshape(struct mddev *mddev)
2948 {
2949 	/* We need to:
2950 	 * 1/ resize the r1bio_pool
2951 	 * 2/ resize conf->mirrors
2952 	 *
2953 	 * We allocate a new r1bio_pool if we can.
2954 	 * Then raise a device barrier and wait until all IO stops.
2955 	 * Then resize conf->mirrors and swap in the new r1bio pool.
2956 	 *
2957 	 * At the same time, we "pack" the devices so that all the missing
2958 	 * devices have the higher raid_disk numbers.
2959 	 */
2960 	mempool_t *newpool, *oldpool;
2961 	struct pool_info *newpoolinfo;
2962 	struct raid1_info *newmirrors;
2963 	struct r1conf *conf = mddev->private;
2964 	int cnt, raid_disks;
2965 	unsigned long flags;
2966 	int d, d2, err;
2967 
2968 	/* Cannot change chunk_size, layout, or level */
2969 	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
2970 	    mddev->layout != mddev->new_layout ||
2971 	    mddev->level != mddev->new_level) {
2972 		mddev->new_chunk_sectors = mddev->chunk_sectors;
2973 		mddev->new_layout = mddev->layout;
2974 		mddev->new_level = mddev->level;
2975 		return -EINVAL;
2976 	}
2977 
2978 	err = md_allow_write(mddev);
2979 	if (err)
2980 		return err;
2981 
2982 	raid_disks = mddev->raid_disks + mddev->delta_disks;
2983 
2984 	if (raid_disks < conf->raid_disks) {
2985 		cnt=0;
2986 		for (d= 0; d < conf->raid_disks; d++)
2987 			if (conf->mirrors[d].rdev)
2988 				cnt++;
2989 		if (cnt > raid_disks)
2990 			return -EBUSY;
2991 	}
2992 
2993 	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2994 	if (!newpoolinfo)
2995 		return -ENOMEM;
2996 	newpoolinfo->mddev = mddev;
2997 	newpoolinfo->raid_disks = raid_disks * 2;
2998 
2999 	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
3000 				 r1bio_pool_free, newpoolinfo);
3001 	if (!newpool) {
3002 		kfree(newpoolinfo);
3003 		return -ENOMEM;
3004 	}
3005 	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
3006 			     GFP_KERNEL);
3007 	if (!newmirrors) {
3008 		kfree(newpoolinfo);
3009 		mempool_destroy(newpool);
3010 		return -ENOMEM;
3011 	}
3012 
3013 	raise_barrier(conf);
3014 
3015 	/* ok, everything is stopped */
3016 	oldpool = conf->r1bio_pool;
3017 	conf->r1bio_pool = newpool;
3018 
3019 	for (d = d2 = 0; d < conf->raid_disks; d++) {
3020 		struct md_rdev *rdev = conf->mirrors[d].rdev;
3021 		if (rdev && rdev->raid_disk != d2) {
3022 			sysfs_unlink_rdev(mddev, rdev);
3023 			rdev->raid_disk = d2;
3024 			sysfs_unlink_rdev(mddev, rdev);
3025 			if (sysfs_link_rdev(mddev, rdev))
3026 				printk(KERN_WARNING
3027 				       "md/raid1:%s: cannot register rd%d\n",
3028 				       mdname(mddev), rdev->raid_disk);
3029 		}
3030 		if (rdev)
3031 			newmirrors[d2++].rdev = rdev;
3032 	}
3033 	kfree(conf->mirrors);
3034 	conf->mirrors = newmirrors;
3035 	kfree(conf->poolinfo);
3036 	conf->poolinfo = newpoolinfo;
3037 
3038 	spin_lock_irqsave(&conf->device_lock, flags);
3039 	mddev->degraded += (raid_disks - conf->raid_disks);
3040 	spin_unlock_irqrestore(&conf->device_lock, flags);
3041 	conf->raid_disks = mddev->raid_disks = raid_disks;
3042 	mddev->delta_disks = 0;
3043 
3044 	lower_barrier(conf);
3045 
3046 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3047 	md_wakeup_thread(mddev->thread);
3048 
3049 	mempool_destroy(oldpool);
3050 	return 0;
3051 }
3052 
3053 static void raid1_quiesce(struct mddev *mddev, int state)
3054 {
3055 	struct r1conf *conf = mddev->private;
3056 
3057 	switch(state) {
3058 	case 2: /* wake for suspend */
3059 		wake_up(&conf->wait_barrier);
3060 		break;
3061 	case 1:
3062 		raise_barrier(conf);
3063 		break;
3064 	case 0:
3065 		lower_barrier(conf);
3066 		break;
3067 	}
3068 }
3069 
3070 static void *raid1_takeover(struct mddev *mddev)
3071 {
3072 	/* raid1 can take over:
3073 	 *  raid5 with 2 devices, any layout or chunk size
3074 	 */
3075 	if (mddev->level == 5 && mddev->raid_disks == 2) {
3076 		struct r1conf *conf;
3077 		mddev->new_level = 1;
3078 		mddev->new_layout = 0;
3079 		mddev->new_chunk_sectors = 0;
3080 		conf = setup_conf(mddev);
3081 		if (!IS_ERR(conf))
3082 			conf->barrier = 1;
3083 		return conf;
3084 	}
3085 	return ERR_PTR(-EINVAL);
3086 }
3087 
3088 static struct md_personality raid1_personality =
3089 {
3090 	.name		= "raid1",
3091 	.level		= 1,
3092 	.owner		= THIS_MODULE,
3093 	.make_request	= make_request,
3094 	.run		= run,
3095 	.stop		= stop,
3096 	.status		= status,
3097 	.error_handler	= error,
3098 	.hot_add_disk	= raid1_add_disk,
3099 	.hot_remove_disk= raid1_remove_disk,
3100 	.spare_active	= raid1_spare_active,
3101 	.sync_request	= sync_request,
3102 	.resize		= raid1_resize,
3103 	.size		= raid1_size,
3104 	.check_reshape	= raid1_reshape,
3105 	.quiesce	= raid1_quiesce,
3106 	.takeover	= raid1_takeover,
3107 };
3108 
3109 static int __init raid_init(void)
3110 {
3111 	return register_md_personality(&raid1_personality);
3112 }
3113 
3114 static void raid_exit(void)
3115 {
3116 	unregister_md_personality(&raid1_personality);
3117 }
3118 
3119 module_init(raid_init);
3120 module_exit(raid_exit);
3121 MODULE_LICENSE("GPL");
3122 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3123 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3124 MODULE_ALIAS("md-raid1");
3125 MODULE_ALIAS("md-level-1");
3126 
3127 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
3128