xref: /linux/drivers/md/raid10.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  * raid10.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 2000-2004 Neil Brown
5  *
6  * RAID-10 support for md.
7  *
8  * Based on code in raid1.c.  See raid1.c for further copyright information.
9  *
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * You should have received a copy of the GNU General Public License
17  * (for example /usr/src/linux/COPYING); if not, write to the Free
18  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 
21 #include "dm-bio-list.h"
22 #include <linux/raid/raid10.h>
23 #include <linux/raid/bitmap.h>
24 
25 /*
26  * RAID10 provides a combination of RAID0 and RAID1 functionality.
27  * The layout of data is defined by
28  *    chunk_size
29  *    raid_disks
30  *    near_copies (stored in low byte of layout)
31  *    far_copies (stored in second byte of layout)
32  *
33  * The data to be stored is divided into chunks using chunksize.
34  * Each device is divided into far_copies sections.
35  * In each section, chunks are laid out in a style similar to raid0, but
36  * near_copies copies of each chunk are stored (each on a different drive).
37  * The starting device for each section is offset near_copies from the starting
38  * device of the previous section.
39  * Thus there are (near_copies*far_copies) copies of each chunk, and each copy
40  * is on a different drive.
41  * near_copies and far_copies must be at least one, and their product is at most
42  * raid_disks.
43  */
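
/*
 * A worked example of the layout described above (illustrative only,
 * not part of the driver).  With raid_disks = 4 and chunks A0, A1, ...:
 *
 * near_copies = 2, far_copies = 1 (a "near" layout):
 *
 *	disk0	disk1	disk2	disk3
 *	A0	A0	A1	A1
 *	A2	A2	A3	A3
 *
 * near_copies = 1, far_copies = 2 (a "far" layout); the second far
 * section repeats the whole array shifted by one device:
 *
 *	disk0	disk1	disk2	disk3
 *	A0	A1	A2	A3	<- first far section
 *	A3	A0	A1	A2	<- second far section
 *
 * The hypothetical helper below (not part of this file) sketches the
 * same per-chunk device arithmetic that raid10_find_phys() performs:
 */
#if 0
static void layout_example(int chunk, int raid_disks,
			   int near_copies, int far_copies)
{
	int first_dev = (chunk * near_copies) % raid_disks;
	int n, f;

	for (n = 0; n < near_copies; n++) {
		int dev = (first_dev + n) % raid_disks;
		for (f = 0; f < far_copies; f++) {
			/* each far section starts near_copies devices later */
			int d = (dev + f * near_copies) % raid_disks;
			/* a copy of 'chunk' lives on disk 'd' in section 'f' */
			(void)d;
		}
	}
}
#endif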
44 
45 /*
46  * Number of guaranteed r10bios in case of extreme VM load:
47  */
48 #define	NR_RAID10_BIOS 256
49 
50 static void unplug_slaves(mddev_t *mddev);
51 
52 static void allow_barrier(conf_t *conf);
53 static void lower_barrier(conf_t *conf);
54 
55 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
56 {
57 	conf_t *conf = data;
58 	r10bio_t *r10_bio;
59 	int size = offsetof(struct r10bio_s, devs[conf->copies]);
60 
61 	/* allocate an r10bio with room for conf->copies entries in the devs array */
62 	r10_bio = kzalloc(size, gfp_flags);
63 	if (!r10_bio)
64 		unplug_slaves(conf->mddev);
65 
66 	return r10_bio;
67 }
68 
69 static void r10bio_pool_free(void *r10_bio, void *data)
70 {
71 	kfree(r10_bio);
72 }
73 
74 #define RESYNC_BLOCK_SIZE (64*1024)
75 //#define RESYNC_BLOCK_SIZE PAGE_SIZE
76 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
77 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
78 #define RESYNC_WINDOW (2048*1024)
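
/*
 * For example: RESYNC_BLOCK_SIZE is 64K, so RESYNC_SECTORS is 128, and
 * with 4K pages RESYNC_PAGES is 16.  A hypothetical compile-time sanity
 * check (illustrative only, not part of the driver) could read:
 */
#if 0
static inline void resync_geometry_check(void)
{
	BUILD_BUG_ON(RESYNC_PAGES * PAGE_SIZE < RESYNC_BLOCK_SIZE);
	BUILD_BUG_ON(RESYNC_SECTORS << 9 != RESYNC_BLOCK_SIZE);
}
#endif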
79 
80 /*
81  * When performing a resync, we need to read and compare, so
82  * we need as many pages as there are copies.
83  * When performing a recovery, we need 2 bios, one for read,
84  * one for write (we recover only one drive per r10buf)
85  *
86  */
87 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
88 {
89 	conf_t *conf = data;
90 	struct page *page;
91 	r10bio_t *r10_bio;
92 	struct bio *bio;
93 	int i, j;
94 	int nalloc;
95 
96 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
97 	if (!r10_bio) {
98 		unplug_slaves(conf->mddev);
99 		return NULL;
100 	}
101 
102 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
103 		nalloc = conf->copies; /* resync */
104 	else
105 		nalloc = 2; /* recovery */
106 
107 	/*
108 	 * Allocate bios.
109 	 */
110 	for (j = nalloc ; j-- ; ) {
111 		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
112 		if (!bio)
113 			goto out_free_bio;
114 		r10_bio->devs[j].bio = bio;
115 	}
116 	/*
117 	 * Allocate RESYNC_PAGES data pages and attach them
118 	 * where needed.
119 	 */
120 	for (j = 0 ; j < nalloc; j++) {
121 		bio = r10_bio->devs[j].bio;
122 		for (i = 0; i < RESYNC_PAGES; i++) {
123 			page = alloc_page(gfp_flags);
124 			if (unlikely(!page))
125 				goto out_free_pages;
126 
127 			bio->bi_io_vec[i].bv_page = page;
128 		}
129 	}
130 
131 	return r10_bio;
132 
133 out_free_pages:
134 	for ( ; i > 0 ; i--)
135 		safe_put_page(bio->bi_io_vec[i-1].bv_page);
136 	while (j--)
137 		for (i = 0; i < RESYNC_PAGES ; i++)
138 			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
139 	j = -1;
140 out_free_bio:
141 	while ( ++j < nalloc )
142 		bio_put(r10_bio->devs[j].bio);
143 	r10bio_pool_free(r10_bio, conf);
144 	return NULL;
145 }
146 
147 static void r10buf_pool_free(void *__r10_bio, void *data)
148 {
149 	int i;
150 	conf_t *conf = data;
151 	r10bio_t *r10bio = __r10_bio;
152 	int j;
153 
154 	for (j=0; j < conf->copies; j++) {
155 		struct bio *bio = r10bio->devs[j].bio;
156 		if (bio) {
157 			for (i = 0; i < RESYNC_PAGES; i++) {
158 				safe_put_page(bio->bi_io_vec[i].bv_page);
159 				bio->bi_io_vec[i].bv_page = NULL;
160 			}
161 			bio_put(bio);
162 		}
163 	}
164 	r10bio_pool_free(r10bio, conf);
165 }
166 
167 static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
168 {
169 	int i;
170 
171 	for (i = 0; i < conf->copies; i++) {
172 		struct bio **bio = & r10_bio->devs[i].bio;
173 		if (*bio && *bio != IO_BLOCKED)
174 			bio_put(*bio);
175 		*bio = NULL;
176 	}
177 }
178 
179 static void free_r10bio(r10bio_t *r10_bio)
180 {
181 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
182 
183 	/*
184 	 * Wake up any possible resync thread that waits for the device
185 	 * to go idle.
186 	 */
187 	allow_barrier(conf);
188 
189 	put_all_bios(conf, r10_bio);
190 	mempool_free(r10_bio, conf->r10bio_pool);
191 }
192 
193 static void put_buf(r10bio_t *r10_bio)
194 {
195 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
196 
197 	mempool_free(r10_bio, conf->r10buf_pool);
198 
199 	lower_barrier(conf);
200 }
201 
202 static void reschedule_retry(r10bio_t *r10_bio)
203 {
204 	unsigned long flags;
205 	mddev_t *mddev = r10_bio->mddev;
206 	conf_t *conf = mddev_to_conf(mddev);
207 
208 	spin_lock_irqsave(&conf->device_lock, flags);
209 	list_add(&r10_bio->retry_list, &conf->retry_list);
210 	conf->nr_queued ++;
211 	spin_unlock_irqrestore(&conf->device_lock, flags);
212 
213 	md_wakeup_thread(mddev->thread);
214 }
215 
216 /*
217  * raid_end_bio_io() is called when we have finished servicing a mirrored
218  * operation and are ready to return a success/failure code to the buffer
219  * cache layer.
220  */
221 static void raid_end_bio_io(r10bio_t *r10_bio)
222 {
223 	struct bio *bio = r10_bio->master_bio;
224 
225 	bio_endio(bio, bio->bi_size,
226 		test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
227 	free_r10bio(r10_bio);
228 }
229 
230 /*
231  * Update disk head position estimator based on IRQ completion info.
232  */
233 static inline void update_head_pos(int slot, r10bio_t *r10_bio)
234 {
235 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
236 
237 	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
238 		r10_bio->devs[slot].addr + (r10_bio->sectors);
239 }
240 
241 static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
242 {
243 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
244 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
245 	int slot, dev;
246 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
247 
248 	if (bio->bi_size)
249 		return 1;
250 
251 	slot = r10_bio->read_slot;
252 	dev = r10_bio->devs[slot].devnum;
253 	/*
254 	 * this branch is our 'one mirror IO has finished' event handler:
255 	 */
256 	update_head_pos(slot, r10_bio);
257 
258 	if (uptodate) {
259 		/*
260 		 * Set R10BIO_Uptodate in our master bio, so that
261 		 * we will return a good error code to the higher
262 		 * levels even if IO on some other mirrored buffer fails.
263 		 *
264 		 * The 'master' represents the composite IO operation to
265 		 * user-side. So if something waits for IO, then it will
266 		 * wait for the 'master' bio.
267 		 */
268 		set_bit(R10BIO_Uptodate, &r10_bio->state);
269 		raid_end_bio_io(r10_bio);
270 	} else {
271 		/*
272 		 * oops, read error:
273 		 */
274 		char b[BDEVNAME_SIZE];
275 		if (printk_ratelimit())
276 			printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
277 			       bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
278 		reschedule_retry(r10_bio);
279 	}
280 
281 	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
282 	return 0;
283 }
284 
285 static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
286 {
287 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
288 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
289 	int slot, dev;
290 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
291 
292 	if (bio->bi_size)
293 		return 1;
294 
295 	for (slot = 0; slot < conf->copies; slot++)
296 		if (r10_bio->devs[slot].bio == bio)
297 			break;
298 	dev = r10_bio->devs[slot].devnum;
299 
300 	/*
301 	 * this branch is our 'one mirror IO has finished' event handler:
302 	 */
303 	if (!uptodate) {
304 		md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
305 		/* an I/O failed, we can't clear the bitmap */
306 		set_bit(R10BIO_Degraded, &r10_bio->state);
307 	} else
308 		/*
309 		 * Set R10BIO_Uptodate in our master bio, so that
310 		 * we will return a good error code to the higher
311 		 * levels even if IO on some other mirrored buffer fails.
312 		 *
313 		 * The 'master' represents the composite IO operation to
314 		 * user-side. So if something waits for IO, then it will
315 		 * wait for the 'master' bio.
316 		 */
317 		set_bit(R10BIO_Uptodate, &r10_bio->state);
318 
319 	update_head_pos(slot, r10_bio);
320 
321 	/*
322 	 *
323 	 * Let's see if all mirrored write operations have finished
324 	 * already.
325 	 */
326 	if (atomic_dec_and_test(&r10_bio->remaining)) {
327 		/* clear the bitmap if all writes complete successfully */
328 		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
329 				r10_bio->sectors,
330 				!test_bit(R10BIO_Degraded, &r10_bio->state),
331 				0);
332 		md_write_end(r10_bio->mddev);
333 		raid_end_bio_io(r10_bio);
334 	}
335 
336 	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
337 	return 0;
338 }
339 
340 
341 /*
342  * RAID10 layout manager
343  * As well as the chunk size and raid_disks count, there are two
344  * parameters: near_copies and far_copies.
345  * near_copies * far_copies must be <= raid_disks.
346  * Normally one of these will be 1.
347  * If both are 1, we get raid0.
348  * If near_copies == raid_disks, we get raid1.
349  *
350  * Chunks are laid out in raid0 style with near_copies copies of the
351  * first chunk, followed by near_copies copies of the next chunk and
352  * so on.
353  * If far_copies > 1, then after 1/far_copies of the array has been assigned
354  * as described above, we start again with a device offset of near_copies.
355  * So we effectively have another copy of the whole array further down all
356  * the drives, but with blocks on different drives.
357  * With this layout, a block is never stored twice on the same device.
358  *
359  * raid10_find_phys finds the sector offset of a given virtual sector
360  * on each device that it is on. If a block isn't on a device,
361  * that entry in the array is set to MaxSector.
362  *
363  * raid10_find_virt does the reverse mapping, from a device and a
364  * sector offset to a virtual address
365  */
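
/*
 * A minimal sketch of the round-trip property the two mappings are
 * intended to satisfy (illustrative only; the helper name and the
 * WARN_ON self-check are hypothetical, not part of the driver):
 */
#if 0
static void raid10_check_round_trip(conf_t *conf, sector_t virt)
{
	r10bio_t *r10bio = kzalloc(offsetof(struct r10bio_s,
					    devs[conf->copies]), GFP_KERNEL);
	int slot;

	if (!r10bio)
		return;
	r10bio->sector = virt;
	raid10_find_phys(conf, r10bio);
	/* mapping a copy's device/offset back should recover 'virt' */
	for (slot = 0; slot < conf->copies; slot++)
		WARN_ON(raid10_find_virt(conf, r10bio->devs[slot].addr,
					 r10bio->devs[slot].devnum) != virt);
	kfree(r10bio);
}
#endif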
366 
367 static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
368 {
369 	int n,f;
370 	sector_t sector;
371 	sector_t chunk;
372 	sector_t stripe;
373 	int dev;
374 
375 	int slot = 0;
376 
377 	/* now calculate first sector/dev */
378 	chunk = r10bio->sector >> conf->chunk_shift;
379 	sector = r10bio->sector & conf->chunk_mask;
380 
381 	chunk *= conf->near_copies;
382 	stripe = chunk;
383 	dev = sector_div(stripe, conf->raid_disks);
384 
385 	sector += stripe << conf->chunk_shift;
386 
387 	/* and calculate all the others */
388 	for (n=0; n < conf->near_copies; n++) {
389 		int d = dev;
390 		sector_t s = sector;
391 		r10bio->devs[slot].addr = sector;
392 		r10bio->devs[slot].devnum = d;
393 		slot++;
394 
395 		for (f = 1; f < conf->far_copies; f++) {
396 			d += conf->near_copies;
397 			if (d >= conf->raid_disks)
398 				d -= conf->raid_disks;
399 			s += conf->stride;
400 			r10bio->devs[slot].devnum = d;
401 			r10bio->devs[slot].addr = s;
402 			slot++;
403 		}
404 		dev++;
405 		if (dev >= conf->raid_disks) {
406 			dev = 0;
407 			sector += (conf->chunk_mask + 1);
408 		}
409 	}
410 	BUG_ON(slot != conf->copies);
411 }
412 
413 static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
414 {
415 	sector_t offset, chunk, vchunk;
416 
417 	while (sector > conf->stride) {
418 		sector -= conf->stride;
419 		if (dev < conf->near_copies)
420 			dev += conf->raid_disks - conf->near_copies;
421 		else
422 			dev -= conf->near_copies;
423 	}
424 
425 	offset = sector & conf->chunk_mask;
426 	chunk = sector >> conf->chunk_shift;
427 	vchunk = chunk * conf->raid_disks + dev;
428 	sector_div(vchunk, conf->near_copies);
429 	return (vchunk << conf->chunk_shift) + offset;
430 }
431 
432 /**
433  *	raid10_mergeable_bvec -- tell bio layer if two requests can be merged
434  *	@q: request queue
435  *	@bio: the buffer head that's been built up so far
436  *	@biovec: the request that could be merged to it.
437  *
438  *	Return amount of bytes we can accept at this offset
439  *	Return the number of bytes we can accept at this offset.
440  *      If near_copies == raid_disks, there are no striping issues,
441  */
442 static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
443 				struct bio_vec *bio_vec)
444 {
445 	mddev_t *mddev = q->queuedata;
446 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
447 	int max;
448 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
449 	unsigned int bio_sectors = bio->bi_size >> 9;
450 
451 	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
452 	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
453 	if (max <= bio_vec->bv_len && bio_sectors == 0)
454 		return bio_vec->bv_len;
455 	else
456 		return max;
457 }
458 
459 /*
460  * This routine returns the disk from which the requested read should
461  * be done. There is a per-array 'next expected sequential IO' sector
462  * number - if this matches on the next IO then we use the last disk.
463  * There is also a per-disk 'last known head position' sector that is
464  * maintained from IRQ contexts, both the normal and the resync IO
465  * completion handlers update this position correctly. If there is no
466  * perfect sequential match then we pick the disk whose head is closest.
467  *
468  * If there are 2 mirrors in the same 2 devices, performance degrades
469  * because the position is mirror-based, not device-based.
470  *
471  * The rdev for the device selected will have nr_pending incremented.
472  */
473 
474 /*
475  * FIXME: possibly should rethink readbalancing and do it differently
476  * depending on near_copies / far_copies geometry.
477  */
478 static int read_balance(conf_t *conf, r10bio_t *r10_bio)
479 {
480 	const unsigned long this_sector = r10_bio->sector;
481 	int disk, slot, nslot;
482 	const int sectors = r10_bio->sectors;
483 	sector_t new_distance, current_distance;
484 	mdk_rdev_t *rdev;
485 
486 	raid10_find_phys(conf, r10_bio);
487 	rcu_read_lock();
488 	/*
489 	 * Check if we can balance. We can balance on the whole
490 	 * device if no resync is going on (recovery is ok), or below
491 	 * the resync window. We take the first readable disk when
492 	 * above the resync window.
493 	 */
494 	if (conf->mddev->recovery_cp < MaxSector
495 	    && (this_sector + sectors >= conf->next_resync)) {
496 		/* make sure that disk is operational */
497 		slot = 0;
498 		disk = r10_bio->devs[slot].devnum;
499 
500 		while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
501 		       r10_bio->devs[slot].bio == IO_BLOCKED ||
502 		       !test_bit(In_sync, &rdev->flags)) {
503 			slot++;
504 			if (slot == conf->copies) {
505 				slot = 0;
506 				disk = -1;
507 				break;
508 			}
509 			disk = r10_bio->devs[slot].devnum;
510 		}
511 		goto rb_out;
512 	}
513 
514 
515 	/* make sure the disk is operational */
516 	slot = 0;
517 	disk = r10_bio->devs[slot].devnum;
518 	while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
519 	       r10_bio->devs[slot].bio == IO_BLOCKED ||
520 	       !test_bit(In_sync, &rdev->flags)) {
521 		slot ++;
522 		if (slot == conf->copies) {
523 			disk = -1;
524 			goto rb_out;
525 		}
526 		disk = r10_bio->devs[slot].devnum;
527 	}
528 
529 
530 	current_distance = abs(r10_bio->devs[slot].addr -
531 			       conf->mirrors[disk].head_position);
532 
533 	/* Find the disk whose head is closest */
534 
535 	for (nslot = slot; nslot < conf->copies; nslot++) {
536 		int ndisk = r10_bio->devs[nslot].devnum;
537 
538 
539 		if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
540 		    r10_bio->devs[nslot].bio == IO_BLOCKED ||
541 		    !test_bit(In_sync, &rdev->flags))
542 			continue;
543 
544 		/* This optimisation is debatable, and completely destroys
545 		 * sequential read speed for 'far copies' arrays.  So only
546 		 * keep it for 'near' arrays, and review those later.
547 		 */
548 		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
549 			disk = ndisk;
550 			slot = nslot;
551 			break;
552 		}
553 		new_distance = abs(r10_bio->devs[nslot].addr -
554 				   conf->mirrors[ndisk].head_position);
555 		if (new_distance < current_distance) {
556 			current_distance = new_distance;
557 			disk = ndisk;
558 			slot = nslot;
559 		}
560 	}
561 
562 rb_out:
563 	r10_bio->read_slot = slot;
564 /*	conf->next_seq_sect = this_sector + sectors;*/
565 
566 	if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
567 		atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
568 	else
569 		disk = -1;
570 	rcu_read_unlock();
571 
572 	return disk;
573 }
574 
575 static void unplug_slaves(mddev_t *mddev)
576 {
577 	conf_t *conf = mddev_to_conf(mddev);
578 	int i;
579 
580 	rcu_read_lock();
581 	for (i=0; i<mddev->raid_disks; i++) {
582 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
583 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
584 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
585 
586 			atomic_inc(&rdev->nr_pending);
587 			rcu_read_unlock();
588 
589 			if (r_queue->unplug_fn)
590 				r_queue->unplug_fn(r_queue);
591 
592 			rdev_dec_pending(rdev, mddev);
593 			rcu_read_lock();
594 		}
595 	}
596 	rcu_read_unlock();
597 }
598 
599 static void raid10_unplug(request_queue_t *q)
600 {
601 	mddev_t *mddev = q->queuedata;
602 
603 	unplug_slaves(mddev);
604 	md_wakeup_thread(mddev->thread);
605 }
606 
607 static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
608 			     sector_t *error_sector)
609 {
610 	mddev_t *mddev = q->queuedata;
611 	conf_t *conf = mddev_to_conf(mddev);
612 	int i, ret = 0;
613 
614 	rcu_read_lock();
615 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
616 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
617 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
618 			struct block_device *bdev = rdev->bdev;
619 			request_queue_t *r_queue = bdev_get_queue(bdev);
620 
621 			if (!r_queue->issue_flush_fn)
622 				ret = -EOPNOTSUPP;
623 			else {
624 				atomic_inc(&rdev->nr_pending);
625 				rcu_read_unlock();
626 				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
627 							      error_sector);
628 				rdev_dec_pending(rdev, mddev);
629 				rcu_read_lock();
630 			}
631 		}
632 	}
633 	rcu_read_unlock();
634 	return ret;
635 }
636 
637 /* Barriers....
638  * Sometimes we need to suspend IO while we do something else,
639  * either some resync/recovery, or reconfigure the array.
640  * To do this we raise a 'barrier'.
641  * The 'barrier' is a counter that can be raised multiple times
642  * to count how many activities are happening which preclude
643  * normal IO.
644  * We can only raise the barrier if there is no pending IO.
645  * i.e. if nr_pending == 0.
646  * We choose only to raise the barrier if no-one is waiting for the
647  * barrier to go down.  This means that as soon as an IO request
648  * is ready, no other operations which require a barrier will start
649  * until the IO request has had a chance.
650  *
651  * So: regular IO calls 'wait_barrier'.  When that returns there
652  *    is no background IO happening.  It must arrange to call
653  *    allow_barrier when it has finished its IO.
654  * background IO calls must call raise_barrier.  Once that returns
655  *    there is no normal IO happening.  It must arrange to call
656  *    lower_barrier when the particular background IO completes.
657  */
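
/*
 * The calling pattern implied by the rules above, in sketch form
 * (illustrative only, mirroring how the paths below pair the calls):
 */
#if 0
static void barrier_usage_sketch(conf_t *conf)
{
	/* regular IO path (see make_request / free_r10bio) */
	wait_barrier(conf);
	/* ... submit the request; when it completes ... */
	allow_barrier(conf);

	/* background resync/recovery path (see sync_request / put_buf) */
	raise_barrier(conf, 0);
	/* ... perform one unit of resync IO; when it completes ... */
	lower_barrier(conf);
}
#endif
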
658 #define RESYNC_DEPTH 32
659 
660 static void raise_barrier(conf_t *conf, int force)
661 {
662 	BUG_ON(force && !conf->barrier);
663 	spin_lock_irq(&conf->resync_lock);
664 
665 	/* Wait until no block IO is waiting (unless 'force') */
666 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
667 			    conf->resync_lock,
668 			    raid10_unplug(conf->mddev->queue));
669 
670 	/* block any new IO from starting */
671 	conf->barrier++;
672 
673 	/* Now wait for all pending IO to complete */
674 	wait_event_lock_irq(conf->wait_barrier,
675 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
676 			    conf->resync_lock,
677 			    raid10_unplug(conf->mddev->queue));
678 
679 	spin_unlock_irq(&conf->resync_lock);
680 }
681 
682 static void lower_barrier(conf_t *conf)
683 {
684 	unsigned long flags;
685 	spin_lock_irqsave(&conf->resync_lock, flags);
686 	conf->barrier--;
687 	spin_unlock_irqrestore(&conf->resync_lock, flags);
688 	wake_up(&conf->wait_barrier);
689 }
690 
691 static void wait_barrier(conf_t *conf)
692 {
693 	spin_lock_irq(&conf->resync_lock);
694 	if (conf->barrier) {
695 		conf->nr_waiting++;
696 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
697 				    conf->resync_lock,
698 				    raid10_unplug(conf->mddev->queue));
699 		conf->nr_waiting--;
700 	}
701 	conf->nr_pending++;
702 	spin_unlock_irq(&conf->resync_lock);
703 }
704 
705 static void allow_barrier(conf_t *conf)
706 {
707 	unsigned long flags;
708 	spin_lock_irqsave(&conf->resync_lock, flags);
709 	conf->nr_pending--;
710 	spin_unlock_irqrestore(&conf->resync_lock, flags);
711 	wake_up(&conf->wait_barrier);
712 }
713 
714 static void freeze_array(conf_t *conf)
715 {
716 	/* stop syncio and normal IO and wait for everything to
717 	 * go quiet.
718 	 * We increment barrier and nr_waiting, and then
719 	 * wait until barrier+nr_pending match nr_queued+2
720 	 */
721 	spin_lock_irq(&conf->resync_lock);
722 	conf->barrier++;
723 	conf->nr_waiting++;
724 	wait_event_lock_irq(conf->wait_barrier,
725 			    conf->barrier+conf->nr_pending == conf->nr_queued+2,
726 			    conf->resync_lock,
727 			    raid10_unplug(conf->mddev->queue));
728 	spin_unlock_irq(&conf->resync_lock);
729 }
730 
731 static void unfreeze_array(conf_t *conf)
732 {
733 	/* reverse the effect of the freeze */
734 	spin_lock_irq(&conf->resync_lock);
735 	conf->barrier--;
736 	conf->nr_waiting--;
737 	wake_up(&conf->wait_barrier);
738 	spin_unlock_irq(&conf->resync_lock);
739 }
740 
741 static int make_request(request_queue_t *q, struct bio * bio)
742 {
743 	mddev_t *mddev = q->queuedata;
744 	conf_t *conf = mddev_to_conf(mddev);
745 	mirror_info_t *mirror;
746 	r10bio_t *r10_bio;
747 	struct bio *read_bio;
748 	int i;
749 	int chunk_sects = conf->chunk_mask + 1;
750 	const int rw = bio_data_dir(bio);
751 	struct bio_list bl;
752 	unsigned long flags;
753 
754 	if (unlikely(bio_barrier(bio))) {
755 		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
756 		return 0;
757 	}
758 
759 	/* If this request crosses a chunk boundary, we need to
760 	 * split it.  This will only happen for 1 PAGE (or less) requests.
761 	 */
762 	if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
763 		      > chunk_sects &&
764 		    conf->near_copies < conf->raid_disks)) {
765 		struct bio_pair *bp;
766 		/* Sanity check -- queue functions should prevent this happening */
767 		if (bio->bi_vcnt != 1 ||
768 		    bio->bi_idx != 0)
769 			goto bad_map;
770 		/* This is a one page bio that upper layers
771 		 * refuse to split for us, so we need to split it.
772 		 */
773 		bp = bio_split(bio, bio_split_pool,
774 			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
775 		if (make_request(q, &bp->bio1))
776 			generic_make_request(&bp->bio1);
777 		if (make_request(q, &bp->bio2))
778 			generic_make_request(&bp->bio2);
779 
780 		bio_pair_release(bp);
781 		return 0;
782 	bad_map:
783 		printk("raid10_make_request bug: can't convert block across chunks"
784 		       " or bigger than %dk %llu %d\n", chunk_sects/2,
785 		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
786 
787 		bio_io_error(bio, bio->bi_size);
788 		return 0;
789 	}
790 
791 	md_write_start(mddev, bio);
792 
793 	/*
794 	 * Register the new request and wait if the reconstruction
795 	 * thread has put up a bar for new requests.
796 	 * Continue immediately if no resync is active currently.
797 	 */
798 	wait_barrier(conf);
799 
800 	disk_stat_inc(mddev->gendisk, ios[rw]);
801 	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
802 
803 	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
804 
805 	r10_bio->master_bio = bio;
806 	r10_bio->sectors = bio->bi_size >> 9;
807 
808 	r10_bio->mddev = mddev;
809 	r10_bio->sector = bio->bi_sector;
810 	r10_bio->state = 0;
811 
812 	if (rw == READ) {
813 		/*
814 		 * read balancing logic:
815 		 */
816 		int disk = read_balance(conf, r10_bio);
817 		int slot = r10_bio->read_slot;
818 		if (disk < 0) {
819 			raid_end_bio_io(r10_bio);
820 			return 0;
821 		}
822 		mirror = conf->mirrors + disk;
823 
824 		read_bio = bio_clone(bio, GFP_NOIO);
825 
826 		r10_bio->devs[slot].bio = read_bio;
827 
828 		read_bio->bi_sector = r10_bio->devs[slot].addr +
829 			mirror->rdev->data_offset;
830 		read_bio->bi_bdev = mirror->rdev->bdev;
831 		read_bio->bi_end_io = raid10_end_read_request;
832 		read_bio->bi_rw = READ;
833 		read_bio->bi_private = r10_bio;
834 
835 		generic_make_request(read_bio);
836 		return 0;
837 	}
838 
839 	/*
840 	 * WRITE:
841 	 */
842 	/* first select target devices under spinlock and
843 	 * inc refcount on their rdev.  Record them by setting
844 	 * bios[x] to bio
845 	 */
846 	raid10_find_phys(conf, r10_bio);
847 	rcu_read_lock();
848 	for (i = 0;  i < conf->copies; i++) {
849 		int d = r10_bio->devs[i].devnum;
850 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
851 		if (rdev &&
852 		    !test_bit(Faulty, &rdev->flags)) {
853 			atomic_inc(&rdev->nr_pending);
854 			r10_bio->devs[i].bio = bio;
855 		} else {
856 			r10_bio->devs[i].bio = NULL;
857 			set_bit(R10BIO_Degraded, &r10_bio->state);
858 		}
859 	}
860 	rcu_read_unlock();
861 
862 	atomic_set(&r10_bio->remaining, 0);
863 
864 	bio_list_init(&bl);
865 	for (i = 0; i < conf->copies; i++) {
866 		struct bio *mbio;
867 		int d = r10_bio->devs[i].devnum;
868 		if (!r10_bio->devs[i].bio)
869 			continue;
870 
871 		mbio = bio_clone(bio, GFP_NOIO);
872 		r10_bio->devs[i].bio = mbio;
873 
874 		mbio->bi_sector	= r10_bio->devs[i].addr+
875 			conf->mirrors[d].rdev->data_offset;
876 		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
877 		mbio->bi_end_io	= raid10_end_write_request;
878 		mbio->bi_rw = WRITE;
879 		mbio->bi_private = r10_bio;
880 
881 		atomic_inc(&r10_bio->remaining);
882 		bio_list_add(&bl, mbio);
883 	}
884 
885 	bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
886 	spin_lock_irqsave(&conf->device_lock, flags);
887 	bio_list_merge(&conf->pending_bio_list, &bl);
888 	blk_plug_device(mddev->queue);
889 	spin_unlock_irqrestore(&conf->device_lock, flags);
890 
891 	return 0;
892 }
893 
894 static void status(struct seq_file *seq, mddev_t *mddev)
895 {
896 	conf_t *conf = mddev_to_conf(mddev);
897 	int i;
898 
899 	if (conf->near_copies < conf->raid_disks)
900 		seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
901 	if (conf->near_copies > 1)
902 		seq_printf(seq, " %d near-copies", conf->near_copies);
903 	if (conf->far_copies > 1)
904 		seq_printf(seq, " %d far-copies", conf->far_copies);
905 
906 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
907 						conf->working_disks);
908 	for (i = 0; i < conf->raid_disks; i++)
909 		seq_printf(seq, "%s",
910 			      conf->mirrors[i].rdev &&
911 			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
912 	seq_printf(seq, "]");
913 }
914 
915 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
916 {
917 	char b[BDEVNAME_SIZE];
918 	conf_t *conf = mddev_to_conf(mddev);
919 
920 	/*
921 	 * If it is not operational, then we have already marked it as dead
922 	 * else if it is the last working disk, ignore the error, let the
923 	 * next level up know.
924 	 * else mark the drive as failed
925 	 */
926 	if (test_bit(In_sync, &rdev->flags)
927 	    && conf->working_disks == 1)
928 		/*
929 		 * Don't fail the drive, just return an IO error.
930 		 * The test should really be more sophisticated than
931 		 * "working_disks == 1", but it isn't critical, and
932 		 * can wait until we do more sophisticated "is the drive
933 		 * really dead" tests...
934 		 */
935 		return;
936 	if (test_bit(In_sync, &rdev->flags)) {
937 		mddev->degraded++;
938 		conf->working_disks--;
939 		/*
940 		 * if recovery is running, make sure it aborts.
941 		 */
942 		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
943 	}
944 	clear_bit(In_sync, &rdev->flags);
945 	set_bit(Faulty, &rdev->flags);
946 	mddev->sb_dirty = 1;
947 	printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
948 		"	Operation continuing on %d devices\n",
949 		bdevname(rdev->bdev,b), conf->working_disks);
950 }
951 
952 static void print_conf(conf_t *conf)
953 {
954 	int i;
955 	mirror_info_t *tmp;
956 
957 	printk("RAID10 conf printout:\n");
958 	if (!conf) {
959 		printk("(!conf)\n");
960 		return;
961 	}
962 	printk(" --- wd:%d rd:%d\n", conf->working_disks,
963 		conf->raid_disks);
964 
965 	for (i = 0; i < conf->raid_disks; i++) {
966 		char b[BDEVNAME_SIZE];
967 		tmp = conf->mirrors + i;
968 		if (tmp->rdev)
969 			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
970 				i, !test_bit(In_sync, &tmp->rdev->flags),
971 			        !test_bit(Faulty, &tmp->rdev->flags),
972 				bdevname(tmp->rdev->bdev,b));
973 	}
974 }
975 
976 static void close_sync(conf_t *conf)
977 {
978 	wait_barrier(conf);
979 	allow_barrier(conf);
980 
981 	mempool_destroy(conf->r10buf_pool);
982 	conf->r10buf_pool = NULL;
983 }
984 
985 /* check if there are enough drives for
986  * every block to appear on at least one
987  */
988 static int enough(conf_t *conf)
989 {
990 	int first = 0;
991 
992 	do {
993 		int n = conf->copies;
994 		int cnt = 0;
995 		while (n--) {
996 			if (conf->mirrors[first].rdev)
997 				cnt++;
998 			first = (first+1) % conf->raid_disks;
999 		}
1000 		if (cnt == 0)
1001 			return 0;
1002 	} while (first != 0);
1003 	return 1;
1004 }
1005 
1006 static int raid10_spare_active(mddev_t *mddev)
1007 {
1008 	int i;
1009 	conf_t *conf = mddev->private;
1010 	mirror_info_t *tmp;
1011 
1012 	/*
1013 	 * Find all non-in_sync disks within the RAID10 configuration
1014 	 * and mark them in_sync
1015 	 */
1016 	for (i = 0; i < conf->raid_disks; i++) {
1017 		tmp = conf->mirrors + i;
1018 		if (tmp->rdev
1019 		    && !test_bit(Faulty, &tmp->rdev->flags)
1020 		    && !test_bit(In_sync, &tmp->rdev->flags)) {
1021 			conf->working_disks++;
1022 			mddev->degraded--;
1023 			set_bit(In_sync, &tmp->rdev->flags);
1024 		}
1025 	}
1026 
1027 	print_conf(conf);
1028 	return 0;
1029 }
1030 
1031 
1032 static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1033 {
1034 	conf_t *conf = mddev->private;
1035 	int found = 0;
1036 	int mirror;
1037 	mirror_info_t *p;
1038 
1039 	if (mddev->recovery_cp < MaxSector)
1040 		/* only hot-add to in-sync arrays, as recovery is
1041 		 * very different from resync
1042 		 */
1043 		return 0;
1044 	if (!enough(conf))
1045 		return 0;
1046 
1047 	if (rdev->saved_raid_disk >= 0 &&
1048 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1049 		mirror = rdev->saved_raid_disk;
1050 	else
1051 		mirror = 0;
1052 	for ( ; mirror < mddev->raid_disks; mirror++)
1053 		if ( !(p=conf->mirrors+mirror)->rdev) {
1054 
1055 			blk_queue_stack_limits(mddev->queue,
1056 					       rdev->bdev->bd_disk->queue);
1057 			/* as we don't honour merge_bvec_fn, we must never risk
1058 			 * violating it, so limit ->max_sectors to one PAGE, as
1059 			 * a one page request is never in violation.
1060 			 */
1061 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1062 			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
1063 				mddev->queue->max_sectors = (PAGE_SIZE>>9);
1064 
1065 			p->head_position = 0;
1066 			rdev->raid_disk = mirror;
1067 			found = 1;
1068 			if (rdev->saved_raid_disk != mirror)
1069 				conf->fullsync = 1;
1070 			rcu_assign_pointer(p->rdev, rdev);
1071 			break;
1072 		}
1073 
1074 	print_conf(conf);
1075 	return found;
1076 }
1077 
1078 static int raid10_remove_disk(mddev_t *mddev, int number)
1079 {
1080 	conf_t *conf = mddev->private;
1081 	int err = 0;
1082 	mdk_rdev_t *rdev;
1083 	mirror_info_t *p = conf->mirrors+ number;
1084 
1085 	print_conf(conf);
1086 	rdev = p->rdev;
1087 	if (rdev) {
1088 		if (test_bit(In_sync, &rdev->flags) ||
1089 		    atomic_read(&rdev->nr_pending)) {
1090 			err = -EBUSY;
1091 			goto abort;
1092 		}
1093 		p->rdev = NULL;
1094 		synchronize_rcu();
1095 		if (atomic_read(&rdev->nr_pending)) {
1096 			/* lost the race, try later */
1097 			err = -EBUSY;
1098 			p->rdev = rdev;
1099 		}
1100 	}
1101 abort:
1102 
1103 	print_conf(conf);
1104 	return err;
1105 }
1106 
1107 
1108 static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1109 {
1110 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1111 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
1112 	int i,d;
1113 
1114 	if (bio->bi_size)
1115 		return 1;
1116 
1117 	for (i=0; i<conf->copies; i++)
1118 		if (r10_bio->devs[i].bio == bio)
1119 			break;
1120 	BUG_ON(i == conf->copies);
1121 	update_head_pos(i, r10_bio);
1122 	d = r10_bio->devs[i].devnum;
1123 
1124 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1125 		set_bit(R10BIO_Uptodate, &r10_bio->state);
1126 	else {
1127 		atomic_add(r10_bio->sectors,
1128 			   &conf->mirrors[d].rdev->corrected_errors);
1129 		if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1130 			md_error(r10_bio->mddev,
1131 				 conf->mirrors[d].rdev);
1132 	}
1133 
1134 	/* for reconstruct, we always reschedule after a read.
1135 	 * for resync, only after all reads
1136 	 */
1137 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1138 	    atomic_dec_and_test(&r10_bio->remaining)) {
1139 		/* we have read all the blocks,
1140 		 * do the comparison in process context in raid10d
1141 		 */
1142 		reschedule_retry(r10_bio);
1143 	}
1144 	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1145 	return 0;
1146 }
1147 
1148 static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1149 {
1150 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1151 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1152 	mddev_t *mddev = r10_bio->mddev;
1153 	conf_t *conf = mddev_to_conf(mddev);
1154 	int i,d;
1155 
1156 	if (bio->bi_size)
1157 		return 1;
1158 
1159 	for (i = 0; i < conf->copies; i++)
1160 		if (r10_bio->devs[i].bio == bio)
1161 			break;
1162 	d = r10_bio->devs[i].devnum;
1163 
1164 	if (!uptodate)
1165 		md_error(mddev, conf->mirrors[d].rdev);
1166 	update_head_pos(i, r10_bio);
1167 
1168 	while (atomic_dec_and_test(&r10_bio->remaining)) {
1169 		if (r10_bio->master_bio == NULL) {
1170 			/* the primary of several recovery bios */
1171 			md_done_sync(mddev, r10_bio->sectors, 1);
1172 			put_buf(r10_bio);
1173 			break;
1174 		} else {
1175 			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1176 			put_buf(r10_bio);
1177 			r10_bio = r10_bio2;
1178 		}
1179 	}
1180 	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1181 	return 0;
1182 }
1183 
1184 /*
1185  * Note: sync and recovery are handled very differently for raid10.
1186  * This code is for resync.
1187  * For resync, we read through virtual addresses and read all blocks.
1188  * If there is any error, we schedule a write.  The lowest numbered
1189  * drive is authoritative.
1190  * However requests come for physical address, so we need to map.
1191  * For every physical address there are raid_disks/copies virtual addresses,
1192  * which is always at least one, but is not necessarily an integer.
1193  * This means that a physical address can span multiple chunks, so we may
1194  * have to submit multiple io requests for a single sync request.
1195  */
1196 /*
1197  * We check if all blocks are in-sync and only write to blocks that
1198  * aren't in sync
1199  */
1200 static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1201 {
1202 	conf_t *conf = mddev_to_conf(mddev);
1203 	int i, first;
1204 	struct bio *tbio, *fbio;
1205 
1206 	atomic_set(&r10_bio->remaining, 1);
1207 
1208 	/* find the first device with a block */
1209 	for (i=0; i<conf->copies; i++)
1210 		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1211 			break;
1212 
1213 	if (i == conf->copies)
1214 		goto done;
1215 
1216 	first = i;
1217 	fbio = r10_bio->devs[i].bio;
1218 
1219 	/* now find blocks with errors */
1220 	for (i=0 ; i < conf->copies ; i++) {
1221 		int  j, d;
1222 		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1223 
1224 		tbio = r10_bio->devs[i].bio;
1225 
1226 		if (tbio->bi_end_io != end_sync_read)
1227 			continue;
1228 		if (i == first)
1229 			continue;
1230 		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1231 			/* We know that the bi_io_vec layout is the same for
1232 			 * both 'first' and 'i', so we just compare them.
1233 			 * All vec entries are PAGE_SIZE;
1234 			 */
1235 			for (j = 0; j < vcnt; j++)
1236 				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1237 					   page_address(tbio->bi_io_vec[j].bv_page),
1238 					   PAGE_SIZE))
1239 					break;
1240 			if (j == vcnt)
1241 				continue;
1242 			mddev->resync_mismatches += r10_bio->sectors;
1243 		}
1244 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1245 			/* Don't fix anything. */
1246 			continue;
1247 		/* Ok, we need to write this bio
1248 		 * First we need to fixup bv_offset, bv_len and
1249 		 * bi_vecs, as the read request might have corrupted these
1250 		 */
1251 		tbio->bi_vcnt = vcnt;
1252 		tbio->bi_size = r10_bio->sectors << 9;
1253 		tbio->bi_idx = 0;
1254 		tbio->bi_phys_segments = 0;
1255 		tbio->bi_hw_segments = 0;
1256 		tbio->bi_hw_front_size = 0;
1257 		tbio->bi_hw_back_size = 0;
1258 		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1259 		tbio->bi_flags |= 1 << BIO_UPTODATE;
1260 		tbio->bi_next = NULL;
1261 		tbio->bi_rw = WRITE;
1262 		tbio->bi_private = r10_bio;
1263 		tbio->bi_sector = r10_bio->devs[i].addr;
1264 
1265 		for (j=0; j < vcnt ; j++) {
1266 			tbio->bi_io_vec[j].bv_offset = 0;
1267 			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1268 
1269 			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1270 			       page_address(fbio->bi_io_vec[j].bv_page),
1271 			       PAGE_SIZE);
1272 		}
1273 		tbio->bi_end_io = end_sync_write;
1274 
1275 		d = r10_bio->devs[i].devnum;
1276 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1277 		atomic_inc(&r10_bio->remaining);
1278 		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1279 
1280 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1281 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1282 		generic_make_request(tbio);
1283 	}
1284 
1285 done:
1286 	if (atomic_dec_and_test(&r10_bio->remaining)) {
1287 		md_done_sync(mddev, r10_bio->sectors, 1);
1288 		put_buf(r10_bio);
1289 	}
1290 }
1291 
1292 /*
1293  * Now for the recovery code.
1294  * Recovery happens across physical sectors.
1295  * We recover all non-is_sync drives by finding the virtual address of
1296  * each, and then choose a working drive that also has that virt address.
1297  * There is a separate r10_bio for each non-in_sync drive.
1298  * Only the first two slots are in use: the first for reading,
1299  * the second for writing.
1300  *
1301  */
1302 
1303 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1304 {
1305 	conf_t *conf = mddev_to_conf(mddev);
1306 	int i, d;
1307 	struct bio *bio, *wbio;
1308 
1309 
1310 	/* move the pages across to the second bio
1311 	 * and submit the write request
1312 	 */
1313 	bio = r10_bio->devs[0].bio;
1314 	wbio = r10_bio->devs[1].bio;
1315 	for (i=0; i < wbio->bi_vcnt; i++) {
1316 		struct page *p = bio->bi_io_vec[i].bv_page;
1317 		bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
1318 		wbio->bi_io_vec[i].bv_page = p;
1319 	}
1320 	d = r10_bio->devs[1].devnum;
1321 
1322 	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1323 	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1324 	if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1325 		generic_make_request(wbio);
1326 	else
1327 		bio_endio(wbio, wbio->bi_size, -EIO);
1328 }
1329 
1330 
1331 /*
1332  * This is a kernel thread which:
1333  *
1334  *	1.	Retries failed read operations on working mirrors.
1335  *	2.	Updates the raid superblock when problems are encountered.
1336  *	3.	Performs writes following reads for array synchronising.
1337  */
1338 
1339 static void raid10d(mddev_t *mddev)
1340 {
1341 	r10bio_t *r10_bio;
1342 	struct bio *bio;
1343 	unsigned long flags;
1344 	conf_t *conf = mddev_to_conf(mddev);
1345 	struct list_head *head = &conf->retry_list;
1346 	int unplug=0;
1347 	mdk_rdev_t *rdev;
1348 
1349 	md_check_recovery(mddev);
1350 
1351 	for (;;) {
1352 		char b[BDEVNAME_SIZE];
1353 		spin_lock_irqsave(&conf->device_lock, flags);
1354 
1355 		if (conf->pending_bio_list.head) {
1356 			bio = bio_list_get(&conf->pending_bio_list);
1357 			blk_remove_plug(mddev->queue);
1358 			spin_unlock_irqrestore(&conf->device_lock, flags);
1359 			/* flush any pending bitmap writes to disk before proceeding w/ I/O */
1360 			if (bitmap_unplug(mddev->bitmap) != 0)
1361 				printk("%s: bitmap file write failed!\n", mdname(mddev));
1362 
1363 			while (bio) { /* submit pending writes */
1364 				struct bio *next = bio->bi_next;
1365 				bio->bi_next = NULL;
1366 				generic_make_request(bio);
1367 				bio = next;
1368 			}
1369 			unplug = 1;
1370 
1371 			continue;
1372 		}
1373 
1374 		if (list_empty(head))
1375 			break;
1376 		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1377 		list_del(head->prev);
1378 		conf->nr_queued--;
1379 		spin_unlock_irqrestore(&conf->device_lock, flags);
1380 
1381 		mddev = r10_bio->mddev;
1382 		conf = mddev_to_conf(mddev);
1383 		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
1384 			sync_request_write(mddev, r10_bio);
1385 			unplug = 1;
1386 		} else 	if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
1387 			recovery_request_write(mddev, r10_bio);
1388 			unplug = 1;
1389 		} else {
1390 			int mirror;
1391 			/* we got a read error. Maybe the drive is bad.  Maybe just
1392 			 * the block and we can fix it.
1393 			 * We freeze all other IO, and try reading the block from
1394 			 * other devices.  When we find one, we re-write
1395 			 * and check if that fixes the read error.
1396 			 * This is all done synchronously while the array is
1397 			 * frozen.
1398 			 */
1399 			int sect = 0; /* Offset from r10_bio->sector */
1400 			int sectors = r10_bio->sectors;
1401 			freeze_array(conf);
1402 			if (mddev->ro == 0) while(sectors) {
1403 				int s = sectors;
1404 				int sl = r10_bio->read_slot;
1405 				int success = 0;
1406 
1407 				if (s > (PAGE_SIZE>>9))
1408 					s = PAGE_SIZE >> 9;
1409 
1410 				rcu_read_lock();
1411 				do {
1412 					int d = r10_bio->devs[sl].devnum;
1413 					rdev = rcu_dereference(conf->mirrors[d].rdev);
1414 					if (rdev &&
1415 					    test_bit(In_sync, &rdev->flags)) {
1416 						atomic_inc(&rdev->nr_pending);
1417 						rcu_read_unlock();
1418 						success = sync_page_io(rdev->bdev,
1419 								       r10_bio->devs[sl].addr +
1420 								       sect + rdev->data_offset,
1421 								       s<<9,
1422 								       conf->tmppage, READ);
1423 						rdev_dec_pending(rdev, mddev);
1424 						rcu_read_lock();
1425 						if (success)
1426 							break;
1427 					}
1428 					sl++;
1429 					if (sl == conf->copies)
1430 						sl = 0;
1431 				} while (!success && sl != r10_bio->read_slot);
1432 				rcu_read_unlock();
1433 
1434 				if (success) {
1435 					int start = sl;
1436 					/* write it back and re-read */
1437 					rcu_read_lock();
1438 					while (sl != r10_bio->read_slot) {
1439 						int d;
1440 						if (sl==0)
1441 							sl = conf->copies;
1442 						sl--;
1443 						d = r10_bio->devs[sl].devnum;
1444 						rdev = rcu_dereference(conf->mirrors[d].rdev);
1445 						if (rdev &&
1446 						    test_bit(In_sync, &rdev->flags)) {
1447 							atomic_inc(&rdev->nr_pending);
1448 							rcu_read_unlock();
1449 							atomic_add(s, &rdev->corrected_errors);
1450 							if (sync_page_io(rdev->bdev,
1451 									 r10_bio->devs[sl].addr +
1452 									 sect + rdev->data_offset,
1453 									 s<<9, conf->tmppage, WRITE) == 0)
1454 								/* Well, this device is dead */
1455 								md_error(mddev, rdev);
1456 							rdev_dec_pending(rdev, mddev);
1457 							rcu_read_lock();
1458 						}
1459 					}
1460 					sl = start;
1461 					while (sl != r10_bio->read_slot) {
1462 						int d;
1463 						if (sl==0)
1464 							sl = conf->copies;
1465 						sl--;
1466 						d = r10_bio->devs[sl].devnum;
1467 						rdev = rcu_dereference(conf->mirrors[d].rdev);
1468 						if (rdev &&
1469 						    test_bit(In_sync, &rdev->flags)) {
1470 							atomic_inc(&rdev->nr_pending);
1471 							rcu_read_unlock();
1472 							if (sync_page_io(rdev->bdev,
1473 									 r10_bio->devs[sl].addr +
1474 									 sect + rdev->data_offset,
1475 									 s<<9, conf->tmppage, READ) == 0)
1476 								/* Well, this device is dead */
1477 								md_error(mddev, rdev);
1478 							rdev_dec_pending(rdev, mddev);
1479 							rcu_read_lock();
1480 						}
1481 					}
1482 					rcu_read_unlock();
1483 				} else {
1484 					/* Cannot read from anywhere -- bye bye array */
1485 					md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev);
1486 					break;
1487 				}
1488 				sectors -= s;
1489 				sect += s;
1490 			}
1491 
1492 			unfreeze_array(conf);
1493 
1494 			bio = r10_bio->devs[r10_bio->read_slot].bio;
1495 			r10_bio->devs[r10_bio->read_slot].bio =
1496 				mddev->ro ? IO_BLOCKED : NULL;
1497 			bio_put(bio);
1498 			mirror = read_balance(conf, r10_bio);
1499 			if (mirror == -1) {
1500 				printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
1501 				       " read error for block %llu\n",
1502 				       bdevname(bio->bi_bdev,b),
1503 				       (unsigned long long)r10_bio->sector);
1504 				raid_end_bio_io(r10_bio);
1505 			} else {
1506 				rdev = conf->mirrors[mirror].rdev;
1507 				if (printk_ratelimit())
1508 					printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
1509 					       " another mirror\n",
1510 					       bdevname(rdev->bdev,b),
1511 					       (unsigned long long)r10_bio->sector);
1512 				bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
1513 				r10_bio->devs[r10_bio->read_slot].bio = bio;
1514 				bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1515 					+ rdev->data_offset;
1516 				bio->bi_bdev = rdev->bdev;
1517 				bio->bi_rw = READ;
1518 				bio->bi_private = r10_bio;
1519 				bio->bi_end_io = raid10_end_read_request;
1520 				unplug = 1;
1521 				generic_make_request(bio);
1522 			}
1523 		}
1524 	}
1525 	spin_unlock_irqrestore(&conf->device_lock, flags);
1526 	if (unplug)
1527 		unplug_slaves(mddev);
1528 }
1529 
1530 
1531 static int init_resync(conf_t *conf)
1532 {
1533 	int buffs;
1534 
1535 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1536 	BUG_ON(conf->r10buf_pool);
1537 	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
1538 	if (!conf->r10buf_pool)
1539 		return -ENOMEM;
1540 	conf->next_resync = 0;
1541 	return 0;
1542 }
1543 
1544 /*
1545  * perform a "sync" on one "block"
1546  *
1547  * We need to make sure that no normal I/O request - particularly write
1548  * requests - conflicts with active sync requests.
1549  *
1550  * This is achieved by tracking pending requests and a 'barrier' concept
1551  * that can be installed to exclude normal IO requests.
1552  *
1553  * Resync and recovery are handled very differently.
1554  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
1555  *
1556  * For resync, we iterate over virtual addresses, read all copies,
1557  * and update if there are differences.  If only one copy is live,
1558  * skip it.
1559  * For recovery, we iterate over physical addresses, read a good
1560  * value for each non-in_sync drive, and over-write.
1561  *
1562  * So, for recovery we may have several outstanding complex requests for a
1563  * given address, one for each out-of-sync device.  We model this by allocating
1564  * a number of r10_bio structures, one for each out-of-sync device.
1565  * As we set up these structures, we collect all bios together into a list
1566  * which we then process collectively to add pages, and then process again
1567  * to pass to generic_make_request.
1568  *
1569  * The r10_bio structures are linked using a borrowed master_bio pointer.
1570  * This link is counted in ->remaining.  When the r10_bio that points to NULL
1571  * has its remaining count decremented to 0, the whole complex operation
1572  * is complete.
1573  *
1574  */
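
/*
 * In sketch form, a chain of recovery r10_bios linked through the
 * borrowed master_bio pointer can be torn down like this (illustrative
 * fragment only, much as the cleanup in sync_request() below does when
 * recovery cannot proceed):
 */
#if 0
	while (r10_bio) {
		r10bio_t *next = (r10bio_t *)r10_bio->master_bio;
		r10_bio->master_bio = NULL;
		put_buf(r10_bio);
		r10_bio = next;
	}
#endif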
1575 
1576 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1577 {
1578 	conf_t *conf = mddev_to_conf(mddev);
1579 	r10bio_t *r10_bio;
1580 	struct bio *biolist = NULL, *bio;
1581 	sector_t max_sector, nr_sectors;
1582 	int disk;
1583 	int i;
1584 	int max_sync;
1585 	int sync_blocks;
1586 
1587 	sector_t sectors_skipped = 0;
1588 	int chunks_skipped = 0;
1589 
1590 	if (!conf->r10buf_pool)
1591 		if (init_resync(conf))
1592 			return 0;
1593 
1594  skipped:
1595 	max_sector = mddev->size << 1;
1596 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1597 		max_sector = mddev->resync_max_sectors;
1598 	if (sector_nr >= max_sector) {
1599 		/* If we aborted, we need to abort the
1600 		 * sync on the 'current' bitmap chunks (there can
1601 		 * be several when recovering multiple devices),
1602 		 * as we may have started syncing them but not finished.
1603 		 * We can find the current address in
1604 		 * mddev->curr_resync, but for recovery,
1605 		 * we need to convert that to several
1606 		 * virtual addresses.
1607 		 */
1608 		if (mddev->curr_resync < max_sector) { /* aborted */
1609 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1610 				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1611 						&sync_blocks, 1);
1612 			else for (i=0; i<conf->raid_disks; i++) {
1613 				sector_t sect =
1614 					raid10_find_virt(conf, mddev->curr_resync, i);
1615 				bitmap_end_sync(mddev->bitmap, sect,
1616 						&sync_blocks, 1);
1617 			}
1618 		} else /* completed sync */
1619 			conf->fullsync = 0;
1620 
1621 		bitmap_close_sync(mddev->bitmap);
1622 		close_sync(conf);
1623 		*skipped = 1;
1624 		return sectors_skipped;
1625 	}
1626 	if (chunks_skipped >= conf->raid_disks) {
1627 		/* if there has been nothing to do on any drive,
1628 		 * then there is nothing to do at all.
1629 		 */
1630 		*skipped = 1;
1631 		return (max_sector - sector_nr) + sectors_skipped;
1632 	}
1633 
1634 	/* make sure whole request will fit in a chunk - if chunks
1635 	 * are meaningful
1636 	 */
1637 	if (conf->near_copies < conf->raid_disks &&
1638 	    max_sector > (sector_nr | conf->chunk_mask))
1639 		max_sector = (sector_nr | conf->chunk_mask) + 1;
1640 	/*
1641 	 * If there is non-resync activity waiting for us then
1642 	 * put in a delay to throttle resync.
1643 	 */
1644 	if (!go_faster && conf->nr_waiting)
1645 		msleep_interruptible(1000);
1646 
1647 	/* Again, very different code for resync and recovery.
1648 	 * Both must result in an r10bio with a list of bios that
1649 	 * have bi_end_io, bi_sector, bi_bdev set,
1650 	 * and bi_private set to the r10bio.
1651 	 * For recovery, we may actually create several r10bios
1652 	 * with 2 bios in each, that correspond to the bios in the main one.
1653 	 * In this case, the subordinate r10bios link back through a
1654 	 * borrowed master_bio pointer, and the counter in the master
1655 	 * includes a ref from each subordinate.
1656 	 */
1657 	/* First, we decide what to do and set ->bi_end_io
1658 	 * To end_sync_read if we want to read, and
1659 	 * end_sync_write if we will want to write.
1660 	 */
1661 
1662 	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1663 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1664 		/* recovery... the complicated one */
1665 		int i, j, k;
1666 		r10_bio = NULL;
1667 
1668 		for (i=0 ; i<conf->raid_disks; i++)
1669 			if (conf->mirrors[i].rdev &&
1670 			    !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
1671 				int still_degraded = 0;
1672 				/* want to reconstruct this device */
1673 				r10bio_t *rb2 = r10_bio;
1674 				sector_t sect = raid10_find_virt(conf, sector_nr, i);
1675 				int must_sync;
1676 				/* Unless we are doing a full sync, we only need
1677 				 * to recover the block if it is set in the bitmap
1678 				 */
1679 				must_sync = bitmap_start_sync(mddev->bitmap, sect,
1680 							      &sync_blocks, 1);
1681 				if (sync_blocks < max_sync)
1682 					max_sync = sync_blocks;
1683 				if (!must_sync &&
1684 				    !conf->fullsync) {
1685 					/* yep, skip the sync_blocks here, but don't assume
1686 					 * that there will never be anything to do here
1687 					 */
1688 					chunks_skipped = -1;
1689 					continue;
1690 				}
1691 
1692 				r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1693 				raise_barrier(conf, rb2 != NULL);
1694 				atomic_set(&r10_bio->remaining, 0);
1695 
1696 				r10_bio->master_bio = (struct bio*)rb2;
1697 				if (rb2)
1698 					atomic_inc(&rb2->remaining);
1699 				r10_bio->mddev = mddev;
1700 				set_bit(R10BIO_IsRecover, &r10_bio->state);
1701 				r10_bio->sector = sect;
1702 
1703 				raid10_find_phys(conf, r10_bio);
1704 				/* Need to check if this section will still be
1705 				 * degraded
1706 				 */
1707 				for (j=0; j<conf->copies;j++) {
1708 					int d = r10_bio->devs[j].devnum;
1709 					if (conf->mirrors[d].rdev == NULL ||
1710 					    test_bit(Faulty, &conf->mirrors[d].rdev->flags)) {
1711 						still_degraded = 1;
1712 						break;
1713 					}
1714 				}
1715 				must_sync = bitmap_start_sync(mddev->bitmap, sect,
1716 							      &sync_blocks, still_degraded);
1717 
1718 				for (j=0; j<conf->copies;j++) {
1719 					int d = r10_bio->devs[j].devnum;
1720 					if (conf->mirrors[d].rdev &&
1721 					    test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
1722 						/* This is where we read from */
1723 						bio = r10_bio->devs[0].bio;
1724 						bio->bi_next = biolist;
1725 						biolist = bio;
1726 						bio->bi_private = r10_bio;
1727 						bio->bi_end_io = end_sync_read;
1728 						bio->bi_rw = 0;
1729 						bio->bi_sector = r10_bio->devs[j].addr +
1730 							conf->mirrors[d].rdev->data_offset;
1731 						bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1732 						atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1733 						atomic_inc(&r10_bio->remaining);
1734 						/* and we write to 'i' */
1735 
1736 						for (k=0; k<conf->copies; k++)
1737 							if (r10_bio->devs[k].devnum == i)
1738 								break;
1739 						bio = r10_bio->devs[1].bio;
1740 						bio->bi_next = biolist;
1741 						biolist = bio;
1742 						bio->bi_private = r10_bio;
1743 						bio->bi_end_io = end_sync_write;
1744 						bio->bi_rw = 1;
1745 						bio->bi_sector = r10_bio->devs[k].addr +
1746 							conf->mirrors[i].rdev->data_offset;
1747 						bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1748 
1749 						r10_bio->devs[0].devnum = d;
1750 						r10_bio->devs[1].devnum = i;
1751 
1752 						break;
1753 					}
1754 				}
1755 				if (j == conf->copies) {
1756 					/* Cannot recover, so abort the recovery */
1757 					put_buf(r10_bio);
1758 					r10_bio = rb2;
1759 					if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery))
1760 						printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
1761 						       mdname(mddev));
1762 					break;
1763 				}
1764 			}
1765 		if (biolist == NULL) {
1766 			while (r10_bio) {
1767 				r10bio_t *rb2 = r10_bio;
1768 				r10_bio = (r10bio_t*) rb2->master_bio;
1769 				rb2->master_bio = NULL;
1770 				put_buf(rb2);
1771 			}
1772 			goto giveup;
1773 		}
1774 	} else {
1775 		/* resync: schedule a read for every block at this virtual offset */
1776 		int count = 0;
1777 
1778 		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1779 				       &sync_blocks, mddev->degraded) &&
1780 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1781 			/* We can skip this block */
1782 			*skipped = 1;
1783 			return sync_blocks + sectors_skipped;
1784 		}
1785 		if (sync_blocks < max_sync)
1786 			max_sync = sync_blocks;
1787 		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1788 
1789 		r10_bio->mddev = mddev;
1790 		atomic_set(&r10_bio->remaining, 0);
1791 		raise_barrier(conf, 0);
1792 		conf->next_resync = sector_nr;
1793 
1794 		r10_bio->master_bio = NULL;
1795 		r10_bio->sector = sector_nr;
1796 		set_bit(R10BIO_IsSync, &r10_bio->state);
1797 		raid10_find_phys(conf, r10_bio);
1798 		r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr + 1;
1799 
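		/*
		 * Schedule a read from every working copy; when they all
		 * complete, the copies are compared and any copy found to
		 * differ is rewritten from the first one read.
		 */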
1800 		for (i=0; i<conf->copies; i++) {
1801 			int d = r10_bio->devs[i].devnum;
1802 			bio = r10_bio->devs[i].bio;
1803 			bio->bi_end_io = NULL;
1804 			if (conf->mirrors[d].rdev == NULL ||
1805 			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
1806 				continue;
1807 			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1808 			atomic_inc(&r10_bio->remaining);
1809 			bio->bi_next = biolist;
1810 			biolist = bio;
1811 			bio->bi_private = r10_bio;
1812 			bio->bi_end_io = end_sync_read;
1813 			bio->bi_rw = 0;
1814 			bio->bi_sector = r10_bio->devs[i].addr +
1815 				conf->mirrors[d].rdev->data_offset;
1816 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1817 			count++;
1818 		}
1819 
1820 		if (count < 2) {
1821 			for (i=0; i<conf->copies; i++) {
1822 				int d = r10_bio->devs[i].devnum;
1823 				if (r10_bio->devs[i].bio->bi_end_io)
1824 					rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1825 			}
1826 			put_buf(r10_bio);
1827 			biolist = NULL;
1828 			goto giveup;
1829 		}
1830 	}
1831 
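	/*
	 * Prepare every queued bio for reuse: clear all flag bits
	 * (the high bits, which hold the bio's mempool index, are
	 * preserved), mark live bios up-to-date, and zero the size
	 * and segment counts before pages are added below.
	 */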
1832 	for (bio = biolist; bio ; bio=bio->bi_next) {
1833 
1834 		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1835 		if (bio->bi_end_io)
1836 			bio->bi_flags |= 1 << BIO_UPTODATE;
1837 		bio->bi_vcnt = 0;
1838 		bio->bi_idx = 0;
1839 		bio->bi_phys_segments = 0;
1840 		bio->bi_hw_segments = 0;
1841 		bio->bi_size = 0;
1842 	}
1843 
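	/*
	 * Clamp this pass to max_sync sectors, then attach pages to
	 * all scheduled bios in lock-step, one page at a time, so
	 * every bio covers exactly the same sectors; if one fills up
	 * early, the page is stripped again from the bios that
	 * already took it.
	 */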
1844 	nr_sectors = 0;
1845 	if (sector_nr + max_sync < max_sector)
1846 		max_sector = sector_nr + max_sync;
1847 	do {
1848 		struct page *page;
1849 		int len = PAGE_SIZE;
1850 		disk = 0;
1851 		if (sector_nr + (len>>9) > max_sector)
1852 			len = (max_sector - sector_nr) << 9;
1853 		if (len == 0)
1854 			break;
1855 		for (bio = biolist ; bio ; bio=bio->bi_next) {
1856 			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
1857 			if (bio_add_page(bio, page, len, 0) == 0) {
1858 				/* stop here */
1859 				struct bio *bio2;
1860 				bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
1861 				for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
1862 					/* remove last page from this bio */
1863 					bio2->bi_vcnt--;
1864 					bio2->bi_size -= len;
1865 					bio2->bi_flags &= ~(1 << BIO_SEG_VALID);
1866 				}
1867 				goto bio_full;
1868 			}
1869 			disk = i;
1870 		}
1871 		nr_sectors += len>>9;
1872 		sector_nr += len>>9;
1873 	} while (biolist->bi_vcnt < RESYNC_PAGES);
1874  bio_full:
1875 	r10_bio->sectors = nr_sectors;
1876 
1877 	while (biolist) {
1878 		bio = biolist;
1879 		biolist = biolist->bi_next;
1880 
1881 		bio->bi_next = NULL;
1882 		r10_bio = bio->bi_private;
1883 		r10_bio->sectors = nr_sectors;
1884 
1885 		if (bio->bi_end_io == end_sync_read) {
1886 			md_sync_acct(bio->bi_bdev, nr_sectors);
1887 			generic_make_request(bio);
1888 		}
1889 	}
1890 
1891 	if (sectors_skipped)
1892 		/* account the skipped sectors as if they had synced;
1893 		 * the distinction makes no difference here
1894 		 */
1895 		md_done_sync(mddev, sectors_skipped, 1);
1896 
1897 	return sectors_skipped + nr_sectors;
1898  giveup:
1899 	/* There is nowhere to write, so all non-sync
1900 	 * drives must have failed; try the next chunk.
1901 	 */
1902 	{
1903 		sector_t sec = max_sector - sector_nr;
1904 		sectors_skipped += sec;
1905 		chunks_skipped++;
1906 		sector_nr = max_sector;
1907 		goto skipped;
1908 	}
1909 }
1910 
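/*
 * run() is called when the array is started: it validates the layout,
 * allocates the per-array conf structure and bio pools, wires each
 * known rdev into its mirror slot, and starts the raid10d thread.
 */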
1911 static int run(mddev_t *mddev)
1912 {
1913 	conf_t *conf;
1914 	int i, disk_idx;
1915 	mirror_info_t *disk;
1916 	mdk_rdev_t *rdev;
1917 	struct list_head *tmp;
1918 	int nc, fc;
1919 	sector_t stride, size;
1920 
1921 	if (mddev->chunk_size == 0) {
1922 		printk(KERN_ERR "md/raid10: non-zero chunk size required.\n");
1923 		return -EINVAL;
1924 	}
1925 
1926 	nc = mddev->layout & 255;
1927 	fc = (mddev->layout >> 8) & 255;
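	/*
	 * Example: the common "n2" layout is 0x102, so
	 *   nc = 0x102 & 255        = 2  (near copies)
	 *   fc = (0x102 >> 8) & 255 = 1  (far copies)
	 */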
1928 	if ((nc*fc) < 2 || (nc*fc) > mddev->raid_disks ||
1929 	    (mddev->layout >> 16)) {
1930 		printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%08x\n",
1931 		       mdname(mddev), mddev->layout);
1932 		goto out;
1933 	}
1934 	/*
1935 	 * copy the already verified devices into our private RAID10
1936 	 * bookkeeping area. [whatever we allocate in run(),
1937 	 * should be freed in stop()]
1938 	 */
1939 	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
1940 	mddev->private = conf;
1941 	if (!conf) {
1942 		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1943 			mdname(mddev));
1944 		goto out;
1945 	}
1946 	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1947 				 GFP_KERNEL);
1948 	if (!conf->mirrors) {
1949 		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1950 		       mdname(mddev));
1951 		goto out_free_conf;
1952 	}
1953 
1954 	conf->tmppage = alloc_page(GFP_KERNEL);
1955 	if (!conf->tmppage)
1956 		goto out_free_conf;
1957 
1958 	conf->near_copies = nc;
1959 	conf->far_copies = fc;
1960 	conf->copies = nc*fc;
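	/*
	 * Compute the stride: the number of sectors in each far
	 * section of a device.  mddev->size is in 1K blocks, so
	 * shifting by (chunk_shift - 1) gives whole chunks per
	 * device, which are then divided among the far sections.
	 * E.g. a 64K chunk gives chunk_shift = 7 (128 sectors).
	 */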
1961 	conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
1962 	conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
1963 	stride = mddev->size >> (conf->chunk_shift-1);
1964 	sector_div(stride, fc);
1965 	conf->stride = stride << conf->chunk_shift;
1966 
1967 	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
1968 						r10bio_pool_free, conf);
1969 	if (!conf->r10bio_pool) {
1970 		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1971 			mdname(mddev));
1972 		goto out_free_conf;
1973 	}
1974 
1975 	ITERATE_RDEV(mddev, rdev, tmp) {
1976 		disk_idx = rdev->raid_disk;
1977 		if (disk_idx >= mddev->raid_disks
1978 		    || disk_idx < 0)
1979 			continue;
1980 		disk = conf->mirrors + disk_idx;
1981 
1982 		disk->rdev = rdev;
1983 
1984 		blk_queue_stack_limits(mddev->queue,
1985 				       rdev->bdev->bd_disk->queue);
1986 		/* as we don't honour merge_bvec_fn, we must never risk
1987 		 * violating it, so limit ->max_sectors to one PAGE, as
1988 		 * a one-page request is never in violation.
1989 		 */
1990 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1991 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
1992 			mddev->queue->max_sectors = (PAGE_SIZE>>9);
1993 
1994 		disk->head_position = 0;
1995 		if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags))
1996 			conf->working_disks++;
1997 	}
1998 	conf->raid_disks = mddev->raid_disks;
1999 	conf->mddev = mddev;
2000 	spin_lock_init(&conf->device_lock);
2001 	INIT_LIST_HEAD(&conf->retry_list);
2002 
2003 	spin_lock_init(&conf->resync_lock);
2004 	init_waitqueue_head(&conf->wait_barrier);
2005 
2006 	/* need to check that every block has at least one working mirror */
2007 	if (!enough(conf)) {
2008 		printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
2009 		       mdname(mddev));
2010 		goto out_free_conf;
2011 	}
2012 
2013 	mddev->degraded = 0;
2014 	for (i = 0; i < conf->raid_disks; i++) {
2015 
2016 		disk = conf->mirrors + i;
2017 
2018 		if (!disk->rdev) {
2019 			disk->head_position = 0;
2020 			mddev->degraded++;
2021 		}
2022 	}
2023 
2024 
2025 	mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10");
2026 	if (!mddev->thread) {
2027 		printk(KERN_ERR
2028 		       "raid10: couldn't allocate thread for %s\n",
2029 		       mdname(mddev));
2030 		goto out_free_conf;
2031 	}
2032 
2033 	printk(KERN_INFO
2034 		"raid10: raid set %s active with %d out of %d devices\n",
2035 		mdname(mddev), mddev->raid_disks - mddev->degraded,
2036 		mddev->raid_disks);
2037 	/*
2038 	 * Initialization succeeded; export the array geometry.
2039 	 */
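	/*
	 * size is in sectors: stride * raid_disks counts every chunk
	 * once per near-copy, so dividing by near_copies yields the
	 * virtual array size.  array_size is exported in 1K blocks,
	 * hence the halving; a resync walks the full sector range.
	 */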
2040 	size = conf->stride * conf->raid_disks;
2041 	sector_div(size, conf->near_copies);
2042 	mddev->array_size = size/2;
2043 	mddev->resync_max_sectors = size;
2044 
2045 	mddev->queue->unplug_fn = raid10_unplug;
2046 	mddev->queue->issue_flush_fn = raid10_issue_flush;
2047 
2048 	/* Calculate max read-ahead size.
2049 	 * We want read-ahead of at least two whole stripes so that
2050 	 * sequential reads keep every member device busy.
2051 	 */
2052 	{
2053 		int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE;
2054 		stripe /= conf->near_copies;
2055 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
2056 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
2057 	}
2058 
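	/*
	 * A merge_bvec_fn is only needed when copies do not span every
	 * device; with near_copies == raid_disks the array behaves like
	 * RAID1 and chunk boundaries never split a request across
	 * devices.
	 */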
2059 	if (conf->near_copies < mddev->raid_disks)
2060 		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2061 	return 0;
2062 
2063 out_free_conf:
2064 	if (conf->r10bio_pool)
2065 		mempool_destroy(conf->r10bio_pool);
2066 	safe_put_page(conf->tmppage);
2067 	kfree(conf->mirrors);
2068 	kfree(conf);
2069 	mddev->private = NULL;
2070 out:
2071 	return -EIO;
2072 }
2073 
2074 static int stop(mddev_t *mddev)
2075 {
2076 	conf_t *conf = mddev_to_conf(mddev);
2077 
2078 	md_unregister_thread(mddev->thread);
2079 	mddev->thread = NULL;
2080 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2081 	if (conf->r10bio_pool)
2082 		mempool_destroy(conf->r10bio_pool);
2083 	kfree(conf->mirrors);
2084 	kfree(conf);
2085 	mddev->private = NULL;
2086 	return 0;
2087 }
2088 
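/*
 * Quiesce is driven by the bitmap code: state 1 raises the resync
 * barrier so normal I/O drains and blocks; state 0 lowers it again.
 * The thread timeout is refreshed so bitmap daemon work keeps running.
 */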
2089 static void raid10_quiesce(mddev_t *mddev, int state)
2090 {
2091 	conf_t *conf = mddev_to_conf(mddev);
2092 
2093 	switch(state) {
2094 	case 1:
2095 		raise_barrier(conf, 0);
2096 		break;
2097 	case 0:
2098 		lower_barrier(conf);
2099 		break;
2100 	}
2101 	if (mddev->thread) {
2102 		if (mddev->bitmap)
2103 			mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2104 		else
2105 			mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2106 		md_wakeup_thread(mddev->thread);
2107 	}
2108 }
2109 
2110 static struct mdk_personality raid10_personality =
2111 {
2112 	.name		= "raid10",
2113 	.level		= 10,
2114 	.owner		= THIS_MODULE,
2115 	.make_request	= make_request,
2116 	.run		= run,
2117 	.stop		= stop,
2118 	.status		= status,
2119 	.error_handler	= error,
2120 	.hot_add_disk	= raid10_add_disk,
2121 	.hot_remove_disk= raid10_remove_disk,
2122 	.spare_active	= raid10_spare_active,
2123 	.sync_request	= sync_request,
2124 	.quiesce	= raid10_quiesce,
2125 };
2126 
2127 static int __init raid_init(void)
2128 {
2129 	return register_md_personality(&raid10_personality);
2130 }
2131 
2132 static void raid_exit(void)
2133 {
2134 	unregister_md_personality(&raid10_personality);
2135 }
2136 
2137 module_init(raid_init);
2138 module_exit(raid_exit);
2139 MODULE_LICENSE("GPL");
2140 MODULE_ALIAS("md-personality-9"); /* RAID10 */
2141 MODULE_ALIAS("md-raid10");
2142 MODULE_ALIAS("md-level-10");
2143