xref: /linux/drivers/md/dm-raid1.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  * Copyright (C) 2003 Sistina Software Limited.
3  *
4  * This file is released under the GPL.
5  */
6 
7 #include "dm.h"
8 #include "dm-bio-list.h"
9 #include "dm-io.h"
10 #include "dm-log.h"
11 #include "kcopyd.h"
12 
13 #include <linux/ctype.h>
14 #include <linux/init.h>
15 #include <linux/mempool.h>
16 #include <linux/module.h>
17 #include <linux/pagemap.h>
18 #include <linux/slab.h>
19 #include <linux/time.h>
20 #include <linux/vmalloc.h>
21 #include <linux/workqueue.h>
22 
23 static struct workqueue_struct *_kmirrord_wq;
24 static struct work_struct _kmirrord_work;
25 
26 static inline void wake(void)
27 {
28 	queue_work(_kmirrord_wq, &_kmirrord_work);
29 }
30 
31 /*-----------------------------------------------------------------
32  * Region hash
33  *
34  * The mirror splits itself up into discrete regions.  Each
35  * region can be clean, dirty, nosync or, transiently, recovering.
36  * There is no need to put clean regions in the hash.
37  *
38  * In addition to being present in the hash table a region _may_
39  * be present on one of three lists.
40  *
41  *   clean_regions: Regions on this list have no io pending to
42  *   them; they are in sync, and we are no longer interested in
43  *   them.  rh_update_states() will remove them from the
44  *   hash table.
45  *
46  *   quiesced_regions: These regions have been spun down, ready
47  *   for recovery.  rh_recovery_start() will remove regions from
48  *   this list and hand them to kmirrord, which will schedule the
49  *   recovery io with kcopyd.
50  *
51  *   recovered_regions: Regions that kcopyd has successfully
52  *   recovered.  rh_update_states() will now schedule any delayed
53  *   io, up the recovery_count, and remove the region from the
54  *   hash.
55  *
56  * There are 2 locks:
57  *   A rw spin lock 'hash_lock' protects just the hash table,
58  *   this is never held in write mode from interrupt context,
59  *   which I believe means that we only have to disable irqs when
60  *   doing a write lock.
61  *
62  *   An ordinary spin lock 'region_lock' that protects the three
63  *   lists in the region_hash, with the 'state', 'list' and
64  *   'delayed_bios' fields of the regions.  This is used from irq
65  *   context, so all other uses will have to suspend local irqs.
66  *---------------------------------------------------------------*/
67 struct mirror_set;
68 struct region_hash {
69 	struct mirror_set *ms;
70 	uint32_t region_size;
71 	unsigned region_shift;
72 
73 	/* holds persistent region state */
74 	struct dirty_log *log;
75 
76 	/* hash table */
77 	rwlock_t hash_lock;
78 	mempool_t *region_pool;
79 	unsigned int mask;
80 	unsigned int nr_buckets;
81 	struct list_head *buckets;
82 
83 	spinlock_t region_lock;
84 	struct semaphore recovery_count;
85 	struct list_head clean_regions;
86 	struct list_head quiesced_regions;
87 	struct list_head recovered_regions;
88 };
89 
90 enum {
91 	RH_CLEAN,
92 	RH_DIRTY,
93 	RH_NOSYNC,
94 	RH_RECOVERING
95 };
96 
97 struct region {
98 	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
99 	region_t key;
100 	int state;
101 
102 	struct list_head hash_list;
103 	struct list_head list;
104 
105 	atomic_t pending;
106 	struct bio_list delayed_bios;
107 };
108 
109 /*
110  * Conversion fns
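 * (region_size is always a power of two, so these are simple shifts)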
111  */
112 static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
113 {
114 	return bio->bi_sector >> rh->region_shift;
115 }
116 
117 static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
118 {
119 	return region << rh->region_shift;
120 }
121 
122 /* FIXME move this */
123 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
124 
125 #define MIN_REGIONS 64
126 #define MAX_RECOVERY 1
127 static int rh_init(struct region_hash *rh, struct mirror_set *ms,
128 		   struct dirty_log *log, uint32_t region_size,
129 		   region_t nr_regions)
130 {
131 	unsigned int nr_buckets, max_buckets;
132 	size_t i;
133 
134 	/*
135 	 * Calculate a suitable number of buckets for our hash
136 	 * table.
137 	 */
138 	max_buckets = nr_regions >> 6;
139 	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
140 		;
141 	nr_buckets >>= 1;
142 
143 	rh->ms = ms;
144 	rh->log = log;
145 	rh->region_size = region_size;
146 	rh->region_shift = ffs(region_size) - 1;
147 	rwlock_init(&rh->hash_lock);
148 	rh->mask = nr_buckets - 1;
149 	rh->nr_buckets = nr_buckets;
150 
151 	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
152 	if (!rh->buckets) {
153 		DMERR("unable to allocate region hash memory");
154 		return -ENOMEM;
155 	}
156 
157 	for (i = 0; i < nr_buckets; i++)
158 		INIT_LIST_HEAD(rh->buckets + i);
159 
160 	spin_lock_init(&rh->region_lock);
161 	sema_init(&rh->recovery_count, 0);
162 	INIT_LIST_HEAD(&rh->clean_regions);
163 	INIT_LIST_HEAD(&rh->quiesced_regions);
164 	INIT_LIST_HEAD(&rh->recovered_regions);
165 
166 	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
167 						      sizeof(struct region));
168 	if (!rh->region_pool) {
169 		vfree(rh->buckets);
170 		rh->buckets = NULL;
171 		return -ENOMEM;
172 	}
173 
174 	return 0;
175 }
176 
177 static void rh_exit(struct region_hash *rh)
178 {
179 	unsigned int h;
180 	struct region *reg, *nreg;
181 
182 	BUG_ON(!list_empty(&rh->quiesced_regions));
183 	for (h = 0; h < rh->nr_buckets; h++) {
184 		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
185 			BUG_ON(atomic_read(&reg->pending));
186 			mempool_free(reg, rh->region_pool);
187 		}
188 	}
189 
190 	if (rh->log)
191 		dm_destroy_dirty_log(rh->log);
192 	if (rh->region_pool)
193 		mempool_destroy(rh->region_pool);
194 	vfree(rh->buckets);
195 }
196 
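/*
 * Simple multiplicative hash on the region number, masked down to the
 * power-of-two bucket count.
 */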
197 #define RH_HASH_MULT 2654435387U
198 
199 static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
200 {
201 	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
202 }
203 
204 static struct region *__rh_lookup(struct region_hash *rh, region_t region)
205 {
206 	struct region *reg;
207 
208 	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
209 		if (reg->key == region)
210 			return reg;
211 
212 	return NULL;
213 }
214 
215 static void __rh_insert(struct region_hash *rh, struct region *reg)
216 {
217 	unsigned int h = rh_hash(rh, reg->key);
218 	list_add(&reg->hash_list, rh->buckets + h);
219 }
220 
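/*
 * Slow path for __rh_find().  Called with hash_lock held for read;
 * drops it while allocating from the mempool, retakes it for write to
 * insert the new region (coping with a concurrent insertion of the
 * same region) and returns with the read lock held again.
 */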
221 static struct region *__rh_alloc(struct region_hash *rh, region_t region)
222 {
223 	struct region *reg, *nreg;
224 
225 	read_unlock(&rh->hash_lock);
226 	nreg = mempool_alloc(rh->region_pool, GFP_NOIO);
227 	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
228 		RH_CLEAN : RH_NOSYNC;
229 	nreg->rh = rh;
230 	nreg->key = region;
231 
232 	INIT_LIST_HEAD(&nreg->list);
233 
234 	atomic_set(&nreg->pending, 0);
235 	bio_list_init(&nreg->delayed_bios);
236 	write_lock_irq(&rh->hash_lock);
237 
238 	reg = __rh_lookup(rh, region);
239 	if (reg)
240 		/* we lost the race */
241 		mempool_free(nreg, rh->region_pool);
242 
243 	else {
244 		__rh_insert(rh, nreg);
245 		if (nreg->state == RH_CLEAN) {
246 			spin_lock(&rh->region_lock);
247 			list_add(&nreg->list, &rh->clean_regions);
248 			spin_unlock(&rh->region_lock);
249 		}
250 		reg = nreg;
251 	}
252 	write_unlock_irq(&rh->hash_lock);
253 	read_lock(&rh->hash_lock);
254 
255 	return reg;
256 }
257 
258 static inline struct region *__rh_find(struct region_hash *rh, region_t region)
259 {
260 	struct region *reg;
261 
262 	reg = __rh_lookup(rh, region);
263 	if (!reg)
264 		reg = __rh_alloc(rh, region);
265 
266 	return reg;
267 }
268 
269 static int rh_state(struct region_hash *rh, region_t region, int may_block)
270 {
271 	int r;
272 	struct region *reg;
273 
274 	read_lock(&rh->hash_lock);
275 	reg = __rh_lookup(rh, region);
276 	read_unlock(&rh->hash_lock);
277 
278 	if (reg)
279 		return reg->state;
280 
281 	/*
282 	 * The region wasn't in the hash, so we fall back to the
283 	 * dirty log.
284 	 */
285 	r = rh->log->type->in_sync(rh->log, region, may_block);
286 
287 	/*
288 	 * Any error from the dirty log (e.g. -EWOULDBLOCK) is
289 	 * treated as RH_NOSYNC.
290 	 */
291 	return r == 1 ? RH_CLEAN : RH_NOSYNC;
292 }
293 
294 static inline int rh_in_sync(struct region_hash *rh,
295 			     region_t region, int may_block)
296 {
297 	int state = rh_state(rh, region, may_block);
298 	return state == RH_CLEAN || state == RH_DIRTY;
299 }
300 
301 static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
302 {
303 	struct bio *bio;
304 
305 	while ((bio = bio_list_pop(bio_list))) {
306 		queue_bio(ms, bio, WRITE);
307 	}
308 }
309 
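/*
 * Called from kmirrord.  Splices the clean and recovered lists out of
 * the region hash under the locks, then finishes them off: clean
 * regions are cleared in the log and freed; recovered regions also
 * complete their resync work and have any delayed bios dispatched.
 */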
310 static void rh_update_states(struct region_hash *rh)
311 {
312 	struct region *reg, *next;
313 
314 	LIST_HEAD(clean);
315 	LIST_HEAD(recovered);
316 
317 	/*
318 	 * Quickly grab the lists.
319 	 */
320 	write_lock_irq(&rh->hash_lock);
321 	spin_lock(&rh->region_lock);
322 	if (!list_empty(&rh->clean_regions)) {
323 		list_splice(&rh->clean_regions, &clean);
324 		INIT_LIST_HEAD(&rh->clean_regions);
325 
326 		list_for_each_entry (reg, &clean, list) {
327 			rh->log->type->clear_region(rh->log, reg->key);
328 			list_del(&reg->hash_list);
329 		}
330 	}
331 
332 	if (!list_empty(&rh->recovered_regions)) {
333 		list_splice(&rh->recovered_regions, &recovered);
334 		INIT_LIST_HEAD(&rh->recovered_regions);
335 
336 		list_for_each_entry (reg, &recovered, list)
337 			list_del(&reg->hash_list);
338 	}
339 	spin_unlock(&rh->region_lock);
340 	write_unlock_irq(&rh->hash_lock);
341 
342 	/*
343 	 * All the regions on the recovered and clean lists have
344 	 * now been pulled out of the system, so no need to do
345 	 * any more locking.
346 	 */
347 	list_for_each_entry_safe (reg, next, &recovered, list) {
348 		rh->log->type->clear_region(rh->log, reg->key);
349 		rh->log->type->complete_resync_work(rh->log, reg->key, 1);
350 		dispatch_bios(rh->ms, &reg->delayed_bios);
351 		up(&rh->recovery_count);
352 		mempool_free(reg, rh->region_pool);
353 	}
354 
355 	if (!list_empty(&recovered))
356 		rh->log->type->flush(rh->log);
357 
358 	list_for_each_entry_safe (reg, next, &clean, list)
359 		mempool_free(reg, rh->region_pool);
360 }
361 
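/*
 * Account a pending write to a region.  A clean region becomes dirty:
 * it is taken off the clean list and marked in the dirty log.
 */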
362 static void rh_inc(struct region_hash *rh, region_t region)
363 {
364 	struct region *reg;
365 
366 	read_lock(&rh->hash_lock);
367 	reg = __rh_find(rh, region);
368 
369 	spin_lock_irq(&rh->region_lock);
370 	atomic_inc(&reg->pending);
371 
372 	if (reg->state == RH_CLEAN) {
373 		reg->state = RH_DIRTY;
374 		list_del_init(&reg->list);	/* take off the clean list */
375 		spin_unlock_irq(&rh->region_lock);
376 
377 		rh->log->type->mark_region(rh->log, reg->key);
378 	} else
379 		spin_unlock_irq(&rh->region_lock);
380 
381 
382 	read_unlock(&rh->hash_lock);
383 }
384 
385 static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
386 {
387 	struct bio *bio;
388 
389 	for (bio = bios->head; bio; bio = bio->bi_next)
390 		rh_inc(rh, bio_to_region(rh, bio));
391 }
392 
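/*
 * Drop the pending count for a region.  When the last pending io
 * completes, a recovering region moves to the quiesced list, a dirty
 * one becomes clean again, and kmirrord is woken up.
 */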
393 static void rh_dec(struct region_hash *rh, region_t region)
394 {
395 	unsigned long flags;
396 	struct region *reg;
397 	int should_wake = 0;
398 
399 	read_lock(&rh->hash_lock);
400 	reg = __rh_lookup(rh, region);
401 	read_unlock(&rh->hash_lock);
402 
403 	spin_lock_irqsave(&rh->region_lock, flags);
404 	if (atomic_dec_and_test(&reg->pending)) {
405 		/*
406 		 * There is no pending I/O for this region.
407 		 * We can move the region to the corresponding list for the next action.
408 		 * At this point, the region is not yet connected to any list.
409 		 *
410 		 * If the state is RH_NOSYNC, the region should be kept off
411 		 * the clean list.
412 		 * The hash entry for RH_NOSYNC will remain in memory
413 		 * until the region is recovered or the map is reloaded.
414 		 */
415 
416 		/* do nothing for RH_NOSYNC */
417 		if (reg->state == RH_RECOVERING) {
418 			list_add_tail(&reg->list, &rh->quiesced_regions);
419 		} else if (reg->state == RH_DIRTY) {
420 			reg->state = RH_CLEAN;
421 			list_add(&reg->list, &rh->clean_regions);
422 		}
423 		should_wake = 1;
424 	}
425 	spin_unlock_irqrestore(&rh->region_lock, flags);
426 
427 	if (should_wake)
428 		wake();
429 }
430 
431 /*
432  * Starts quiescing a region in preparation for recovery.
433  */
434 static int __rh_recovery_prepare(struct region_hash *rh)
435 {
436 	int r;
437 	struct region *reg;
438 	region_t region;
439 
440 	/*
441 	 * Ask the dirty log what's next.
442 	 */
443 	r = rh->log->type->get_resync_work(rh->log, &region);
444 	if (r <= 0)
445 		return r;
446 
447 	/*
448 	 * Get this region, and start it quiescing by setting the
449 	 * recovering flag.
450 	 */
451 	read_lock(&rh->hash_lock);
452 	reg = __rh_find(rh, region);
453 	read_unlock(&rh->hash_lock);
454 
455 	spin_lock_irq(&rh->region_lock);
456 	reg->state = RH_RECOVERING;
457 
458 	/* Already quiesced ? */
459 	if (atomic_read(&reg->pending))
460 		list_del_init(&reg->list);
461 
462 	else {
463 		list_del_init(&reg->list);
464 		list_add(&reg->list, &rh->quiesced_regions);
465 	}
466 	spin_unlock_irq(&rh->region_lock);
467 
468 	return 1;
469 }
470 
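/*
 * Keep quiescing regions for recovery until the dirty log runs out of
 * resync work or MAX_RECOVERY regions are already in flight (enforced
 * via the recovery_count semaphore).
 */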
471 static void rh_recovery_prepare(struct region_hash *rh)
472 {
473 	while (!down_trylock(&rh->recovery_count))
474 		if (__rh_recovery_prepare(rh) <= 0) {
475 			up(&rh->recovery_count);
476 			break;
477 		}
478 }
479 
480 /*
481  * Returns a quiesced region, if one is available.
482  */
483 static struct region *rh_recovery_start(struct region_hash *rh)
484 {
485 	struct region *reg = NULL;
486 
487 	spin_lock_irq(&rh->region_lock);
488 	if (!list_empty(&rh->quiesced_regions)) {
489 		reg = list_entry(rh->quiesced_regions.next,
490 				 struct region, list);
491 		list_del_init(&reg->list);	/* remove from the quiesced list */
492 	}
493 	spin_unlock_irq(&rh->region_lock);
494 
495 	return reg;
496 }
497 
498 /* FIXME: success ignored for now */
499 static void rh_recovery_end(struct region *reg, int success)
500 {
501 	struct region_hash *rh = reg->rh;
502 
503 	spin_lock_irq(&rh->region_lock);
504 	list_add(&reg->list, &reg->rh->recovered_regions);
505 	spin_unlock_irq(&rh->region_lock);
506 
507 	wake();
508 }
509 
510 static void rh_flush(struct region_hash *rh)
511 {
512 	rh->log->type->flush(rh->log);
513 }
514 
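/*
 * Park a write on its (recovering) region; it will be dispatched by
 * rh_update_states() once the region has been recovered.
 */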
515 static void rh_delay(struct region_hash *rh, struct bio *bio)
516 {
517 	struct region *reg;
518 
519 	read_lock(&rh->hash_lock);
520 	reg = __rh_find(rh, bio_to_region(rh, bio));
521 	bio_list_add(&reg->delayed_bios, bio);
522 	read_unlock(&rh->hash_lock);
523 }
524 
525 static void rh_stop_recovery(struct region_hash *rh)
526 {
527 	int i;
528 
529 	/* wait for any recovering regions */
530 	for (i = 0; i < MAX_RECOVERY; i++)
531 		down(&rh->recovery_count);
532 }
533 
534 static void rh_start_recovery(struct region_hash *rh)
535 {
536 	int i;
537 
538 	for (i = 0; i < MAX_RECOVERY; i++)
539 		up(&rh->recovery_count);
540 
541 	wake();
542 }
543 
544 /*-----------------------------------------------------------------
545  * Mirror set structures.
546  *---------------------------------------------------------------*/
547 struct mirror {
548 	atomic_t error_count;
549 	struct dm_dev *dev;
550 	sector_t offset;
551 };
552 
553 struct mirror_set {
554 	struct dm_target *ti;
555 	struct list_head list;
556 	struct region_hash rh;
557 	struct kcopyd_client *kcopyd_client;
558 
559 	spinlock_t lock;	/* protects the next two lists */
560 	struct bio_list reads;
561 	struct bio_list writes;
562 
563 	/* recovery */
564 	region_t nr_regions;
565 	int in_sync;
566 
567 	struct mirror *default_mirror;	/* Default mirror */
568 
569 	unsigned int nr_mirrors;
570 	struct mirror mirror[0];
571 };
572 
573 /*
574  * Every mirror should look like this one: it is the source for reads and for recovery.
575  */
576 #define DEFAULT_MIRROR 0
577 
578 /*
579  * This is yucky.  We squirrel the mirror_set struct away inside
580  * bi_next for write bios.  This is safe since the bio
581  * doesn't get submitted to the lower levels of the block layer.
582  */
583 static struct mirror_set *bio_get_ms(struct bio *bio)
584 {
585 	return (struct mirror_set *) bio->bi_next;
586 }
587 
588 static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
589 {
590 	bio->bi_next = (struct bio *) ms;
591 }
592 
593 /*-----------------------------------------------------------------
594  * Recovery.
595  *
596  * When a mirror is first activated we may find that some regions
597  * are in the no-sync state.  We have to recover these by
598  * recopying from the default mirror to all the others.
599  *---------------------------------------------------------------*/
600 static void recovery_complete(int read_err, unsigned int write_err,
601 			      void *context)
602 {
603 	struct region *reg = (struct region *) context;
604 
605 	/* FIXME: better error handling */
606 	rh_recovery_end(reg, read_err || write_err);
607 }
608 
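/*
 * Ask kcopyd to copy one region from the default mirror to every other
 * mirror.  recovery_complete() runs when the copy finishes.
 */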
609 static int recover(struct mirror_set *ms, struct region *reg)
610 {
611 	int r;
612 	unsigned int i;
613 	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
614 	struct mirror *m;
615 	unsigned long flags = 0;
616 
617 	/* fill in the source */
618 	m = ms->default_mirror;
619 	from.bdev = m->dev->bdev;
620 	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
621 	if (reg->key == (ms->nr_regions - 1)) {
622 		/*
623 		 * The final region may be smaller than
624 		 * region_size.
625 		 */
626 		from.count = ms->ti->len & (reg->rh->region_size - 1);
627 		if (!from.count)
628 			from.count = reg->rh->region_size;
629 	} else
630 		from.count = reg->rh->region_size;
631 
632 	/* fill in the destinations */
633 	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
634 		if (&ms->mirror[i] == ms->default_mirror)
635 			continue;
636 
637 		m = ms->mirror + i;
638 		dest->bdev = m->dev->bdev;
639 		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
640 		dest->count = from.count;
641 		dest++;
642 	}
643 
644 	/* hand to kcopyd */
645 	set_bit(KCOPYD_IGNORE_ERROR, &flags);
646 	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
647 			recovery_complete, reg);
648 
649 	return r;
650 }
651 
652 static void do_recovery(struct mirror_set *ms)
653 {
654 	int r;
655 	struct region *reg;
656 	struct dirty_log *log = ms->rh.log;
657 
658 	/*
659 	 * Start quiescing some regions.
660 	 */
661 	rh_recovery_prepare(&ms->rh);
662 
663 	/*
664 	 * Copy any already quiesced regions.
665 	 */
666 	while ((reg = rh_recovery_start(&ms->rh))) {
667 		r = recover(ms, reg);
668 		if (r)
669 			rh_recovery_end(reg, 0);
670 	}
671 
672 	/*
673 	 * Update the in sync flag.
674 	 */
675 	if (!ms->in_sync &&
676 	    (log->type->get_sync_count(log) == ms->nr_regions)) {
677 		/* the sync is complete */
678 		dm_table_event(ms->ti->table);
679 		ms->in_sync = 1;
680 	}
681 }
682 
683 /*-----------------------------------------------------------------
684  * Reads
685  *---------------------------------------------------------------*/
686 static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
687 {
688 	/* FIXME: add read balancing */
689 	return ms->default_mirror;
690 }
691 
692 /*
693  * Remap a bio to a particular mirror.
694  */
695 static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
696 {
697 	bio->bi_bdev = m->dev->bdev;
698 	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
699 }
700 
701 static void do_reads(struct mirror_set *ms, struct bio_list *reads)
702 {
703 	region_t region;
704 	struct bio *bio;
705 	struct mirror *m;
706 
707 	while ((bio = bio_list_pop(reads))) {
708 		region = bio_to_region(&ms->rh, bio);
709 
710 		/*
711 		 * We can only read balance if the region is in sync.
712 		 */
713 		if (rh_in_sync(&ms->rh, region, 0))
714 			m = choose_mirror(ms, bio->bi_sector);
715 		else
716 			m = ms->default_mirror;
717 
718 		map_bio(ms, m, bio);
719 		generic_make_request(bio);
720 	}
721 }
722 
723 /*-----------------------------------------------------------------
724  * Writes.
725  *
726  * We do different things with the write io depending on the
727  * state of the region that it's in:
728  *
729  * SYNC: 	increment pending, use dm-io to write to *all* mirrors
730  * RECOVERING:	delay the io until recovery completes
731  * NOSYNC:	increment pending, just write to the default mirror
732  *---------------------------------------------------------------*/
733 static void write_callback(unsigned long error, void *context)
734 {
735 	unsigned int i;
736 	int uptodate = 1;
737 	struct bio *bio = (struct bio *) context;
738 	struct mirror_set *ms;
739 
740 	ms = bio_get_ms(bio);
741 	bio_set_ms(bio, NULL);
742 
743 	/*
744 	 * NOTE: We don't decrement the pending count here,
745 	 * instead it is done by the target's end_io function.
746 	 * This way we handle both writes to SYNC and NOSYNC
747 	 * regions with the same code.
748 	 */
749 
750 	if (error) {
751 		/*
752 		 * only error the io if all mirrors failed.
753 		 * FIXME: bogus
754 		 */
755 		uptodate = 0;
756 		for (i = 0; i < ms->nr_mirrors; i++)
757 			if (!test_bit(i, &error)) {
758 				uptodate = 1;
759 				break;
760 			}
761 	}
762 	bio_endio(bio, bio->bi_size, 0);
763 }
764 
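/*
 * Mirror a write: build an io_region per mirror and submit the bio's
 * pages to all of them with one asynchronous dm-io request.
 */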
765 static void do_write(struct mirror_set *ms, struct bio *bio)
766 {
767 	unsigned int i;
768 	struct io_region io[KCOPYD_MAX_REGIONS+1];
769 	struct mirror *m;
770 
771 	for (i = 0; i < ms->nr_mirrors; i++) {
772 		m = ms->mirror + i;
773 
774 		io[i].bdev = m->dev->bdev;
775 		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
776 		io[i].count = bio->bi_size >> 9;
777 	}
778 
779 	bio_set_ms(bio, ms);
780 	dm_io_async_bvec(ms->nr_mirrors, io, WRITE,
781 			 bio->bi_io_vec + bio->bi_idx,
782 			 write_callback, bio);
783 }
784 
785 static void do_writes(struct mirror_set *ms, struct bio_list *writes)
786 {
787 	int state;
788 	struct bio *bio;
789 	struct bio_list sync, nosync, recover, *this_list = NULL;
790 
791 	if (!writes->head)
792 		return;
793 
794 	/*
795 	 * Classify each write.
796 	 */
797 	bio_list_init(&sync);
798 	bio_list_init(&nosync);
799 	bio_list_init(&recover);
800 
801 	while ((bio = bio_list_pop(writes))) {
802 		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
803 		switch (state) {
804 		case RH_CLEAN:
805 		case RH_DIRTY:
806 			this_list = &sync;
807 			break;
808 
809 		case RH_NOSYNC:
810 			this_list = &nosync;
811 			break;
812 
813 		case RH_RECOVERING:
814 			this_list = &recover;
815 			break;
816 		}
817 
818 		bio_list_add(this_list, bio);
819 	}
820 
821 	/*
822 	 * Increment the pending counts for any regions that will
823 	 * be written to (writes to recover regions are going to
824 	 * be delayed).
825 	 */
826 	rh_inc_pending(&ms->rh, &sync);
827 	rh_inc_pending(&ms->rh, &nosync);
828 	rh_flush(&ms->rh);
829 
830 	/*
831 	 * Dispatch io.
832 	 */
833 	while ((bio = bio_list_pop(&sync)))
834 		do_write(ms, bio);
835 
836 	while ((bio = bio_list_pop(&recover)))
837 		rh_delay(&ms->rh, bio);
838 
839 	while ((bio = bio_list_pop(&nosync))) {
840 		map_bio(ms, ms->default_mirror, bio);
841 		generic_make_request(bio);
842 	}
843 }
844 
845 /*-----------------------------------------------------------------
846  * kmirrord
847  *---------------------------------------------------------------*/
848 static LIST_HEAD(_mirror_sets);
849 static DECLARE_RWSEM(_mirror_sets_lock);
850 
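/*
 * One kmirrord pass over a mirror set: snapshot the queued bios under
 * the lock, then update region states, push recovery along and
 * dispatch the reads and writes.
 */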
851 static void do_mirror(struct mirror_set *ms)
852 {
853 	struct bio_list reads, writes;
854 
855 	spin_lock(&ms->lock);
856 	reads = ms->reads;
857 	writes = ms->writes;
858 	bio_list_init(&ms->reads);
859 	bio_list_init(&ms->writes);
860 	spin_unlock(&ms->lock);
861 
862 	rh_update_states(&ms->rh);
863 	do_recovery(ms);
864 	do_reads(ms, &reads);
865 	do_writes(ms, &writes);
866 }
867 
868 static void do_work(void *ignored)
869 {
870 	struct mirror_set *ms;
871 
872 	down_read(&_mirror_sets_lock);
873 	list_for_each_entry (ms, &_mirror_sets, list)
874 		do_mirror(ms);
875 	up_read(&_mirror_sets_lock);
876 }
877 
878 /*-----------------------------------------------------------------
879  * Target functions
880  *---------------------------------------------------------------*/
881 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
882 					uint32_t region_size,
883 					struct dm_target *ti,
884 					struct dirty_log *dl)
885 {
886 	size_t len;
887 	struct mirror_set *ms = NULL;
888 
889 	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
890 		return NULL;
891 
892 	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
893 
894 	ms = kmalloc(len, GFP_KERNEL);
895 	if (!ms) {
896 		ti->error = "dm-mirror: Cannot allocate mirror context";
897 		return NULL;
898 	}
899 
900 	memset(ms, 0, len);
901 	spin_lock_init(&ms->lock);
902 
903 	ms->ti = ti;
904 	ms->nr_mirrors = nr_mirrors;
905 	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
906 	ms->in_sync = 0;
907 	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
908 
909 	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
910 		ti->error = "dm-mirror: Error creating dirty region hash";
911 		kfree(ms);
912 		return NULL;
913 	}
914 
915 	return ms;
916 }
917 
918 static void free_context(struct mirror_set *ms, struct dm_target *ti,
919 			 unsigned int m)
920 {
921 	while (m--)
922 		dm_put_device(ti, ms->mirror[m].dev);
923 
924 	rh_exit(&ms->rh);
925 	kfree(ms);
926 }
927 
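/*
 * The region size must be a power of two, a whole number of pages
 * (expressed in sectors) and no larger than the target.
 */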
928 static inline int _check_region_size(struct dm_target *ti, uint32_t size)
929 {
930 	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
931 		 size > ti->len);
932 }
933 
934 static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
935 		      unsigned int mirror, char **argv)
936 {
937 	unsigned long long offset;
938 
939 	if (sscanf(argv[1], "%llu", &offset) != 1) {
940 		ti->error = "dm-mirror: Invalid offset";
941 		return -EINVAL;
942 	}
943 
944 	if (dm_get_device(ti, argv[0], offset, ti->len,
945 			  dm_table_get_mode(ti->table),
946 			  &ms->mirror[mirror].dev)) {
947 		ti->error = "dm-mirror: Device lookup failure";
948 		return -ENXIO;
949 	}
950 
951 	ms->mirror[mirror].offset = offset;
952 
953 	return 0;
954 }
955 
956 static int add_mirror_set(struct mirror_set *ms)
957 {
958 	down_write(&_mirror_sets_lock);
959 	list_add_tail(&ms->list, &_mirror_sets);
960 	up_write(&_mirror_sets_lock);
961 	wake();
962 
963 	return 0;
964 }
965 
966 static void del_mirror_set(struct mirror_set *ms)
967 {
968 	down_write(&_mirror_sets_lock);
969 	list_del(&ms->list);
970 	up_write(&_mirror_sets_lock);
971 }
972 
973 /*
974  * Create dirty log: log_type #log_params <log_params>
975  */
976 static struct dirty_log *create_dirty_log(struct dm_target *ti,
977 					  unsigned int argc, char **argv,
978 					  unsigned int *args_used)
979 {
980 	unsigned int param_count;
981 	struct dirty_log *dl;
982 
983 	if (argc < 2) {
984 		ti->error = "dm-mirror: Insufficient mirror log arguments";
985 		return NULL;
986 	}
987 
988 	if (sscanf(argv[1], "%u", &param_count) != 1) {
989 		ti->error = "dm-mirror: Invalid mirror log argument count";
990 		return NULL;
991 	}
992 
993 	*args_used = 2 + param_count;
994 
995 	if (argc < *args_used) {
996 		ti->error = "dm-mirror: Insufficient mirror log arguments";
997 		return NULL;
998 	}
999 
1000 	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
1001 	if (!dl) {
1002 		ti->error = "dm-mirror: Error creating mirror dirty log";
1003 		return NULL;
1004 	}
1005 
1006 	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
1007 		ti->error = "dm-mirror: Invalid region size";
1008 		dm_destroy_dirty_log(dl);
1009 		return NULL;
1010 	}
1011 
1012 	return dl;
1013 }
1014 
1015 /*
1016  * Construct a mirror mapping:
1017  *
1018  * log_type #log_params <log_params>
1019  * #mirrors [mirror_path offset]{2,}
1020  *
1021  * log_type is "core" or "disk"
1022  * #log_params is between 1 and 3
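 *
 * An example table line (device names and sizes are illustrative only):
 *   0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0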
1023  */
1024 #define DM_IO_PAGES 64
1025 static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1026 {
1027 	int r;
1028 	unsigned int nr_mirrors, m, args_used;
1029 	struct mirror_set *ms;
1030 	struct dirty_log *dl;
1031 
1032 	dl = create_dirty_log(ti, argc, argv, &args_used);
1033 	if (!dl)
1034 		return -EINVAL;
1035 
1036 	argv += args_used;
1037 	argc -= args_used;
1038 
1039 	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
1040 	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
1041 		ti->error = "dm-mirror: Invalid number of mirrors";
1042 		dm_destroy_dirty_log(dl);
1043 		return -EINVAL;
1044 	}
1045 
1046 	argv++, argc--;
1047 
1048 	if (argc != nr_mirrors * 2) {
1049 		ti->error = "dm-mirror: Wrong number of mirror arguments";
1050 		dm_destroy_dirty_log(dl);
1051 		return -EINVAL;
1052 	}
1053 
1054 	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
1055 	if (!ms) {
1056 		dm_destroy_dirty_log(dl);
1057 		return -ENOMEM;
1058 	}
1059 
1060 	/* Get the mirror parameter sets */
1061 	for (m = 0; m < nr_mirrors; m++) {
1062 		r = get_mirror(ms, ti, m, argv);
1063 		if (r) {
1064 			free_context(ms, ti, m);
1065 			return r;
1066 		}
1067 		argv += 2;
1068 		argc -= 2;
1069 	}
1070 
1071 	ti->private = ms;
1072  	ti->split_io = ms->rh.region_size;
1073 
1074 	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
1075 	if (r) {
1076 		free_context(ms, ti, ms->nr_mirrors);
1077 		return r;
1078 	}
1079 
1080 	add_mirror_set(ms);
1081 	return 0;
1082 }
1083 
1084 static void mirror_dtr(struct dm_target *ti)
1085 {
1086 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1087 
1088 	del_mirror_set(ms);
1089 	kcopyd_client_destroy(ms->kcopyd_client);
1090 	free_context(ms, ti, ms->nr_mirrors);
1091 }
1092 
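/*
 * Queue a bio for kmirrord, waking the daemon only if the relevant
 * list was previously empty.
 */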
1093 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
1094 {
1095 	int should_wake = 0;
1096 	struct bio_list *bl;
1097 
1098 	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
1099 	spin_lock(&ms->lock);
1100 	should_wake = !(bl->head);
1101 	bio_list_add(bl, bio);
1102 	spin_unlock(&ms->lock);
1103 
1104 	if (should_wake)
1105 		wake();
1106 }
1107 
1108 /*
1109  * Mirror mapping function
1110  */
1111 static int mirror_map(struct dm_target *ti, struct bio *bio,
1112 		      union map_info *map_context)
1113 {
1114 	int r, rw = bio_rw(bio);
1115 	struct mirror *m;
1116 	struct mirror_set *ms = ti->private;
1117 
1118 	map_context->ll = bio->bi_sector >> ms->rh.region_shift;
1119 
1120 	if (rw == WRITE) {
1121 		queue_bio(ms, bio, rw);
1122 		return 0;
1123 	}
1124 
1125 	r = ms->rh.log->type->in_sync(ms->rh.log,
1126 				      bio_to_region(&ms->rh, bio), 0);
1127 	if (r < 0 && r != -EWOULDBLOCK)
1128 		return r;
1129 
1130 	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
1131 		r = 0;
1132 
1133 	/*
1134 	 * We don't want to fast track a recovery just for a read
1135 	 * ahead.  So we just let it silently fail.
1136 	 * FIXME: get rid of this.
1137 	 */
1138 	if (!r && rw == READA)
1139 		return -EIO;
1140 
1141 	if (!r) {
1142 		/* Pass this io over to the daemon */
1143 		queue_bio(ms, bio, rw);
1144 		return 0;
1145 	}
1146 
1147 	m = choose_mirror(ms, bio->bi_sector);
1148 	if (!m)
1149 		return -EIO;
1150 
1151 	map_bio(ms, m, bio);
1152 	return 1;
1153 }
1154 
1155 static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1156 			 int error, union map_info *map_context)
1157 {
1158 	int rw = bio_rw(bio);
1159 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1160 	region_t region = map_context->ll;
1161 
1162 	/*
1163 	 * We need to dec pending if this was a write.
1164 	 */
1165 	if (rw == WRITE)
1166 		rh_dec(&ms->rh, region);
1167 
1168 	return 0;
1169 }
1170 
1171 static void mirror_postsuspend(struct dm_target *ti)
1172 {
1173 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1174 	struct dirty_log *log = ms->rh.log;
1175 
1176 	rh_stop_recovery(&ms->rh);
1177 	if (log->type->suspend && log->type->suspend(log))
1178 		/* FIXME: need better error handling */
1179 		DMWARN("log suspend failed");
1180 }
1181 
1182 static void mirror_resume(struct dm_target *ti)
1183 {
1184 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1185 	struct dirty_log *log = ms->rh.log;
1186 	if (log->type->resume && log->type->resume(log))
1187 		/* FIXME: need better error handling */
1188 		DMWARN("log resume failed");
1189 	rh_start_recovery(&ms->rh);
1190 }
1191 
1192 static int mirror_status(struct dm_target *ti, status_type_t type,
1193 			 char *result, unsigned int maxlen)
1194 {
1195 	unsigned int m, sz;
1196 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1197 
1198 	sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1199 
1200 	switch (type) {
1201 	case STATUSTYPE_INFO:
1202 		DMEMIT("%d ", ms->nr_mirrors);
1203 		for (m = 0; m < ms->nr_mirrors; m++)
1204 			DMEMIT("%s ", ms->mirror[m].dev->name);
1205 
1206 		DMEMIT("%llu/%llu",
1207 			(unsigned long long)ms->rh.log->type->
1208 				get_sync_count(ms->rh.log),
1209 			(unsigned long long)ms->nr_regions);
1210 		break;
1211 
1212 	case STATUSTYPE_TABLE:
1213 		DMEMIT("%d ", ms->nr_mirrors);
1214 		for (m = 0; m < ms->nr_mirrors; m++)
1215 			DMEMIT("%s %llu ", ms->mirror[m].dev->name,
1216 				(unsigned long long)ms->mirror[m].offset);
1217 	}
1218 
1219 	return 0;
1220 }
1221 
1222 static struct target_type mirror_target = {
1223 	.name	 = "mirror",
1224 	.version = {1, 0, 1},
1225 	.module	 = THIS_MODULE,
1226 	.ctr	 = mirror_ctr,
1227 	.dtr	 = mirror_dtr,
1228 	.map	 = mirror_map,
1229 	.end_io	 = mirror_end_io,
1230 	.postsuspend = mirror_postsuspend,
1231 	.resume	 = mirror_resume,
1232 	.status	 = mirror_status,
1233 };
1234 
1235 static int __init dm_mirror_init(void)
1236 {
1237 	int r;
1238 
1239 	r = dm_dirty_log_init();
1240 	if (r)
1241 		return r;
1242 
1243 	_kmirrord_wq = create_singlethread_workqueue("kmirrord");
1244 	if (!_kmirrord_wq) {
1245 		DMERR("couldn't start kmirrord");
1246 		dm_dirty_log_exit();
1247 		return -ENOMEM;
1248 	}
1249 	INIT_WORK(&_kmirrord_work, do_work, NULL);
1250 
1251 	r = dm_register_target(&mirror_target);
1252 	if (r < 0) {
1253 		DMERR("%s: Failed to register mirror target",
1254 		      mirror_target.name);
1255 		dm_dirty_log_exit();
1256 		destroy_workqueue(_kmirrord_wq);
1257 	}
1258 
1259 	return r;
1260 }
1261 
1262 static void __exit dm_mirror_exit(void)
1263 {
1264 	int r;
1265 
1266 	r = dm_unregister_target(&mirror_target);
1267 	if (r < 0)
1268 		DMERR("%s: unregister failed %d", mirror_target.name, r);
1269 
1270 	destroy_workqueue(_kmirrord_wq);
1271 	dm_dirty_log_exit();
1272 }
1273 
1274 /* Module hooks */
1275 module_init(dm_mirror_init);
1276 module_exit(dm_mirror_exit);
1277 
1278 MODULE_DESCRIPTION(DM_NAME " mirror target");
1279 MODULE_AUTHOR("Joe Thornber");
1280 MODULE_LICENSE("GPL");
1281