xref: /linux/drivers/md/dm-raid1.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
1 /*
2  * Copyright (C) 2003 Sistina Software Limited.
3  *
4  * This file is released under the GPL.
5  */
6 
7 #include "dm.h"
8 #include "dm-bio-list.h"
9 #include "dm-io.h"
10 #include "dm-log.h"
11 #include "kcopyd.h"
12 
13 #include <linux/ctype.h>
14 #include <linux/init.h>
15 #include <linux/mempool.h>
16 #include <linux/module.h>
17 #include <linux/pagemap.h>
18 #include <linux/slab.h>
19 #include <linux/time.h>
20 #include <linux/vmalloc.h>
21 #include <linux/workqueue.h>
22 
23 #define DM_MSG_PREFIX "raid1"
24 #define DM_IO_PAGES 64
25 
26 #define DM_RAID1_HANDLE_ERRORS 0x01
27 #define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
28 
29 static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
30 
31 /*-----------------------------------------------------------------
32  * Region hash
33  *
34  * The mirror splits itself up into discrete regions.  Each
35  * region can be in one of three states: clean, dirty, nosync
36  * (or transiently recovering).  Clean regions need not be hashed.
37  *
38  * In addition to being present in the hash table a region _may_
39  * be present on one of three lists.
40  *
41  *   clean_regions: Regions on this list have no io pending to
42  *   them; they are in sync, we are no longer interested in them,
43  *   and they are dull.  rh_update_states() will remove them from
44  *   the hash table.
45  *
46  *   quiesced_regions: These regions have been spun down, ready
47  *   for recovery.  rh_recovery_start() will remove regions from
48  *   this list and hand them to kmirrord, which will schedule the
49  *   recovery io with kcopyd.
50  *
51  *   recovered_regions: Regions that kcopyd has successfully
52  *   recovered.  rh_update_states() will now schedule any delayed
53  *   io, up the recovery_count, and remove the region from the
54  *   hash.
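 *
 *   failed_recovered_regions: Regions that kcopyd failed to
 *   recover.  rh_update_states() dispatches their delayed io as
 *   well, but marks them in sync only when the handle_errors
 *   feature is not in use.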
55  *
56  * There are 2 locks:
57  *   A rw spin lock 'hash_lock' protects just the hash table,
58  *   this is never held in write mode from interrupt context,
59  *   which I believe means that we only have to disable irqs when
60  *   doing a write lock.
61  *
62  *   An ordinary spin lock 'region_lock' that protects the region
63  *   lists in the region_hash, along with the 'state', 'list' and
64  *   'delayed_bios' fields of the regions.  This is used from irq
65  *   context, so all other uses will have to suspend local irqs.
66  *---------------------------------------------------------------*/
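/*
 * For example, the pattern used when marking a region dirty (see
 * rh_inc() below) nests the two locks like this, disabling irqs only
 * while region_lock is held:
 *
 *	read_lock(&rh->hash_lock);
 *	reg = __rh_find(rh, region);
 *	spin_lock_irq(&rh->region_lock);
 *	...				update reg->state and reg->list
 *	spin_unlock_irq(&rh->region_lock);
 *	read_unlock(&rh->hash_lock);
 */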
67 struct mirror_set;
68 struct region_hash {
69 	struct mirror_set *ms;
70 	uint32_t region_size;
71 	unsigned region_shift;
72 
73 	/* holds persistent region state */
74 	struct dirty_log *log;
75 
76 	/* hash table */
77 	rwlock_t hash_lock;
78 	mempool_t *region_pool;
79 	unsigned int mask;
80 	unsigned int nr_buckets;
81 	struct list_head *buckets;
82 
83 	spinlock_t region_lock;
84 	atomic_t recovery_in_flight;
85 	struct semaphore recovery_count;
86 	struct list_head clean_regions;
87 	struct list_head quiesced_regions;
88 	struct list_head recovered_regions;
89 	struct list_head failed_recovered_regions;
90 };
91 
92 enum {
93 	RH_CLEAN,
94 	RH_DIRTY,
95 	RH_NOSYNC,
96 	RH_RECOVERING
97 };
98 
99 struct region {
100 	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
101 	region_t key;
102 	int state;
103 
104 	struct list_head hash_list;
105 	struct list_head list;
106 
107 	atomic_t pending;
108 	struct bio_list delayed_bios;
109 };
110 
111 
112 /*-----------------------------------------------------------------
113  * Mirror set structures.
114  *---------------------------------------------------------------*/
115 struct mirror {
116 	atomic_t error_count;
117 	struct dm_dev *dev;
118 	sector_t offset;
119 };
120 
121 struct mirror_set {
122 	struct dm_target *ti;
123 	struct list_head list;
124 	struct region_hash rh;
125 	struct kcopyd_client *kcopyd_client;
126 	uint64_t features;
127 
128 	spinlock_t lock;	/* protects the next two lists */
129 	struct bio_list reads;
130 	struct bio_list writes;
131 
132 	struct dm_io_client *io_client;
133 
134 	/* recovery */
135 	region_t nr_regions;
136 	int in_sync;
137 	int log_failure;
138 
139 	struct mirror *default_mirror;	/* Default mirror */
140 
141 	struct workqueue_struct *kmirrord_wq;
142 	struct work_struct kmirrord_work;
143 
144 	unsigned int nr_mirrors;
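	/*
	 * Trailing variable-sized array (one entry per mirror leg);
	 * alloc_context() sizes the allocation as sizeof(*ms) plus
	 * nr_mirrors * sizeof(struct mirror).
	 */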
145 	struct mirror mirror[0];
146 };
147 
148 /*
149  * Conversion fns
150  */
151 static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
152 {
153 	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
154 }
155 
156 static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
157 {
158 	return region << rh->region_shift;
159 }
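
/*
 * region_size is a power of two, so both conversions are plain shifts.
 * For example, with a 1024-sector region_size (region_shift == 10), a
 * bio at sector 5000 of a target that begins at sector 0 falls in
 * region 5000 >> 10 == 4, which starts 4 << 10 == 4096 sectors into
 * the target.
 */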
160 
161 static void wake(struct mirror_set *ms)
162 {
163 	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
164 }
165 
166 /* FIXME move this */
167 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
168 
169 #define MIN_REGIONS 64
170 #define MAX_RECOVERY 1
171 static int rh_init(struct region_hash *rh, struct mirror_set *ms,
172 		   struct dirty_log *log, uint32_t region_size,
173 		   region_t nr_regions)
174 {
175 	unsigned int nr_buckets, max_buckets;
176 	size_t i;
177 
178 	/*
179 	 * Calculate a suitable number of buckets for our hash
180 	 * table.
181 	 */
182 	max_buckets = nr_regions >> 6;
183 	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
184 		;
185 	nr_buckets >>= 1;
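	/*
	 * This leaves the largest power of two strictly below
	 * nr_regions / 64, with a floor of 64 buckets.  For example,
	 * 10000 regions give max_buckets = 156; the loop stops at 256
	 * and the final shift leaves 128 buckets, i.e. roughly 78
	 * regions per bucket.
	 */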
186 
187 	rh->ms = ms;
188 	rh->log = log;
189 	rh->region_size = region_size;
190 	rh->region_shift = ffs(region_size) - 1;
191 	rwlock_init(&rh->hash_lock);
192 	rh->mask = nr_buckets - 1;
193 	rh->nr_buckets = nr_buckets;
194 
195 	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
196 	if (!rh->buckets) {
197 		DMERR("unable to allocate region hash memory");
198 		return -ENOMEM;
199 	}
200 
201 	for (i = 0; i < nr_buckets; i++)
202 		INIT_LIST_HEAD(rh->buckets + i);
203 
204 	spin_lock_init(&rh->region_lock);
205 	sema_init(&rh->recovery_count, 0);
206 	atomic_set(&rh->recovery_in_flight, 0);
207 	INIT_LIST_HEAD(&rh->clean_regions);
208 	INIT_LIST_HEAD(&rh->quiesced_regions);
209 	INIT_LIST_HEAD(&rh->recovered_regions);
210 	INIT_LIST_HEAD(&rh->failed_recovered_regions);
211 
212 	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
213 						      sizeof(struct region));
214 	if (!rh->region_pool) {
215 		vfree(rh->buckets);
216 		rh->buckets = NULL;
217 		return -ENOMEM;
218 	}
219 
220 	return 0;
221 }
222 
223 static void rh_exit(struct region_hash *rh)
224 {
225 	unsigned int h;
226 	struct region *reg, *nreg;
227 
228 	BUG_ON(!list_empty(&rh->quiesced_regions));
229 	for (h = 0; h < rh->nr_buckets; h++) {
230 		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
231 			BUG_ON(atomic_read(&reg->pending));
232 			mempool_free(reg, rh->region_pool);
233 		}
234 	}
235 
236 	if (rh->log)
237 		dm_destroy_dirty_log(rh->log);
238 	if (rh->region_pool)
239 		mempool_destroy(rh->region_pool);
240 	vfree(rh->buckets);
241 }
242 
243 #define RH_HASH_MULT 2654435387U
244 
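/*
 * Multiplicative hashing: the region number is multiplied by a large
 * odd constant and the low-order bits are discarded (>> 12) before
 * masking down to the bucket count, so runs of consecutive region
 * numbers are scattered across the buckets.
 */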
245 static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
246 {
247 	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
248 }
249 
250 static struct region *__rh_lookup(struct region_hash *rh, region_t region)
251 {
252 	struct region *reg;
253 
254 	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
255 		if (reg->key == region)
256 			return reg;
257 
258 	return NULL;
259 }
260 
261 static void __rh_insert(struct region_hash *rh, struct region *reg)
262 {
263 	unsigned int h = rh_hash(rh, reg->key);
264 	list_add(&reg->hash_list, rh->buckets + h);
265 }
266 
267 static struct region *__rh_alloc(struct region_hash *rh, region_t region)
268 {
269 	struct region *reg, *nreg;
270 
271 	read_unlock(&rh->hash_lock);
272 	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
273 	if (unlikely(!nreg))
274 		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
275 	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
276 		RH_CLEAN : RH_NOSYNC;
277 	nreg->rh = rh;
278 	nreg->key = region;
279 
280 	INIT_LIST_HEAD(&nreg->list);
281 
282 	atomic_set(&nreg->pending, 0);
283 	bio_list_init(&nreg->delayed_bios);
284 	write_lock_irq(&rh->hash_lock);
285 
286 	reg = __rh_lookup(rh, region);
287 	if (reg)
288 		/* we lost the race */
289 		mempool_free(nreg, rh->region_pool);
290 
291 	else {
292 		__rh_insert(rh, nreg);
293 		if (nreg->state == RH_CLEAN) {
294 			spin_lock(&rh->region_lock);
295 			list_add(&nreg->list, &rh->clean_regions);
296 			spin_unlock(&rh->region_lock);
297 		}
298 		reg = nreg;
299 	}
300 	write_unlock_irq(&rh->hash_lock);
301 	read_lock(&rh->hash_lock);
302 
303 	return reg;
304 }
305 
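/*
 * __rh_find() is called with hash_lock held for read.  If the region
 * is missing, __rh_alloc() above drops that read lock, allocates, and
 * repeats the lookup under the write lock, so the region returned may
 * be one inserted by a racing caller rather than the one we allocated.
 */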
306 static inline struct region *__rh_find(struct region_hash *rh, region_t region)
307 {
308 	struct region *reg;
309 
310 	reg = __rh_lookup(rh, region);
311 	if (!reg)
312 		reg = __rh_alloc(rh, region);
313 
314 	return reg;
315 }
316 
317 static int rh_state(struct region_hash *rh, region_t region, int may_block)
318 {
319 	int r;
320 	struct region *reg;
321 
322 	read_lock(&rh->hash_lock);
323 	reg = __rh_lookup(rh, region);
324 	read_unlock(&rh->hash_lock);
325 
326 	if (reg)
327 		return reg->state;
328 
329 	/*
330 	 * The region wasn't in the hash, so we fall back to the
331 	 * dirty log.
332 	 */
333 	r = rh->log->type->in_sync(rh->log, region, may_block);
334 
335 	/*
336 	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
337 	 * taken as a RH_NOSYNC
338 	 */
339 	return r == 1 ? RH_CLEAN : RH_NOSYNC;
340 }
341 
342 static inline int rh_in_sync(struct region_hash *rh,
343 			     region_t region, int may_block)
344 {
345 	int state = rh_state(rh, region, may_block);
346 	return state == RH_CLEAN || state == RH_DIRTY;
347 }
348 
349 static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
350 {
351 	struct bio *bio;
352 
353 	while ((bio = bio_list_pop(bio_list))) {
354 		queue_bio(ms, bio, WRITE);
355 	}
356 }
357 
358 static void complete_resync_work(struct region *reg, int success)
359 {
360 	struct region_hash *rh = reg->rh;
361 
362 	rh->log->type->set_region_sync(rh->log, reg->key, success);
363 	dispatch_bios(rh->ms, &reg->delayed_bios);
364 	if (atomic_dec_and_test(&rh->recovery_in_flight))
365 		wake_up_all(&_kmirrord_recovery_stopped);
366 	up(&rh->recovery_count);
367 }
368 
369 static void rh_update_states(struct region_hash *rh)
370 {
371 	struct region *reg, *next;
372 
373 	LIST_HEAD(clean);
374 	LIST_HEAD(recovered);
375 	LIST_HEAD(failed_recovered);
376 
377 	/*
378 	 * Quickly grab the lists.
379 	 */
380 	write_lock_irq(&rh->hash_lock);
381 	spin_lock(&rh->region_lock);
382 	if (!list_empty(&rh->clean_regions)) {
383 		list_splice(&rh->clean_regions, &clean);
384 		INIT_LIST_HEAD(&rh->clean_regions);
385 
386 		list_for_each_entry(reg, &clean, list)
387 			list_del(&reg->hash_list);
388 	}
389 
390 	if (!list_empty(&rh->recovered_regions)) {
391 		list_splice(&rh->recovered_regions, &recovered);
392 		INIT_LIST_HEAD(&rh->recovered_regions);
393 
394 		list_for_each_entry (reg, &recovered, list)
395 			list_del(&reg->hash_list);
396 	}
397 
398 	if (!list_empty(&rh->failed_recovered_regions)) {
399 		list_splice(&rh->failed_recovered_regions, &failed_recovered);
400 		INIT_LIST_HEAD(&rh->failed_recovered_regions);
401 
402 		list_for_each_entry(reg, &failed_recovered, list)
403 			list_del(&reg->hash_list);
404 	}
405 
406 	spin_unlock(&rh->region_lock);
407 	write_unlock_irq(&rh->hash_lock);
408 
409 	/*
410 	 * All the regions on the recovered, failed_recovered and
411 	 * clean lists have now been pulled out of the system, so
412 	 * there is no need to do any more locking.
413 	 */
414 	list_for_each_entry_safe (reg, next, &recovered, list) {
415 		rh->log->type->clear_region(rh->log, reg->key);
416 		complete_resync_work(reg, 1);
417 		mempool_free(reg, rh->region_pool);
418 	}
419 
420 	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
421 		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
422 		mempool_free(reg, rh->region_pool);
423 	}
424 
425 	list_for_each_entry_safe(reg, next, &clean, list) {
426 		rh->log->type->clear_region(rh->log, reg->key);
427 		mempool_free(reg, rh->region_pool);
428 	}
429 
430 	rh->log->type->flush(rh->log);
431 }
432 
433 static void rh_inc(struct region_hash *rh, region_t region)
434 {
435 	struct region *reg;
436 
437 	read_lock(&rh->hash_lock);
438 	reg = __rh_find(rh, region);
439 
440 	spin_lock_irq(&rh->region_lock);
441 	atomic_inc(&reg->pending);
442 
443 	if (reg->state == RH_CLEAN) {
444 		reg->state = RH_DIRTY;
445 		list_del_init(&reg->list);	/* take off the clean list */
446 		spin_unlock_irq(&rh->region_lock);
447 
448 		rh->log->type->mark_region(rh->log, reg->key);
449 	} else
450 		spin_unlock_irq(&rh->region_lock);
451 
452 
453 	read_unlock(&rh->hash_lock);
454 }
455 
456 static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
457 {
458 	struct bio *bio;
459 
460 	for (bio = bios->head; bio; bio = bio->bi_next)
461 		rh_inc(rh, bio_to_region(rh, bio));
462 }
463 
464 static void rh_dec(struct region_hash *rh, region_t region)
465 {
466 	unsigned long flags;
467 	struct region *reg;
468 	int should_wake = 0;
469 
470 	read_lock(&rh->hash_lock);
471 	reg = __rh_lookup(rh, region);
472 	read_unlock(&rh->hash_lock);
473 
474 	spin_lock_irqsave(&rh->region_lock, flags);
475 	if (atomic_dec_and_test(&reg->pending)) {
476 		/*
477 		 * There is no pending I/O for this region.
478 		 * We can move the region to the corresponding list for the
479 		 * next action.  At this point it is not yet on any list.
480 		 *
481 		 * If the state is RH_NOSYNC, the region should be kept off
482 		 * the clean list.
483 		 * The hash entry for RH_NOSYNC will remain in memory
484 		 * until the region is recovered or the map is reloaded.
485 		 */
486 
487 		/* do nothing for RH_NOSYNC */
488 		if (reg->state == RH_RECOVERING) {
489 			list_add_tail(&reg->list, &rh->quiesced_regions);
490 		} else if (reg->state == RH_DIRTY) {
491 			reg->state = RH_CLEAN;
492 			list_add(&reg->list, &rh->clean_regions);
493 		}
494 		should_wake = 1;
495 	}
496 	spin_unlock_irqrestore(&rh->region_lock, flags);
497 
498 	if (should_wake)
499 		wake(rh->ms);
500 }
501 
502 /*
503  * Starts quiescing a region in preparation for recovery.
504  */
505 static int __rh_recovery_prepare(struct region_hash *rh)
506 {
507 	int r;
508 	struct region *reg;
509 	region_t region;
510 
511 	/*
512 	 * Ask the dirty log what's next.
513 	 */
514 	r = rh->log->type->get_resync_work(rh->log, &region);
515 	if (r <= 0)
516 		return r;
517 
518 	/*
519 	 * Get this region, and start it quiescing by setting the
520 	 * recovering flag.
521 	 */
522 	read_lock(&rh->hash_lock);
523 	reg = __rh_find(rh, region);
524 	read_unlock(&rh->hash_lock);
525 
526 	spin_lock_irq(&rh->region_lock);
527 	reg->state = RH_RECOVERING;
528 
529 	/* Already quiesced ? */
530 	if (atomic_read(&reg->pending))
531 		list_del_init(&reg->list);
532 	else
533 		list_move(&reg->list, &rh->quiesced_regions);
534 
535 	spin_unlock_irq(&rh->region_lock);
536 
537 	return 1;
538 }
539 
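/*
 * Quiesce as many regions as the recovery_count semaphore allows.  The
 * extra recovery_in_flight reference taken below keeps the counter from
 * dropping to zero while work is still being handed out, so a concurrent
 * mirror_postsuspend() cannot conclude that recovery has finished early.
 */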
540 static void rh_recovery_prepare(struct region_hash *rh)
541 {
542 	/* Extra reference to avoid race with rh_stop_recovery */
543 	atomic_inc(&rh->recovery_in_flight);
544 
545 	while (!down_trylock(&rh->recovery_count)) {
546 		atomic_inc(&rh->recovery_in_flight);
547 		if (__rh_recovery_prepare(rh) <= 0) {
548 			atomic_dec(&rh->recovery_in_flight);
549 			up(&rh->recovery_count);
550 			break;
551 		}
552 	}
553 
554 	/* Drop the extra reference */
555 	if (atomic_dec_and_test(&rh->recovery_in_flight))
556 		wake_up_all(&_kmirrord_recovery_stopped);
557 }
558 
559 /*
560  * Returns a quiesced region, if one is available.
561  */
562 static struct region *rh_recovery_start(struct region_hash *rh)
563 {
564 	struct region *reg = NULL;
565 
566 	spin_lock_irq(&rh->region_lock);
567 	if (!list_empty(&rh->quiesced_regions)) {
568 		reg = list_entry(rh->quiesced_regions.next,
569 				 struct region, list);
570 		list_del_init(&reg->list);	/* remove from the quiesced list */
571 	}
572 	spin_unlock_irq(&rh->region_lock);
573 
574 	return reg;
575 }
576 
577 static void rh_recovery_end(struct region *reg, int success)
578 {
579 	struct region_hash *rh = reg->rh;
580 
581 	spin_lock_irq(&rh->region_lock);
582 	if (success)
583 		list_add(&reg->list, &reg->rh->recovered_regions);
584 	else {
585 		reg->state = RH_NOSYNC;
586 		list_add(&reg->list, &reg->rh->failed_recovered_regions);
587 	}
588 	spin_unlock_irq(&rh->region_lock);
589 
590 	wake(rh->ms);
591 }
592 
593 static int rh_flush(struct region_hash *rh)
594 {
595 	return rh->log->type->flush(rh->log);
596 }
597 
598 static void rh_delay(struct region_hash *rh, struct bio *bio)
599 {
600 	struct region *reg;
601 
602 	read_lock(&rh->hash_lock);
603 	reg = __rh_find(rh, bio_to_region(rh, bio));
604 	bio_list_add(&reg->delayed_bios, bio);
605 	read_unlock(&rh->hash_lock);
606 }
607 
608 static void rh_stop_recovery(struct region_hash *rh)
609 {
610 	int i;
611 
612 	/* wait for any recovering regions */
613 	for (i = 0; i < MAX_RECOVERY; i++)
614 		down(&rh->recovery_count);
615 }
616 
617 static void rh_start_recovery(struct region_hash *rh)
618 {
619 	int i;
620 
621 	for (i = 0; i < MAX_RECOVERY; i++)
622 		up(&rh->recovery_count);
623 
624 	wake(rh->ms);
625 }
626 
627 /*
628  * Every mirror should look like this one.
629  */
630 #define DEFAULT_MIRROR 0
631 
632 /*
633  * This is yucky.  We squirrel the mirror_set pointer away inside
634  * bi_next for write bios.  This is safe since the bio doesn't
635  * get submitted to the lower levels of the block layer.
636  */
637 static struct mirror_set *bio_get_ms(struct bio *bio)
638 {
639 	return (struct mirror_set *) bio->bi_next;
640 }
641 
642 static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
643 {
644 	bio->bi_next = (struct bio *) ms;
645 }
646 
647 /*-----------------------------------------------------------------
648  * Recovery.
649  *
650  * When a mirror is first activated we may find that some regions
651  * are in the no-sync state.  We have to recover these by
652  * recopying from the default mirror to all the others.
653  *---------------------------------------------------------------*/
654 static void recovery_complete(int read_err, unsigned int write_err,
655 			      void *context)
656 {
657 	struct region *reg = (struct region *) context;
658 
659 	if (read_err)
660 		/* A read error means the default mirror has failed. */
661 		DMERR_LIMIT("Unable to read primary mirror during recovery");
662 
663 	if (write_err)
664 		DMERR_LIMIT("Write error during recovery (error = 0x%x)",
665 			    write_err);
666 
667 	rh_recovery_end(reg, !(read_err || write_err));
668 }
669 
670 static int recover(struct mirror_set *ms, struct region *reg)
671 {
672 	int r;
673 	unsigned int i;
674 	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
675 	struct mirror *m;
676 	unsigned long flags = 0;
677 
678 	/* fill in the source */
679 	m = ms->default_mirror;
680 	from.bdev = m->dev->bdev;
681 	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
682 	if (reg->key == (ms->nr_regions - 1)) {
683 		/*
684 		 * The final region may be smaller than
685 		 * region_size.
686 		 */
687 		from.count = ms->ti->len & (reg->rh->region_size - 1);
688 		if (!from.count)
689 			from.count = reg->rh->region_size;
690 	} else
691 		from.count = reg->rh->region_size;
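
	/*
	 * For example, with a 1024-sector region_size and a 10000-sector
	 * target, the last region covers only 10000 & 1023 == 784
	 * sectors; when the target length is an exact multiple of the
	 * region size the mask gives 0 and the full region_size is used
	 * instead.
	 */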
692 
693 	/* fill in the destinations */
694 	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
695 		if (&ms->mirror[i] == ms->default_mirror)
696 			continue;
697 
698 		m = ms->mirror + i;
699 		dest->bdev = m->dev->bdev;
700 		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
701 		dest->count = from.count;
702 		dest++;
703 	}
704 
705 	/* hand to kcopyd */
706 	set_bit(KCOPYD_IGNORE_ERROR, &flags);
707 	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
708 			recovery_complete, reg);
709 
710 	return r;
711 }
712 
713 static void do_recovery(struct mirror_set *ms)
714 {
715 	int r;
716 	struct region *reg;
717 	struct dirty_log *log = ms->rh.log;
718 
719 	/*
720 	 * Start quiescing some regions.
721 	 */
722 	rh_recovery_prepare(&ms->rh);
723 
724 	/*
725 	 * Copy any already quiesced regions.
726 	 */
727 	while ((reg = rh_recovery_start(&ms->rh))) {
728 		r = recover(ms, reg);
729 		if (r)
730 			rh_recovery_end(reg, 0);
731 	}
732 
733 	/*
734 	 * Update the in sync flag.
735 	 */
736 	if (!ms->in_sync &&
737 	    (log->type->get_sync_count(log) == ms->nr_regions)) {
738 		/* the sync is complete */
739 		dm_table_event(ms->ti->table);
740 		ms->in_sync = 1;
741 	}
742 }
743 
744 /*-----------------------------------------------------------------
745  * Reads
746  *---------------------------------------------------------------*/
747 static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
748 {
749 	/* FIXME: add read balancing */
750 	return ms->default_mirror;
751 }
752 
753 /*
754  * remap a bio to a particular mirror.
755  */
756 static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
757 {
758 	bio->bi_bdev = m->dev->bdev;
759 	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
760 }
761 
762 static void do_reads(struct mirror_set *ms, struct bio_list *reads)
763 {
764 	region_t region;
765 	struct bio *bio;
766 	struct mirror *m;
767 
768 	while ((bio = bio_list_pop(reads))) {
769 		region = bio_to_region(&ms->rh, bio);
770 
771 		/*
772 		 * We can only read balance if the region is in sync.
773 		 */
774 		if (rh_in_sync(&ms->rh, region, 1))
775 			m = choose_mirror(ms, bio->bi_sector);
776 		else
777 			m = ms->default_mirror;
778 
779 		map_bio(ms, m, bio);
780 		generic_make_request(bio);
781 	}
782 }
783 
784 /*-----------------------------------------------------------------
785  * Writes.
786  *
787  * We do different things with the write io depending on the
788  * state of the region that it's in:
789  *
790  * SYNC: 	increment pending, use kcopyd to write to *all* mirrors
791  * RECOVERING:	delay the io until recovery completes
792  * NOSYNC:	increment pending, just write to the default mirror
793  *---------------------------------------------------------------*/
794 static void write_callback(unsigned long error, void *context)
795 {
796 	unsigned int i;
797 	int uptodate = 1;
798 	struct bio *bio = (struct bio *) context;
799 	struct mirror_set *ms;
800 
801 	ms = bio_get_ms(bio);
802 	bio_set_ms(bio, NULL);
803 
804 	/*
805 	 * NOTE: We don't decrement the pending count here;
806 	 * instead it is done by the target's end_io function.
807 	 * This way we handle both writes to SYNC and NOSYNC
808 	 * regions with the same code.
809 	 */
810 
811 	if (error) {
812 		/*
813 		 * only error the io if all mirrors failed.
814 		 * FIXME: bogus
815 		 */
816 		uptodate = 0;
817 		for (i = 0; i < ms->nr_mirrors; i++)
818 			if (!test_bit(i, &error)) {
819 				uptodate = 1;
820 				break;
821 			}
822 	}
823 	bio_endio(bio, 0);
824 }
825 
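/*
 * The write below is issued through dm-io with DM_IO_BVEC, so the bio's
 * own bio_vec array is handed to every mirror and no data is copied.
 * The request is asynchronous: dm_io() returns once the io is issued and
 * write_callback() above runs when all legs have completed, ending the
 * original bio.
 */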
826 static void do_write(struct mirror_set *ms, struct bio *bio)
827 {
828 	unsigned int i;
829 	struct io_region io[KCOPYD_MAX_REGIONS+1];
830 	struct mirror *m;
831 	struct dm_io_request io_req = {
832 		.bi_rw = WRITE,
833 		.mem.type = DM_IO_BVEC,
834 		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
835 		.notify.fn = write_callback,
836 		.notify.context = bio,
837 		.client = ms->io_client,
838 	};
839 
840 	for (i = 0; i < ms->nr_mirrors; i++) {
841 		m = ms->mirror + i;
842 
843 		io[i].bdev = m->dev->bdev;
844 		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
845 		io[i].count = bio->bi_size >> 9;
846 	}
847 
848 	bio_set_ms(bio, ms);
849 
850 	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
851 }
852 
853 static void do_writes(struct mirror_set *ms, struct bio_list *writes)
854 {
855 	int state;
856 	struct bio *bio;
857 	struct bio_list sync, nosync, recover, *this_list = NULL;
858 
859 	if (!writes->head)
860 		return;
861 
862 	/*
863 	 * Classify each write.
864 	 */
865 	bio_list_init(&sync);
866 	bio_list_init(&nosync);
867 	bio_list_init(&recover);
868 
869 	while ((bio = bio_list_pop(writes))) {
870 		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
871 		switch (state) {
872 		case RH_CLEAN:
873 		case RH_DIRTY:
874 			this_list = &sync;
875 			break;
876 
877 		case RH_NOSYNC:
878 			this_list = &nosync;
879 			break;
880 
881 		case RH_RECOVERING:
882 			this_list = &recover;
883 			break;
884 		}
885 
886 		bio_list_add(this_list, bio);
887 	}
888 
889 	/*
890 	 * Increment the pending counts for any regions that will
891 	 * be written to (writes to recovering regions are going to
892 	 * be delayed).
893 	 */
894 	rh_inc_pending(&ms->rh, &sync);
895 	rh_inc_pending(&ms->rh, &nosync);
896 	ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;
897 
898 	/*
899 	 * Dispatch io.
900 	 */
901 	if (unlikely(ms->log_failure))
902 		while ((bio = bio_list_pop(&sync)))
903 			bio_endio(bio, -EIO);
904 	else while ((bio = bio_list_pop(&sync)))
905 		do_write(ms, bio);
906 
907 	while ((bio = bio_list_pop(&recover)))
908 		rh_delay(&ms->rh, bio);
909 
910 	while ((bio = bio_list_pop(&nosync))) {
911 		map_bio(ms, ms->default_mirror, bio);
912 		generic_make_request(bio);
913 	}
914 }
915 
916 /*-----------------------------------------------------------------
917  * kmirrord
918  *---------------------------------------------------------------*/
919 static void do_mirror(struct work_struct *work)
920 {
921 	struct mirror_set *ms = container_of(work, struct mirror_set,
922 					    kmirrord_work);
923 	struct bio_list reads, writes;
924 
925 	spin_lock(&ms->lock);
926 	reads = ms->reads;
927 	writes = ms->writes;
928 	bio_list_init(&ms->reads);
929 	bio_list_init(&ms->writes);
930 	spin_unlock(&ms->lock);
931 
932 	rh_update_states(&ms->rh);
933 	do_recovery(ms);
934 	do_reads(ms, &reads);
935 	do_writes(ms, &writes);
936 }
937 
938 /*-----------------------------------------------------------------
939  * Target functions
940  *---------------------------------------------------------------*/
941 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
942 					uint32_t region_size,
943 					struct dm_target *ti,
944 					struct dirty_log *dl)
945 {
946 	size_t len;
947 	struct mirror_set *ms = NULL;
948 
949 	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
950 		return NULL;
951 
952 	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
953 
954 	ms = kzalloc(len, GFP_KERNEL);
955 	if (!ms) {
956 		ti->error = "Cannot allocate mirror context";
957 		return NULL;
958 	}
959 
960 	spin_lock_init(&ms->lock);
961 
962 	ms->ti = ti;
963 	ms->nr_mirrors = nr_mirrors;
964 	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
965 	ms->in_sync = 0;
966 	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
967 
968 	ms->io_client = dm_io_client_create(DM_IO_PAGES);
969 	if (IS_ERR(ms->io_client)) {
970 		ti->error = "Error creating dm_io client";
971 		kfree(ms);
972 		return NULL;
973 	}
974 
975 	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
976 		ti->error = "Error creating dirty region hash";
977 		kfree(ms);
978 		return NULL;
979 	}
980 
981 	return ms;
982 }
983 
984 static void free_context(struct mirror_set *ms, struct dm_target *ti,
985 			 unsigned int m)
986 {
987 	while (m--)
988 		dm_put_device(ti, ms->mirror[m].dev);
989 
990 	dm_io_client_destroy(ms->io_client);
991 	rh_exit(&ms->rh);
992 	kfree(ms);
993 }
994 
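/*
 * A usable region size must be a multiple of the page size in sectors,
 * a power of two, and no larger than the target.  With 4 KiB pages
 * (PAGE_SIZE >> 9 == 8) that means, for example, that 8, 1024 or 131072
 * sectors are accepted (assuming they fit in the target) while 12 or 4
 * are rejected.
 */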
995 static inline int _check_region_size(struct dm_target *ti, uint32_t size)
996 {
997 	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
998 		 size > ti->len);
999 }
1000 
1001 static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
1002 		      unsigned int mirror, char **argv)
1003 {
1004 	unsigned long long offset;
1005 
1006 	if (sscanf(argv[1], "%llu", &offset) != 1) {
1007 		ti->error = "Invalid offset";
1008 		return -EINVAL;
1009 	}
1010 
1011 	if (dm_get_device(ti, argv[0], offset, ti->len,
1012 			  dm_table_get_mode(ti->table),
1013 			  &ms->mirror[mirror].dev)) {
1014 		ti->error = "Device lookup failure";
1015 		return -ENXIO;
1016 	}
1017 
1018 	ms->mirror[mirror].offset = offset;
1019 
1020 	return 0;
1021 }
1022 
1023 /*
1024  * Create dirty log: log_type #log_params <log_params>
1025  */
1026 static struct dirty_log *create_dirty_log(struct dm_target *ti,
1027 					  unsigned int argc, char **argv,
1028 					  unsigned int *args_used)
1029 {
1030 	unsigned int param_count;
1031 	struct dirty_log *dl;
1032 
1033 	if (argc < 2) {
1034 		ti->error = "Insufficient mirror log arguments";
1035 		return NULL;
1036 	}
1037 
1038 	if (sscanf(argv[1], "%u", &param_count) != 1) {
1039 		ti->error = "Invalid mirror log argument count";
1040 		return NULL;
1041 	}
1042 
1043 	*args_used = 2 + param_count;
1044 
1045 	if (argc < *args_used) {
1046 		ti->error = "Insufficient mirror log arguments";
1047 		return NULL;
1048 	}
1049 
1050 	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
1051 	if (!dl) {
1052 		ti->error = "Error creating mirror dirty log";
1053 		return NULL;
1054 	}
1055 
1056 	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
1057 		ti->error = "Invalid region size";
1058 		dm_destroy_dirty_log(dl);
1059 		return NULL;
1060 	}
1061 
1062 	return dl;
1063 }
1064 
1065 static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
1066 			  unsigned *args_used)
1067 {
1068 	unsigned num_features;
1069 	struct dm_target *ti = ms->ti;
1070 
1071 	*args_used = 0;
1072 
1073 	if (!argc)
1074 		return 0;
1075 
1076 	if (sscanf(argv[0], "%u", &num_features) != 1) {
1077 		ti->error = "Invalid number of features";
1078 		return -EINVAL;
1079 	}
1080 
1081 	argc--;
1082 	argv++;
1083 	(*args_used)++;
1084 
1085 	if (num_features > argc) {
1086 		ti->error = "Not enough arguments to support feature count";
1087 		return -EINVAL;
1088 	}
1089 
1090 	if (!strcmp("handle_errors", argv[0]))
1091 		ms->features |= DM_RAID1_HANDLE_ERRORS;
1092 	else {
1093 		ti->error = "Unrecognised feature requested";
1094 		return -EINVAL;
1095 	}
1096 
1097 	(*args_used)++;
1098 
1099 	return 0;
1100 }
1101 
1102 /*
1103  * Construct a mirror mapping:
1104  *
1105  * log_type #log_params <log_params>
1106  * #mirrors [mirror_path offset]{2,}
1107  * [#features <features>]
1108  *
1109  * log_type is "core" or "disk"
1110  * #log_params is between 1 and 3
1111  *
1112  * If present, features must be "handle_errors".
1113  */
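/*
 * For example, a two-way mirror over 1 GiB with a core log and
 * 1024-sector regions might use a table line like the following (the
 * device names are purely illustrative):
 *
 *	0 2097152 mirror core 2 1024 nosync 2 /dev/sdb 0 /dev/sdc 0
 */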
1114 static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1115 {
1116 	int r;
1117 	unsigned int nr_mirrors, m, args_used;
1118 	struct mirror_set *ms;
1119 	struct dirty_log *dl;
1120 
1121 	dl = create_dirty_log(ti, argc, argv, &args_used);
1122 	if (!dl)
1123 		return -EINVAL;
1124 
1125 	argv += args_used;
1126 	argc -= args_used;
1127 
1128 	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
1129 	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
1130 		ti->error = "Invalid number of mirrors";
1131 		dm_destroy_dirty_log(dl);
1132 		return -EINVAL;
1133 	}
1134 
1135 	argv++, argc--;
1136 
1137 	if (argc < nr_mirrors * 2) {
1138 		ti->error = "Too few mirror arguments";
1139 		dm_destroy_dirty_log(dl);
1140 		return -EINVAL;
1141 	}
1142 
1143 	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
1144 	if (!ms) {
1145 		dm_destroy_dirty_log(dl);
1146 		return -ENOMEM;
1147 	}
1148 
1149 	/* Get the mirror parameter sets */
1150 	for (m = 0; m < nr_mirrors; m++) {
1151 		r = get_mirror(ms, ti, m, argv);
1152 		if (r) {
1153 			free_context(ms, ti, m);
1154 			return r;
1155 		}
1156 		argv += 2;
1157 		argc -= 2;
1158 	}
1159 
1160 	ti->private = ms;
1161 	ti->split_io = ms->rh.region_size;
1162 
1163 	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
1164 	if (!ms->kmirrord_wq) {
1165 		DMERR("couldn't start kmirrord");
1166 		free_context(ms, ti, m);
1167 		return -ENOMEM;
1168 	}
1169 	INIT_WORK(&ms->kmirrord_work, do_mirror);
1170 
1171 	r = parse_features(ms, argc, argv, &args_used);
1172 	if (r) {
1173 		free_context(ms, ti, ms->nr_mirrors);
1174 		return r;
1175 	}
1176 
1177 	argv += args_used;
1178 	argc -= args_used;
1179 
1180 	/*
1181 	 * Any read-balancing addition depends on the
1182 	 * DM_RAID1_HANDLE_ERRORS flag being present.
1183 	 * This is because the decision to balance depends
1184 	 * on the sync state of a region.  If that flag
1185 	 * is not present, we ignore errors, so the sync
1186 	 * state may be inaccurate.
1187 	 */
1188 
1189 	if (argc) {
1190 		ti->error = "Too many mirror arguments";
1191 		free_context(ms, ti, ms->nr_mirrors);
1192 		return -EINVAL;
1193 	}
1194 
1195 	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
1196 	if (r) {
1197 		destroy_workqueue(ms->kmirrord_wq);
1198 		free_context(ms, ti, ms->nr_mirrors);
1199 		return r;
1200 	}
1201 
1202 	wake(ms);
1203 	return 0;
1204 }
1205 
1206 static void mirror_dtr(struct dm_target *ti)
1207 {
1208 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1209 
1210 	flush_workqueue(ms->kmirrord_wq);
1211 	kcopyd_client_destroy(ms->kcopyd_client);
1212 	destroy_workqueue(ms->kmirrord_wq);
1213 	free_context(ms, ti, ms->nr_mirrors);
1214 }
1215 
1216 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
1217 {
1218 	int should_wake = 0;
1219 	struct bio_list *bl;
1220 
1221 	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
1222 	spin_lock(&ms->lock);
1223 	should_wake = !(bl->head);
1224 	bio_list_add(bl, bio);
1225 	spin_unlock(&ms->lock);
1226 
1227 	if (should_wake)
1228 		wake(ms);
1229 }
1230 
1231 /*
1232  * Mirror mapping function
1233  */
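/*
 * Writes are always queued for kmirrord.  Reads are remapped in-line
 * when the dirty log reports the region in sync; otherwise (or when the
 * log would have to block) they are queued so that do_reads() can fall
 * back to the default mirror, except for readahead, which simply fails.
 */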
1234 static int mirror_map(struct dm_target *ti, struct bio *bio,
1235 		      union map_info *map_context)
1236 {
1237 	int r, rw = bio_rw(bio);
1238 	struct mirror *m;
1239 	struct mirror_set *ms = ti->private;
1240 
1241 	map_context->ll = bio_to_region(&ms->rh, bio);
1242 
1243 	if (rw == WRITE) {
1244 		queue_bio(ms, bio, rw);
1245 		return DM_MAPIO_SUBMITTED;
1246 	}
1247 
1248 	r = ms->rh.log->type->in_sync(ms->rh.log,
1249 				      bio_to_region(&ms->rh, bio), 0);
1250 	if (r < 0 && r != -EWOULDBLOCK)
1251 		return r;
1252 
1253 	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
1254 		r = DM_MAPIO_SUBMITTED;
1255 
1256 	/*
1257 	 * We don't want to fast track a recovery just for a read
1258 	 * ahead.  So we just let it silently fail.
1259 	 * FIXME: get rid of this.
1260 	 */
1261 	if (!r && rw == READA)
1262 		return -EIO;
1263 
1264 	if (!r) {
1265 		/* Pass this io over to the daemon */
1266 		queue_bio(ms, bio, rw);
1267 		return DM_MAPIO_SUBMITTED;
1268 	}
1269 
1270 	m = choose_mirror(ms, bio->bi_sector);
1271 	if (!m)
1272 		return -EIO;
1273 
1274 	map_bio(ms, m, bio);
1275 	return DM_MAPIO_REMAPPED;
1276 }
1277 
1278 static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1279 			 int error, union map_info *map_context)
1280 {
1281 	int rw = bio_rw(bio);
1282 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1283 	region_t region = map_context->ll;
1284 
1285 	/*
1286 	 * We need to dec pending if this was a write.
1287 	 */
1288 	if (rw == WRITE)
1289 		rh_dec(&ms->rh, region);
1290 
1291 	return 0;
1292 }
1293 
1294 static void mirror_postsuspend(struct dm_target *ti)
1295 {
1296 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1297 	struct dirty_log *log = ms->rh.log;
1298 
1299 	rh_stop_recovery(&ms->rh);
1300 
1301 	/* Wait for all I/O we generated to complete */
1302 	wait_event(_kmirrord_recovery_stopped,
1303 		   !atomic_read(&ms->rh.recovery_in_flight));
1304 
1305 	if (log->type->suspend && log->type->suspend(log))
1306 		/* FIXME: need better error handling */
1307 		DMWARN("log suspend failed");
1308 }
1309 
1310 static void mirror_resume(struct dm_target *ti)
1311 {
1312 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1313 	struct dirty_log *log = ms->rh.log;
1314 	if (log->type->resume && log->type->resume(log))
1315 		/* FIXME: need better error handling */
1316 		DMWARN("log resume failed");
1317 	rh_start_recovery(&ms->rh);
1318 }
1319 
1320 static int mirror_status(struct dm_target *ti, status_type_t type,
1321 			 char *result, unsigned int maxlen)
1322 {
1323 	unsigned int m, sz = 0;
1324 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1325 
1326 	switch (type) {
1327 	case STATUSTYPE_INFO:
1328 		DMEMIT("%d ", ms->nr_mirrors);
1329 		for (m = 0; m < ms->nr_mirrors; m++)
1330 			DMEMIT("%s ", ms->mirror[m].dev->name);
1331 
1332 		DMEMIT("%llu/%llu 0 ",
1333 			(unsigned long long)ms->rh.log->type->
1334 				get_sync_count(ms->rh.log),
1335 			(unsigned long long)ms->nr_regions);
1336 
1337 		sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);
1338 
1339 		break;
1340 
1341 	case STATUSTYPE_TABLE:
1342 		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1343 
1344 		DMEMIT("%d", ms->nr_mirrors);
1345 		for (m = 0; m < ms->nr_mirrors; m++)
1346 			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1347 				(unsigned long long)ms->mirror[m].offset);
1348 
1349 		if (ms->features & DM_RAID1_HANDLE_ERRORS)
1350 			DMEMIT(" 1 handle_errors");
1351 	}
1352 
1353 	return 0;
1354 }
1355 
1356 static struct target_type mirror_target = {
1357 	.name	 = "mirror",
1358 	.version = {1, 0, 3},
1359 	.module	 = THIS_MODULE,
1360 	.ctr	 = mirror_ctr,
1361 	.dtr	 = mirror_dtr,
1362 	.map	 = mirror_map,
1363 	.end_io	 = mirror_end_io,
1364 	.postsuspend = mirror_postsuspend,
1365 	.resume	 = mirror_resume,
1366 	.status	 = mirror_status,
1367 };
1368 
1369 static int __init dm_mirror_init(void)
1370 {
1371 	int r;
1372 
1373 	r = dm_dirty_log_init();
1374 	if (r)
1375 		return r;
1376 
1377 	r = dm_register_target(&mirror_target);
1378 	if (r < 0) {
1379 		DMERR("Failed to register mirror target");
1380 		dm_dirty_log_exit();
1381 	}
1382 
1383 	return r;
1384 }
1385 
1386 static void __exit dm_mirror_exit(void)
1387 {
1388 	int r;
1389 
1390 	r = dm_unregister_target(&mirror_target);
1391 	if (r < 0)
1392 		DMERR("unregister failed %d", r);
1393 
1394 	dm_dirty_log_exit();
1395 }
1396 
1397 /* Module hooks */
1398 module_init(dm_mirror_init);
1399 module_exit(dm_mirror_exit);
1400 
1401 MODULE_DESCRIPTION(DM_NAME " mirror target");
1402 MODULE_AUTHOR("Joe Thornber");
1403 MODULE_LICENSE("GPL");
1404