xref: /linux/drivers/md/dm-raid1.c (revision 93d90ad708b8da6efc0e487b66111aa9db7f70c7)
1 /*
2  * Copyright (C) 2003 Sistina Software Limited.
3  * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm-bio-record.h"
9 
10 #include <linux/init.h>
11 #include <linux/mempool.h>
12 #include <linux/module.h>
13 #include <linux/pagemap.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/device-mapper.h>
17 #include <linux/dm-io.h>
18 #include <linux/dm-dirty-log.h>
19 #include <linux/dm-kcopyd.h>
20 #include <linux/dm-region-hash.h>
21 
22 #define DM_MSG_PREFIX "raid1"
23 
24 #define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
25 
26 #define DM_RAID1_HANDLE_ERRORS 0x01
27 #define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
28 
29 static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
30 
31 /*-----------------------------------------------------------------
32  * Mirror set structures.
33  *---------------------------------------------------------------*/
34 enum dm_raid1_error {
35 	DM_RAID1_WRITE_ERROR,
36 	DM_RAID1_FLUSH_ERROR,
37 	DM_RAID1_SYNC_ERROR,
38 	DM_RAID1_READ_ERROR
39 };
40 
41 struct mirror {
42 	struct mirror_set *ms;
43 	atomic_t error_count;
44 	unsigned long error_type;
45 	struct dm_dev *dev;
46 	sector_t offset;
47 };
48 
49 struct mirror_set {
50 	struct dm_target *ti;
51 	struct list_head list;
52 
53 	uint64_t features;
54 
55 	spinlock_t lock;	/* protects the lists */
56 	struct bio_list reads;
57 	struct bio_list writes;
58 	struct bio_list failures;
59 	struct bio_list holds;	/* bios are waiting until suspend */
60 
61 	struct dm_region_hash *rh;
62 	struct dm_kcopyd_client *kcopyd_client;
63 	struct dm_io_client *io_client;
64 
65 	/* recovery */
66 	region_t nr_regions;
67 	int in_sync;
68 	int log_failure;
69 	int leg_failure;
70 	atomic_t suspend;
71 
72 	atomic_t default_mirror;	/* Default mirror */
73 
74 	struct workqueue_struct *kmirrord_wq;
75 	struct work_struct kmirrord_work;
76 	struct timer_list timer;
77 	unsigned long timer_pending;
78 
79 	struct work_struct trigger_event;
80 
81 	unsigned nr_mirrors;
82 	struct mirror mirror[0];
83 };
84 
85 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
86 		"A percentage of time allocated for raid resynchronization");
87 
88 static void wakeup_mirrord(void *context)
89 {
90 	struct mirror_set *ms = context;
91 
92 	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
93 }
94 
95 static void delayed_wake_fn(unsigned long data)
96 {
97 	struct mirror_set *ms = (struct mirror_set *) data;
98 
99 	clear_bit(0, &ms->timer_pending);
100 	wakeup_mirrord(ms);
101 }
102 
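/*
 * Arm a one-shot timer so kmirrord is woken again a little later
 * (HZ/5).  The timer_pending bit ensures that at most one such
 * delayed wakeup is queued at a time.
 */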
103 static void delayed_wake(struct mirror_set *ms)
104 {
105 	if (test_and_set_bit(0, &ms->timer_pending))
106 		return;
107 
108 	ms->timer.expires = jiffies + HZ / 5;
109 	ms->timer.data = (unsigned long) ms;
110 	ms->timer.function = delayed_wake_fn;
111 	add_timer(&ms->timer);
112 }
113 
114 static void wakeup_all_recovery_waiters(void *context)
115 {
116 	wake_up_all(&_kmirrord_recovery_stopped);
117 }
118 
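/*
 * Add a bio to the read or write list for kmirrord, waking the
 * daemon only if the list was previously empty.
 */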
119 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
120 {
121 	unsigned long flags;
122 	int should_wake = 0;
123 	struct bio_list *bl;
124 
125 	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
126 	spin_lock_irqsave(&ms->lock, flags);
127 	should_wake = !(bl->head);
128 	bio_list_add(bl, bio);
129 	spin_unlock_irqrestore(&ms->lock, flags);
130 
131 	if (should_wake)
132 		wakeup_mirrord(ms);
133 }
134 
135 static void dispatch_bios(void *context, struct bio_list *bio_list)
136 {
137 	struct mirror_set *ms = context;
138 	struct bio *bio;
139 
140 	while ((bio = bio_list_pop(bio_list)))
141 		queue_bio(ms, bio, WRITE);
142 }
143 
144 struct dm_raid1_bio_record {
145 	struct mirror *m;
146 	/* if details->bi_bdev == NULL, details were not saved */
147 	struct dm_bio_details details;
148 	region_t write_region;
149 };
150 
151 /*
152  * Every mirror should look like this one.
153  */
154 #define DEFAULT_MIRROR 0
155 
156 /*
157  * This is yucky.  We squirrel the mirror struct away inside
158  * bi_next for read/write bios.  This is safe since the bio
159  * doesn't get submitted to the lower levels of the block layer.
160  */
161 static struct mirror *bio_get_m(struct bio *bio)
162 {
163 	return (struct mirror *) bio->bi_next;
164 }
165 
166 static void bio_set_m(struct bio *bio, struct mirror *m)
167 {
168 	bio->bi_next = (struct bio *) m;
169 }
170 
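/*
 * The default mirror is tracked as an index into ms->mirror[] held in
 * an atomic_t, so fail_mirror() can switch it without taking a lock.
 */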
171 static struct mirror *get_default_mirror(struct mirror_set *ms)
172 {
173 	return &ms->mirror[atomic_read(&ms->default_mirror)];
174 }
175 
176 static void set_default_mirror(struct mirror *m)
177 {
178 	struct mirror_set *ms = m->ms;
179 	struct mirror *m0 = &(ms->mirror[0]);
180 
181 	atomic_set(&ms->default_mirror, m - m0);
182 }
183 
184 static struct mirror *get_valid_mirror(struct mirror_set *ms)
185 {
186 	struct mirror *m;
187 
188 	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
189 		if (!atomic_read(&m->error_count))
190 			return m;
191 
192 	return NULL;
193 }
194 
195 /* fail_mirror
196  * @m: mirror device to fail
197  * @error_type: one of the enum dm_raid1_error values, DM_RAID1_*_ERROR
198  *
199  * If errors are being handled, record the type of
200  * error encountered for this device.  If this type
201  * of error has already been recorded, we can return;
202  * otherwise, we must signal userspace by triggering
203  * an event.  Additionally, if the device is the
204  * primary device, we must choose a new primary, but
205  * only if the mirror is in-sync.
206  *
207  * This function must not block.
208  */
209 static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
210 {
211 	struct mirror_set *ms = m->ms;
212 	struct mirror *new;
213 
214 	ms->leg_failure = 1;
215 
216 	/*
217 	 * error_count is used for nothing more than a
218 	 * simple way to tell if a device has encountered
219 	 * errors.
220 	 */
221 	atomic_inc(&m->error_count);
222 
223 	if (test_and_set_bit(error_type, &m->error_type))
224 		return;
225 
226 	if (!errors_handled(ms))
227 		return;
228 
229 	if (m != get_default_mirror(ms))
230 		goto out;
231 
232 	if (!ms->in_sync) {
233 		/*
234 		 * Better to issue requests to the same failing device
235 		 * than to risk returning corrupt data.
236 		 */
237 		DMERR("Primary mirror (%s) failed while out-of-sync: "
238 		      "Reads may fail.", m->dev->name);
239 		goto out;
240 	}
241 
242 	new = get_valid_mirror(ms);
243 	if (new)
244 		set_default_mirror(new);
245 	else
246 		DMWARN("All sides of mirror have failed.");
247 
248 out:
249 	schedule_work(&ms->trigger_event);
250 }
251 
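/*
 * Used as the dirty log's flush callback: issue an empty flush to every
 * mirror leg with one synchronous dm_io request.  Legs that fail the
 * flush are marked with DM_RAID1_FLUSH_ERROR and -EIO is returned.
 */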
252 static int mirror_flush(struct dm_target *ti)
253 {
254 	struct mirror_set *ms = ti->private;
255 	unsigned long error_bits;
256 
257 	unsigned int i;
258 	struct dm_io_region io[ms->nr_mirrors];
259 	struct mirror *m;
260 	struct dm_io_request io_req = {
261 		.bi_rw = WRITE_FLUSH,
262 		.mem.type = DM_IO_KMEM,
263 		.mem.ptr.addr = NULL,
264 		.client = ms->io_client,
265 	};
266 
267 	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
268 		io[i].bdev = m->dev->bdev;
269 		io[i].sector = 0;
270 		io[i].count = 0;
271 	}
272 
273 	error_bits = -1;
274 	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
275 	if (unlikely(error_bits != 0)) {
276 		for (i = 0; i < ms->nr_mirrors; i++)
277 			if (test_bit(i, &error_bits))
278 				fail_mirror(ms->mirror + i,
279 					    DM_RAID1_FLUSH_ERROR);
280 		return -EIO;
281 	}
282 
283 	return 0;
284 }
285 
286 /*-----------------------------------------------------------------
287  * Recovery.
288  *
289  * When a mirror is first activated we may find that some regions
290  * are in the no-sync state.  We have to recover these by
291  * recopying from the default mirror to all the others.
292  *---------------------------------------------------------------*/
293 static void recovery_complete(int read_err, unsigned long write_err,
294 			      void *context)
295 {
296 	struct dm_region *reg = context;
297 	struct mirror_set *ms = dm_rh_region_context(reg);
298 	int m, bit = 0;
299 
300 	if (read_err) {
301 		/* A read error means the default mirror has failed. */
302 		DMERR_LIMIT("Unable to read primary mirror during recovery");
303 		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
304 	}
305 
306 	if (write_err) {
307 		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
308 			    write_err);
309 		/*
310 		 * Bits correspond to devices (excluding default mirror).
311 		 * The default mirror cannot change during recovery.
312 		 */
313 		for (m = 0; m < ms->nr_mirrors; m++) {
314 			if (&ms->mirror[m] == get_default_mirror(ms))
315 				continue;
316 			if (test_bit(bit, &write_err))
317 				fail_mirror(ms->mirror + m,
318 					    DM_RAID1_SYNC_ERROR);
319 			bit++;
320 		}
321 	}
322 
323 	dm_rh_recovery_end(reg, !(read_err || write_err));
324 }
325 
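/*
 * Resynchronize a single region: kcopyd copies it from the default
 * mirror to every other leg, and recovery_complete() marks the region
 * recovered (or failed) once the copy finishes.
 */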
326 static int recover(struct mirror_set *ms, struct dm_region *reg)
327 {
328 	int r;
329 	unsigned i;
330 	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
331 	struct mirror *m;
332 	unsigned long flags = 0;
333 	region_t key = dm_rh_get_region_key(reg);
334 	sector_t region_size = dm_rh_get_region_size(ms->rh);
335 
336 	/* fill in the source */
337 	m = get_default_mirror(ms);
338 	from.bdev = m->dev->bdev;
339 	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
340 	if (key == (ms->nr_regions - 1)) {
341 		/*
342 		 * The final region may be smaller than
343 		 * region_size.
344 		 */
345 		from.count = ms->ti->len & (region_size - 1);
346 		if (!from.count)
347 			from.count = region_size;
348 	} else
349 		from.count = region_size;
350 
351 	/* fill in the destinations */
352 	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
353 		if (&ms->mirror[i] == get_default_mirror(ms))
354 			continue;
355 
356 		m = ms->mirror + i;
357 		dest->bdev = m->dev->bdev;
358 		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
359 		dest->count = from.count;
360 		dest++;
361 	}
362 
363 	/* hand to kcopyd */
364 	if (!errors_handled(ms))
365 		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
366 
367 	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
368 			   flags, recovery_complete, reg);
369 
370 	return r;
371 }
372 
373 static void do_recovery(struct mirror_set *ms)
374 {
375 	struct dm_region *reg;
376 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
377 	int r;
378 
379 	/*
380 	 * Start quiescing some regions.
381 	 */
382 	dm_rh_recovery_prepare(ms->rh);
383 
384 	/*
385 	 * Copy any already quiesced regions.
386 	 */
387 	while ((reg = dm_rh_recovery_start(ms->rh))) {
388 		r = recover(ms, reg);
389 		if (r)
390 			dm_rh_recovery_end(reg, 0);
391 	}
392 
393 	/*
394 	 * Update the in sync flag.
395 	 */
396 	if (!ms->in_sync &&
397 	    (log->type->get_sync_count(log) == ms->nr_regions)) {
398 		/* the sync is complete */
399 		dm_table_event(ms->ti->table);
400 		ms->in_sync = 1;
401 	}
402 }
403 
404 /*-----------------------------------------------------------------
405  * Reads
406  *---------------------------------------------------------------*/
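/*
 * Pick a mirror to read from: start at the default mirror and walk
 * backwards (wrapping around) until a leg with no recorded errors is
 * found.  Returns NULL if every leg has failed.
 */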
407 static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
408 {
409 	struct mirror *m = get_default_mirror(ms);
410 
411 	do {
412 		if (likely(!atomic_read(&m->error_count)))
413 			return m;
414 
415 		if (m-- == ms->mirror)
416 			m += ms->nr_mirrors;
417 	} while (m != get_default_mirror(ms));
418 
419 	return NULL;
420 }
421 
422 static int default_ok(struct mirror *m)
423 {
424 	struct mirror *default_mirror = get_default_mirror(m->ms);
425 
426 	return !atomic_read(&default_mirror->error_count);
427 }
428 
429 static int mirror_available(struct mirror_set *ms, struct bio *bio)
430 {
431 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
432 	region_t region = dm_rh_bio_to_region(ms->rh, bio);
433 
434 	if (log->type->in_sync(log, region, 0))
435 		return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;
436 
437 	return 0;
438 }
439 
440 /*
441  * Remap a bio to a particular mirror.
442  */
443 static sector_t map_sector(struct mirror *m, struct bio *bio)
444 {
445 	if (unlikely(!bio->bi_iter.bi_size))
446 		return 0;
447 	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
448 }
449 
450 static void map_bio(struct mirror *m, struct bio *bio)
451 {
452 	bio->bi_bdev = m->dev->bdev;
453 	bio->bi_iter.bi_sector = map_sector(m, bio);
454 }
455 
456 static void map_region(struct dm_io_region *io, struct mirror *m,
457 		       struct bio *bio)
458 {
459 	io->bdev = m->dev->bdev;
460 	io->sector = map_sector(m, bio);
461 	io->count = bio_sectors(bio);
462 }
463 
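/*
 * Park a bio on ms->holds until the mirror is resumed.  If a suspend is
 * already in progress, complete the bio instead: requeue it to the core
 * for a noflush suspend, otherwise fail it with -EIO.
 */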
464 static void hold_bio(struct mirror_set *ms, struct bio *bio)
465 {
466 	/*
467 	 * Lock is required to avoid race condition during suspend
468 	 * process.
469 	 */
470 	spin_lock_irq(&ms->lock);
471 
472 	if (atomic_read(&ms->suspend)) {
473 		spin_unlock_irq(&ms->lock);
474 
475 		/*
476 		 * If device is suspended, complete the bio.
477 		 */
478 		if (dm_noflush_suspending(ms->ti))
479 			bio_endio(bio, DM_ENDIO_REQUEUE);
480 		else
481 			bio_endio(bio, -EIO);
482 		return;
483 	}
484 
485 	/*
486 	 * Hold bio until the suspend is complete.
487 	 */
488 	bio_list_add(&ms->holds, bio);
489 	spin_unlock_irq(&ms->lock);
490 }
491 
492 /*-----------------------------------------------------------------
493  * Reads
494  *---------------------------------------------------------------*/
495 static void read_callback(unsigned long error, void *context)
496 {
497 	struct bio *bio = context;
498 	struct mirror *m;
499 
500 	m = bio_get_m(bio);
501 	bio_set_m(bio, NULL);
502 
503 	if (likely(!error)) {
504 		bio_endio(bio, 0);
505 		return;
506 	}
507 
508 	fail_mirror(m, DM_RAID1_READ_ERROR);
509 
510 	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
511 		DMWARN_LIMIT("Read failure on mirror device %s.  "
512 			     "Trying alternative device.",
513 			     m->dev->name);
514 		queue_bio(m->ms, bio, bio_rw(bio));
515 		return;
516 	}
517 
518 	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
519 		    m->dev->name);
520 	bio_endio(bio, -EIO);
521 }
522 
523 /* Asynchronous read. */
524 static void read_async_bio(struct mirror *m, struct bio *bio)
525 {
526 	struct dm_io_region io;
527 	struct dm_io_request io_req = {
528 		.bi_rw = READ,
529 		.mem.type = DM_IO_BIO,
530 		.mem.ptr.bio = bio,
531 		.notify.fn = read_callback,
532 		.notify.context = bio,
533 		.client = m->ms->io_client,
534 	};
535 
536 	map_region(&io, m, bio);
537 	bio_set_m(bio, m);
538 	BUG_ON(dm_io(&io_req, 1, &io, NULL));
539 }
540 
541 static inline int region_in_sync(struct mirror_set *ms, region_t region,
542 				 int may_block)
543 {
544 	int state = dm_rh_get_state(ms->rh, region, may_block);
545 	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
546 }
547 
548 static void do_reads(struct mirror_set *ms, struct bio_list *reads)
549 {
550 	region_t region;
551 	struct bio *bio;
552 	struct mirror *m;
553 
554 	while ((bio = bio_list_pop(reads))) {
555 		region = dm_rh_bio_to_region(ms->rh, bio);
556 		m = get_default_mirror(ms);
557 
558 		/*
559 		 * We can only read balance if the region is in sync.
560 		 */
561 		if (likely(region_in_sync(ms, region, 1)))
562 			m = choose_mirror(ms, bio->bi_iter.bi_sector);
563 		else if (m && atomic_read(&m->error_count))
564 			m = NULL;
565 
566 		if (likely(m))
567 			read_async_bio(m, bio);
568 		else
569 			bio_endio(bio, -EIO);
570 	}
571 }
572 
573 /*-----------------------------------------------------------------
574  * Writes.
575  *
576  * We do different things with the write io depending on the
577  * state of the region that it's in:
578  *
579  * SYNC: 	increment pending, write to *all* mirrors with dm_io
580  * RECOVERING:	delay the io until recovery completes
581  * NOSYNC:	increment pending, just write to the default mirror
582  *---------------------------------------------------------------*/
583 
584 
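/*
 * Completion for the dm_io issued by do_write().  'error' is a bitset
 * with one bit per mirror leg: on success the bio is completed at once;
 * on failure the failing legs are marked and the bio is handed to the
 * failures list for do_failures() to sort out.
 */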
585 static void write_callback(unsigned long error, void *context)
586 {
587 	unsigned i, ret = 0;
588 	struct bio *bio = (struct bio *) context;
589 	struct mirror_set *ms;
590 	int should_wake = 0;
591 	unsigned long flags;
592 
593 	ms = bio_get_m(bio)->ms;
594 	bio_set_m(bio, NULL);
595 
596 	/*
597 	 * NOTE: We don't decrement the pending count here,
598 	 * instead it is done by the target's end_io function.
599 	 * This way we handle writes to both SYNC and NOSYNC
600 	 * regions with the same code.
601 	 */
602 	if (likely(!error)) {
603 		bio_endio(bio, ret);
604 		return;
605 	}
606 
607 	for (i = 0; i < ms->nr_mirrors; i++)
608 		if (test_bit(i, &error))
609 			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
610 
611 	/*
612 	 * Need to raise event.  Since raising
613 	 * events can block, we need to do it in
614 	 * the main thread.
615 	 */
616 	spin_lock_irqsave(&ms->lock, flags);
617 	if (!ms->failures.head)
618 		should_wake = 1;
619 	bio_list_add(&ms->failures, bio);
620 	spin_unlock_irqrestore(&ms->lock, flags);
621 	if (should_wake)
622 		wakeup_mirrord(ms);
623 }
624 
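/*
 * Write one bio to every mirror leg with a single asynchronous dm_io
 * request.  Flush/FUA flags are passed through and discards become a
 * DM_IO_KMEM request with no payload.
 */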
625 static void do_write(struct mirror_set *ms, struct bio *bio)
626 {
627 	unsigned int i;
628 	struct dm_io_region io[ms->nr_mirrors], *dest = io;
629 	struct mirror *m;
630 	struct dm_io_request io_req = {
631 		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
632 		.mem.type = DM_IO_BIO,
633 		.mem.ptr.bio = bio,
634 		.notify.fn = write_callback,
635 		.notify.context = bio,
636 		.client = ms->io_client,
637 	};
638 
639 	if (bio->bi_rw & REQ_DISCARD) {
640 		io_req.bi_rw |= REQ_DISCARD;
641 		io_req.mem.type = DM_IO_KMEM;
642 		io_req.mem.ptr.addr = NULL;
643 	}
644 
645 	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
646 		map_region(dest++, m, bio);
647 
648 	/*
649 	 * Use the default mirror; we only need it so that write_callback()
650 	 * can retrieve a reference to the mirror set.
651 	 */
652 	bio_set_m(bio, get_default_mirror(ms));
653 
654 	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
655 }
656 
657 static void do_writes(struct mirror_set *ms, struct bio_list *writes)
658 {
659 	int state;
660 	struct bio *bio;
661 	struct bio_list sync, nosync, recover, *this_list = NULL;
662 	struct bio_list requeue;
663 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
664 	region_t region;
665 
666 	if (!writes->head)
667 		return;
668 
669 	/*
670 	 * Classify each write.
671 	 */
672 	bio_list_init(&sync);
673 	bio_list_init(&nosync);
674 	bio_list_init(&recover);
675 	bio_list_init(&requeue);
676 
677 	while ((bio = bio_list_pop(writes))) {
678 		if ((bio->bi_rw & REQ_FLUSH) ||
679 		    (bio->bi_rw & REQ_DISCARD)) {
680 			bio_list_add(&sync, bio);
681 			continue;
682 		}
683 
684 		region = dm_rh_bio_to_region(ms->rh, bio);
685 
686 		if (log->type->is_remote_recovering &&
687 		    log->type->is_remote_recovering(log, region)) {
688 			bio_list_add(&requeue, bio);
689 			continue;
690 		}
691 
692 		state = dm_rh_get_state(ms->rh, region, 1);
693 		switch (state) {
694 		case DM_RH_CLEAN:
695 		case DM_RH_DIRTY:
696 			this_list = &sync;
697 			break;
698 
699 		case DM_RH_NOSYNC:
700 			this_list = &nosync;
701 			break;
702 
703 		case DM_RH_RECOVERING:
704 			this_list = &recover;
705 			break;
706 		}
707 
708 		bio_list_add(this_list, bio);
709 	}
710 
711 	/*
712 	 * Add bios that are delayed due to remote recovery
713 	 * back on to the write queue
714 	 */
715 	if (unlikely(requeue.head)) {
716 		spin_lock_irq(&ms->lock);
717 		bio_list_merge(&ms->writes, &requeue);
718 		spin_unlock_irq(&ms->lock);
719 		delayed_wake(ms);
720 	}
721 
722 	/*
723 	 * Increment the pending counts for any regions that will
724 	 * be written to (writes to recover regions are going to
725 	 * be delayed).
726 	 */
727 	dm_rh_inc_pending(ms->rh, &sync);
728 	dm_rh_inc_pending(ms->rh, &nosync);
729 
730 	/*
731 	 * If the flush fails on a previous call and succeeds here,
732 	 * we must not reset the log_failure variable.  We need
733 	 * userspace interaction to do that.
734 	 */
735 	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
736 
737 	/*
738 	 * Dispatch io.
739 	 */
740 	if (unlikely(ms->log_failure) && errors_handled(ms)) {
741 		spin_lock_irq(&ms->lock);
742 		bio_list_merge(&ms->failures, &sync);
743 		spin_unlock_irq(&ms->lock);
744 		wakeup_mirrord(ms);
745 	} else
746 		while ((bio = bio_list_pop(&sync)))
747 			do_write(ms, bio);
748 
749 	while ((bio = bio_list_pop(&recover)))
750 		dm_rh_delay(ms->rh, bio);
751 
752 	while ((bio = bio_list_pop(&nosync))) {
753 		if (unlikely(ms->leg_failure) && errors_handled(ms)) {
754 			spin_lock_irq(&ms->lock);
755 			bio_list_add(&ms->failures, bio);
756 			spin_unlock_irq(&ms->lock);
757 			wakeup_mirrord(ms);
758 		} else {
759 			map_bio(get_default_mirror(ms), bio);
760 			generic_make_request(bio);
761 		}
762 	}
763 }
764 
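/*
 * Handle bios that failed on at least one leg.  Unless the log itself
 * has failed, the affected regions are first marked out-of-sync so they
 * are recovered later; each bio is then failed, held or completed
 * depending on leg state and whether userspace handles errors.
 */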
765 static void do_failures(struct mirror_set *ms, struct bio_list *failures)
766 {
767 	struct bio *bio;
768 
769 	if (likely(!failures->head))
770 		return;
771 
772 	/*
773 	 * If the log has failed, unattempted writes are being
774 	 * put on the holds list.  We can't issue those writes
775 	 * until a log has been marked, so we must store them.
776 	 *
777 	 * If a 'noflush' suspend is in progress, we can requeue
778 	 * the I/Os to the core.  This gives userspace a chance
779 	 * to reconfigure the mirror, at which point the core
780 	 * will reissue the writes.  If the 'noflush' flag is
781 	 * not set, we have no choice but to return errors.
782 	 *
783 	 * Some writes on the failures list may have been
784 	 * submitted before the log failure and represent a
785 	 * failure to write to one of the devices.  It is ok
786 	 * for us to treat them the same and requeue them
787 	 * as well.
788 	 */
789 	while ((bio = bio_list_pop(failures))) {
790 		if (!ms->log_failure) {
791 			ms->in_sync = 0;
792 			dm_rh_mark_nosync(ms->rh, bio);
793 		}
794 
795 		/*
796 		 * If all the legs are dead, fail the I/O.
797 		 * If we have been told to handle errors, hold the bio
798 		 * and wait for userspace to deal with the problem.
799 		 * Otherwise pretend that the I/O succeeded. (This would
800 		 * be wrong if the failed leg returned after reboot and
801 		 * got replicated back to the good legs.)
802 		 */
803 		if (!get_valid_mirror(ms))
804 			bio_endio(bio, -EIO);
805 		else if (errors_handled(ms))
806 			hold_bio(ms, bio);
807 		else
808 			bio_endio(bio, 0);
809 	}
810 }
811 
812 static void trigger_event(struct work_struct *work)
813 {
814 	struct mirror_set *ms =
815 		container_of(work, struct mirror_set, trigger_event);
816 
817 	dm_table_event(ms->ti->table);
818 }
819 
820 /*-----------------------------------------------------------------
821  * kmirrord
822  *---------------------------------------------------------------*/
823 static void do_mirror(struct work_struct *work)
824 {
825 	struct mirror_set *ms = container_of(work, struct mirror_set,
826 					     kmirrord_work);
827 	struct bio_list reads, writes, failures;
828 	unsigned long flags;
829 
830 	spin_lock_irqsave(&ms->lock, flags);
831 	reads = ms->reads;
832 	writes = ms->writes;
833 	failures = ms->failures;
834 	bio_list_init(&ms->reads);
835 	bio_list_init(&ms->writes);
836 	bio_list_init(&ms->failures);
837 	spin_unlock_irqrestore(&ms->lock, flags);
838 
839 	dm_rh_update_states(ms->rh, errors_handled(ms));
840 	do_recovery(ms);
841 	do_reads(ms, &reads);
842 	do_writes(ms, &writes);
843 	do_failures(ms, &failures);
844 }
845 
846 /*-----------------------------------------------------------------
847  * Target functions
848  *---------------------------------------------------------------*/
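/*
 * Allocate the mirror_set (with its trailing mirror[] array) together
 * with the dm_io client and the region hash.  The individual mirror
 * devices are filled in later by get_mirror().
 */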
849 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
850 					uint32_t region_size,
851 					struct dm_target *ti,
852 					struct dm_dirty_log *dl)
853 {
854 	size_t len;
855 	struct mirror_set *ms = NULL;
856 
857 	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
858 
859 	ms = kzalloc(len, GFP_KERNEL);
860 	if (!ms) {
861 		ti->error = "Cannot allocate mirror context";
862 		return NULL;
863 	}
864 
865 	spin_lock_init(&ms->lock);
866 	bio_list_init(&ms->reads);
867 	bio_list_init(&ms->writes);
868 	bio_list_init(&ms->failures);
869 	bio_list_init(&ms->holds);
870 
871 	ms->ti = ti;
872 	ms->nr_mirrors = nr_mirrors;
873 	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
874 	ms->in_sync = 0;
875 	ms->log_failure = 0;
876 	ms->leg_failure = 0;
877 	atomic_set(&ms->suspend, 0);
878 	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
879 
880 	ms->io_client = dm_io_client_create();
881 	if (IS_ERR(ms->io_client)) {
882 		ti->error = "Error creating dm_io client";
883 		kfree(ms);
884 		return NULL;
885 	}
886 
887 	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
888 				       wakeup_all_recovery_waiters,
889 				       ms->ti->begin, MAX_RECOVERY,
890 				       dl, region_size, ms->nr_regions);
891 	if (IS_ERR(ms->rh)) {
892 		ti->error = "Error creating dirty region hash";
893 		dm_io_client_destroy(ms->io_client);
894 		kfree(ms);
895 		return NULL;
896 	}
897 
898 	return ms;
899 }
900 
901 static void free_context(struct mirror_set *ms, struct dm_target *ti,
902 			 unsigned int m)
903 {
904 	while (m--)
905 		dm_put_device(ti, ms->mirror[m].dev);
906 
907 	dm_io_client_destroy(ms->io_client);
908 	dm_region_hash_destroy(ms->rh);
909 	kfree(ms);
910 }
911 
912 static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
913 		      unsigned int mirror, char **argv)
914 {
915 	unsigned long long offset;
916 	char dummy;
917 
918 	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
919 		ti->error = "Invalid offset";
920 		return -EINVAL;
921 	}
922 
923 	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
924 			  &ms->mirror[mirror].dev)) {
925 		ti->error = "Device lookup failure";
926 		return -ENXIO;
927 	}
928 
929 	ms->mirror[mirror].ms = ms;
930 	atomic_set(&(ms->mirror[mirror].error_count), 0);
931 	ms->mirror[mirror].error_type = 0;
932 	ms->mirror[mirror].offset = offset;
933 
934 	return 0;
935 }
936 
937 /*
938  * Create dirty log: log_type #log_params <log_params>
939  */
940 static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
941 					     unsigned argc, char **argv,
942 					     unsigned *args_used)
943 {
944 	unsigned param_count;
945 	struct dm_dirty_log *dl;
946 	char dummy;
947 
948 	if (argc < 2) {
949 		ti->error = "Insufficient mirror log arguments";
950 		return NULL;
951 	}
952 
953 	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
954 		ti->error = "Invalid mirror log argument count";
955 		return NULL;
956 	}
957 
958 	*args_used = 2 + param_count;
959 
960 	if (argc < *args_used) {
961 		ti->error = "Insufficient mirror log arguments";
962 		return NULL;
963 	}
964 
965 	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
966 				 argv + 2);
967 	if (!dl) {
968 		ti->error = "Error creating mirror dirty log";
969 		return NULL;
970 	}
971 
972 	return dl;
973 }
974 
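/*
 * Parse the optional feature list: [#features <features>].  The only
 * feature currently recognised is "handle_errors".
 */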
975 static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
976 			  unsigned *args_used)
977 {
978 	unsigned num_features;
979 	struct dm_target *ti = ms->ti;
980 	char dummy;
981 
982 	*args_used = 0;
983 
984 	if (!argc)
985 		return 0;
986 
987 	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
988 		ti->error = "Invalid number of features";
989 		return -EINVAL;
990 	}
991 
992 	argc--;
993 	argv++;
994 	(*args_used)++;
995 
996 	if (num_features > argc) {
997 		ti->error = "Not enough arguments to support feature count";
998 		return -EINVAL;
999 	}
1000 
1001 	if (!strcmp("handle_errors", argv[0]))
1002 		ms->features |= DM_RAID1_HANDLE_ERRORS;
1003 	else {
1004 		ti->error = "Unrecognised feature requested";
1005 		return -EINVAL;
1006 	}
1007 
1008 	(*args_used)++;
1009 
1010 	return 0;
1011 }
1012 
1013 /*
1014  * Construct a mirror mapping:
1015  *
1016  * log_type #log_params <log_params>
1017  * #mirrors [mirror_path offset]{2,}
1018  * [#features <features>]
1019  *
1020  * log_type is "core" or "disk"
1021  * #log_params is between 1 and 3
1022  *
1023  * If present, features must be "handle_errors".
1024  */
1025 static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1026 {
1027 	int r;
1028 	unsigned int nr_mirrors, m, args_used;
1029 	struct mirror_set *ms;
1030 	struct dm_dirty_log *dl;
1031 	char dummy;
1032 
1033 	dl = create_dirty_log(ti, argc, argv, &args_used);
1034 	if (!dl)
1035 		return -EINVAL;
1036 
1037 	argv += args_used;
1038 	argc -= args_used;
1039 
1040 	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
1041 	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
1042 		ti->error = "Invalid number of mirrors";
1043 		dm_dirty_log_destroy(dl);
1044 		return -EINVAL;
1045 	}
1046 
1047 	argv++, argc--;
1048 
1049 	if (argc < nr_mirrors * 2) {
1050 		ti->error = "Too few mirror arguments";
1051 		dm_dirty_log_destroy(dl);
1052 		return -EINVAL;
1053 	}
1054 
1055 	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
1056 	if (!ms) {
1057 		dm_dirty_log_destroy(dl);
1058 		return -ENOMEM;
1059 	}
1060 
1061 	/* Get the mirror parameter sets */
1062 	for (m = 0; m < nr_mirrors; m++) {
1063 		r = get_mirror(ms, ti, m, argv);
1064 		if (r) {
1065 			free_context(ms, ti, m);
1066 			return r;
1067 		}
1068 		argv += 2;
1069 		argc -= 2;
1070 	}
1071 
1072 	ti->private = ms;
1073 
1074 	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
1075 	if (r)
1076 		goto err_free_context;
1077 
1078 	ti->num_flush_bios = 1;
1079 	ti->num_discard_bios = 1;
1080 	ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
1081 	ti->discard_zeroes_data_unsupported = true;
1082 
1083 	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
1084 	if (!ms->kmirrord_wq) {
1085 		DMERR("couldn't start kmirrord");
1086 		r = -ENOMEM;
1087 		goto err_free_context;
1088 	}
1089 	INIT_WORK(&ms->kmirrord_work, do_mirror);
1090 	init_timer(&ms->timer);
1091 	ms->timer_pending = 0;
1092 	INIT_WORK(&ms->trigger_event, trigger_event);
1093 
1094 	r = parse_features(ms, argc, argv, &args_used);
1095 	if (r)
1096 		goto err_destroy_wq;
1097 
1098 	argv += args_used;
1099 	argc -= args_used;
1100 
1101 	/*
1102 	 * Any read-balancing addition depends on the
1103 	 * DM_RAID1_HANDLE_ERRORS flag being present.
1104 	 * This is because the decision to balance depends
1105 	 * on the sync state of a region.  If that flag
1106 	 * is not present, errors are ignored and the
1107 	 * sync state may be inaccurate.
1108 	 */
1109 
1110 	if (argc) {
1111 		ti->error = "Too many mirror arguments";
1112 		r = -EINVAL;
1113 		goto err_destroy_wq;
1114 	}
1115 
1116 	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1117 	if (IS_ERR(ms->kcopyd_client)) {
1118 		r = PTR_ERR(ms->kcopyd_client);
1119 		goto err_destroy_wq;
1120 	}
1121 
1122 	wakeup_mirrord(ms);
1123 	return 0;
1124 
1125 err_destroy_wq:
1126 	destroy_workqueue(ms->kmirrord_wq);
1127 err_free_context:
1128 	free_context(ms, ti, ms->nr_mirrors);
1129 	return r;
1130 }
1131 
1132 static void mirror_dtr(struct dm_target *ti)
1133 {
1134 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1135 
1136 	del_timer_sync(&ms->timer);
1137 	flush_workqueue(ms->kmirrord_wq);
1138 	flush_work(&ms->trigger_event);
1139 	dm_kcopyd_client_destroy(ms->kcopyd_client);
1140 	destroy_workqueue(ms->kmirrord_wq);
1141 	free_context(ms, ti, ms->nr_mirrors);
1142 }
1143 
1144 /*
1145  * Mirror mapping function
1146  */
1147 static int mirror_map(struct dm_target *ti, struct bio *bio)
1148 {
1149 	int r, rw = bio_rw(bio);
1150 	struct mirror *m;
1151 	struct mirror_set *ms = ti->private;
1152 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1153 	struct dm_raid1_bio_record *bio_record =
1154 	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
1155 
1156 	bio_record->details.bi_bdev = NULL;
1157 
1158 	if (rw == WRITE) {
1159 		/* Save region for mirror_end_io() handler */
1160 		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
1161 		queue_bio(ms, bio, rw);
1162 		return DM_MAPIO_SUBMITTED;
1163 	}
1164 
1165 	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
1166 	if (r < 0 && r != -EWOULDBLOCK)
1167 		return r;
1168 
1169 	/*
1170 	 * If region is not in-sync queue the bio.
1171 	 */
1172 	if (!r || (r == -EWOULDBLOCK)) {
1173 		if (rw == READA)
1174 			return -EWOULDBLOCK;
1175 
1176 		queue_bio(ms, bio, rw);
1177 		return DM_MAPIO_SUBMITTED;
1178 	}
1179 
1180 	/*
1181 	 * The region is in-sync and we can perform reads directly.
1182 	 * Store enough information so we can retry if it fails.
1183 	 */
1184 	m = choose_mirror(ms, bio->bi_iter.bi_sector);
1185 	if (unlikely(!m))
1186 		return -EIO;
1187 
1188 	dm_bio_record(&bio_record->details, bio);
1189 	bio_record->m = m;
1190 
1191 	map_bio(m, bio);
1192 
1193 	return DM_MAPIO_REMAPPED;
1194 }
1195 
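/*
 * For writes (other than flushes/discards), drop the pending count on
 * the region recorded by mirror_map().  For failed reads, restore the
 * saved bio details and requeue the bio so kmirrord can retry it on
 * another in-sync mirror.
 */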
1196 static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
1197 {
1198 	int rw = bio_rw(bio);
1199 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1200 	struct mirror *m = NULL;
1201 	struct dm_bio_details *bd = NULL;
1202 	struct dm_raid1_bio_record *bio_record =
1203 	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
1204 
1205 	/*
1206 	 * We need to dec pending if this was a write.
1207 	 */
1208 	if (rw == WRITE) {
1209 		if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
1210 			dm_rh_dec(ms->rh, bio_record->write_region);
1211 		return error;
1212 	}
1213 
1214 	if (error == -EOPNOTSUPP)
1215 		goto out;
1216 
1217 	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
1218 		goto out;
1219 
1220 	if (unlikely(error)) {
1221 		if (!bio_record->details.bi_bdev) {
1222 			/*
1223 			 * There wasn't enough memory to record necessary
1224 			 * information for a retry or there was no other
1225 			 * mirror in-sync.
1226 			 */
1227 			DMERR_LIMIT("Mirror read failed.");
1228 			return -EIO;
1229 		}
1230 
1231 		m = bio_record->m;
1232 
1233 		DMERR("Mirror read failed from %s. Trying alternative device.",
1234 		      m->dev->name);
1235 
1236 		fail_mirror(m, DM_RAID1_READ_ERROR);
1237 
1238 		/*
1239 		 * A failed read is requeued for another attempt using an intact
1240 		 * mirror.
1241 		 */
1242 		if (default_ok(m) || mirror_available(ms, bio)) {
1243 			bd = &bio_record->details;
1244 
1245 			dm_bio_restore(bd, bio);
1246 			bio_record->details.bi_bdev = NULL;
1247 
1248 			atomic_inc(&bio->bi_remaining);
1249 
1250 			queue_bio(ms, bio, rw);
1251 			return DM_ENDIO_INCOMPLETE;
1252 		}
1253 		DMERR("All replicated volumes dead, failing I/O");
1254 	}
1255 
1256 out:
1257 	bio_record->details.bi_bdev = NULL;
1258 
1259 	return error;
1260 }
1261 
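/*
 * Presuspend: block further holds, drain the hold list, stop recovery
 * and wait for in-flight recovery to finish, then flush kmirrord so all
 * queued I/O has been pushed out before the suspend completes.
 */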
1262 static void mirror_presuspend(struct dm_target *ti)
1263 {
1264 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1265 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1266 
1267 	struct bio_list holds;
1268 	struct bio *bio;
1269 
1270 	atomic_set(&ms->suspend, 1);
1271 
1272 	/*
1273 	 * Drain the hold list: since ms->suspend is now set, hold_bio()
1274 	 * completes each bio (requeueing it for a noflush suspend) instead
1275 	 * of re-holding it.  After this, no bio has a chance to be added
1276 	 * to the hold list.
1277 	 */
1278 	spin_lock_irq(&ms->lock);
1279 	holds = ms->holds;
1280 	bio_list_init(&ms->holds);
1281 	spin_unlock_irq(&ms->lock);
1282 
1283 	while ((bio = bio_list_pop(&holds)))
1284 		hold_bio(ms, bio);
1285 
1286 	/*
1287 	 * We must finish up all the work that we've
1288 	 * generated (i.e. recovery work).
1289 	 */
1290 	dm_rh_stop_recovery(ms->rh);
1291 
1292 	wait_event(_kmirrord_recovery_stopped,
1293 		   !dm_rh_recovery_in_flight(ms->rh));
1294 
1295 	if (log->type->presuspend && log->type->presuspend(log))
1296 		/* FIXME: need better error handling */
1297 		DMWARN("log presuspend failed");
1298 
1299 	/*
1300 	 * Now that recovery is complete/stopped and the
1301 	 * delayed bios are queued, we need to wait for
1302 	 * the worker thread to complete.  This way,
1303 	 * we know that all of our I/O has been pushed.
1304 	 */
1305 	flush_workqueue(ms->kmirrord_wq);
1306 }
1307 
1308 static void mirror_postsuspend(struct dm_target *ti)
1309 {
1310 	struct mirror_set *ms = ti->private;
1311 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1312 
1313 	if (log->type->postsuspend && log->type->postsuspend(log))
1314 		/* FIXME: need better error handling */
1315 		DMWARN("log postsuspend failed");
1316 }
1317 
1318 static void mirror_resume(struct dm_target *ti)
1319 {
1320 	struct mirror_set *ms = ti->private;
1321 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1322 
1323 	atomic_set(&ms->suspend, 0);
1324 	if (log->type->resume && log->type->resume(log))
1325 		/* FIXME: need better error handling */
1326 		DMWARN("log resume failed");
1327 	dm_rh_start_recovery(ms->rh);
1328 }
1329 
1330 /*
1331  * device_status_char
1332  * @m: mirror device/leg we want the status of
1333  *
1334  * We return one character representing the most severe error
1335  * we have encountered.
1336  *    A => Alive - No failures
1337  *    D => Dead - A write failure occurred leaving mirror out-of-sync
1338  *    S => Sync - A synchronization failure occurred, mirror out-of-sync
1339  *    R => Read - A read failure occurred, mirror data unaffected
1340  *
1341  * Returns: <char>
1342  */
1343 static char device_status_char(struct mirror *m)
1344 {
1345 	if (!atomic_read(&(m->error_count)))
1346 		return 'A';
1347 
1348 	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
1349 		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
1350 		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
1351 		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
1352 }
1353 
1354 
1355 static void mirror_status(struct dm_target *ti, status_type_t type,
1356 			  unsigned status_flags, char *result, unsigned maxlen)
1357 {
1358 	unsigned int m, sz = 0;
1359 	struct mirror_set *ms = (struct mirror_set *) ti->private;
1360 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1361 	char buffer[ms->nr_mirrors + 1];
1362 
1363 	switch (type) {
1364 	case STATUSTYPE_INFO:
1365 		DMEMIT("%d ", ms->nr_mirrors);
1366 		for (m = 0; m < ms->nr_mirrors; m++) {
1367 			DMEMIT("%s ", ms->mirror[m].dev->name);
1368 			buffer[m] = device_status_char(&(ms->mirror[m]));
1369 		}
1370 		buffer[m] = '\0';
1371 
1372 		DMEMIT("%llu/%llu 1 %s ",
1373 		      (unsigned long long)log->type->get_sync_count(log),
1374 		      (unsigned long long)ms->nr_regions, buffer);
1375 
1376 		sz += log->type->status(log, type, result+sz, maxlen-sz);
1377 
1378 		break;
1379 
1380 	case STATUSTYPE_TABLE:
1381 		sz = log->type->status(log, type, result, maxlen);
1382 
1383 		DMEMIT("%d", ms->nr_mirrors);
1384 		for (m = 0; m < ms->nr_mirrors; m++)
1385 			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1386 			       (unsigned long long)ms->mirror[m].offset);
1387 
1388 		if (ms->features & DM_RAID1_HANDLE_ERRORS)
1389 			DMEMIT(" 1 handle_errors");
1390 	}
1391 }
1392 
1393 static int mirror_iterate_devices(struct dm_target *ti,
1394 				  iterate_devices_callout_fn fn, void *data)
1395 {
1396 	struct mirror_set *ms = ti->private;
1397 	int ret = 0;
1398 	unsigned i;
1399 
1400 	for (i = 0; !ret && i < ms->nr_mirrors; i++)
1401 		ret = fn(ti, ms->mirror[i].dev,
1402 			 ms->mirror[i].offset, ti->len, data);
1403 
1404 	return ret;
1405 }
1406 
1407 static struct target_type mirror_target = {
1408 	.name	 = "mirror",
1409 	.version = {1, 13, 2},
1410 	.module	 = THIS_MODULE,
1411 	.ctr	 = mirror_ctr,
1412 	.dtr	 = mirror_dtr,
1413 	.map	 = mirror_map,
1414 	.end_io	 = mirror_end_io,
1415 	.presuspend = mirror_presuspend,
1416 	.postsuspend = mirror_postsuspend,
1417 	.resume	 = mirror_resume,
1418 	.status	 = mirror_status,
1419 	.iterate_devices = mirror_iterate_devices,
1420 };
1421 
1422 static int __init dm_mirror_init(void)
1423 {
1424 	int r;
1425 
1426 	r = dm_register_target(&mirror_target);
1427 	if (r < 0) {
1428 		DMERR("Failed to register mirror target");
1429 		goto bad_target;
1430 	}
1431 
1432 	return 0;
1433 
1434 bad_target:
1435 	return r;
1436 }
1437 
1438 static void __exit dm_mirror_exit(void)
1439 {
1440 	dm_unregister_target(&mirror_target);
1441 }
1442 
1443 /* Module hooks */
1444 module_init(dm_mirror_init);
1445 module_exit(dm_mirror_exit);
1446 
1447 MODULE_DESCRIPTION(DM_NAME " mirror target");
1448 MODULE_AUTHOR("Joe Thornber");
1449 MODULE_LICENSE("GPL");
1450