xref: /linux/drivers/md/dm-snap.c (revision 3eeebf17f31c583f83e081b17b3076477cb96886)
1 /*
2  * dm-snapshot.c
3  *
4  * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include <linux/blkdev.h>
10 #include <linux/ctype.h>
11 #include <linux/device-mapper.h>
12 #include <linux/fs.h>
13 #include <linux/init.h>
14 #include <linux/kdev_t.h>
15 #include <linux/list.h>
16 #include <linux/mempool.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/log2.h>
21 #include <linux/dm-kcopyd.h>
22 
23 #include "dm-snap.h"
24 #include "dm-bio-list.h"
25 
26 #define DM_MSG_PREFIX "snapshots"
27 
28 /*
29  * The percentage increment at which we wake up users
30  */
31 #define WAKE_UP_PERCENT 5
32 
33 /*
34  * kcopyd priority of snapshot operations
35  */
36 #define SNAPSHOT_COPY_PRIORITY 2
37 
38 /*
39  * Reserve 1MB for each snapshot initially (with minimum of 1 page).
40  */
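/*
 * For example, with 4 KiB pages (PAGE_SHIFT == 12) the expression below
 * evaluates to (1UL << 20) >> 12 == 256 pages; the "?:" falls back to a
 * single page if the shift would otherwise yield zero.
 */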
41 #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
42 
43 /*
44  * The size of the mempool used to track chunks in use.
45  */
46 #define MIN_IOS 256
47 
48 static struct workqueue_struct *ksnapd;
49 static void flush_queued_bios(struct work_struct *work);
50 
51 struct dm_snap_pending_exception {
52 	struct dm_snap_exception e;
53 
54 	/*
55 	 * Origin buffers waiting for this to complete are held
56 	 * in a bio list
57 	 */
58 	struct bio_list origin_bios;
59 	struct bio_list snapshot_bios;
60 
61 	/*
62 	 * Short-term queue of pending exceptions prior to submission.
63 	 */
64 	struct list_head list;
65 
66 	/*
67 	 * The primary pending_exception is the one that holds
68 	 * the ref_count and the list of origin_bios for a
69 	 * group of pending_exceptions.  It is always last to get freed.
70 	 * These fields get set up when writing to the origin.
71 	 */
72 	struct dm_snap_pending_exception *primary_pe;
73 
74 	/*
75 	 * Number of pending_exceptions processing this chunk.
76 	 * When this drops to zero we must complete the origin bios.
77 	 * If incrementing or decrementing this, hold pe->snap->lock for
78 	 * the sibling concerned and not pe->primary_pe->snap->lock unless
79 	 * they are the same.
80 	 */
81 	atomic_t ref_count;
82 
83 	/* Pointer back to snapshot context */
84 	struct dm_snapshot *snap;
85 
86 	/*
87 	 * 1 indicates the exception has already been sent to
88 	 * kcopyd.
89 	 */
90 	int started;
91 };
92 
93 /*
94  * Slab caches used to allocate the completed and pending
95  * exception objects.
96  */
97 static struct kmem_cache *exception_cache;
98 static struct kmem_cache *pending_cache;
99 
100 struct dm_snap_tracked_chunk {
101 	struct hlist_node node;
102 	chunk_t chunk;
103 };
104 
105 static struct kmem_cache *tracked_chunk_cache;
106 
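/*
 * Chunk-tracking helpers.  Reads that snapshot_map() redirects to the
 * origin device are recorded here for the duration of the I/O, so that
 * pending_complete() can wait (via __chunk_is_tracked()) for any such
 * read to finish before it makes the completed exception visible.
 */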
107 static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
108 						 chunk_t chunk)
109 {
110 	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
111 							GFP_NOIO);
112 	unsigned long flags;
113 
114 	c->chunk = chunk;
115 
116 	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
117 	hlist_add_head(&c->node,
118 		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
119 	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
120 
121 	return c;
122 }
123 
124 static void stop_tracking_chunk(struct dm_snapshot *s,
125 				struct dm_snap_tracked_chunk *c)
126 {
127 	unsigned long flags;
128 
129 	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
130 	hlist_del(&c->node);
131 	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
132 
133 	mempool_free(c, s->tracked_chunk_pool);
134 }
135 
136 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
137 {
138 	struct dm_snap_tracked_chunk *c;
139 	struct hlist_node *hn;
140 	int found = 0;
141 
142 	spin_lock_irq(&s->tracked_chunk_lock);
143 
144 	hlist_for_each_entry(c, hn,
145 	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
146 		if (c->chunk == chunk) {
147 			found = 1;
148 			break;
149 		}
150 	}
151 
152 	spin_unlock_irq(&s->tracked_chunk_lock);
153 
154 	return found;
155 }
156 
157 /*
158  * One of these per registered origin, held in the snapshot_origins hash
159  */
160 struct origin {
161 	/* The origin device */
162 	struct block_device *bdev;
163 
164 	struct list_head hash_list;
165 
166 	/* List of snapshots for this origin */
167 	struct list_head snapshots;
168 };
169 
170 /*
171  * Size of the hash table for origin volumes. If we make this
172  * the size of the minors list then it should be nearly perfect
173  */
174 #define ORIGIN_HASH_SIZE 256
175 #define ORIGIN_MASK      0xFF
176 static struct list_head *_origins;
177 static struct rw_semaphore _origins_lock;
178 
179 static int init_origin_hash(void)
180 {
181 	int i;
182 
183 	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
184 			   GFP_KERNEL);
185 	if (!_origins) {
186 		DMERR("unable to allocate memory");
187 		return -ENOMEM;
188 	}
189 
190 	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
191 		INIT_LIST_HEAD(_origins + i);
192 	init_rwsem(&_origins_lock);
193 
194 	return 0;
195 }
196 
197 static void exit_origin_hash(void)
198 {
199 	kfree(_origins);
200 }
201 
202 static unsigned origin_hash(struct block_device *bdev)
203 {
204 	return bdev->bd_dev & ORIGIN_MASK;
205 }
206 
207 static struct origin *__lookup_origin(struct block_device *origin)
208 {
209 	struct list_head *ol;
210 	struct origin *o;
211 
212 	ol = &_origins[origin_hash(origin)];
213 	list_for_each_entry (o, ol, hash_list)
214 		if (bdev_equal(o->bdev, origin))
215 			return o;
216 
217 	return NULL;
218 }
219 
220 static void __insert_origin(struct origin *o)
221 {
222 	struct list_head *sl = &_origins[origin_hash(o->bdev)];
223 	list_add_tail(&o->hash_list, sl);
224 }
225 
226 /*
227  * Make a note of the snapshot and its origin so we can look it
228  * up when the origin has a write on it.
229  */
230 static int register_snapshot(struct dm_snapshot *snap)
231 {
232 	struct origin *o;
233 	struct block_device *bdev = snap->origin->bdev;
234 
235 	down_write(&_origins_lock);
236 	o = __lookup_origin(bdev);
237 
238 	if (!o) {
239 		/* New origin */
240 		o = kmalloc(sizeof(*o), GFP_KERNEL);
241 		if (!o) {
242 			up_write(&_origins_lock);
243 			return -ENOMEM;
244 		}
245 
246 		/* Initialise the struct */
247 		INIT_LIST_HEAD(&o->snapshots);
248 		o->bdev = bdev;
249 
250 		__insert_origin(o);
251 	}
252 
253 	list_add_tail(&snap->list, &o->snapshots);
254 
255 	up_write(&_origins_lock);
256 	return 0;
257 }
258 
259 static void unregister_snapshot(struct dm_snapshot *s)
260 {
261 	struct origin *o;
262 
263 	down_write(&_origins_lock);
264 	o = __lookup_origin(s->origin->bdev);
265 
266 	list_del(&s->list);
267 	if (list_empty(&o->snapshots)) {
268 		list_del(&o->hash_list);
269 		kfree(o);
270 	}
271 
272 	up_write(&_origins_lock);
273 }
274 
275 /*
276  * Implementation of the exception hash tables.
277  * The lowest hash_shift bits of the chunk number are ignored, allowing
278  * some consecutive chunks to be grouped together.
279  */
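/*
 * For example, with hash_shift == DM_CHUNK_CONSECUTIVE_BITS, old_chunk
 * values that differ only in their low hash_shift bits hash to the same
 * bucket, so a run of consecutive chunks can be found in (and merged
 * into) a single slot by insert_completed_exception().
 */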
280 static int init_exception_table(struct exception_table *et, uint32_t size,
281 				unsigned hash_shift)
282 {
283 	unsigned int i;
284 
285 	et->hash_shift = hash_shift;
286 	et->hash_mask = size - 1;
287 	et->table = dm_vcalloc(size, sizeof(struct list_head));
288 	if (!et->table)
289 		return -ENOMEM;
290 
291 	for (i = 0; i < size; i++)
292 		INIT_LIST_HEAD(et->table + i);
293 
294 	return 0;
295 }
296 
297 static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
298 {
299 	struct list_head *slot;
300 	struct dm_snap_exception *ex, *next;
301 	int i, size;
302 
303 	size = et->hash_mask + 1;
304 	for (i = 0; i < size; i++) {
305 		slot = et->table + i;
306 
307 		list_for_each_entry_safe (ex, next, slot, hash_list)
308 			kmem_cache_free(mem, ex);
309 	}
310 
311 	vfree(et->table);
312 }
313 
314 static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
315 {
316 	return (chunk >> et->hash_shift) & et->hash_mask;
317 }
318 
319 static void insert_exception(struct exception_table *eh,
320 			     struct dm_snap_exception *e)
321 {
322 	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
323 	list_add(&e->hash_list, l);
324 }
325 
326 static void remove_exception(struct dm_snap_exception *e)
327 {
328 	list_del(&e->hash_list);
329 }
330 
331 /*
332  * Return the exception data for a chunk, or NULL if the chunk
333  * is not remapped.
334  */
335 static struct dm_snap_exception *lookup_exception(struct exception_table *et,
336 						  chunk_t chunk)
337 {
338 	struct list_head *slot;
339 	struct dm_snap_exception *e;
340 
341 	slot = &et->table[exception_hash(et, chunk)];
342 	list_for_each_entry (e, slot, hash_list)
343 		if (chunk >= e->old_chunk &&
344 		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
345 			return e;
346 
347 	return NULL;
348 }
349 
350 static struct dm_snap_exception *alloc_exception(void)
351 {
352 	struct dm_snap_exception *e;
353 
354 	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
355 	if (!e)
356 		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
357 
358 	return e;
359 }
360 
361 static void free_exception(struct dm_snap_exception *e)
362 {
363 	kmem_cache_free(exception_cache, e);
364 }
365 
366 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
367 {
368 	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
369 							     GFP_NOIO);
370 
371 	pe->snap = s;
372 
373 	return pe;
374 }
375 
376 static void free_pending_exception(struct dm_snap_pending_exception *pe)
377 {
378 	mempool_free(pe, pe->snap->pending_pool);
379 }
380 
381 static void insert_completed_exception(struct dm_snapshot *s,
382 				       struct dm_snap_exception *new_e)
383 {
384 	struct exception_table *eh = &s->complete;
385 	struct list_head *l;
386 	struct dm_snap_exception *e = NULL;
387 
388 	l = &eh->table[exception_hash(eh, new_e->old_chunk)];
389 
390 	/* Add immediately if this table doesn't support consecutive chunks */
391 	if (!eh->hash_shift)
392 		goto out;
393 
394 	/* List is ordered by old_chunk */
395 	list_for_each_entry_reverse(e, l, hash_list) {
396 		/* Insert after an existing chunk? */
397 		if (new_e->old_chunk == (e->old_chunk +
398 					 dm_consecutive_chunk_count(e) + 1) &&
399 		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
400 					 dm_consecutive_chunk_count(e) + 1)) {
401 			dm_consecutive_chunk_count_inc(e);
402 			free_exception(new_e);
403 			return;
404 		}
405 
406 		/* Insert before an existing chunk? */
407 		if (new_e->old_chunk == (e->old_chunk - 1) &&
408 		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
409 			dm_consecutive_chunk_count_inc(e);
410 			e->old_chunk--;
411 			e->new_chunk--;
412 			free_exception(new_e);
413 			return;
414 		}
415 
416 		if (new_e->old_chunk > e->old_chunk)
417 			break;
418 	}
419 
420 out:
421 	list_add(&new_e->hash_list, e ? &e->hash_list : l);
422 }
423 
424 int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
425 {
426 	struct dm_snap_exception *e;
427 
428 	e = alloc_exception();
429 	if (!e)
430 		return -ENOMEM;
431 
432 	e->old_chunk = old;
433 
434 	/* Consecutive_count is implicitly initialised to zero */
435 	e->new_chunk = new;
436 
437 	insert_completed_exception(s, e);
438 
439 	return 0;
440 }
441 
442 /*
443  * Hard-coded limit on the size of the exception hash table.
444  */
445 static int calc_max_buckets(void)
446 {
447 	/* use a fixed size of 2MB */
448 	unsigned long mem = 2 * 1024 * 1024;
449 	mem /= sizeof(struct list_head);
450 
451 	return mem;
452 }
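/*
 * On a typical 64-bit build (16-byte struct list_head) this allows
 * 2 MB / 16 == 131072 hash buckets.
 */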
453 
454 /*
455  * Allocate room for a suitable hash table.
456  */
457 static int init_hash_tables(struct dm_snapshot *s)
458 {
459 	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
460 
461 	/*
462 	 * Calculate based on the size of the original volume or
463 	 * the COW volume...
464 	 */
465 	cow_dev_size = get_dev_size(s->cow->bdev);
466 	origin_dev_size = get_dev_size(s->origin->bdev);
467 	max_buckets = calc_max_buckets();
468 
469 	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
470 	hash_size = min(hash_size, max_buckets);
471 
472 	hash_size = rounddown_pow_of_two(hash_size);
473 	if (init_exception_table(&s->complete, hash_size,
474 				 DM_CHUNK_CONSECUTIVE_BITS))
475 		return -ENOMEM;
476 
477 	/*
478 	 * Allocate hash table for in-flight exceptions
479 	 * Make this smaller than the real hash table
480 	 */
481 	hash_size >>= 3;
482 	if (hash_size < 64)
483 		hash_size = 64;
484 
485 	if (init_exception_table(&s->pending, hash_size, 0)) {
486 		exit_exception_table(&s->complete, exception_cache);
487 		return -ENOMEM;
488 	}
489 
490 	return 0;
491 }
492 
493 /*
494  * Round a number up to the nearest 'size' boundary.  size must
495  * be a power of 2.
496  */
497 static ulong round_up(ulong n, ulong size)
498 {
499 	size--;
500 	return (n + size) & ~size;
501 }
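/*
 * e.g. round_up(5, 4) == 8 and round_up(8, 4) == 8.  The mask trick
 * relies on 'size' being a power of 2.
 */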
502 
503 static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
504 			  char **error)
505 {
506 	unsigned long chunk_size;
507 	char *value;
508 
509 	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
510 	if (*chunk_size_arg == '\0' || *value != '\0') {
511 		*error = "Invalid chunk size";
512 		return -EINVAL;
513 	}
514 
515 	if (!chunk_size) {
516 		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
517 		return 0;
518 	}
519 
520 	/*
521 	 * Chunk size must be multiple of page size.  Silently
522 	 * round up if it's not.
523 	 */
524 	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);
525 
526 	/* Check chunk_size is a power of 2 */
527 	if (!is_power_of_2(chunk_size)) {
528 		*error = "Chunk size is not a power of 2";
529 		return -EINVAL;
530 	}
531 
532 	/* Validate the chunk size against the device block size */
533 	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
534 		*error = "Chunk size is not a multiple of device blocksize";
535 		return -EINVAL;
536 	}
537 
538 	s->chunk_size = chunk_size;
539 	s->chunk_mask = chunk_size - 1;
540 	s->chunk_shift = ffs(chunk_size) - 1;
541 
542 	return 0;
543 }
544 
545 /*
546  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
547  */
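/*
 * Example table line (hypothetical volume names):
 *
 *   0 2097152 snapshot /dev/vg0/base /dev/vg0/cow P 16
 *
 * i.e. a persistent snapshot of /dev/vg0/base backed by /dev/vg0/cow,
 * using 16-sector (8 KiB) chunks.
 */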
548 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
549 {
550 	struct dm_snapshot *s;
551 	int i;
552 	int r = -EINVAL;
553 	char persistent;
554 	char *origin_path;
555 	char *cow_path;
556 
557 	if (argc != 4) {
558 		ti->error = "requires exactly 4 arguments";
559 		r = -EINVAL;
560 		goto bad1;
561 	}
562 
563 	origin_path = argv[0];
564 	cow_path = argv[1];
565 	persistent = toupper(*argv[2]);
566 
567 	if (persistent != 'P' && persistent != 'N') {
568 		ti->error = "Persistent flag is not P or N";
569 		r = -EINVAL;
570 		goto bad1;
571 	}
572 
573 	s = kmalloc(sizeof(*s), GFP_KERNEL);
574 	if (s == NULL) {
575 		ti->error = "Cannot allocate snapshot context private "
576 		    "structure";
577 		r = -ENOMEM;
578 		goto bad1;
579 	}
580 
581 	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
582 	if (r) {
583 		ti->error = "Cannot get origin device";
584 		goto bad2;
585 	}
586 
587 	r = dm_get_device(ti, cow_path, 0, 0,
588 			  FMODE_READ | FMODE_WRITE, &s->cow);
589 	if (r) {
590 		dm_put_device(ti, s->origin);
591 		ti->error = "Cannot get COW device";
592 		goto bad2;
593 	}
594 
595 	r = set_chunk_size(s, argv[3], &ti->error);
596 	if (r)
597 		goto bad3;
598 
599 	s->type = persistent;
600 
601 	s->valid = 1;
602 	s->active = 0;
603 	init_rwsem(&s->lock);
604 	spin_lock_init(&s->pe_lock);
605 	s->ti = ti;
606 
607 	/* Allocate the completed and pending exception hash tables */
608 	if (init_hash_tables(s)) {
609 		ti->error = "Unable to allocate hash table space";
610 		r = -ENOMEM;
611 		goto bad3;
612 	}
613 
614 	s->store.snap = s;
615 
616 	if (persistent == 'P')
617 		r = dm_create_persistent(&s->store);
618 	else
619 		r = dm_create_transient(&s->store);
620 
621 	if (r) {
622 		ti->error = "Couldn't create exception store";
623 		r = -EINVAL;
624 		goto bad4;
625 	}
626 
627 	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
628 	if (r) {
629 		ti->error = "Could not create kcopyd client";
630 		goto bad5;
631 	}
632 
633 	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
634 	if (!s->pending_pool) {
635 		ti->error = "Could not allocate mempool for pending exceptions";
636 		goto bad6;
637 	}
638 
639 	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
640 							 tracked_chunk_cache);
641 	if (!s->tracked_chunk_pool) {
642 		ti->error = "Could not allocate tracked_chunk mempool for "
643 			    "tracking reads";
644 		goto bad_tracked_chunk_pool;
645 	}
646 
647 	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
648 		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
649 
650 	spin_lock_init(&s->tracked_chunk_lock);
651 
652 	/* Metadata must only be loaded into one table at once */
653 	r = s->store.read_metadata(&s->store);
654 	if (r < 0) {
655 		ti->error = "Failed to read snapshot metadata";
656 		goto bad_load_and_register;
657 	} else if (r > 0) {
658 		s->valid = 0;
659 		DMWARN("Snapshot is marked invalid.");
660 	}
661 
662 	bio_list_init(&s->queued_bios);
663 	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
664 
665 	/* Add snapshot to the list of snapshots for this origin */
666 	/* Exceptions aren't triggered till snapshot_resume() is called */
667 	if (register_snapshot(s)) {
668 		r = -EINVAL;
669 		ti->error = "Cannot register snapshot origin";
670 		goto bad_load_and_register;
671 	}
672 
673 	ti->private = s;
674 	ti->split_io = s->chunk_size;
675 
676 	return 0;
677 
678  bad_load_and_register:
679 	mempool_destroy(s->tracked_chunk_pool);
680 
681  bad_tracked_chunk_pool:
682 	mempool_destroy(s->pending_pool);
683 
684  bad6:
685 	dm_kcopyd_client_destroy(s->kcopyd_client);
686 
687  bad5:
688 	s->store.destroy(&s->store);
689 
690  bad4:
691 	exit_exception_table(&s->pending, pending_cache);
692 	exit_exception_table(&s->complete, exception_cache);
693 
694  bad3:
695 	dm_put_device(ti, s->cow);
696 	dm_put_device(ti, s->origin);
697 
698  bad2:
699 	kfree(s);
700 
701  bad1:
702 	return r;
703 }
704 
705 static void __free_exceptions(struct dm_snapshot *s)
706 {
707 	dm_kcopyd_client_destroy(s->kcopyd_client);
708 	s->kcopyd_client = NULL;
709 
710 	exit_exception_table(&s->pending, pending_cache);
711 	exit_exception_table(&s->complete, exception_cache);
712 
713 	s->store.destroy(&s->store);
714 }
715 
716 static void snapshot_dtr(struct dm_target *ti)
717 {
718 #ifdef CONFIG_DM_DEBUG
719 	int i;
720 #endif
721 	struct dm_snapshot *s = ti->private;
722 
723 	flush_workqueue(ksnapd);
724 
725 	/* Prevent further origin writes from using this snapshot. */
726 	/* After this returns there can be no new kcopyd jobs. */
727 	unregister_snapshot(s);
728 
729 #ifdef CONFIG_DM_DEBUG
730 	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
731 		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
732 #endif
733 
734 	mempool_destroy(s->tracked_chunk_pool);
735 
736 	__free_exceptions(s);
737 
738 	mempool_destroy(s->pending_pool);
739 
740 	dm_put_device(ti, s->origin);
741 	dm_put_device(ti, s->cow);
742 
743 	kfree(s);
744 }
745 
746 /*
747  * Flush a list of buffers.
748  */
749 static void flush_bios(struct bio *bio)
750 {
751 	struct bio *n;
752 
753 	while (bio) {
754 		n = bio->bi_next;
755 		bio->bi_next = NULL;
756 		generic_make_request(bio);
757 		bio = n;
758 	}
759 }
760 
761 static void flush_queued_bios(struct work_struct *work)
762 {
763 	struct dm_snapshot *s =
764 		container_of(work, struct dm_snapshot, queued_bios_work);
765 	struct bio *queued_bios;
766 	unsigned long flags;
767 
768 	spin_lock_irqsave(&s->pe_lock, flags);
769 	queued_bios = bio_list_get(&s->queued_bios);
770 	spin_unlock_irqrestore(&s->pe_lock, flags);
771 
772 	flush_bios(queued_bios);
773 }
774 
775 /*
776  * Error a list of buffers.
777  */
778 static void error_bios(struct bio *bio)
779 {
780 	struct bio *n;
781 
782 	while (bio) {
783 		n = bio->bi_next;
784 		bio->bi_next = NULL;
785 		bio_io_error(bio);
786 		bio = n;
787 	}
788 }
789 
790 static void __invalidate_snapshot(struct dm_snapshot *s, int err)
791 {
792 	if (!s->valid)
793 		return;
794 
795 	if (err == -EIO)
796 		DMERR("Invalidating snapshot: Error reading/writing.");
797 	else if (err == -ENOMEM)
798 		DMERR("Invalidating snapshot: Unable to allocate exception.");
799 
800 	if (s->store.drop_snapshot)
801 		s->store.drop_snapshot(&s->store);
802 
803 	s->valid = 0;
804 
805 	dm_table_event(s->ti->table);
806 }
807 
808 static void get_pending_exception(struct dm_snap_pending_exception *pe)
809 {
810 	atomic_inc(&pe->ref_count);
811 }
812 
813 static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
814 {
815 	struct dm_snap_pending_exception *primary_pe;
816 	struct bio *origin_bios = NULL;
817 
818 	primary_pe = pe->primary_pe;
819 
820 	/*
821 	 * If this pe is involved in a write to the origin and
822 	 * it is the last sibling to complete then release
823 	 * the bios for the original write to the origin.
824 	 */
825 	if (primary_pe &&
826 	    atomic_dec_and_test(&primary_pe->ref_count)) {
827 		origin_bios = bio_list_get(&primary_pe->origin_bios);
828 		free_pending_exception(primary_pe);
829 	}
830 
831 	/*
832 	 * Free the pe if it's not linked to an origin write or if
833 	 * it's not itself a primary pe.
834 	 */
835 	if (!primary_pe || primary_pe != pe)
836 		free_pending_exception(pe);
837 
838 	return origin_bios;
839 }
840 
841 static void pending_complete(struct dm_snap_pending_exception *pe, int success)
842 {
843 	struct dm_snap_exception *e;
844 	struct dm_snapshot *s = pe->snap;
845 	struct bio *origin_bios = NULL;
846 	struct bio *snapshot_bios = NULL;
847 	int error = 0;
848 
849 	if (!success) {
850 		/* Read/write error - snapshot is unusable */
851 		down_write(&s->lock);
852 		__invalidate_snapshot(s, -EIO);
853 		error = 1;
854 		goto out;
855 	}
856 
857 	e = alloc_exception();
858 	if (!e) {
859 		down_write(&s->lock);
860 		__invalidate_snapshot(s, -ENOMEM);
861 		error = 1;
862 		goto out;
863 	}
864 	*e = pe->e;
865 
866 	down_write(&s->lock);
867 	if (!s->valid) {
868 		free_exception(e);
869 		error = 1;
870 		goto out;
871 	}
872 
873 	/*
874 	 * Check for conflicting reads. This is extremely improbable,
875 	 * so yield() is sufficient and there is no need for a wait queue.
876 	 */
877 	while (__chunk_is_tracked(s, pe->e.old_chunk))
878 		yield();
879 
880 	/*
881 	 * Add a proper exception, and remove the
882 	 * in-flight exception from the list.
883 	 */
884 	insert_completed_exception(s, e);
885 
886  out:
887 	remove_exception(&pe->e);
888 	snapshot_bios = bio_list_get(&pe->snapshot_bios);
889 	origin_bios = put_pending_exception(pe);
890 
891 	up_write(&s->lock);
892 
893 	/* Submit any pending write bios */
894 	if (error)
895 		error_bios(snapshot_bios);
896 	else
897 		flush_bios(snapshot_bios);
898 
899 	flush_bios(origin_bios);
900 }
901 
902 static void commit_callback(void *context, int success)
903 {
904 	struct dm_snap_pending_exception *pe = context;
905 
906 	pending_complete(pe, success);
907 }
908 
909 /*
910  * Called when the copy I/O has finished.  kcopyd actually runs
911  * this code so don't block.
912  */
913 static void copy_callback(int read_err, unsigned long write_err, void *context)
914 {
915 	struct dm_snap_pending_exception *pe = context;
916 	struct dm_snapshot *s = pe->snap;
917 
918 	if (read_err || write_err)
919 		pending_complete(pe, 0);
920 
921 	else
922 		/* Update the metadata if we are persistent */
923 		s->store.commit_exception(&s->store, &pe->e, commit_callback,
924 					  pe);
925 }
926 
927 /*
928  * Dispatches the copy operation to kcopyd.
929  */
930 static void start_copy(struct dm_snap_pending_exception *pe)
931 {
932 	struct dm_snapshot *s = pe->snap;
933 	struct dm_io_region src, dest;
934 	struct block_device *bdev = s->origin->bdev;
935 	sector_t dev_size;
936 
937 	dev_size = get_dev_size(bdev);
938 
939 	src.bdev = bdev;
940 	src.sector = chunk_to_sector(s, pe->e.old_chunk);
941 	src.count = min(s->chunk_size, dev_size - src.sector);
942 
943 	dest.bdev = s->cow->bdev;
944 	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
945 	dest.count = src.count;
946 
947 	/* Hand over to kcopyd */
948 	dm_kcopyd_copy(s->kcopyd_client,
949 		    &src, 1, &dest, 0, copy_callback, pe);
950 }
951 
952 /*
953  * Looks to see if this snapshot already has a pending exception
954  * for this chunk, otherwise it allocates a new one and inserts
955  * it into the pending table.
956  *
957  * NOTE: a write lock must be held on snap->lock before calling
958  * this.
959  */
960 static struct dm_snap_pending_exception *
961 __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
962 {
963 	struct dm_snap_exception *e;
964 	struct dm_snap_pending_exception *pe;
965 	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
966 
967 	/*
968 	 * Is there a pending exception for this already ?
969 	 */
970 	e = lookup_exception(&s->pending, chunk);
971 	if (e) {
972 		/* cast the exception to a pending exception */
973 		pe = container_of(e, struct dm_snap_pending_exception, e);
974 		goto out;
975 	}
976 
977 	/*
978 	 * Create a new pending exception, we don't want
979 	 * to hold the lock while we do this.
980 	 */
981 	up_write(&s->lock);
982 	pe = alloc_pending_exception(s);
983 	down_write(&s->lock);
984 
985 	if (!s->valid) {
986 		free_pending_exception(pe);
987 		return NULL;
988 	}
989 
990 	e = lookup_exception(&s->pending, chunk);
991 	if (e) {
992 		free_pending_exception(pe);
993 		pe = container_of(e, struct dm_snap_pending_exception, e);
994 		goto out;
995 	}
996 
997 	pe->e.old_chunk = chunk;
998 	bio_list_init(&pe->origin_bios);
999 	bio_list_init(&pe->snapshot_bios);
1000 	pe->primary_pe = NULL;
1001 	atomic_set(&pe->ref_count, 0);
1002 	pe->started = 0;
1003 
1004 	if (s->store.prepare_exception(&s->store, &pe->e)) {
1005 		free_pending_exception(pe);
1006 		return NULL;
1007 	}
1008 
1009 	get_pending_exception(pe);
1010 	insert_exception(&s->pending, &pe->e);
1011 
1012  out:
1013 	return pe;
1014 }
1015 
1016 static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
1017 			    struct bio *bio, chunk_t chunk)
1018 {
1019 	bio->bi_bdev = s->cow->bdev;
1020 	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
1021 			 (chunk - e->old_chunk)) +
1022 			 (bio->bi_sector & s->chunk_mask);
1023 }
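/*
 * For example, with an 8-sector chunk size, a bio at sector 21
 * (chunk 2, offset 5) whose exception maps old_chunk 2 to new_chunk 5
 * is redirected to COW sector 5 * 8 + 5 == 45.
 */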
1024 
1025 static int snapshot_map(struct dm_target *ti, struct bio *bio,
1026 			union map_info *map_context)
1027 {
1028 	struct dm_snap_exception *e;
1029 	struct dm_snapshot *s = ti->private;
1030 	int r = DM_MAPIO_REMAPPED;
1031 	chunk_t chunk;
1032 	struct dm_snap_pending_exception *pe = NULL;
1033 
1034 	chunk = sector_to_chunk(s, bio->bi_sector);
1035 
1036 	/* Full snapshots are not usable */
1037 	/* To get here the table must be live so s->active is always set. */
1038 	if (!s->valid)
1039 		return -EIO;
1040 
1041 	/* FIXME: should only take write lock if we need
1042 	 * to copy an exception */
1043 	down_write(&s->lock);
1044 
1045 	if (!s->valid) {
1046 		r = -EIO;
1047 		goto out_unlock;
1048 	}
1049 
1050 	/* If the block is already remapped - use that, else remap it */
1051 	e = lookup_exception(&s->complete, chunk);
1052 	if (e) {
1053 		remap_exception(s, e, bio, chunk);
1054 		goto out_unlock;
1055 	}
1056 
1057 	/*
1058 	 * Write to snapshot - higher level takes care of RW/RO
1059 	 * flags so we should only get this if we are
1060 	 * writeable.
1061 	 */
1062 	if (bio_rw(bio) == WRITE) {
1063 		pe = __find_pending_exception(s, bio);
1064 		if (!pe) {
1065 			__invalidate_snapshot(s, -ENOMEM);
1066 			r = -EIO;
1067 			goto out_unlock;
1068 		}
1069 
1070 		remap_exception(s, &pe->e, bio, chunk);
1071 		bio_list_add(&pe->snapshot_bios, bio);
1072 
1073 		r = DM_MAPIO_SUBMITTED;
1074 
1075 		if (!pe->started) {
1076 			/* this is protected by snap->lock */
1077 			pe->started = 1;
1078 			up_write(&s->lock);
1079 			start_copy(pe);
1080 			goto out;
1081 		}
1082 	} else {
1083 		bio->bi_bdev = s->origin->bdev;
1084 		map_context->ptr = track_chunk(s, chunk);
1085 	}
1086 
1087  out_unlock:
1088 	up_write(&s->lock);
1089  out:
1090 	return r;
1091 }
1092 
1093 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1094 			   int error, union map_info *map_context)
1095 {
1096 	struct dm_snapshot *s = ti->private;
1097 	struct dm_snap_tracked_chunk *c = map_context->ptr;
1098 
1099 	if (c)
1100 		stop_tracking_chunk(s, c);
1101 
1102 	return 0;
1103 }
1104 
1105 static void snapshot_resume(struct dm_target *ti)
1106 {
1107 	struct dm_snapshot *s = ti->private;
1108 
1109 	down_write(&s->lock);
1110 	s->active = 1;
1111 	up_write(&s->lock);
1112 }
1113 
1114 static int snapshot_status(struct dm_target *ti, status_type_t type,
1115 			   char *result, unsigned int maxlen)
1116 {
1117 	struct dm_snapshot *snap = ti->private;
1118 
1119 	switch (type) {
1120 	case STATUSTYPE_INFO:
1121 		if (!snap->valid)
1122 			snprintf(result, maxlen, "Invalid");
1123 		else {
1124 			if (snap->store.fraction_full) {
1125 				sector_t numerator, denominator;
1126 				snap->store.fraction_full(&snap->store,
1127 							  &numerator,
1128 							  &denominator);
1129 				snprintf(result, maxlen, "%llu/%llu",
1130 					(unsigned long long)numerator,
1131 					(unsigned long long)denominator);
1132 			}
1133 			else
1134 				snprintf(result, maxlen, "Unknown");
1135 		}
1136 		break;
1137 
1138 	case STATUSTYPE_TABLE:
1139 		/*
1140 		 * Report the origin and COW device names, followed by
1141 		 * the persistence type ('P' or 'N') and the chunk size
1142 		 * in 512-byte sectors.
1143 		 */
1144 		snprintf(result, maxlen, "%s %s %c %llu",
1145 			 snap->origin->name, snap->cow->name,
1146 			 snap->type,
1147 			 (unsigned long long)snap->chunk_size);
1148 		break;
1149 	}
1150 
1151 	return 0;
1152 }
1153 
1154 /*-----------------------------------------------------------------
1155  * Origin methods
1156  *---------------------------------------------------------------*/
1157 static int __origin_write(struct list_head *snapshots, struct bio *bio)
1158 {
1159 	int r = DM_MAPIO_REMAPPED, first = 0;
1160 	struct dm_snapshot *snap;
1161 	struct dm_snap_exception *e;
1162 	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
1163 	chunk_t chunk;
1164 	LIST_HEAD(pe_queue);
1165 
1166 	/* Do all the snapshots on this origin */
1167 	list_for_each_entry (snap, snapshots, list) {
1168 
1169 		down_write(&snap->lock);
1170 
1171 		/* Only deal with valid and active snapshots */
1172 		if (!snap->valid || !snap->active)
1173 			goto next_snapshot;
1174 
1175 		/* Nothing to do if writing beyond end of snapshot */
1176 		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
1177 			goto next_snapshot;
1178 
1179 		/*
1180 		 * Remember, different snapshots can have
1181 		 * different chunk sizes.
1182 		 */
1183 		chunk = sector_to_chunk(snap, bio->bi_sector);
1184 
1185 		/*
1186 		 * Check exception table to see if block
1187 		 * is already remapped in this snapshot
1188 		 * and trigger an exception if not.
1189 		 *
1190 		 * ref_count is initialised to 1 so pending_complete()
1191 		 * won't destroy the primary_pe while we're inside this loop.
1192 		 */
1193 		e = lookup_exception(&snap->complete, chunk);
1194 		if (e)
1195 			goto next_snapshot;
1196 
1197 		pe = __find_pending_exception(snap, bio);
1198 		if (!pe) {
1199 			__invalidate_snapshot(snap, -ENOMEM);
1200 			goto next_snapshot;
1201 		}
1202 
1203 		if (!primary_pe) {
1204 			/*
1205 			 * Either every pe here has same
1206 			 * primary_pe or none has one yet.
1207 			 */
1208 			if (pe->primary_pe)
1209 				primary_pe = pe->primary_pe;
1210 			else {
1211 				primary_pe = pe;
1212 				first = 1;
1213 			}
1214 
1215 			bio_list_add(&primary_pe->origin_bios, bio);
1216 
1217 			r = DM_MAPIO_SUBMITTED;
1218 		}
1219 
1220 		if (!pe->primary_pe) {
1221 			pe->primary_pe = primary_pe;
1222 			get_pending_exception(primary_pe);
1223 		}
1224 
1225 		if (!pe->started) {
1226 			pe->started = 1;
1227 			list_add_tail(&pe->list, &pe_queue);
1228 		}
1229 
1230  next_snapshot:
1231 		up_write(&snap->lock);
1232 	}
1233 
1234 	if (!primary_pe)
1235 		return r;
1236 
1237 	/*
1238 	 * If this is the first time we're processing this chunk and
1239 	 * ref_count is now 1 it means all the pending exceptions
1240 	 * got completed while we were in the loop above, so it falls to
1241 	 * us here to remove the primary_pe and submit any origin_bios.
1242 	 */
1243 
1244 	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
1245 		flush_bios(bio_list_get(&primary_pe->origin_bios));
1246 		free_pending_exception(primary_pe);
1247 		/* If we got here, pe_queue is necessarily empty. */
1248 		return r;
1249 	}
1250 
1251 	/*
1252 	 * Now that we have a complete pe list we can start the copying.
1253 	 */
1254 	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
1255 		start_copy(pe);
1256 
1257 	return r;
1258 }
1259 
1260 /*
1261  * Called on a write from the origin driver.
1262  */
1263 static int do_origin(struct dm_dev *origin, struct bio *bio)
1264 {
1265 	struct origin *o;
1266 	int r = DM_MAPIO_REMAPPED;
1267 
1268 	down_read(&_origins_lock);
1269 	o = __lookup_origin(origin->bdev);
1270 	if (o)
1271 		r = __origin_write(&o->snapshots, bio);
1272 	up_read(&_origins_lock);
1273 
1274 	return r;
1275 }
1276 
1277 /*
1278  * Origin: maps a linear range of a device, with hooks for snapshotting.
1279  */
1280 
1281 /*
1282  * Construct an origin mapping: <dev_path>
1283  * The context for an origin is merely a 'struct dm_dev *'
1284  * pointing to the real device.
1285  */
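/*
 * Example table line (hypothetical volume name):
 *
 *   0 2097152 snapshot-origin /dev/vg0/base
 */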
1286 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1287 {
1288 	int r;
1289 	struct dm_dev *dev;
1290 
1291 	if (argc != 1) {
1292 		ti->error = "origin: incorrect number of arguments";
1293 		return -EINVAL;
1294 	}
1295 
1296 	r = dm_get_device(ti, argv[0], 0, ti->len,
1297 			  dm_table_get_mode(ti->table), &dev);
1298 	if (r) {
1299 		ti->error = "Cannot get target device";
1300 		return r;
1301 	}
1302 
1303 	ti->private = dev;
1304 	return 0;
1305 }
1306 
1307 static void origin_dtr(struct dm_target *ti)
1308 {
1309 	struct dm_dev *dev = ti->private;
1310 	dm_put_device(ti, dev);
1311 }
1312 
1313 static int origin_map(struct dm_target *ti, struct bio *bio,
1314 		      union map_info *map_context)
1315 {
1316 	struct dm_dev *dev = ti->private;
1317 	bio->bi_bdev = dev->bdev;
1318 
1319 	/* Only tell snapshots if this is a write */
1320 	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
1321 }
1322 
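/*
 * Minimum of two chunk sizes, treating 0 as "no limit yet".
 */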
1323 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
1324 
1325 /*
1326  * Set the target "split_io" field to the minimum of all the snapshots'
1327  * chunk sizes.
1328  */
1329 static void origin_resume(struct dm_target *ti)
1330 {
1331 	struct dm_dev *dev = ti->private;
1332 	struct dm_snapshot *snap;
1333 	struct origin *o;
1334 	chunk_t chunk_size = 0;
1335 
1336 	down_read(&_origins_lock);
1337 	o = __lookup_origin(dev->bdev);
1338 	if (o)
1339 		list_for_each_entry (snap, &o->snapshots, list)
1340 			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
1341 	up_read(&_origins_lock);
1342 
1343 	ti->split_io = chunk_size;
1344 }
1345 
1346 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
1347 			 unsigned int maxlen)
1348 {
1349 	struct dm_dev *dev = ti->private;
1350 
1351 	switch (type) {
1352 	case STATUSTYPE_INFO:
1353 		result[0] = '\0';
1354 		break;
1355 
1356 	case STATUSTYPE_TABLE:
1357 		snprintf(result, maxlen, "%s", dev->name);
1358 		break;
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 static struct target_type origin_target = {
1365 	.name    = "snapshot-origin",
1366 	.version = {1, 6, 0},
1367 	.module  = THIS_MODULE,
1368 	.ctr     = origin_ctr,
1369 	.dtr     = origin_dtr,
1370 	.map     = origin_map,
1371 	.resume  = origin_resume,
1372 	.status  = origin_status,
1373 };
1374 
1375 static struct target_type snapshot_target = {
1376 	.name    = "snapshot",
1377 	.version = {1, 6, 0},
1378 	.module  = THIS_MODULE,
1379 	.ctr     = snapshot_ctr,
1380 	.dtr     = snapshot_dtr,
1381 	.map     = snapshot_map,
1382 	.end_io  = snapshot_end_io,
1383 	.resume  = snapshot_resume,
1384 	.status  = snapshot_status,
1385 };
1386 
1387 static int __init dm_snapshot_init(void)
1388 {
1389 	int r;
1390 
1391 	r = dm_register_target(&snapshot_target);
1392 	if (r) {
1393 		DMERR("snapshot target register failed %d", r);
1394 		return r;
1395 	}
1396 
1397 	r = dm_register_target(&origin_target);
1398 	if (r < 0) {
1399 		DMERR("Origin target register failed %d", r);
1400 		goto bad1;
1401 	}
1402 
1403 	r = init_origin_hash();
1404 	if (r) {
1405 		DMERR("init_origin_hash failed.");
1406 		goto bad2;
1407 	}
1408 
1409 	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
1410 	if (!exception_cache) {
1411 		DMERR("Couldn't create exception cache.");
1412 		r = -ENOMEM;
1413 		goto bad3;
1414 	}
1415 
1416 	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
1417 	if (!pending_cache) {
1418 		DMERR("Couldn't create pending cache.");
1419 		r = -ENOMEM;
1420 		goto bad4;
1421 	}
1422 
1423 	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
1424 	if (!tracked_chunk_cache) {
1425 		DMERR("Couldn't create cache to track chunks in use.");
1426 		r = -ENOMEM;
1427 		goto bad5;
1428 	}
1429 
1430 	ksnapd = create_singlethread_workqueue("ksnapd");
1431 	if (!ksnapd) {
1432 		DMERR("Failed to create ksnapd workqueue.");
1433 		r = -ENOMEM;
1434 		goto bad_pending_pool;
1435 	}
1436 
1437 	return 0;
1438 
1439       bad_pending_pool:
1440 	kmem_cache_destroy(tracked_chunk_cache);
1441       bad5:
1442 	kmem_cache_destroy(pending_cache);
1443       bad4:
1444 	kmem_cache_destroy(exception_cache);
1445       bad3:
1446 	exit_origin_hash();
1447       bad2:
1448 	dm_unregister_target(&origin_target);
1449       bad1:
1450 	dm_unregister_target(&snapshot_target);
1451 	return r;
1452 }
1453 
1454 static void __exit dm_snapshot_exit(void)
1455 {
1456 	int r;
1457 
1458 	destroy_workqueue(ksnapd);
1459 
1460 	r = dm_unregister_target(&snapshot_target);
1461 	if (r)
1462 		DMERR("snapshot unregister failed %d", r);
1463 
1464 	r = dm_unregister_target(&origin_target);
1465 	if (r)
1466 		DMERR("origin unregister failed %d", r);
1467 
1468 	exit_origin_hash();
1469 	kmem_cache_destroy(pending_cache);
1470 	kmem_cache_destroy(exception_cache);
1471 	kmem_cache_destroy(tracked_chunk_cache);
1472 }
1473 
1474 /* Module hooks */
1475 module_init(dm_snapshot_init);
1476 module_exit(dm_snapshot_exit);
1477 
1478 MODULE_DESCRIPTION(DM_NAME " snapshot target");
1479 MODULE_AUTHOR("Joe Thornber");
1480 MODULE_LICENSE("GPL");
1481