/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U	/* 16KB */

#define DM_PREFETCH_CHUNKS		12

/*-----------------------------------------------------------------
 * Persistent snapshots; by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk-size blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with disk versions different from the kernel's will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
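
/*
 * A worked example, derived from the code below: with the default
 * 32-sector (16KiB) chunk size, each metadata area holds
 * 16384 / sizeof(struct disk_exception) = 1024 exceptions, giving
 * nominally:
 *
 *	chunk 0		header
 *	chunk 1		metadata area 0
 *	chunks 2-1025	exception data described by area 0
 *	chunk 1026	metadata area 1
 *	...
 */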

/*
 * Magic for persistent snapshots: "SnAp" - feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53
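/*
 * (0x70416e53 is "SnAp" stored little-endian: the bytes 0x53 'S',
 * 0x6e 'n', 0x41 'A', 0x70 'p' read as a 32-bit value.)
 */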

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;
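
/*
 * Each on-disk exception is a fixed 16-byte record mapping a chunk of
 * the origin (old_chunk) to the COW chunk holding its copy (new_chunk).
 */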

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header. The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;
	vfree(ps->zero_area);
	ps->zero_area = NULL;
	vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk-aligned and chunk-sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_opf = opf,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid submit_bio_noacct recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);
	destroy_work_on_stack(&req.work);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}
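
/*
 * For example, with 1024 exceptions per area the stride between
 * metadata chunks is 1025, so area 0 lives in chunk 1 and area 1 in
 * chunk 1026 (cf. the layout comment at the top of this file).
 */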

static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;

	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}
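
/*
 * E.g. with a stride of 1025, chunks 1, 1026, 2051, ... hold metadata;
 * when next_free modulo the stride equals NUM_SNAPSHOT_HDR_CHUNKS it
 * would land on a metadata chunk, so it is bumped past it.
 */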

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, blk_opf_t opf)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, opf, 0);
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area),
			REQ_OP_WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned int chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
					    uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, ps_area, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * completely filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, ps_area, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the header chunk is, we
		 * know that we've hit the end of the exceptions.
		 * Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;
	struct dm_bufio_client *client;
	chunk_t prefetch_area = 0;

	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
					ps->store->chunk_size << SECTOR_SHIFT,
					1, 0, NULL, NULL, 0);

	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * Set up one current buffer + desired readahead buffers.
	 */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		struct dm_buffer *bp;
		void *area;
		chunk_t chunk;

		if (unlikely(prefetch_area < ps->current_area))
			prefetch_area = ps->current_area;

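		/*
		 * Prefetch the metadata chunks for up to DM_PREFETCH_CHUNKS
		 * areas ahead of the one about to be read, stopping early at
		 * the end of the device or if prefetch_area wraps to zero.
		 */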
		if (DM_PREFETCH_CHUNKS) do {
			chunk_t pf_chunk = area_location(ps, prefetch_area);

			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
				break;
			dm_bufio_prefetch(client, pf_chunk, 1);
			prefetch_area++;
			if (unlikely(!prefetch_area))
				break;
		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);

		chunk = area_location(ps, ps->current_area);

		area = dm_bufio_read(client, chunk, &bp);
		if (IS_ERR(area)) {
			r = PTR_ERR(area);
			goto ret_destroy_bufio;
		}

		r = insert_exceptions(ps, area, callback, callback_context,
				      &full);

		if (!full)
			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

		dm_bufio_release(bp);

		dm_bufio_forget(client, chunk);

		if (unlikely(r))
			goto ret_destroy_bufio;
	}

	ps->current_area--;

	skip_metadata(ps);

	r = 0;

ret_destroy_bufio:
	dm_bufio_client_destroy(client);

	return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
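	/*
	 * For example, with 32-sector chunks and current_area == 2 this
	 * comes to (2 + 1 + 1) * 32 = 128 sectors of metadata.
	 */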
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	kvfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, new_snapshot;
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now that we know the correct chunk_size, complete the
	 * initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
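	/* e.g. 16KiB chunks give 16384 / 16 = 1024 exceptions per area */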
	ps->callbacks = kvcalloc(ps->exceptions_per_area,
				 sizeof(*ps->callbacks), GFP_KERNEL);
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * The metadata is valid, but the snapshot has been invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free chunk, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e, int valid,
					void (*callback)(void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	if (!valid)
		ps->valid = 0;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area, there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
				 REQ_SYNC))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When the current area is empty, move back to the preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, REQ_OP_READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find the number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->area,
			       ps->current_committed - 1 - nr_consecutive, &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store, char *options)
{
	struct pstore *ps;
	int r;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		DMERR("couldn't start header metadata update thread");
		r = -ENOMEM;
		goto err_workqueue;
	}

	if (options) {
		char overflow = toupper(options[0]);

		if (overflow == 'O') {
			store->userspace_supports_overflow = true;
		} else {
			DMERR("Unsupported persistent store option: %s", options);
			r = -EINVAL;
			goto err_options;
		}
	}

	store->context = ps;

	return 0;

err_options:
	destroy_workqueue(ps->metadata_wq);
err_workqueue:
	kfree(ps);

	return r;
}

static unsigned int persistent_status(struct dm_exception_store *store,
				      status_type_t status, char *result,
				      unsigned int maxlen)
{
	unsigned int sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
		       (unsigned long long)store->chunk_size);
		break;
	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return sz;
}
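
/*
 * For reference, a snapshot target selects this store with a "P" (or
 * "PO") persistence argument in its table line, e.g. something like:
 *
 *	dmsetup create snap --table \
 *	    "0 2097152 snapshot /dev/vg/origin /dev/vg/cow PO 32"
 *
 * which would create a persistent snapshot with 32-sector chunks and
 * userspace_supports_overflow set (device names are illustrative).
 */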

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}