/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include "dm-bufio.h"

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

#define DM_PREFETCH_CHUNKS		12

/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */

/*
 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};
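/*
 * Illustrative COW-device layout, assuming the default 32-sector
 * (16KiB) chunk size and the 16-byte struct disk_exception above,
 * giving exceptions_per_area = 16384 / 16 = 1024:
 *
 *   chunk 0            header
 *   chunk 1            metadata area 0
 *   chunks 2..1025     data chunks described by area 0
 *   chunk 1026         metadata area 1
 *   chunks 1027..2050  data chunks described by area 1
 *   ...
 */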
static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	/* vfree() is a no-op on NULL, so no need to check first. */
	vfree(ps->area);
	ps->area = NULL;

	vfree(ps->zero_area);
	ps->zero_area = NULL;

	vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);
	destroy_work_on_stack(&req.work);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;

	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}
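/*
 * Worked example for the two helpers above, assuming the default
 * 16KiB chunk size (exceptions_per_area = 1024, stride = 1025):
 *
 *   area_location(ps, 0) = 1 + 1025 * 0 = 1
 *   area_location(ps, 2) = 1 + 1025 * 2 = 2051
 *
 * Metadata chunks therefore sit at every index whose remainder
 * modulo 1025 is 1 (chunks 1, 1026, 2051, ...); skip_metadata()
 * bumps next_free past such a chunk so it is never handed out
 * as a data chunk.
 */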
/*
 * Read or write a metadata area, remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, rw, 0);
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied.
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}
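/*
 * Byte-level example of what write_header() puts on disk: with the
 * little-endian __le32 encoding, SNAP_MAGIC 0x70416e53 is stored as
 * the bytes 0x53 0x6e 0x41 0x70, which read back as the ASCII string
 * "SnAp" - hence the magic value chosen above.
 */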
/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
					    uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, ps_area, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, ps_area, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}
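/*
 * Worked example for insert_exceptions() above, assuming
 * exceptions_per_area = 1024: a snapshot holding 2500 exceptions
 * fills areas 0 and 1 completely (*full = 1 for both), while area 2
 * holds 2500 - 2048 = 452 entries.  Slot 452 of area 2 still carries
 * the new_chunk == 0 terminator, so the scan stops there with
 * current_committed = 452 and *full = 0.
 */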
static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;
	struct dm_bufio_client *client;
	chunk_t prefetch_area = 0;

	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
					ps->store->chunk_size << SECTOR_SHIFT,
					1, 0, NULL, NULL);

	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * Setup for one current buffer + desired readahead buffers.
	 */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		struct dm_buffer *bp;
		void *area;
		chunk_t chunk;

		if (unlikely(prefetch_area < ps->current_area))
			prefetch_area = ps->current_area;

		if (DM_PREFETCH_CHUNKS) do {
			chunk_t pf_chunk = area_location(ps, prefetch_area);

			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
				break;
			dm_bufio_prefetch(client, pf_chunk, 1);
			prefetch_area++;
			if (unlikely(!prefetch_area))
				break;
		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);

		chunk = area_location(ps, ps->current_area);

		area = dm_bufio_read(client, chunk, &bp);
		if (unlikely(IS_ERR(area))) {
			r = PTR_ERR(area);
			goto ret_destroy_bufio;
		}

		r = insert_exceptions(ps, area, callback, callback_context,
				      &full);

		if (!full)
			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

		dm_bufio_release(bp);

		dm_bufio_forget(client, chunk);

		if (unlikely(r))
			goto ret_destroy_bufio;
	}

	ps->current_area--;

	skip_metadata(ps);

	r = 0;

ret_destroy_bufio:
	dm_bufio_client_destroy(client);

	return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}
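/*
 * Worked example for persistent_usage() above, assuming
 * chunk_size = 32 sectors and current_area = 2:
 *
 *   metadata_sectors = (2 + 1 + 1) * 32 = 128 sectors
 *
 * i.e. the header chunk plus the three metadata areas touched so far.
 */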
static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * The metadata is valid, but the snapshot itself has been
	 * invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free chunk, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
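/*
 * Worked example for the merge path below: if the last three committed
 * exceptions in the current area map old chunks 10, 11, 12 to new
 * chunks 5, 6, 7 respectively, persistent_prepare_merge() reports the
 * pair (12, 7) and returns nr_consecutive = 3, letting the merge code
 * copy all three chunks back to the origin in one go.
 */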
static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When current area is empty, move back to preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->area,
			       ps->current_committed - 1 - nr_consecutive,
			       &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, WRITE_FLUSH_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
	}

	return sz;
}
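/*
 * The " P <chunk_size>" emitted above is the same token pair that a
 * snapshot table line feeds back into persistent_ctr().  A sketch of
 * a typical invocation (device names and sizes are illustrative only):
 *
 *   dmsetup create snap --table \
 *       "0 2097152 snapshot /dev/vg/origin /dev/vg/cow P 32"
 *
 * where "P" selects this persistent store and 32 is the chunk size
 * in 512-byte sectors.
 */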
static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}
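/*
 * Note: these init/exit hooks are not a module_init()/module_exit()
 * pair of their own.  In the surrounding tree they are expected to be
 * called from the exception-store setup in dm-exception-store.c
 * (dm_exception_store_init()/dm_exception_store_exit()), alongside the
 * transient store's equivalents, when the dm-snapshot module loads.
 */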