/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/config.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm-snap.h"
#include "dm-bio-list.h"
#include "kcopyd.h"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Each snapshot reserves this many pages for io
 */
#define SNAPSHOT_PAGES 256

struct pending_exception {
	struct exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Other pending_exceptions that are processing this
	 * chunk.  When this list is empty, we know we can
	 * complete the origins.
	 */
	struct list_head siblings;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Slab caches for the exception structures, and a mempool backing
 * the allocation of in-flight (pending) exceptions.
 */
static kmem_cache_t *exception_cache;
static kmem_cache_t *pending_cache;
static mempool_t *pending_pool;

/*
 * One of these per registered origin, held in the origin hash table.
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it.  If we make the table the size of the
 * minors list then it should be nearly perfect.
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate origin hash table");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static inline unsigned int origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}
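
/*
 * Example: with ORIGIN_MASK of 0xFF, any two origin devices whose
 * dev_t numbers agree in the low eight bits hash to the same bucket;
 * __lookup_origin() then tells them apart with the bdev_equal() walk
 * of that bucket's list.
 */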

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o;
	struct block_device *bdev = snap->origin->bdev;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (!o) {
		/* New origin */
		o = kmalloc(sizeof(*o), GFP_KERNEL);
		if (!o) {
			up_write(&_origins_lock);
			return -ENOMEM;
		}

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 */
static int init_exception_table(struct exception_table *et, uint32_t size)
{
	unsigned int i;

	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
{
	struct list_head *slot;
	struct exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return chunk & et->hash_mask;
}

static void insert_exception(struct exception_table *eh, struct exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static inline void remove_exception(struct exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct exception *lookup_exception(struct exception_table *et,
					  chunk_t chunk)
{
	struct list_head *slot;
	struct exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (e->old_chunk == chunk)
			return e;

	return NULL;
}

static inline struct exception *alloc_exception(void)
{
	struct exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static inline void free_exception(struct exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static inline struct pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}

static inline void free_pending_exception(struct pending_exception *pe)
{
	mempool_free(pe, pending_pool);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;
	e->new_chunk = new;
	insert_exception(&s->complete, e);
	return 0;
}
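
/*
 * Example: once chunk 5 of the origin has been copied out to chunk 9
 * of the COW device, the complete table (s->complete) holds an
 * exception with old_chunk == 5 and new_chunk == 9, and
 * lookup_exception(&s->complete, 5) returns it so I/O to that chunk
 * can be remapped.
 */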

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Rounds a number down to a power of 2.
 */
static inline uint32_t round_down(uint32_t n)
{
	while (n & (n - 1))
		n &= (n - 1);
	return n;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	/* Round it down to a power of 2 */
	hash_size = round_down(hash_size);
	if (init_exception_table(&s->complete, hash_size))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static inline ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

static void read_snapshot_metadata(struct dm_snapshot *s)
{
	if (s->store.read_metadata(&s->store)) {
		down_write(&s->lock);
		s->valid = 0;
		up_write(&s->lock);
	}
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	unsigned long chunk_size;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;
	char *value;
	int blocksize;

	if (argc != 4) {
		ti->error = "dm-snapshot: requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	chunk_size = simple_strtoul(argv[3], &value, 10);
	if (chunk_size == 0 || *value != '\0') {
		ti->error = "Invalid chunk size";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}
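
	/*
	 * Note: chunk_size, like the other block-layer sizes here, is
	 * counted in 512-byte sectors, so PAGE_SIZE >> 9 below is the
	 * page size expressed in sectors (8 on a 4K-page machine).
	 */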
	/*
	 * Chunk size must be a multiple of the page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Validate the chunk size against the device block size */
	blocksize = s->cow->bdev->bd_disk->queue->hardsect_size;
	if (chunk_size % (blocksize >> 9)) {
		ti->error = "Chunk size is not a multiple of device blocksize";
		r = -EINVAL;
		goto bad3;
	}

	/* Check chunk_size is a power of 2 */
	if (chunk_size & (chunk_size - 1)) {
		ti->error = "Chunk size is not a power of 2";
		r = -EINVAL;
		goto bad3;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->type = persistent;
	s->chunk_shift = ffs(chunk_size) - 1;

	s->valid = 1;
	s->active = 0;
	s->last_percent = 0;
	init_rwsem(&s->lock);
	s->table = ti->table;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	/*
	 * Create the exception store; its type depends on the
	 * persistent flag.
	 */
	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store, chunk_size);
	else
		r = dm_create_transient(&s->store, s, blocksize);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	/* Metadata must only be loaded into one table at once */
	read_snapshot_metadata(s);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad6;
	}

	ti->private = s;
	ti->split_io = chunk_size;

	return 0;

 bad6:
	kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}
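
/*
 * Illustrative usage (device names are hypothetical): a snapshot of
 * /dev/vg/base with persistent on-disk exceptions and 16-sector (8K)
 * chunks would be created with a table line such as
 *
 *   0 <origin size in sectors> snapshot /dev/vg/base /dev/vg/cow P 16
 *
 * passed to "dmsetup create".
 */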

static void snapshot_dtr(struct dm_target *ti)
{
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;

	unregister_snapshot(s);

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	/* Deallocate memory used */
	s->store.destroy(&s->store);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);
	kcopyd_client_destroy(s->kcopyd_client);
	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio, bio->bi_size);
		bio = n;
	}
}

static struct bio *__flush_bios(struct pending_exception *pe)
{
	struct pending_exception *sibling;

	if (list_empty(&pe->siblings))
		return bio_list_get(&pe->origin_bios);

	sibling = list_entry(pe->siblings.next,
			     struct pending_exception, siblings);

	list_del(&pe->siblings);

	/* This is fine as long as kcopyd is single-threaded.  If kcopyd
	 * becomes multi-threaded, we'll need some locking here.
	 */
	bio_list_merge(&sibling->origin_bios, &pe->origin_bios);

	return NULL;
}

static void pending_complete(struct pending_exception *pe, int success)
{
	struct exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *flush = NULL;

	if (success) {
		e = alloc_exception();
		if (!e) {
			DMWARN("Unable to allocate exception.");
			down_write(&s->lock);
			s->store.drop_snapshot(&s->store);
			s->valid = 0;
			/*
			 * Take the in-flight exception off the pending
			 * table before it is freed below.
			 */
			remove_exception(&pe->e);
			flush = __flush_bios(pe);
			up_write(&s->lock);

			error_bios(bio_list_get(&pe->snapshot_bios));
			goto out;
		}
		*e = pe->e;

		/*
		 * Add a proper exception, and remove the
		 * in-flight exception from the list.
		 */
		down_write(&s->lock);
		insert_exception(&s->complete, e);
		remove_exception(&pe->e);
		flush = __flush_bios(pe);

		/* Submit any pending write bios */
		up_write(&s->lock);

		flush_bios(bio_list_get(&pe->snapshot_bios));
	} else {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		if (s->valid)
			DMERR("Error reading/writing snapshot");
		s->store.drop_snapshot(&s->store);
		s->valid = 0;
		remove_exception(&pe->e);
		flush = __flush_bios(pe);
		up_write(&s->lock);

		error_bios(bio_list_get(&pe->snapshot_bios));

		dm_table_event(s->table);
	}

 out:
	free_pending_exception(pe);

	if (flush)
		flush_bios(flush);
}

static void commit_callback(void *context, int success)
{
	struct pending_exception *pe = (struct pending_exception *) context;
	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
	struct pending_exception *pe = (struct pending_exception *) context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}
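
/*
 * Lifecycle sketch: a write that needs copy-on-write creates a
 * pending_exception, start_copy() hands the chunk to kcopyd,
 * copy_callback() then commits the exception to the store, and
 * pending_complete() finally installs the completed exception and
 * releases any bios queued against it.
 */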

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	kcopyd_copy(s->kcopyd_client,
		    &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct exception *e;
	struct pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct pending_exception, e);

	} else {
		/*
		 * Create a new pending exception, we don't want
		 * to hold the lock while we do this.
		 */
		up_write(&s->lock);
		pe = alloc_pending_exception();
		down_write(&s->lock);

		e = lookup_exception(&s->pending, chunk);
		if (e) {
			free_pending_exception(pe);
			pe = container_of(e, struct pending_exception, e);
		} else {
			pe->e.old_chunk = chunk;
			bio_list_init(&pe->origin_bios);
			bio_list_init(&pe->snapshot_bios);
			INIT_LIST_HEAD(&pe->siblings);
			pe->snap = s;
			pe->started = 0;

			if (s->store.prepare_exception(&s->store, &pe->e)) {
				free_pending_exception(pe);
				s->valid = 0;
				return NULL;
			}

			insert_exception(&s->pending, &pe->e);
		}
	}

	return pe;
}

static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
				   struct bio *bio)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
	    (bio->bi_sector & s->chunk_mask);
}
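
/*
 * Worked example: with a 16-sector chunk (chunk_mask == 15), a bio
 * aimed at origin sector 37 lies in chunk 2 at offset 5; if that
 * chunk's exception has new_chunk == 7, remap_exception() points the
 * bio at sector 7 * 16 + 5 == 117 of the COW device.
 */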

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct exception *e;
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
	int r = 1;
	chunk_t chunk;
	struct pending_exception *pe;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	if (!s->valid)
		return -EIO;

	if (unlikely(bio_barrier(bio)))
		return -EOPNOTSUPP;

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {

		/* FIXME: should only take write lock if we need
		 * to copy an exception */
		down_write(&s->lock);

		/* If the block is already remapped - use that, else remap it */
		e = lookup_exception(&s->complete, chunk);
		if (e) {
			remap_exception(s, e, bio);
			up_write(&s->lock);

		} else {
			pe = __find_pending_exception(s, bio);

			if (!pe) {
				if (s->store.drop_snapshot)
					s->store.drop_snapshot(&s->store);
				s->valid = 0;
				r = -EIO;
				up_write(&s->lock);
			} else {
				remap_exception(s, &pe->e, bio);
				bio_list_add(&pe->snapshot_bios, bio);

				if (!pe->started) {
					/* this is protected by snap->lock */
					pe->started = 1;
					up_write(&s->lock);
					start_copy(pe);
				} else
					up_write(&s->lock);
				r = 0;
			}
		}

	} else {
		/*
		 * FIXME: this read path scares me because we
		 * always use the origin when we have a pending
		 * exception.  However I can't think of a
		 * situation where this is wrong - ejt.
		 */

		/* Do reads */
		down_read(&s->lock);

		/* See if it has been remapped */
		e = lookup_exception(&s->complete, chunk);
		if (e)
			remap_exception(s, e, bio);
		else
			bio->bi_bdev = s->origin->bdev;

		up_read(&s->lock);
	}

	return r;
}
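
/*
 * Note on return values (dm map convention of this kernel era, as
 * understood here): a positive return asks the dm core to resubmit
 * the remapped bio, zero means the target has queued the bio itself
 * (the pending-exception path above), and a negative errno fails the
 * bio.
 */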

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen,
					 SECTOR_FORMAT "/" SECTOR_FORMAT,
					 numerator, denominator);
			} else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * Report the parameters the snapshot was created
		 * with: device names, the persistent flag and the
		 * chunk size.
		 */
		snprintf(result, maxlen, "%s %s %c " SECTOR_FORMAT,
			 snap->origin->name, snap->cow->name,
			 snap->type, snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static void list_merge(struct list_head *l1, struct list_head *l2)
{
	struct list_head *l1_n, *l2_p;

	l1_n = l1->next;
	l2_p = l2->prev;

	l1->next = l2;
	l2->prev = l1;

	l2_p->next = l1_n;
	l1_n->prev = l2_p;
}

static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = 1, first = 1;
	struct dm_snapshot *snap;
	struct exception *e;
	struct pending_exception *pe, *last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			continue;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->table))
			continue;

		down_write(&snap->lock);

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (!e) {
			pe = __find_pending_exception(snap, bio);
			if (!pe) {
				snap->store.drop_snapshot(&snap->store);
				snap->valid = 0;

			} else {
				if (last)
					list_merge(&pe->siblings,
						   &last->siblings);

				last = pe;
				r = 0;
			}
		}

		up_write(&snap->lock);
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	if (last) {
		pe = last;
		do {
			down_write(&pe->snap->lock);
			if (first)
				bio_list_add(&pe->origin_bios, bio);
			if (!pe->started) {
				pe->started = 1;
				up_write(&pe->snap->lock);
				start_copy(pe);
			} else
				up_write(&pe->snap->lock);
			first = 0;
			pe = list_entry(pe->siblings.next,
					struct pending_exception, siblings);

		} while (pe != last);
	}

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = 1;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}
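
/*
 * Note on the siblings ring: one origin write can trigger a copy in
 * several snapshots at once.  __origin_write() links all of those
 * pending_exceptions together so that __flush_bios() releases the
 * origin bio only after the last copy in the ring has completed.
 */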

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "dm-origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_barrier(bio)))
		return -EOPNOTSUPP;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : 1;
}

#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}
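
/*
 * Example: with two active snapshots using 16- and 64-sector chunks,
 * min_not_zero() leaves split_io at 16, so the dm core never hands
 * this target a bio that straddles a 16-sector boundary; since the
 * chunk sizes are powers of 2, every such bio also falls entirely
 * within one chunk of each snapshot.
 */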

static int origin_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name = "snapshot-origin",
	.version = {1, 1, 0},
	.module = THIS_MODULE,
	.ctr = origin_ctr,
	.dtr = origin_dtr,
	.map = origin_map,
	.resume = origin_resume,
	.status = origin_status,
};

static struct target_type snapshot_target = {
	.name = "snapshot",
	.version = {1, 1, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_map,
	.resume = snapshot_resume,
	.status = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r) {
		DMERR("origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = kmem_cache_create("dm-snapshot-ex",
					    sizeof(struct exception),
					    __alignof__(struct exception),
					    0, NULL, NULL);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache =
	    kmem_cache_create("dm-snapshot-in",
			      sizeof(struct pending_exception),
			      __alignof__(struct pending_exception),
			      0, NULL, NULL);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	pending_pool = mempool_create(128, mempool_alloc_slab,
				      mempool_free_slab, pending_cache);
	if (!pending_pool) {
		DMERR("Couldn't create pending pool.");
		r = -ENOMEM;
		goto bad5;
	}

	return 0;

 bad5:
	kmem_cache_destroy(pending_cache);
 bad4:
	kmem_cache_destroy(exception_cache);
 bad3:
	exit_origin_hash();
 bad2:
	dm_unregister_target(&origin_target);
 bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	mempool_destroy(pending_pool);
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");