/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct mutex lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	/* Protected by "lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct list_head out_of_order_list;

	mempool_t pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *   => don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *   => use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *   => don't load the target; abort.
	 *   (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *   => stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE		0
#define SHUTDOWN_MERGE		1

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was copying error. */
	int copy_error;

	/* A sequence number, it is used for in-order completion. */
	sector_t exception_sequence;

	struct list_head out_of_order_entry;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}
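/*
 * Example: the tracking structure above lives in the per-bio data area
 * that snapshot_ctr() reserves via ti->per_io_data_size, so tracking a
 * read costs no extra allocation.  DM_TRACKED_CHUNK_HASH() simply masks
 * the low bits of the chunk number, e.g. chunk 0x12345 lands in bucket
 * 0x5 of tracked_chunk_hash[].
 */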
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned split_boundary;
	struct list_head hash_list;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory for _origins");
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	_dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			      GFP_KERNEL);
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
	kfree(_dm_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];

	list_add_tail(&o->hash_list, sl);
}

static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}

static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];

	list_add_tail(&o->hash_list, sl);
}

static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}
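/*
 * Example: origin_hash() keys on the low byte of bd_dev, which in practice
 * tracks the minor number.  A device numbered 8:17 has bd_dev 0x800011, so
 * (0x800011 & ORIGIN_MASK) selects bucket 0x11 in both _origins[] and
 * _dm_origins[].
 */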
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		mutex_lock(&s->lock);
		active = s->active;
		mutex_unlock(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}
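/*
 * Worked example of the grouping described above: the table of completed
 * exceptions is created with hash_shift = DM_CHUNK_CONSECUTIVE_BITS, so
 * exception_hash() drops the low bits of the chunk number and chunks that
 * differ only in those bits share a bucket.  That is what lets
 * dm_insert_exception() below extend an existing entry with
 * dm_consecutive_chunk_count_inc() instead of allocating a new one: e.g.
 * old chunks 100 and 101 copied to new chunks 7 and 8 are stored as a
 * single exception (old 100, new 7, consecutive count 1).
 */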
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, &s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;

	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
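/*
 * Example of the accounting above: an exception describing old chunks
 * 10-13 (dm_consecutive_chunk_count(e) == 3) is unwound by the merge in
 * reverse order.  Removing chunk 13 decrements the count so the entry
 * covers 10-12, then 12 and 11 are peeled off the same way, and removing
 * chunk 10 finally hits the count == 0 case and frees the exception.
 * Removing a chunk from the start of a still-populated range instead
 * advances old_chunk/new_chunk before the decrement.
 */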
static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	mutex_lock(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	mutex_unlock(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			mutex_lock(&s->lock);
			s->merge_failed = 1;
			mutex_unlock(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	mutex_lock(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	mutex_unlock(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	mutex_lock(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	mutex_unlock(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	dev_t origin_dev, cow_dev;
	unsigned args_used, num_flush_bios = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}
	origin_dev = s->origin->bdev->bd_dev;

	cow_path = argv[0];
	argv++;
	argc--;

	cow_dev = dm_get_dev_t(cow_path);
	if (cow_dev && cow_dev == origin_dev) {
		ti->error = "COW device cannot be the same as origin device";
		r = -EINVAL;
		goto bad_cow;
	}

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->snapshot_overflowed = 0;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
	INIT_LIST_HEAD(&s->out_of_order_list);
	mutex_init(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
	if (r) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_bios = num_flush_bios;
	ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_exit(&s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;
	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		mutex_lock(&snap_dest->lock);
		snap_dest->valid = 0;
		mutex_unlock(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_exit aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_exit(&s->pending_pool);

	dm_exception_store_destroy(s->store);

	mutex_destroy(&s->lock);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void pending_complete(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		mutex_lock(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception(GFP_NOIO);
	if (!e) {
		mutex_lock(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	mutex_lock(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio)
		full_bio->bi_end_io = pe->full_bio_end_io;
	increment_pending_exceptions_done_count();

	mutex_unlock(&s->lock);

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);

	free_pending_exception(pe);
}

static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	/* Update the metadata if we are persistent */
	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
					 pending_complete, pe);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	pe->copy_error = read_err || write_err;

	if (pe->exception_sequence == s->exception_complete_sequence) {
		s->exception_complete_sequence++;
		complete_exception(pe);

		while (!list_empty(&s->out_of_order_list)) {
			pe = list_entry(s->out_of_order_list.next,
					struct dm_snap_pending_exception, out_of_order_entry);
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
			s->exception_complete_sequence++;
			list_del(&pe->out_of_order_entry);
			complete_exception(pe);
		}
	} else {
		struct list_head *lh;
		struct dm_snap_pending_exception *pe2;

		list_for_each_prev(lh, &s->out_of_order_list) {
			pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
			if (pe2->exception_sequence < pe->exception_sequence)
				break;
		}
		list_add(&pe->out_of_order_entry, lh);
	}
}
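/*
 * Example of the sequencing above: with exception_complete_sequence == 3,
 * copies finishing in the order 5, 3, 4 are handled as follows.  5 arrives
 * early and is parked on out_of_order_list (kept sorted by sequence).  3
 * matches, completes, and the counter moves to 4; when 4 later finishes it
 * completes immediately and the drain loop then picks 5 off the list.
 * Everything here runs from kcopyd's single-threaded callback, which is why
 * no extra locking is needed.
 */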
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;

	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio_set_dev(bio, s->cow->bdev);
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return DM_MAPIO_KILL;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	mutex_lock(&s->lock);

	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
	    bio_data_dir(bio) == WRITE)) {
		r = DM_MAPIO_KILL;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_data_dir(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			mutex_unlock(&s->lock);
			pe = alloc_pending_exception(s);
			mutex_lock(&s->lock);

			if (!s->valid || s->snapshot_overflowed) {
				free_pending_exception(pe);
				r = DM_MAPIO_KILL;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				if (s->store->userspace_supports_overflow) {
					s->snapshot_overflowed = 1;
					DMERR("Snapshot overflowed: Unable to allocate exception.");
				} else
					__invalidate_snapshot(s, -ENOMEM);
				r = DM_MAPIO_KILL;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started &&
		    bio->bi_iter.bi_size ==
		    (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			mutex_unlock(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			mutex_unlock(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio_set_dev(bio, s->origin->bdev);
		track_chunk(s, bio, chunk);
	}

out_unlock:
	mutex_unlock(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
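/*
 * For reference, snapshot-merge takes the same table arguments as the
 * snapshot target (persistent stores only), e.g. with purely illustrative
 * device names:
 *
 *   0 <origin sectors> snapshot-merge /dev/vg/base /dev/vg/snap-cow P 16
 *
 * It assumes the role of the snapshot-origin target for <origin> while the
 * merge runs, so it must not be loaded while a snapshot-origin mapping for
 * the same origin is still present; see the device-mapper snapshot
 * documentation for the full procedure.
 */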
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio_set_dev(bio, s->origin->bdev);
		else
			bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	mutex_lock(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_data_dir(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio_set_dev(bio, s->origin->bdev);
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_data_dir(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio_set_dev(bio, s->origin->bdev);

	if (bio_data_dir(bio) == WRITE) {
		mutex_unlock(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	mutex_unlock(&s->lock);

	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   blk_status_t *error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return DM_ENDIO_DONE;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		mutex_lock(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		mutex_unlock(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		mutex_lock(&snap_src->lock);
		mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		mutex_unlock(&snap_dest->lock);
		mutex_unlock(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	mutex_lock(&s->lock);
	s->active = 1;
	mutex_unlock(&s->lock);
}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		mutex_lock(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else if (snap->snapshot_overflowed)
			DMEMIT("Overflow");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		mutex_unlock(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}


/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		mutex_lock(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			mutex_unlock(&snap->lock);
			pe = alloc_pending_exception(snap);
			mutex_lock(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		mutex_lock(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			mutex_unlock(&snap->lock);
			pe = alloc_pending_exception(snap);
			mutex_lock(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		mutex_unlock(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
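/*
 * For example (all values illustrative only), a table line for a 1GiB
 * origin loaded with "dmsetup" could look like
 *
 *	0 2097152 snapshot-origin /dev/vg0/base
 *
 * i.e. <start_sector> <num_sectors> snapshot-origin <dev_path>.
 */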
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}

static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio_set_dev(bio, o->dev->bdev);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio);
}

static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	DMWARN("device does not support dax.");
	return -EIO;
}

/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
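/*
 * An illustrative example of why the minimum matters: if two snapshots of
 * the same origin use chunk sizes of 16 and 64 sectors, split_boundary is
 * set to 16, so origin_map() above trims a write starting at sector 20 to
 * at most 16 - (20 & 15) = 12 sectors and the write never straddles a
 * chunk boundary of either snapshot.
 */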
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	}
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.postsuspend = origin_postsuspend,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
	.direct_access = origin_dax_direct_access,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 15, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 4, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

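/*
 * The target_type structures above are what "dmsetup" table lines refer to
 * by name once they are registered below.  Purely as an illustration
 * (device names and sizes made up), snapshot and snapshot-merge tables take
 * the same parameters:
 *
 *	0 2097152 snapshot /dev/vg0/base /dev/vg0/cow P 16
 *	0 2097152 snapshot-merge /dev/vg0/base /dev/vg0/cow P 16
 *
 * i.e. <origin> <COW device> <persistent?> <chunksize in sectors>, with the
 * constructor shared between the two targets.
 */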
%d", r); 2450 goto bad_register_merge_target; 2451 } 2452 2453 return 0; 2454 2455 bad_register_merge_target: 2456 dm_unregister_target(&origin_target); 2457 bad_register_origin_target: 2458 dm_unregister_target(&snapshot_target); 2459 bad_register_snapshot_target: 2460 kmem_cache_destroy(pending_cache); 2461 bad_pending_cache: 2462 kmem_cache_destroy(exception_cache); 2463 bad_exception_cache: 2464 exit_origin_hash(); 2465 bad_origin_hash: 2466 dm_exception_store_exit(); 2467 2468 return r; 2469 } 2470 2471 static void __exit dm_snapshot_exit(void) 2472 { 2473 dm_unregister_target(&snapshot_target); 2474 dm_unregister_target(&origin_target); 2475 dm_unregister_target(&merge_target); 2476 2477 exit_origin_hash(); 2478 kmem_cache_destroy(pending_cache); 2479 kmem_cache_destroy(exception_cache); 2480 2481 dm_exception_store_exit(); 2482 } 2483 2484 /* Module hooks */ 2485 module_init(dm_snapshot_init); 2486 module_exit(dm_snapshot_exit); 2487 2488 MODULE_DESCRIPTION(DM_NAME " snapshot target"); 2489 MODULE_AUTHOR("Joe Thornber"); 2490 MODULE_LICENSE("GPL"); 2491 MODULE_ALIAS("dm-snapshot-origin"); 2492 MODULE_ALIAS("dm-snapshot-merge"); 2493