/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *            either direction
 */

/*----------------------------------------------------------------*/

static size_t bitset_size_in_bytes(unsigned nr_entries)
{
        return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}

static unsigned long *alloc_bitset(unsigned nr_entries)
{
        size_t s = bitset_size_in_bytes(nr_entries);
        return vzalloc(s);
}

static void clear_bitset(void *bitset, unsigned nr_entries)
{
        size_t s = bitset_size_in_bytes(nr_entries);
        memset(bitset, 0, s);
}

static void free_bitset(unsigned long *bits)
{
        vfree(bits);
}

/*----------------------------------------------------------------*/

/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function.  We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {
        bio_end_io_t *bi_end_io;
        void *bi_private;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
                        bio_end_io_t *bi_end_io, void *bi_private)
{
        h->bi_end_io = bio->bi_end_io;
        h->bi_private = bio->bi_private;

        bio->bi_end_io = bi_end_io;
        bio->bi_private = bi_private;
}

static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
        bio->bi_end_io = h->bi_end_io;
        bio->bi_private = h->bi_private;

        /*
         * Must bump bi_remaining to allow bio to complete with
         * restored bi_end_io.
         */
        atomic_inc(&bio->bi_remaining);
}

/*----------------------------------------------------------------*/

#define PRISON_CELLS 1024
#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * FIXME: the cache is read/write for the time being.
 */
enum cache_metadata_mode {
        CM_WRITE,               /* metadata may be changed */
        CM_READ_ONLY,           /* metadata may not be changed */
};

enum cache_io_mode {
        /*
         * Data is written to cached blocks only.  These blocks are marked
         * dirty.  If you lose the cache device you will lose data.
         * Potential performance increase for both reads and writes.
         */
        CM_IO_WRITEBACK,

        /*
         * Data is written to both cache and origin.  Blocks are never
         * dirty.  Potential performance benefit for reads only.
         */
        CM_IO_WRITETHROUGH,

        /*
         * A degraded mode useful for various cache coherency situations
         * (eg, rolling back snapshots).  Reads and writes always go to the
         * origin.  If a write goes to a cached oblock, then the cache
         * block is invalidated.
         */
        CM_IO_PASSTHROUGH
};
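
/*
 * An informal summary of the write paths implied by the io modes above
 * (illustrative; the per-mode comments in the enum are authoritative):
 *
 *   writeback:    writes hit the cache block only and mark it dirty;
 *                 the block is copied back to the origin later.
 *   writethrough: writes go to the origin and then the cache, so
 *                 blocks are never dirty.
 *   passthrough:  reads and writes go to the origin; a write
 *                 invalidates any cached copy of the block.
 */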
struct cache_features {
        enum cache_metadata_mode mode;
        enum cache_io_mode io_mode;
};

struct cache_stats {
        atomic_t read_hit;
        atomic_t read_miss;
        atomic_t write_hit;
        atomic_t write_miss;
        atomic_t demotion;
        atomic_t promotion;
        atomic_t copies_avoided;
        atomic_t cache_cell_clash;
        atomic_t commit_count;
        atomic_t discard_count;
};

/*
 * Defines a range of cblocks, begin to (end - 1) are in the range.  end is
 * the one-past-the-end value.
 */
struct cblock_range {
        dm_cblock_t begin;
        dm_cblock_t end;
};

struct invalidation_request {
        struct list_head list;
        struct cblock_range *cblocks;

        atomic_t complete;
        int err;

        wait_queue_head_t result_wait;
};

struct cache {
        struct dm_target *ti;
        struct dm_target_callbacks callbacks;

        struct dm_cache_metadata *cmd;

        /*
         * Metadata is written to this device.
         */
        struct dm_dev *metadata_dev;

        /*
         * The slower of the two data devices.  Typically a spindle.
         */
        struct dm_dev *origin_dev;

        /*
         * The faster of the two data devices.  Typically an SSD.
         */
        struct dm_dev *cache_dev;

        /*
         * Size of the origin device in _complete_ blocks and native sectors.
         */
        dm_oblock_t origin_blocks;
        sector_t origin_sectors;

        /*
         * Size of the cache device in blocks.
         */
        dm_cblock_t cache_size;

        /*
         * Fields for converting from sectors to blocks.
         */
        uint32_t sectors_per_block;
        int sectors_per_block_shift;

        spinlock_t lock;
        struct bio_list deferred_bios;
        struct bio_list deferred_flush_bios;
        struct bio_list deferred_writethrough_bios;
        struct list_head quiesced_migrations;
        struct list_head completed_migrations;
        struct list_head need_commit_migrations;
        sector_t migration_threshold;
        wait_queue_head_t migration_wait;
        atomic_t nr_migrations;

        wait_queue_head_t quiescing_wait;
        atomic_t quiescing;
        atomic_t quiescing_ack;

        /*
         * cache_size entries, dirty if set
         */
        dm_cblock_t nr_dirty;
        unsigned long *dirty_bitset;

        /*
         * origin_blocks entries, discarded if set.
         */
        dm_dblock_t discard_nr_blocks;
        unsigned long *discard_bitset;
        uint32_t discard_block_size; /* a power of 2 times sectors per block */

        /*
         * Rather than reconstructing the table line for the status we just
         * save it and regurgitate.
         */
        unsigned nr_ctr_args;
        const char **ctr_args;

        struct dm_kcopyd_client *copier;
        struct workqueue_struct *wq;
        struct work_struct worker;

        struct delayed_work waker;
        unsigned long last_commit_jiffies;

        struct dm_bio_prison *prison;
        struct dm_deferred_set *all_io_ds;

        mempool_t *migration_pool;
        struct dm_cache_migration *next_migration;

        struct dm_cache_policy *policy;
        unsigned policy_nr_args;

        bool need_tick_bio:1;
        bool sized:1;
        bool invalidate:1;
        bool commit_requested:1;
        bool loaded_mappings:1;
        bool loaded_discards:1;

        /*
         * Cache features such as write-through.
         */
        struct cache_features features;

        struct cache_stats stats;

        /*
         * Invalidation fields.
         */
        spinlock_t invalidation_lock;
        struct list_head invalidation_requests;
};
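
/*
 * A rough sketch of how a migration travels through the lists declared
 * in struct cache above (reconstructed from the code below, so treat it
 * as a reading aid rather than a specification):
 *
 *   quiesced_migrations    - waiting for in-flight io to the block to
 *                            drain (see quiesce_migration())
 *   kcopyd copy in flight  - issue_copy() / issue_copy_real()
 *   completed_migrations   - copy finished; metadata updated in
 *                            migration_success_pre_commit()
 *   need_commit_migrations - waiting for a metadata commit before the
 *                            blocked bios are released
 */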
struct per_bio_data {
        bool tick:1;
        unsigned req_nr:2;
        struct dm_deferred_entry *all_io_entry;
        struct dm_hook_info hook_info;

        /*
         * writethrough fields.  These MUST remain at the end of this
         * structure and the 'cache' member must be the first as it
         * is used to determine the offset of the writethrough fields.
         */
        struct cache *cache;
        dm_cblock_t cblock;
        struct dm_bio_details bio_details;
};

struct dm_cache_migration {
        struct list_head list;
        struct cache *cache;

        unsigned long start_jiffies;
        dm_oblock_t old_oblock;
        dm_oblock_t new_oblock;
        dm_cblock_t cblock;

        bool err:1;
        bool writeback:1;
        bool demote:1;
        bool promote:1;
        bool requeue_holder:1;
        bool invalidate:1;

        struct dm_bio_prison_cell *old_ocell;
        struct dm_bio_prison_cell *new_ocell;
};

/*
 * Processing a bio in the worker thread may require these memory
 * allocations.  We prealloc to avoid deadlocks (the same worker thread
 * frees them back to the mempool).
 */
struct prealloc {
        struct dm_cache_migration *mg;
        struct dm_bio_prison_cell *cell1;
        struct dm_bio_prison_cell *cell2;
};

static void wake_worker(struct cache *cache)
{
        queue_work(cache->wq, &cache->worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
{
        /* FIXME: change to use a local slab. */
        return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
{
        dm_bio_prison_free_cell(cache->prison, cell);
}

static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
        if (!p->mg) {
                p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
                if (!p->mg)
                        return -ENOMEM;
        }

        if (!p->cell1) {
                p->cell1 = alloc_prison_cell(cache);
                if (!p->cell1)
                        return -ENOMEM;
        }

        if (!p->cell2) {
                p->cell2 = alloc_prison_cell(cache);
                if (!p->cell2)
                        return -ENOMEM;
        }

        return 0;
}

static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
{
        if (p->cell2)
                free_prison_cell(cache, p->cell2);

        if (p->cell1)
                free_prison_cell(cache, p->cell1);

        if (p->mg)
                mempool_free(p->mg, cache->migration_pool);
}

static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
{
        struct dm_cache_migration *mg = p->mg;

        BUG_ON(!mg);
        p->mg = NULL;

        return mg;
}

/*
 * You must have a cell within the prealloc struct to return.  If not this
 * function will BUG() rather than returning NULL.
 */
static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
{
        struct dm_bio_prison_cell *r = NULL;

        if (p->cell1) {
                r = p->cell1;
                p->cell1 = NULL;

        } else if (p->cell2) {
                r = p->cell2;
                p->cell2 = NULL;
        } else
                BUG();

        return r;
}
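
/*
 * A minimal usage sketch of the prealloc pattern, based on
 * process_deferred_bios() further down:
 *
 *      struct prealloc structs;
 *
 *      memset(&structs, 0, sizeof(structs));
 *      while (<work remains>) {
 *              if (prealloc_data_structs(cache, &structs))
 *                      break;  // no memory; retry on a later pass
 *              // consume via prealloc_get_cell()/prealloc_get_migration()
 *      }
 *      prealloc_free_structs(cache, &structs);
 */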
/*
 * You can't have more than two cells in a prealloc struct.  BUG() will be
 * called if you try and overfill.
 */
static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
{
        if (!p->cell2)
                p->cell2 = cell;

        else if (!p->cell1)
                p->cell1 = cell;

        else
                BUG();
}

/*----------------------------------------------------------------*/

static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
{
        key->virtual = 0;
        key->dev = 0;
        key->block = from_oblock(oblock);
}

/*
 * The caller hands in a preallocated cell, and a free function for it.
 * The cell will be freed if there's an error, or if it wasn't used because
 * a cell with that key already exists.
 */
typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);

static int bio_detain(struct cache *cache, dm_oblock_t oblock,
                      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
                      cell_free_fn free_fn, void *free_context,
                      struct dm_bio_prison_cell **cell_result)
{
        int r;
        struct dm_cell_key key;

        build_key(oblock, &key);
        r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
        if (r)
                free_fn(free_context, cell_prealloc);

        return r;
}

static int get_cell(struct cache *cache,
                    dm_oblock_t oblock,
                    struct prealloc *structs,
                    struct dm_bio_prison_cell **cell_result)
{
        int r;
        struct dm_cell_key key;
        struct dm_bio_prison_cell *cell_prealloc;

        cell_prealloc = prealloc_get_cell(structs);

        build_key(oblock, &key);
        r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
        if (r)
                prealloc_put_cell(structs, cell_prealloc);

        return r;
}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
        return test_bit(from_cblock(b), cache->dirty_bitset);
}

static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
        if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
                cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
                policy_set_dirty(cache->policy, oblock);
        }
}

static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
        if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
                policy_clear_dirty(cache->policy, oblock);
                cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
                if (!from_cblock(cache->nr_dirty))
                        dm_table_event(cache->ti->table);
        }
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
        return cache->sectors_per_block_shift >= 0;
}

/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
__always_inline
#endif
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
        do_div(b, n);

        return b;
}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
        uint32_t discard_blocks = cache->discard_block_size;
        dm_block_t b = from_oblock(oblock);

        if (!block_size_is_power_of_two(cache))
                discard_blocks = discard_blocks / cache->sectors_per_block;
        else
                discard_blocks >>= cache->sectors_per_block_shift;

        b = block_div(b, discard_blocks);

        return to_dblock(b);
}
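
/*
 * Worked example for oblock_to_dblock() with illustrative numbers:
 * sectors_per_block = 64 (a power of two, so shift = 6) and
 * discard_block_size = 128 sectors gives discard_blocks = 128 >> 6 = 2
 * cache blocks per discard block, so oblock 10 maps to dblock 10 / 2 = 5.
 */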
static void set_discard(struct cache *cache, dm_dblock_t b)
{
        unsigned long flags;

        atomic_inc(&cache->stats.discard_count);

        spin_lock_irqsave(&cache->lock, flags);
        set_bit(from_dblock(b), cache->discard_bitset);
        spin_unlock_irqrestore(&cache->lock, flags);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
        unsigned long flags;

        spin_lock_irqsave(&cache->lock, flags);
        clear_bit(from_dblock(b), cache->discard_bitset);
        spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
        int r;
        unsigned long flags;

        spin_lock_irqsave(&cache->lock, flags);
        r = test_bit(from_dblock(b), cache->discard_bitset);
        spin_unlock_irqrestore(&cache->lock, flags);

        return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
        int r;
        unsigned long flags;

        spin_lock_irqsave(&cache->lock, flags);
        r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
                     cache->discard_bitset);
        spin_unlock_irqrestore(&cache->lock, flags);

        return r;
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
        struct dm_cache_statistics stats;

        dm_cache_metadata_get_stats(cache->cmd, &stats);
        atomic_set(&cache->stats.read_hit, stats.read_hits);
        atomic_set(&cache->stats.read_miss, stats.read_misses);
        atomic_set(&cache->stats.write_hit, stats.write_hits);
        atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
        struct dm_cache_statistics stats;

        stats.read_hits = atomic_read(&cache->stats.read_hit);
        stats.read_misses = atomic_read(&cache->stats.read_miss);
        stats.write_hits = atomic_read(&cache->stats.write_hit);
        stats.write_misses = atomic_read(&cache->stats.write_miss);

        dm_cache_metadata_set_stats(cache->cmd, &stats);
}

/*----------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------*/

/*
 * If using writeback, leave out struct per_bio_data's writethrough fields.
 */
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))

static bool writethrough_mode(struct cache_features *f)
{
        return f->io_mode == CM_IO_WRITETHROUGH;
}

static bool writeback_mode(struct cache_features *f)
{
        return f->io_mode == CM_IO_WRITEBACK;
}

static bool passthrough_mode(struct cache_features *f)
{
        return f->io_mode == CM_IO_PASSTHROUGH;
}

static size_t get_per_bio_data_size(struct cache *cache)
{
        return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}
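
/*
 * Example of the sizing trick above: in writeback (and passthrough)
 * mode only the fields before 'cache' are used, so the per-bio data is
 * truncated at offsetof(struct per_bio_data, cache).  Writethrough mode
 * pays for the full struct because writethrough_endio() needs 'cache',
 * 'cblock' and 'bio_details'.
 */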
static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
        struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
        BUG_ON(!pb);
        return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
        struct per_bio_data *pb = get_per_bio_data(bio, data_size);

        pb->tick = false;
        pb->req_nr = dm_bio_get_target_bio_nr(bio);
        pb->all_io_entry = NULL;

        return pb;
}

/*----------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
        bio->bi_bdev = cache->origin_dev->bdev;
}

static void remap_to_cache(struct cache *cache, struct bio *bio,
                           dm_cblock_t cblock)
{
        sector_t bi_sector = bio->bi_iter.bi_sector;
        sector_t block = from_cblock(cblock);

        bio->bi_bdev = cache->cache_dev->bdev;
        if (!block_size_is_power_of_two(cache))
                bio->bi_iter.bi_sector =
                        (block * cache->sectors_per_block) +
                        sector_div(bi_sector, cache->sectors_per_block);
        else
                bio->bi_iter.bi_sector =
                        (block << cache->sectors_per_block_shift) |
                        (bi_sector & (cache->sectors_per_block - 1));
}

static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
        unsigned long flags;
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

        spin_lock_irqsave(&cache->lock, flags);
        if (cache->need_tick_bio &&
            !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
                pb->tick = true;
                cache->need_tick_bio = false;
        }
        spin_unlock_irqrestore(&cache->lock, flags);
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
                                          dm_oblock_t oblock)
{
        check_if_tick_bio_needed(cache, bio);
        remap_to_origin(cache, bio);
        if (bio_data_dir(bio) == WRITE)
                clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
                                 dm_oblock_t oblock, dm_cblock_t cblock)
{
        check_if_tick_bio_needed(cache, bio);
        remap_to_cache(cache, bio, cblock);
        if (bio_data_dir(bio) == WRITE) {
                set_dirty(cache, oblock, cblock);
                clear_discard(cache, oblock_to_dblock(cache, oblock));
        }
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
        sector_t block_nr = bio->bi_iter.bi_sector;

        if (!block_size_is_power_of_two(cache))
                (void) sector_div(block_nr, cache->sectors_per_block);
        else
                block_nr >>= cache->sectors_per_block_shift;

        return to_oblock(block_nr);
}
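
/*
 * Worked example of the remapping arithmetic with illustrative numbers:
 * with sectors_per_block = 64 (shift = 6), a bio at sector 200 sits in
 * oblock 200 >> 6 = 3 at offset 200 & 63 = 8, and remap_to_cache()
 * sends it to cache sector (cblock << 6) | 8.
 */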
static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
        return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
}

static void issue(struct cache *cache, struct bio *bio)
{
        unsigned long flags;

        if (!bio_triggers_commit(cache, bio)) {
                generic_make_request(bio);
                return;
        }

        /*
         * Batch together any bios that trigger commits and then issue a
         * single commit for them in do_worker().
         */
        spin_lock_irqsave(&cache->lock, flags);
        cache->commit_requested = true;
        bio_list_add(&cache->deferred_flush_bios, bio);
        spin_unlock_irqrestore(&cache->lock, flags);
}

static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&cache->lock, flags);
        bio_list_add(&cache->deferred_writethrough_bios, bio);
        spin_unlock_irqrestore(&cache->lock, flags);

        wake_worker(cache);
}

static void writethrough_endio(struct bio *bio, int err)
{
        struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

        dm_unhook_bio(&pb->hook_info, bio);

        if (err) {
                bio_endio(bio, err);
                return;
        }

        dm_bio_restore(&pb->bio_details, bio);
        remap_to_cache(pb->cache, bio, pb->cblock);

        /*
         * We can't issue this bio directly, since we're in interrupt
         * context.  So it gets put on a bio list for processing by the
         * worker thread.
         */
        defer_writethrough_bio(pb->cache, bio);
}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices.  In future we'd like to clone the
 * bio and send them in parallel, but for now we're doing them in
 * series as this is easier.
 */
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
                                       dm_oblock_t oblock, dm_cblock_t cblock)
{
        struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

        pb->cache = cache;
        pb->cblock = cblock;
        dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
        dm_bio_record(&pb->bio_details, bio);

        remap_to_origin_clear_discard(pb->cache, bio, oblock);
}

/*----------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *--------------------------------------------------------------*/
static void free_migration(struct dm_cache_migration *mg)
{
        mempool_free(mg, mg->cache->migration_pool);
}

static void inc_nr_migrations(struct cache *cache)
{
        atomic_inc(&cache->nr_migrations);
}

static void dec_nr_migrations(struct cache *cache)
{
        atomic_dec(&cache->nr_migrations);

        /*
         * Wake the worker in case we're suspending the target.
         */
        wake_up(&cache->migration_wait);
}
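
/*
 * Note on the helpers below (assumption based on the dm-bio-prison
 * interface, which is defined elsewhere): dm_cell_release() hands every
 * bio in the cell, including the holder, to the deferred list, whereas
 * dm_cell_release_no_holder() skips the holder because it is being
 * completed or requeued by other means.
 */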
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
                         bool holder)
{
        (holder ? dm_cell_release : dm_cell_release_no_holder)
                (cache->prison, cell, &cache->deferred_bios);
        free_prison_cell(cache, cell);
}

static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
                       bool holder)
{
        unsigned long flags;

        spin_lock_irqsave(&cache->lock, flags);
        __cell_defer(cache, cell, holder);
        spin_unlock_irqrestore(&cache->lock, flags);

        wake_worker(cache);
}

static void cleanup_migration(struct dm_cache_migration *mg)
{
        struct cache *cache = mg->cache;
        free_migration(mg);
        dec_nr_migrations(cache);
}

static void migration_failure(struct dm_cache_migration *mg)
{
        struct cache *cache = mg->cache;

        if (mg->writeback) {
                DMWARN_LIMIT("writeback failed; couldn't copy block");
                set_dirty(cache, mg->old_oblock, mg->cblock);
                cell_defer(cache, mg->old_ocell, false);

        } else if (mg->demote) {
                DMWARN_LIMIT("demotion failed; couldn't copy block");
                policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);

                cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
                if (mg->promote)
                        cell_defer(cache, mg->new_ocell, true);
        } else {
                DMWARN_LIMIT("promotion failed; couldn't copy block");
                policy_remove_mapping(cache->policy, mg->new_oblock);
                cell_defer(cache, mg->new_ocell, true);
        }

        cleanup_migration(mg);
}

static void migration_success_pre_commit(struct dm_cache_migration *mg)
{
        unsigned long flags;
        struct cache *cache = mg->cache;

        if (mg->writeback) {
                cell_defer(cache, mg->old_ocell, false);
                clear_dirty(cache, mg->old_oblock, mg->cblock);
                cleanup_migration(mg);
                return;

        } else if (mg->demote) {
                if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
                        DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
                        policy_force_mapping(cache->policy, mg->new_oblock,
                                             mg->old_oblock);
                        if (mg->promote)
                                cell_defer(cache, mg->new_ocell, true);
                        cleanup_migration(mg);
                        return;
                }
        } else {
                if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
                        DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
                        policy_remove_mapping(cache->policy, mg->new_oblock);
                        cleanup_migration(mg);
                        return;
                }
        }

        spin_lock_irqsave(&cache->lock, flags);
        list_add_tail(&mg->list, &cache->need_commit_migrations);
        cache->commit_requested = true;
        spin_unlock_irqrestore(&cache->lock, flags);
}
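
/*
 * Migration completion is deliberately split in two:
 * migration_success_pre_commit() above updates the policy and on-disk
 * metadata and parks the migration on need_commit_migrations; only
 * after do_worker() commits the metadata does
 * migration_success_post_commit() below release the prison cells and
 * let the blocked bios through.
 */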
static void migration_success_post_commit(struct dm_cache_migration *mg)
{
        unsigned long flags;
        struct cache *cache = mg->cache;

        if (mg->writeback) {
                DMWARN("writeback unexpectedly triggered commit");
                return;

        } else if (mg->demote) {
                cell_defer(cache, mg->old_ocell, mg->promote ? false : true);

                if (mg->promote) {
                        mg->demote = false;

                        spin_lock_irqsave(&cache->lock, flags);
                        list_add_tail(&mg->list, &cache->quiesced_migrations);
                        spin_unlock_irqrestore(&cache->lock, flags);

                } else {
                        if (mg->invalidate)
                                policy_remove_mapping(cache->policy, mg->old_oblock);
                        cleanup_migration(mg);
                }

        } else {
                if (mg->requeue_holder)
                        cell_defer(cache, mg->new_ocell, true);
                else {
                        bio_endio(mg->new_ocell->holder, 0);
                        cell_defer(cache, mg->new_ocell, false);
                }
                clear_dirty(cache, mg->new_oblock, mg->cblock);
                cleanup_migration(mg);
        }
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
        unsigned long flags;
        struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
        struct cache *cache = mg->cache;

        if (read_err || write_err)
                mg->err = true;

        spin_lock_irqsave(&cache->lock, flags);
        list_add_tail(&mg->list, &cache->completed_migrations);
        spin_unlock_irqrestore(&cache->lock, flags);

        wake_worker(cache);
}

static void issue_copy_real(struct dm_cache_migration *mg)
{
        int r;
        struct dm_io_region o_region, c_region;
        struct cache *cache = mg->cache;
        sector_t cblock = from_cblock(mg->cblock);

        o_region.bdev = cache->origin_dev->bdev;
        o_region.count = cache->sectors_per_block;

        c_region.bdev = cache->cache_dev->bdev;
        c_region.sector = cblock * cache->sectors_per_block;
        c_region.count = cache->sectors_per_block;

        if (mg->writeback || mg->demote) {
                /* demote */
                o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
                r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
        } else {
                /* promote */
                o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
                r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
        }

        if (r < 0) {
                DMERR_LIMIT("issuing migration failed");
                migration_failure(mg);
        }
}

static void overwrite_endio(struct bio *bio, int err)
{
        struct dm_cache_migration *mg = bio->bi_private;
        struct cache *cache = mg->cache;
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
        unsigned long flags;

        dm_unhook_bio(&pb->hook_info, bio);

        if (err)
                mg->err = true;

        mg->requeue_holder = false;

        spin_lock_irqsave(&cache->lock, flags);
        list_add_tail(&mg->list, &cache->completed_migrations);
        spin_unlock_irqrestore(&cache->lock, flags);

        wake_worker(cache);
}

static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
{
        size_t pb_data_size = get_per_bio_data_size(mg->cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

        dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
        remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
        generic_make_request(bio);
}

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
        return (bio_data_dir(bio) == WRITE) &&
                (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}

static void avoid_copy(struct dm_cache_migration *mg)
{
        atomic_inc(&mg->cache->stats.copies_avoided);
        migration_success_pre_commit(mg);
}
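
/*
 * Two copy-avoidance cases feed issue_copy() below: if the relevant
 * origin block is discarded (or a writeback source is clean) there is
 * no data worth copying, and if the triggering bio is a write covering
 * the whole block we skip the copy and let the bio overwrite the cache
 * block directly via issue_overwrite() above.
 */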
static void issue_copy(struct dm_cache_migration *mg)
{
        bool avoid;
        struct cache *cache = mg->cache;

        if (mg->writeback || mg->demote)
                avoid = !is_dirty(cache, mg->cblock) ||
                        is_discarded_oblock(cache, mg->old_oblock);
        else {
                struct bio *bio = mg->new_ocell->holder;

                avoid = is_discarded_oblock(cache, mg->new_oblock);

                if (!avoid && bio_writes_complete_block(cache, bio)) {
                        issue_overwrite(mg, bio);
                        return;
                }
        }

        avoid ? avoid_copy(mg) : issue_copy_real(mg);
}

static void complete_migration(struct dm_cache_migration *mg)
{
        if (mg->err)
                migration_failure(mg);
        else
                migration_success_pre_commit(mg);
}

static void process_migrations(struct cache *cache, struct list_head *head,
                               void (*fn)(struct dm_cache_migration *))
{
        unsigned long flags;
        struct list_head list;
        struct dm_cache_migration *mg, *tmp;

        INIT_LIST_HEAD(&list);
        spin_lock_irqsave(&cache->lock, flags);
        list_splice_init(head, &list);
        spin_unlock_irqrestore(&cache->lock, flags);

        list_for_each_entry_safe(mg, tmp, &list, list)
                fn(mg);
}

static void __queue_quiesced_migration(struct dm_cache_migration *mg)
{
        list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
}

static void queue_quiesced_migration(struct dm_cache_migration *mg)
{
        unsigned long flags;
        struct cache *cache = mg->cache;

        spin_lock_irqsave(&cache->lock, flags);
        __queue_quiesced_migration(mg);
        spin_unlock_irqrestore(&cache->lock, flags);

        wake_worker(cache);
}

static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
{
        unsigned long flags;
        struct dm_cache_migration *mg, *tmp;

        spin_lock_irqsave(&cache->lock, flags);
        list_for_each_entry_safe(mg, tmp, work, list)
                __queue_quiesced_migration(mg);
        spin_unlock_irqrestore(&cache->lock, flags);

        wake_worker(cache);
}

static void check_for_quiesced_migrations(struct cache *cache,
                                          struct per_bio_data *pb)
{
        struct list_head work;

        if (!pb->all_io_entry)
                return;

        INIT_LIST_HEAD(&work);
        if (pb->all_io_entry)
                dm_deferred_entry_dec(pb->all_io_entry, &work);

        if (!list_empty(&work))
                queue_quiesced_migrations(cache, &work);
}

static void quiesce_migration(struct dm_cache_migration *mg)
{
        if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
                queue_quiesced_migration(mg);
}

static void promote(struct cache *cache, struct prealloc *structs,
                    dm_oblock_t oblock, dm_cblock_t cblock,
                    struct dm_bio_prison_cell *cell)
{
        struct dm_cache_migration *mg = prealloc_get_migration(structs);

        mg->err = false;
        mg->writeback = false;
        mg->demote = false;
        mg->promote = true;
        mg->requeue_holder = true;
        mg->invalidate = false;
        mg->cache = cache;
        mg->new_oblock = oblock;
        mg->cblock = cblock;
        mg->old_ocell = NULL;
        mg->new_ocell = cell;
        mg->start_jiffies = jiffies;

        inc_nr_migrations(cache);
        quiesce_migration(mg);
}
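
/*
 * Flag combinations used by promote() above and the migration
 * constructors below, summarised for reference:
 *
 *   promote()              writeback=0 demote=0 promote=1
 *   writeback()            writeback=1 demote=0 promote=0
 *   demote_then_promote()  writeback=0 demote=1 promote=1
 *   invalidate()           writeback=0 demote=1 promote=0 invalidate=1
 */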
static void writeback(struct cache *cache, struct prealloc *structs,
                      dm_oblock_t oblock, dm_cblock_t cblock,
                      struct dm_bio_prison_cell *cell)
{
        struct dm_cache_migration *mg = prealloc_get_migration(structs);

        mg->err = false;
        mg->writeback = true;
        mg->demote = false;
        mg->promote = false;
        mg->requeue_holder = true;
        mg->invalidate = false;
        mg->cache = cache;
        mg->old_oblock = oblock;
        mg->cblock = cblock;
        mg->old_ocell = cell;
        mg->new_ocell = NULL;
        mg->start_jiffies = jiffies;

        inc_nr_migrations(cache);
        quiesce_migration(mg);
}

static void demote_then_promote(struct cache *cache, struct prealloc *structs,
                                dm_oblock_t old_oblock, dm_oblock_t new_oblock,
                                dm_cblock_t cblock,
                                struct dm_bio_prison_cell *old_ocell,
                                struct dm_bio_prison_cell *new_ocell)
{
        struct dm_cache_migration *mg = prealloc_get_migration(structs);

        mg->err = false;
        mg->writeback = false;
        mg->demote = true;
        mg->promote = true;
        mg->requeue_holder = true;
        mg->invalidate = false;
        mg->cache = cache;
        mg->old_oblock = old_oblock;
        mg->new_oblock = new_oblock;
        mg->cblock = cblock;
        mg->old_ocell = old_ocell;
        mg->new_ocell = new_ocell;
        mg->start_jiffies = jiffies;

        inc_nr_migrations(cache);
        quiesce_migration(mg);
}

/*
 * Invalidate a cache entry.  No writeback occurs; any changes in the cache
 * block are thrown away.
 */
static void invalidate(struct cache *cache, struct prealloc *structs,
                       dm_oblock_t oblock, dm_cblock_t cblock,
                       struct dm_bio_prison_cell *cell)
{
        struct dm_cache_migration *mg = prealloc_get_migration(structs);

        mg->err = false;
        mg->writeback = false;
        mg->demote = true;
        mg->promote = false;
        mg->requeue_holder = true;
        mg->invalidate = true;
        mg->cache = cache;
        mg->old_oblock = oblock;
        mg->cblock = cblock;
        mg->old_ocell = cell;
        mg->new_ocell = NULL;
        mg->start_jiffies = jiffies;

        inc_nr_migrations(cache);
        quiesce_migration(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/
static void defer_bio(struct cache *cache, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&cache->lock, flags);
        bio_list_add(&cache->deferred_bios, bio);
        spin_unlock_irqrestore(&cache->lock, flags);

        wake_worker(cache);
}

static void process_flush_bio(struct cache *cache, struct bio *bio)
{
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

        BUG_ON(bio->bi_iter.bi_size);
        if (!pb->req_nr)
                remap_to_origin(cache, bio);
        else
                remap_to_cache(cache, bio, 0);

        issue(cache, bio);
}
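
/*
 * Note on process_flush_bio() above: the target asks for two flush
 * bios per flush (ti->num_flush_bios = 2 in cache_create() below), so
 * req_nr 0 is remapped to the origin and req_nr 1 to the cache device,
 * and both devices see the flush.
 */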
/*
 * People generally discard large parts of a device, eg, the whole device
 * when formatting.  Splitting these large discards up into cache block
 * sized ios and then quiescing (always necessary for discard) takes too
 * long.
 *
 * We keep it simple, and allow any size of discard to come in, and just
 * mark off blocks on the discard bitset.  No passdown occurs!
 *
 * To implement passdown we need to change the bio_prison such that a cell
 * can have a key that spans many blocks.
 */
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
        dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
                                                  cache->discard_block_size);
        dm_block_t end_block = bio_end_sector(bio);
        dm_block_t b;

        end_block = block_div(end_block, cache->discard_block_size);

        for (b = start_block; b < end_block; b++)
                set_discard(cache, to_dblock(b));

        bio_endio(bio, 0);
}

static bool spare_migration_bandwidth(struct cache *cache)
{
        sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
                cache->sectors_per_block;
        return current_volume < cache->migration_threshold;
}

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
        atomic_inc(bio_data_dir(bio) == READ ?
                   &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
        atomic_inc(bio_data_dir(bio) == READ ?
                   &cache->stats.read_miss : &cache->stats.write_miss);
}

static void issue_cache_bio(struct cache *cache, struct bio *bio,
                            struct per_bio_data *pb,
                            dm_oblock_t oblock, dm_cblock_t cblock)
{
        pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
        remap_to_cache_dirty(cache, bio, oblock, cblock);
        issue(cache, bio);
}
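
/*
 * Worked example for spare_migration_bandwidth() above: with the
 * default migration_threshold of 2048 sectors and an illustrative
 * 64-sector cache block, roughly 2048 / 64 = 32 migrations may be in
 * flight before new migrations are denied.
 */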
static void process_bio(struct cache *cache, struct prealloc *structs,
                        struct bio *bio)
{
        int r;
        bool release_cell = true;
        dm_oblock_t block = get_bio_block(cache, bio);
        struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
        struct policy_result lookup_result;
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
        bool discarded_block = is_discarded_oblock(cache, block);
        bool passthrough = passthrough_mode(&cache->features);
        bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));

        /*
         * Check to see if that block is currently migrating.
         */
        cell_prealloc = prealloc_get_cell(structs);
        r = bio_detain(cache, block, bio, cell_prealloc,
                       (cell_free_fn) prealloc_put_cell,
                       structs, &new_ocell);
        if (r > 0)
                return;

        r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
                       bio, &lookup_result);

        if (r == -EWOULDBLOCK)
                /* migration has been denied */
                lookup_result.op = POLICY_MISS;

        switch (lookup_result.op) {
        case POLICY_HIT:
                if (passthrough) {
                        inc_miss_counter(cache, bio);

                        /*
                         * Passthrough always maps to the origin,
                         * invalidating any cache blocks that are written
                         * to.
                         */

                        if (bio_data_dir(bio) == WRITE) {
                                atomic_inc(&cache->stats.demotion);
                                invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
                                release_cell = false;

                        } else {
                                /* FIXME: factor out issue_origin() */
                                pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
                                remap_to_origin_clear_discard(cache, bio, block);
                                issue(cache, bio);
                        }
                } else {
                        inc_hit_counter(cache, bio);

                        if (bio_data_dir(bio) == WRITE &&
                            writethrough_mode(&cache->features) &&
                            !is_dirty(cache, lookup_result.cblock)) {
                                pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
                                remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
                                issue(cache, bio);
                        } else
                                issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
                }

                break;

        case POLICY_MISS:
                inc_miss_counter(cache, bio);
                pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
                remap_to_origin_clear_discard(cache, bio, block);
                issue(cache, bio);
                break;

        case POLICY_NEW:
                atomic_inc(&cache->stats.promotion);
                promote(cache, structs, block, lookup_result.cblock, new_ocell);
                release_cell = false;
                break;

        case POLICY_REPLACE:
                cell_prealloc = prealloc_get_cell(structs);
                r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
                               (cell_free_fn) prealloc_put_cell,
                               structs, &old_ocell);
                if (r > 0) {
                        /*
                         * We have to be careful to avoid lock inversion of
                         * the cells.  So we back off, and wait for the
                         * old_ocell to become free.
                         */
                        policy_force_mapping(cache->policy, block,
                                             lookup_result.old_oblock);
                        atomic_inc(&cache->stats.cache_cell_clash);
                        break;
                }
                atomic_inc(&cache->stats.demotion);
                atomic_inc(&cache->stats.promotion);

                demote_then_promote(cache, structs, lookup_result.old_oblock,
                                    block, lookup_result.cblock,
                                    old_ocell, new_ocell);
                release_cell = false;
                break;

        default:
                DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
                            (unsigned) lookup_result.op);
                bio_io_error(bio);
        }

        if (release_cell)
                cell_defer(cache, new_ocell, false);
}

static int need_commit_due_to_time(struct cache *cache)
{
        return jiffies < cache->last_commit_jiffies ||
               jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
}

static int commit_if_needed(struct cache *cache)
{
        int r = 0;

        if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
            dm_cache_changed_this_transaction(cache->cmd)) {
                atomic_inc(&cache->stats.commit_count);
                cache->commit_requested = false;
                r = dm_cache_commit(cache->cmd, false);
                cache->last_commit_jiffies = jiffies;
        }

        return r;
}
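
/*
 * need_commit_due_to_time() above commits once more than COMMIT_PERIOD
 * jiffies have elapsed; the first comparison also forces a commit if
 * jiffies has wrapped and the saved timestamp appears to be in the
 * future.
 */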
static void process_deferred_bios(struct cache *cache)
{
        unsigned long flags;
        struct bio_list bios;
        struct bio *bio;
        struct prealloc structs;

        memset(&structs, 0, sizeof(structs));
        bio_list_init(&bios);

        spin_lock_irqsave(&cache->lock, flags);
        bio_list_merge(&bios, &cache->deferred_bios);
        bio_list_init(&cache->deferred_bios);
        spin_unlock_irqrestore(&cache->lock, flags);

        while (!bio_list_empty(&bios)) {
                /*
                 * If we've got no free migration structs, and processing
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
                if (prealloc_data_structs(cache, &structs)) {
                        spin_lock_irqsave(&cache->lock, flags);
                        bio_list_merge(&cache->deferred_bios, &bios);
                        spin_unlock_irqrestore(&cache->lock, flags);
                        break;
                }

                bio = bio_list_pop(&bios);

                if (bio->bi_rw & REQ_FLUSH)
                        process_flush_bio(cache, bio);
                else if (bio->bi_rw & REQ_DISCARD)
                        process_discard_bio(cache, bio);
                else
                        process_bio(cache, &structs, bio);
        }

        prealloc_free_structs(cache, &structs);
}

static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
        unsigned long flags;
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);

        spin_lock_irqsave(&cache->lock, flags);
        bio_list_merge(&bios, &cache->deferred_flush_bios);
        bio_list_init(&cache->deferred_flush_bios);
        spin_unlock_irqrestore(&cache->lock, flags);

        while ((bio = bio_list_pop(&bios)))
                submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

static void process_deferred_writethrough_bios(struct cache *cache)
{
        unsigned long flags;
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);

        spin_lock_irqsave(&cache->lock, flags);
        bio_list_merge(&bios, &cache->deferred_writethrough_bios);
        bio_list_init(&cache->deferred_writethrough_bios);
        spin_unlock_irqrestore(&cache->lock, flags);

        while ((bio = bio_list_pop(&bios)))
                generic_make_request(bio);
}

static void writeback_some_dirty_blocks(struct cache *cache)
{
        int r = 0;
        dm_oblock_t oblock;
        dm_cblock_t cblock;
        struct prealloc structs;
        struct dm_bio_prison_cell *old_ocell;

        memset(&structs, 0, sizeof(structs));

        while (spare_migration_bandwidth(cache)) {
                if (prealloc_data_structs(cache, &structs))
                        break;

                r = policy_writeback_work(cache->policy, &oblock, &cblock);
                if (r)
                        break;

                r = get_cell(cache, oblock, &structs, &old_ocell);
                if (r) {
                        policy_set_dirty(cache->policy, oblock);
                        break;
                }

                writeback(cache, &structs, oblock, cblock, old_ocell);
        }

        prealloc_free_structs(cache, &structs);
}
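
/*
 * Note on writeback_some_dirty_blocks() above: dirty blocks are
 * written back opportunistically and only while
 * spare_migration_bandwidth() allows it; if the cell for a block can't
 * be taken, the block is re-marked dirty and retried on a later pass.
 */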
/*----------------------------------------------------------------
 * Invalidations.
 * Dropping something from the cache *without* writing back.
 *--------------------------------------------------------------*/

static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
{
        int r = 0;
        uint64_t begin = from_cblock(req->cblocks->begin);
        uint64_t end = from_cblock(req->cblocks->end);

        while (begin != end) {
                r = policy_remove_cblock(cache->policy, to_cblock(begin));
                if (!r) {
                        r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
                        if (r)
                                break;

                } else if (r == -ENODATA) {
                        /* harmless, already unmapped */
                        r = 0;

                } else {
                        DMERR("policy_remove_cblock failed");
                        break;
                }

                begin++;
        }

        cache->commit_requested = true;

        req->err = r;
        atomic_set(&req->complete, 1);

        wake_up(&req->result_wait);
}

static void process_invalidation_requests(struct cache *cache)
{
        struct list_head list;
        struct invalidation_request *req, *tmp;

        INIT_LIST_HEAD(&list);
        spin_lock(&cache->invalidation_lock);
        list_splice_init(&cache->invalidation_requests, &list);
        spin_unlock(&cache->invalidation_lock);

        list_for_each_entry_safe (req, tmp, &list, list)
                process_invalidation_request(cache, req);
}

/*----------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------*/
static bool is_quiescing(struct cache *cache)
{
        return atomic_read(&cache->quiescing);
}

static void ack_quiescing(struct cache *cache)
{
        if (is_quiescing(cache)) {
                atomic_inc(&cache->quiescing_ack);
                wake_up(&cache->quiescing_wait);
        }
}

static void wait_for_quiescing_ack(struct cache *cache)
{
        wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
}

static void start_quiescing(struct cache *cache)
{
        atomic_inc(&cache->quiescing);
        wait_for_quiescing_ack(cache);
}

static void stop_quiescing(struct cache *cache)
{
        atomic_set(&cache->quiescing, 0);
        atomic_set(&cache->quiescing_ack, 0);
}

static void wait_for_migrations(struct cache *cache)
{
        wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
}

static void stop_worker(struct cache *cache)
{
        cancel_delayed_work(&cache->waker);
        flush_workqueue(cache->wq);
}

static void requeue_deferred_io(struct cache *cache)
{
        struct bio *bio;
        struct bio_list bios;

        bio_list_init(&bios);
        bio_list_merge(&bios, &cache->deferred_bios);
        bio_list_init(&cache->deferred_bios);

        while ((bio = bio_list_pop(&bios)))
                bio_endio(bio, DM_ENDIO_REQUEUE);
}

static int more_work(struct cache *cache)
{
        if (is_quiescing(cache))
                return !list_empty(&cache->quiesced_migrations) ||
                        !list_empty(&cache->completed_migrations) ||
                        !list_empty(&cache->need_commit_migrations);
        else
                return !bio_list_empty(&cache->deferred_bios) ||
                        !bio_list_empty(&cache->deferred_flush_bios) ||
                        !bio_list_empty(&cache->deferred_writethrough_bios) ||
                        !list_empty(&cache->quiesced_migrations) ||
                        !list_empty(&cache->completed_migrations) ||
                        !list_empty(&cache->need_commit_migrations) ||
                        cache->invalidate;
}
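
/*
 * The suspend/quiesce handshake, summarising the helpers above: the
 * suspend path sets 'quiescing' and sleeps in wait_for_quiescing_ack();
 * the worker notices via is_quiescing(), stops taking new bios, and
 * acknowledges through ack_quiescing(), which wakes the waiter.
 */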
static void do_worker(struct work_struct *ws)
{
        struct cache *cache = container_of(ws, struct cache, worker);

        do {
                if (!is_quiescing(cache)) {
                        writeback_some_dirty_blocks(cache);
                        process_deferred_writethrough_bios(cache);
                        process_deferred_bios(cache);
                        process_invalidation_requests(cache);
                }

                process_migrations(cache, &cache->quiesced_migrations, issue_copy);
                process_migrations(cache, &cache->completed_migrations, complete_migration);

                if (commit_if_needed(cache)) {
                        process_deferred_flush_bios(cache, false);

                        /*
                         * FIXME: rollback metadata or just go into a
                         * failure mode and error everything
                         */
                } else {
                        process_deferred_flush_bios(cache, true);
                        process_migrations(cache, &cache->need_commit_migrations,
                                           migration_success_post_commit);
                }

                ack_quiescing(cache);

        } while (more_work(cache));
}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{
        struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
        policy_tick(cache->policy);
        wake_worker(cache);
        queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}
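
/*
 * With COMMIT_PERIOD == HZ the waker fires roughly once a second:
 * often enough to bound the amount of uncommitted metadata and to let
 * the policy age its statistics via policy_tick().
 */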
/*----------------------------------------------------------------*/

static int is_congested(struct dm_dev *dev, int bdi_bits)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);
        return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
        struct cache *cache = container_of(cb, struct cache, callbacks);

        return is_congested(cache->origin_dev, bdi_bits) ||
                is_congested(cache->cache_dev, bdi_bits);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{
        unsigned i;

        if (cache->next_migration)
                mempool_free(cache->next_migration, cache->migration_pool);

        if (cache->migration_pool)
                mempool_destroy(cache->migration_pool);

        if (cache->all_io_ds)
                dm_deferred_set_destroy(cache->all_io_ds);

        if (cache->prison)
                dm_bio_prison_destroy(cache->prison);

        if (cache->wq)
                destroy_workqueue(cache->wq);

        if (cache->dirty_bitset)
                free_bitset(cache->dirty_bitset);

        if (cache->discard_bitset)
                free_bitset(cache->discard_bitset);

        if (cache->copier)
                dm_kcopyd_client_destroy(cache->copier);

        if (cache->cmd)
                dm_cache_metadata_close(cache->cmd);

        if (cache->metadata_dev)
                dm_put_device(cache->ti, cache->metadata_dev);

        if (cache->origin_dev)
                dm_put_device(cache->ti, cache->origin_dev);

        if (cache->cache_dev)
                dm_put_device(cache->ti, cache->cache_dev);

        if (cache->policy)
                dm_cache_policy_destroy(cache->policy);

        for (i = 0; i < cache->nr_ctr_args; i++)
                kfree(cache->ctr_args[i]);
        kfree(cache->ctr_args);

        kfree(cache);
}

static void cache_dtr(struct dm_target *ti)
{
        struct cache *cache = ti->private;

        destroy(cache);
}

static sector_t get_dev_size(struct dm_dev *dev)
{
        return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev  : fast device holding the persistent metadata
 * cache dev     : fast device holding cached data blocks
 * origin dev    : slow device holding original data blocks
 * block size    : cache unit size in sectors
 *
 * #feature args : number of feature arguments passed
 * feature args  : writethrough or passthrough.  (The default is writeback.)
 *
 * policy        : the replacement policy to use
 * #policy args  : an even number of policy arguments corresponding
 *                 to key/value pairs passed to the policy
 * policy args   : key/value pairs passed to the policy
 *                 E.g. 'sequential_threshold 1024'
 *                 See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough : write through caching that prohibits cache block
 *                  content from being different from origin block content.
 *                  Without this argument, the default behaviour is to write
 *                  back cache block contents later for performance reasons,
 *                  so they may differ from the corresponding origin blocks.
 *   passthrough  : a degraded mode useful for cache coherency situations;
 *                  reads and writes always go to the origin, and writes
 *                  invalidate any cached copy of the block.
 */
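
/*
 * A hypothetical example table line (device names invented for
 * illustration):
 *
 *   cache /dev/mapper/meta /dev/mapper/ssd /dev/mapper/hdd 512 \
 *         1 writeback default 0
 *
 * i.e. 512-sector (256KB) cache blocks, writeback given explicitly,
 * and the 'default' policy with no policy arguments.
 */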
struct cache_args {
        struct dm_target *ti;

        struct dm_dev *metadata_dev;

        struct dm_dev *cache_dev;
        sector_t cache_sectors;

        struct dm_dev *origin_dev;
        sector_t origin_sectors;

        uint32_t block_size;

        const char *policy_name;
        int policy_argc;
        const char **policy_argv;

        struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
        if (ca->metadata_dev)
                dm_put_device(ca->ti, ca->metadata_dev);

        if (ca->cache_dev)
                dm_put_device(ca->ti, ca->cache_dev);

        if (ca->origin_dev)
                dm_put_device(ca->ti, ca->origin_dev);

        kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
        if (!as->argc) {
                *error = "Insufficient args";
                return false;
        }

        return true;
}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
                              char **error)
{
        int r;
        sector_t metadata_dev_size;
        char b[BDEVNAME_SIZE];

        if (!at_least_one_arg(as, error))
                return -EINVAL;

        r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
                          &ca->metadata_dev);
        if (r) {
                *error = "Error opening metadata device";
                return r;
        }

        metadata_dev_size = get_dev_size(ca->metadata_dev);
        if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
                DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
                       bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS);

        return 0;
}

static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
                           char **error)
{
        int r;

        if (!at_least_one_arg(as, error))
                return -EINVAL;

        r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
                          &ca->cache_dev);
        if (r) {
                *error = "Error opening cache device";
                return r;
        }
        ca->cache_sectors = get_dev_size(ca->cache_dev);

        return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
                            char **error)
{
        int r;

        if (!at_least_one_arg(as, error))
                return -EINVAL;

        r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
                          &ca->origin_dev);
        if (r) {
                *error = "Error opening origin device";
                return r;
        }

        ca->origin_sectors = get_dev_size(ca->origin_dev);
        if (ca->ti->len > ca->origin_sectors) {
                *error = "Device size larger than cached device";
                return -EINVAL;
        }

        return 0;
}

static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
                            char **error)
{
        unsigned long block_size;

        if (!at_least_one_arg(as, error))
                return -EINVAL;

        if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
            block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
            block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
            block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
                *error = "Invalid data block size";
                return -EINVAL;
        }

        if (block_size > ca->cache_sectors) {
                *error = "Data block size is larger than the cache device";
                return -EINVAL;
        }

        ca->block_size = block_size;

        return 0;
}

static void init_features(struct cache_features *cf)
{
        cf->mode = CM_WRITE;
        cf->io_mode = CM_IO_WRITEBACK;
}
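
/*
 * Example feature sections accepted by parse_features() below
 * (illustrative): "0" keeps the writeback default, while
 * "1 writethrough" or "1 passthrough" select the other io modes.
 */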
CM_IO_WRITEBACK; 2002 } 2003 2004 static int parse_features(struct cache_args *ca, struct dm_arg_set *as, 2005 char **error) 2006 { 2007 static struct dm_arg _args[] = { 2008 {0, 1, "Invalid number of cache feature arguments"}, 2009 }; 2010 2011 int r; 2012 unsigned argc; 2013 const char *arg; 2014 struct cache_features *cf = &ca->features; 2015 2016 init_features(cf); 2017 2018 r = dm_read_arg_group(_args, as, &argc, error); 2019 if (r) 2020 return -EINVAL; 2021 2022 while (argc--) { 2023 arg = dm_shift_arg(as); 2024 2025 if (!strcasecmp(arg, "writeback")) 2026 cf->io_mode = CM_IO_WRITEBACK; 2027 2028 else if (!strcasecmp(arg, "writethrough")) 2029 cf->io_mode = CM_IO_WRITETHROUGH; 2030 2031 else if (!strcasecmp(arg, "passthrough")) 2032 cf->io_mode = CM_IO_PASSTHROUGH; 2033 2034 else { 2035 *error = "Unrecognised cache feature requested"; 2036 return -EINVAL; 2037 } 2038 } 2039 2040 return 0; 2041 } 2042 2043 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as, 2044 char **error) 2045 { 2046 static struct dm_arg _args[] = { 2047 {0, 1024, "Invalid number of policy arguments"}, 2048 }; 2049 2050 int r; 2051 2052 if (!at_least_one_arg(as, error)) 2053 return -EINVAL; 2054 2055 ca->policy_name = dm_shift_arg(as); 2056 2057 r = dm_read_arg_group(_args, as, &ca->policy_argc, error); 2058 if (r) 2059 return -EINVAL; 2060 2061 ca->policy_argv = (const char **)as->argv; 2062 dm_consume_args(as, ca->policy_argc); 2063 2064 return 0; 2065 } 2066 2067 static int parse_cache_args(struct cache_args *ca, int argc, char **argv, 2068 char **error) 2069 { 2070 int r; 2071 struct dm_arg_set as; 2072 2073 as.argc = argc; 2074 as.argv = argv; 2075 2076 r = parse_metadata_dev(ca, &as, error); 2077 if (r) 2078 return r; 2079 2080 r = parse_cache_dev(ca, &as, error); 2081 if (r) 2082 return r; 2083 2084 r = parse_origin_dev(ca, &as, error); 2085 if (r) 2086 return r; 2087 2088 r = parse_block_size(ca, &as, error); 2089 if (r) 2090 return r; 2091 2092 r = parse_features(ca, &as, error); 2093 if (r) 2094 return r; 2095 2096 r = parse_policy(ca, &as, error); 2097 if (r) 2098 return r; 2099 2100 return 0; 2101 } 2102 2103 /*----------------------------------------------------------------*/ 2104 2105 static struct kmem_cache *migration_cache; 2106 2107 #define NOT_CORE_OPTION 1 2108 2109 static int process_config_option(struct cache *cache, const char *key, const char *value) 2110 { 2111 unsigned long tmp; 2112 2113 if (!strcasecmp(key, "migration_threshold")) { 2114 if (kstrtoul(value, 10, &tmp)) 2115 return -EINVAL; 2116 2117 cache->migration_threshold = tmp; 2118 return 0; 2119 } 2120 2121 return NOT_CORE_OPTION; 2122 } 2123 2124 static int set_config_value(struct cache *cache, const char *key, const char *value) 2125 { 2126 int r = process_config_option(cache, key, value); 2127 2128 if (r == NOT_CORE_OPTION) 2129 r = policy_set_config_value(cache->policy, key, value); 2130 2131 if (r) 2132 DMWARN("bad config value for %s: %s", key, value); 2133 2134 return r; 2135 } 2136 2137 static int set_config_values(struct cache *cache, int argc, const char **argv) 2138 { 2139 int r = 0; 2140 2141 if (argc & 1) { 2142 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs."); 2143 return -EINVAL; 2144 } 2145 2146 while (argc) { 2147 r = set_config_value(cache, argv[0], argv[1]); 2148 if (r) 2149 break; 2150 2151 argc -= 2; 2152 argv += 2; 2153 } 2154 2155 return r; 2156 } 2157 2158 static int create_cache_policy(struct cache *cache, struct cache_args *ca, 2159 char **error) 
2160 { 2161 struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name, 2162 cache->cache_size, 2163 cache->origin_sectors, 2164 cache->sectors_per_block); 2165 if (IS_ERR(p)) { 2166 *error = "Error creating cache's policy"; 2167 return PTR_ERR(p); 2168 } 2169 cache->policy = p; 2170 2171 return 0; 2172 } 2173 2174 /* 2175 * We want the discard block size to be a power of two, at least the size 2176 * of the cache block size, and have no more than 2^14 discard blocks 2177 * across the origin. 2178 */ 2179 #define MAX_DISCARD_BLOCKS (1 << 14) 2180 2181 static bool too_many_discard_blocks(sector_t discard_block_size, 2182 sector_t origin_size) 2183 { 2184 (void) sector_div(origin_size, discard_block_size); 2185 2186 return origin_size > MAX_DISCARD_BLOCKS; 2187 } 2188 2189 static sector_t calculate_discard_block_size(sector_t cache_block_size, 2190 sector_t origin_size) 2191 { 2192 sector_t discard_block_size; 2193 2194 discard_block_size = roundup_pow_of_two(cache_block_size); 2195 2196 if (origin_size) 2197 while (too_many_discard_blocks(discard_block_size, origin_size)) 2198 discard_block_size *= 2; 2199 2200 return discard_block_size; 2201 } 2202 2203 #define DEFAULT_MIGRATION_THRESHOLD 2048 2204 2205 static int cache_create(struct cache_args *ca, struct cache **result) 2206 { 2207 int r = 0; 2208 char **error = &ca->ti->error; 2209 struct cache *cache; 2210 struct dm_target *ti = ca->ti; 2211 dm_block_t origin_blocks; 2212 struct dm_cache_metadata *cmd; 2213 bool may_format = ca->features.mode == CM_WRITE; 2214 2215 cache = kzalloc(sizeof(*cache), GFP_KERNEL); 2216 if (!cache) 2217 return -ENOMEM; 2218 2219 cache->ti = ca->ti; 2220 ti->private = cache; 2221 ti->num_flush_bios = 2; 2222 ti->flush_supported = true; 2223 2224 ti->num_discard_bios = 1; 2225 ti->discards_supported = true; 2226 ti->discard_zeroes_data_unsupported = true; 2227 2228 cache->features = ca->features; 2229 ti->per_bio_data_size = get_per_bio_data_size(cache); 2230 2231 cache->callbacks.congested_fn = cache_is_congested; 2232 dm_table_add_target_callbacks(ti->table, &cache->callbacks); 2233 2234 cache->metadata_dev = ca->metadata_dev; 2235 cache->origin_dev = ca->origin_dev; 2236 cache->cache_dev = ca->cache_dev; 2237 2238 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL; 2239 2240 /* FIXME: factor out this whole section */ 2241 origin_blocks = cache->origin_sectors = ca->origin_sectors; 2242 origin_blocks = block_div(origin_blocks, ca->block_size); 2243 cache->origin_blocks = to_oblock(origin_blocks); 2244 2245 cache->sectors_per_block = ca->block_size; 2246 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) { 2247 r = -EINVAL; 2248 goto bad; 2249 } 2250 2251 if (ca->block_size & (ca->block_size - 1)) { 2252 dm_block_t cache_size = ca->cache_sectors; 2253 2254 cache->sectors_per_block_shift = -1; 2255 cache_size = block_div(cache_size, ca->block_size); 2256 cache->cache_size = to_cblock(cache_size); 2257 } else { 2258 cache->sectors_per_block_shift = __ffs(ca->block_size); 2259 cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift); 2260 } 2261 2262 r = create_cache_policy(cache, ca, error); 2263 if (r) 2264 goto bad; 2265 2266 cache->policy_nr_args = ca->policy_argc; 2267 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; 2268 2269 r = set_config_values(cache, ca->policy_argc, ca->policy_argv); 2270 if (r) { 2271 *error = "Error setting cache policy's config values"; 2272 goto bad; 2273 } 2274 2275 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, 
2276 ca->block_size, may_format, 2277 dm_cache_policy_get_hint_size(cache->policy)); 2278 if (IS_ERR(cmd)) { 2279 *error = "Error creating metadata object"; 2280 r = PTR_ERR(cmd); 2281 goto bad; 2282 } 2283 cache->cmd = cmd; 2284 2285 if (passthrough_mode(&cache->features)) { 2286 bool all_clean; 2287 2288 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); 2289 if (r) { 2290 *error = "dm_cache_metadata_all_clean() failed"; 2291 goto bad; 2292 } 2293 2294 if (!all_clean) { 2295 *error = "Cannot enter passthrough mode unless all blocks are clean"; 2296 r = -EINVAL; 2297 goto bad; 2298 } 2299 } 2300 2301 spin_lock_init(&cache->lock); 2302 bio_list_init(&cache->deferred_bios); 2303 bio_list_init(&cache->deferred_flush_bios); 2304 bio_list_init(&cache->deferred_writethrough_bios); 2305 INIT_LIST_HEAD(&cache->quiesced_migrations); 2306 INIT_LIST_HEAD(&cache->completed_migrations); 2307 INIT_LIST_HEAD(&cache->need_commit_migrations); 2308 atomic_set(&cache->nr_migrations, 0); 2309 init_waitqueue_head(&cache->migration_wait); 2310 2311 init_waitqueue_head(&cache->quiescing_wait); 2312 atomic_set(&cache->quiescing, 0); 2313 atomic_set(&cache->quiescing_ack, 0); 2314 2315 r = -ENOMEM; 2316 cache->nr_dirty = 0; 2317 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); 2318 if (!cache->dirty_bitset) { 2319 *error = "could not allocate dirty bitset"; 2320 goto bad; 2321 } 2322 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); 2323 2324 cache->discard_block_size = 2325 calculate_discard_block_size(cache->sectors_per_block, 2326 cache->origin_sectors); 2327 cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); 2328 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); 2329 if (!cache->discard_bitset) { 2330 *error = "could not allocate discard bitset"; 2331 goto bad; 2332 } 2333 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); 2334 2335 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); 2336 if (IS_ERR(cache->copier)) { 2337 *error = "could not create kcopyd client"; 2338 r = PTR_ERR(cache->copier); 2339 goto bad; 2340 } 2341 2342 cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); 2343 if (!cache->wq) { 2344 *error = "could not create workqueue for metadata object"; 2345 goto bad; 2346 } 2347 INIT_WORK(&cache->worker, do_worker); 2348 INIT_DELAYED_WORK(&cache->waker, do_waker); 2349 cache->last_commit_jiffies = jiffies; 2350 2351 cache->prison = dm_bio_prison_create(PRISON_CELLS); 2352 if (!cache->prison) { 2353 *error = "could not create bio prison"; 2354 goto bad; 2355 } 2356 2357 cache->all_io_ds = dm_deferred_set_create(); 2358 if (!cache->all_io_ds) { 2359 *error = "could not create all_io deferred set"; 2360 goto bad; 2361 } 2362 2363 cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE, 2364 migration_cache); 2365 if (!cache->migration_pool) { 2366 *error = "Error creating cache's migration mempool"; 2367 goto bad; 2368 } 2369 2370 cache->next_migration = NULL; 2371 2372 cache->need_tick_bio = true; 2373 cache->sized = false; 2374 cache->invalidate = false; 2375 cache->commit_requested = false; 2376 cache->loaded_mappings = false; 2377 cache->loaded_discards = false; 2378 2379 load_stats(cache); 2380 2381 atomic_set(&cache->stats.demotion, 0); 2382 atomic_set(&cache->stats.promotion, 0); 2383 atomic_set(&cache->stats.copies_avoided, 0); 2384 atomic_set(&cache->stats.cache_cell_clash, 0); 2385 atomic_set(&cache->stats.commit_count, 0); 2386 
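	/*
	 * The counters above (and discard_count below) are transient and
	 * restart from zero on each activation, whereas the stats restored
	 * by load_stats() come from the last committed metadata.
	 */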
atomic_set(&cache->stats.discard_count, 0); 2387 2388 spin_lock_init(&cache->invalidation_lock); 2389 INIT_LIST_HEAD(&cache->invalidation_requests); 2390 2391 *result = cache; 2392 return 0; 2393 2394 bad: 2395 destroy(cache); 2396 return r; 2397 } 2398 2399 static int copy_ctr_args(struct cache *cache, int argc, const char **argv) 2400 { 2401 unsigned i; 2402 const char **copy; 2403 2404 copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL); 2405 if (!copy) 2406 return -ENOMEM; 2407 for (i = 0; i < argc; i++) { 2408 copy[i] = kstrdup(argv[i], GFP_KERNEL); 2409 if (!copy[i]) { 2410 while (i--) 2411 kfree(copy[i]); 2412 kfree(copy); 2413 return -ENOMEM; 2414 } 2415 } 2416 2417 cache->nr_ctr_args = argc; 2418 cache->ctr_args = copy; 2419 2420 return 0; 2421 } 2422 2423 static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv) 2424 { 2425 int r = -EINVAL; 2426 struct cache_args *ca; 2427 struct cache *cache = NULL; 2428 2429 ca = kzalloc(sizeof(*ca), GFP_KERNEL); 2430 if (!ca) { 2431 ti->error = "Error allocating memory for cache"; 2432 return -ENOMEM; 2433 } 2434 ca->ti = ti; 2435 2436 r = parse_cache_args(ca, argc, argv, &ti->error); 2437 if (r) 2438 goto out; 2439 2440 r = cache_create(ca, &cache); 2441 if (r) 2442 goto out; 2443 2444 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); 2445 if (r) { 2446 destroy(cache); 2447 goto out; 2448 } 2449 2450 ti->private = cache; 2451 2452 out: 2453 destroy_cache_args(ca); 2454 return r; 2455 } 2456 2457 static int cache_map(struct dm_target *ti, struct bio *bio) 2458 { 2459 struct cache *cache = ti->private; 2460 2461 int r; 2462 dm_oblock_t block = get_bio_block(cache, bio); 2463 size_t pb_data_size = get_per_bio_data_size(cache); 2464 bool can_migrate = false; 2465 bool discarded_block; 2466 struct dm_bio_prison_cell *cell; 2467 struct policy_result lookup_result; 2468 struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); 2469 2470 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { 2471 /* 2472 * This can only occur if the io goes to a partial block at 2473 * the end of the origin device. We don't cache these. 2474 * Just remap to the origin and carry on. 2475 */ 2476 remap_to_origin(cache, bio); 2477 return DM_MAPIO_REMAPPED; 2478 } 2479 2480 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { 2481 defer_bio(cache, bio); 2482 return DM_MAPIO_SUBMITTED; 2483 } 2484 2485 /* 2486 * Check to see if that block is currently migrating. 2487 */ 2488 cell = alloc_prison_cell(cache); 2489 if (!cell) { 2490 defer_bio(cache, bio); 2491 return DM_MAPIO_SUBMITTED; 2492 } 2493 2494 r = bio_detain(cache, block, bio, cell, 2495 (cell_free_fn) free_prison_cell, 2496 cache, &cell); 2497 if (r) { 2498 if (r < 0) 2499 defer_bio(cache, bio); 2500 2501 return DM_MAPIO_SUBMITTED; 2502 } 2503 2504 discarded_block = is_discarded_oblock(cache, block); 2505 2506 r = policy_map(cache->policy, block, false, can_migrate, discarded_block, 2507 bio, &lookup_result); 2508 if (r == -EWOULDBLOCK) { 2509 cell_defer(cache, cell, true); 2510 return DM_MAPIO_SUBMITTED; 2511 2512 } else if (r) { 2513 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r); 2514 bio_io_error(bio); 2515 return DM_MAPIO_SUBMITTED; 2516 } 2517 2518 r = DM_MAPIO_REMAPPED; 2519 switch (lookup_result.op) { 2520 case POLICY_HIT: 2521 if (passthrough_mode(&cache->features)) { 2522 if (bio_data_dir(bio) == WRITE) { 2523 /* 2524 * We need to invalidate this block, so 2525 * defer for the worker thread. 
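				 * (A write in passthrough mode makes any
				 * cached copy of this oblock stale, so the
				 * mapping must be invalidated before the
				 * write is allowed through to the origin.)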
2526 */ 2527 cell_defer(cache, cell, true); 2528 r = DM_MAPIO_SUBMITTED; 2529 2530 } else { 2531 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 2532 inc_miss_counter(cache, bio); 2533 remap_to_origin_clear_discard(cache, bio, block); 2534 2535 cell_defer(cache, cell, false); 2536 } 2537 2538 } else { 2539 inc_hit_counter(cache, bio); 2540 2541 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && 2542 !is_dirty(cache, lookup_result.cblock)) 2543 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); 2544 else 2545 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); 2546 2547 cell_defer(cache, cell, false); 2548 } 2549 break; 2550 2551 case POLICY_MISS: 2552 inc_miss_counter(cache, bio); 2553 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 2554 2555 if (pb->req_nr != 0) { 2556 /* 2557 * This is a duplicate writethrough io that is no 2558 * longer needed because the block has been demoted. 2559 */ 2560 bio_endio(bio, 0); 2561 cell_defer(cache, cell, false); 2562 return DM_MAPIO_SUBMITTED; 2563 } else { 2564 remap_to_origin_clear_discard(cache, bio, block); 2565 cell_defer(cache, cell, false); 2566 } 2567 break; 2568 2569 default: 2570 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__, 2571 (unsigned) lookup_result.op); 2572 bio_io_error(bio); 2573 r = DM_MAPIO_SUBMITTED; 2574 } 2575 2576 return r; 2577 } 2578 2579 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) 2580 { 2581 struct cache *cache = ti->private; 2582 unsigned long flags; 2583 size_t pb_data_size = get_per_bio_data_size(cache); 2584 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 2585 2586 if (pb->tick) { 2587 policy_tick(cache->policy); 2588 2589 spin_lock_irqsave(&cache->lock, flags); 2590 cache->need_tick_bio = true; 2591 spin_unlock_irqrestore(&cache->lock, flags); 2592 } 2593 2594 check_for_quiesced_migrations(cache, pb); 2595 2596 return 0; 2597 } 2598 2599 static int write_dirty_bitset(struct cache *cache) 2600 { 2601 unsigned i, r; 2602 2603 for (i = 0; i < from_cblock(cache->cache_size); i++) { 2604 r = dm_cache_set_dirty(cache->cmd, to_cblock(i), 2605 is_dirty(cache, to_cblock(i))); 2606 if (r) 2607 return r; 2608 } 2609 2610 return 0; 2611 } 2612 2613 static int write_discard_bitset(struct cache *cache) 2614 { 2615 unsigned i, r; 2616 2617 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, 2618 cache->discard_nr_blocks); 2619 if (r) { 2620 DMERR("could not resize on-disk discard bitset"); 2621 return r; 2622 } 2623 2624 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { 2625 r = dm_cache_set_discard(cache->cmd, to_dblock(i), 2626 is_discarded(cache, to_dblock(i))); 2627 if (r) 2628 return r; 2629 } 2630 2631 return 0; 2632 } 2633 2634 static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, 2635 uint32_t hint) 2636 { 2637 struct cache *cache = context; 2638 return dm_cache_save_hint(cache->cmd, cblock, hint); 2639 } 2640 2641 static int write_hints(struct cache *cache) 2642 { 2643 int r; 2644 2645 r = dm_cache_begin_hints(cache->cmd, cache->policy); 2646 if (r) { 2647 DMERR("dm_cache_begin_hints failed"); 2648 return r; 2649 } 2650 2651 r = policy_walk_mappings(cache->policy, save_hint, cache); 2652 if (r) 2653 DMERR("policy_walk_mappings failed"); 2654 2655 return r; 2656 } 2657 2658 /* 2659 * returns true on success 2660 */ 2661 static bool sync_metadata(struct cache *cache) 2662 { 2663 int r1, r2, r3, r4; 2664 2665 r1 = 
write_dirty_bitset(cache);
	if (r1)
		DMERR("could not write dirty bitset");

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("could not write discard bitset");

	save_stats(cache);

	r3 = write_hints(cache);
	if (r3)
		DMERR("could not write hints");

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
	r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
	if (r4)
		DMERR("could not write cache metadata.  Data loss may occur.");

	return !r1 && !r2 && !r3 && !r4;
}

static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_io(cache);
	stop_quiescing(cache);

	(void) sync_metadata(cache);
}

static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{
	int r;
	struct cache *cache = context;

	r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
	if (r)
		return r;

	if (dirty)
		set_dirty(cache, oblock, cblock);
	else
		clear_dirty(cache, oblock, cblock);

	return 0;
}

static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct cache *cache = context;

	/* FIXME: handle mis-matched block size */

	if (discard)
		set_discard(cache, dblock);
	else
		clear_discard(cache, dblock);

	return 0;
}

static dm_cblock_t get_cache_dev_size(struct cache *cache)
{
	sector_t size = get_dev_size(cache->cache_dev);
	(void) sector_div(size, cache->sectors_per_block);
	return to_cblock(size);
}

static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
	if (from_cblock(new_size) > from_cblock(cache->cache_size))
		return true;

	/*
	 * We can't drop a dirty block when shrinking the cache, so check
	 * every cblock in [new_size, cache_size) that would be dropped.
	 */
	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
		if (is_dirty(cache, new_size)) {
			DMERR("unable to shrink cache; cache block %llu is dirty",
			      (unsigned long long) from_cblock(new_size));
			return false;
		}
		new_size = to_cblock(from_cblock(new_size) + 1);
	}

	return true;
}

static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("could not resize cache metadata");
		return r;
	}

	cache->cache_size = new_size;

	return 0;
}

static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	dm_cblock_t csize = get_cache_dev_size(cache);

	/*
	 * Check to see if the cache has resized.
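	 * An initial sizing is simply recorded.  Growing is always
	 * allowed, while shrinking is refused unless can_resize()
	 * confirms none of the cblocks being dropped are dirty.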
2787 */ 2788 if (!cache->sized) { 2789 r = resize_cache_dev(cache, csize); 2790 if (r) 2791 return r; 2792 2793 cache->sized = true; 2794 2795 } else if (csize != cache->cache_size) { 2796 if (!can_resize(cache, csize)) 2797 return -EINVAL; 2798 2799 r = resize_cache_dev(cache, csize); 2800 if (r) 2801 return r; 2802 } 2803 2804 if (!cache->loaded_mappings) { 2805 r = dm_cache_load_mappings(cache->cmd, cache->policy, 2806 load_mapping, cache); 2807 if (r) { 2808 DMERR("could not load cache mappings"); 2809 return r; 2810 } 2811 2812 cache->loaded_mappings = true; 2813 } 2814 2815 if (!cache->loaded_discards) { 2816 r = dm_cache_load_discards(cache->cmd, load_discard, cache); 2817 if (r) { 2818 DMERR("could not load origin discards"); 2819 return r; 2820 } 2821 2822 cache->loaded_discards = true; 2823 } 2824 2825 return r; 2826 } 2827 2828 static void cache_resume(struct dm_target *ti) 2829 { 2830 struct cache *cache = ti->private; 2831 2832 cache->need_tick_bio = true; 2833 do_waker(&cache->waker.work); 2834 } 2835 2836 /* 2837 * Status format: 2838 * 2839 * <metadata block size> <#used metadata blocks>/<#total metadata blocks> 2840 * <cache block size> <#used cache blocks>/<#total cache blocks> 2841 * <#read hits> <#read misses> <#write hits> <#write misses> 2842 * <#demotions> <#promotions> <#dirty> 2843 * <#features> <features>* 2844 * <#core args> <core args> 2845 * <policy name> <#policy args> <policy args>* 2846 */ 2847 static void cache_status(struct dm_target *ti, status_type_t type, 2848 unsigned status_flags, char *result, unsigned maxlen) 2849 { 2850 int r = 0; 2851 unsigned i; 2852 ssize_t sz = 0; 2853 dm_block_t nr_free_blocks_metadata = 0; 2854 dm_block_t nr_blocks_metadata = 0; 2855 char buf[BDEVNAME_SIZE]; 2856 struct cache *cache = ti->private; 2857 dm_cblock_t residency; 2858 2859 switch (type) { 2860 case STATUSTYPE_INFO: 2861 /* Commit to ensure statistics aren't out-of-date */ 2862 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) { 2863 r = dm_cache_commit(cache->cmd, false); 2864 if (r) 2865 DMERR("could not commit metadata for accurate status"); 2866 } 2867 2868 r = dm_cache_get_free_metadata_block_count(cache->cmd, 2869 &nr_free_blocks_metadata); 2870 if (r) { 2871 DMERR("could not get metadata free block count"); 2872 goto err; 2873 } 2874 2875 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); 2876 if (r) { 2877 DMERR("could not get metadata device size"); 2878 goto err; 2879 } 2880 2881 residency = policy_residency(cache->policy); 2882 2883 DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ", 2884 (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT), 2885 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), 2886 (unsigned long long)nr_blocks_metadata, 2887 cache->sectors_per_block, 2888 (unsigned long long) from_cblock(residency), 2889 (unsigned long long) from_cblock(cache->cache_size), 2890 (unsigned) atomic_read(&cache->stats.read_hit), 2891 (unsigned) atomic_read(&cache->stats.read_miss), 2892 (unsigned) atomic_read(&cache->stats.write_hit), 2893 (unsigned) atomic_read(&cache->stats.write_miss), 2894 (unsigned) atomic_read(&cache->stats.demotion), 2895 (unsigned) atomic_read(&cache->stats.promotion), 2896 (unsigned long long) from_cblock(cache->nr_dirty)); 2897 2898 if (writethrough_mode(&cache->features)) 2899 DMEMIT("1 writethrough "); 2900 2901 else if (passthrough_mode(&cache->features)) 2902 DMEMIT("1 passthrough "); 2903 2904 else if (writeback_mode(&cache->features)) 2905 DMEMIT("1 writeback 
"); 2906 2907 else { 2908 DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode); 2909 goto err; 2910 } 2911 2912 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); 2913 2914 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); 2915 if (sz < maxlen) { 2916 r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz); 2917 if (r) 2918 DMERR("policy_emit_config_values returned %d", r); 2919 } 2920 2921 break; 2922 2923 case STATUSTYPE_TABLE: 2924 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); 2925 DMEMIT("%s ", buf); 2926 format_dev_t(buf, cache->cache_dev->bdev->bd_dev); 2927 DMEMIT("%s ", buf); 2928 format_dev_t(buf, cache->origin_dev->bdev->bd_dev); 2929 DMEMIT("%s", buf); 2930 2931 for (i = 0; i < cache->nr_ctr_args - 1; i++) 2932 DMEMIT(" %s", cache->ctr_args[i]); 2933 if (cache->nr_ctr_args) 2934 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); 2935 } 2936 2937 return; 2938 2939 err: 2940 DMEMIT("Error"); 2941 } 2942 2943 /* 2944 * A cache block range can take two forms: 2945 * 2946 * i) A single cblock, eg. '3456' 2947 * ii) A begin and end cblock with dots between, eg. 123-234 2948 */ 2949 static int parse_cblock_range(struct cache *cache, const char *str, 2950 struct cblock_range *result) 2951 { 2952 char dummy; 2953 uint64_t b, e; 2954 int r; 2955 2956 /* 2957 * Try and parse form (ii) first. 2958 */ 2959 r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy); 2960 if (r < 0) 2961 return r; 2962 2963 if (r == 2) { 2964 result->begin = to_cblock(b); 2965 result->end = to_cblock(e); 2966 return 0; 2967 } 2968 2969 /* 2970 * That didn't work, try form (i). 2971 */ 2972 r = sscanf(str, "%llu%c", &b, &dummy); 2973 if (r < 0) 2974 return r; 2975 2976 if (r == 1) { 2977 result->begin = to_cblock(b); 2978 result->end = to_cblock(from_cblock(result->begin) + 1u); 2979 return 0; 2980 } 2981 2982 DMERR("invalid cblock range '%s'", str); 2983 return -EINVAL; 2984 } 2985 2986 static int validate_cblock_range(struct cache *cache, struct cblock_range *range) 2987 { 2988 uint64_t b = from_cblock(range->begin); 2989 uint64_t e = from_cblock(range->end); 2990 uint64_t n = from_cblock(cache->cache_size); 2991 2992 if (b >= n) { 2993 DMERR("begin cblock out of range: %llu >= %llu", b, n); 2994 return -EINVAL; 2995 } 2996 2997 if (e > n) { 2998 DMERR("end cblock out of range: %llu > %llu", e, n); 2999 return -EINVAL; 3000 } 3001 3002 if (b >= e) { 3003 DMERR("invalid cblock range: %llu >= %llu", b, e); 3004 return -EINVAL; 3005 } 3006 3007 return 0; 3008 } 3009 3010 static int request_invalidation(struct cache *cache, struct cblock_range *range) 3011 { 3012 struct invalidation_request req; 3013 3014 INIT_LIST_HEAD(&req.list); 3015 req.cblocks = range; 3016 atomic_set(&req.complete, 0); 3017 req.err = 0; 3018 init_waitqueue_head(&req.result_wait); 3019 3020 spin_lock(&cache->invalidation_lock); 3021 list_add(&req.list, &cache->invalidation_requests); 3022 spin_unlock(&cache->invalidation_lock); 3023 wake_worker(cache); 3024 3025 wait_event(req.result_wait, atomic_read(&req.complete)); 3026 return req.err; 3027 } 3028 3029 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count, 3030 const char **cblock_ranges) 3031 { 3032 int r = 0; 3033 unsigned i; 3034 struct cblock_range range; 3035 3036 if (!passthrough_mode(&cache->features)) { 3037 DMERR("cache has to be in passthrough mode for invalidation"); 3038 return -EPERM; 3039 } 3040 3041 for (i = 0; i < count; i++) { 3042 r = parse_cblock_range(cache, 
cblock_ranges[i], &range);
		if (r)
			break;

		r = validate_cblock_range(cache, &range);
		if (r)
			break;

		/*
		 * Pass begin and end cache blocks to the worker and wake it.
		 */
		r = request_invalidation(cache, &range);
		if (r)
			break;
	}

	return r;
}

/*
 * Supports
 *	"<key> <value>"
 * and
 *	"invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
 * The key migration_threshold is supported by the cache target core.
 */
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct cache *cache = ti->private;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}

/*
 * We assume I/O is going to the origin (which is the volume
 * more likely to have restrictions e.g. by being striped).
 * (Looking up the exact location of the data would be expensive
 * and could always be out of date by the time the bio is submitted.)
 */
static int cache_bvec_merge(struct dm_target *ti,
			    struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size)
{
	struct cache *cache = ti->private;
	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cache->origin_dev->bdev;
	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = cache->discard_block_size * 1024;
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's blocksize (io_opt is a factor) do not override them.
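	 * For example, with 512 sector (256KB) cache blocks an io_opt of
	 * 2048 sectors is a whole number of blocks and is left alone,
	 * whereas 1280 sectors is not and is overridden below.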
3135 */ 3136 if (io_opt_sectors < cache->sectors_per_block || 3137 do_div(io_opt_sectors, cache->sectors_per_block)) { 3138 blk_limits_io_min(limits, 0); 3139 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); 3140 } 3141 set_discard_limits(cache, limits); 3142 } 3143 3144 /*----------------------------------------------------------------*/ 3145 3146 static struct target_type cache_target = { 3147 .name = "cache", 3148 .version = {1, 3, 0}, 3149 .module = THIS_MODULE, 3150 .ctr = cache_ctr, 3151 .dtr = cache_dtr, 3152 .map = cache_map, 3153 .end_io = cache_end_io, 3154 .postsuspend = cache_postsuspend, 3155 .preresume = cache_preresume, 3156 .resume = cache_resume, 3157 .status = cache_status, 3158 .message = cache_message, 3159 .iterate_devices = cache_iterate_devices, 3160 .merge = cache_bvec_merge, 3161 .io_hints = cache_io_hints, 3162 }; 3163 3164 static int __init dm_cache_init(void) 3165 { 3166 int r; 3167 3168 r = dm_register_target(&cache_target); 3169 if (r) { 3170 DMERR("cache target registration failed: %d", r); 3171 return r; 3172 } 3173 3174 migration_cache = KMEM_CACHE(dm_cache_migration, 0); 3175 if (!migration_cache) { 3176 dm_unregister_target(&cache_target); 3177 return -ENOMEM; 3178 } 3179 3180 return 0; 3181 } 3182 3183 static void __exit dm_cache_exit(void) 3184 { 3185 dm_unregister_target(&cache_target); 3186 kmem_cache_destroy(migration_cache); 3187 } 3188 3189 module_init(dm_cache_init); 3190 module_exit(dm_cache_exit); 3191 3192 MODULE_DESCRIPTION(DM_NAME " cache target"); 3193 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>"); 3194 MODULE_LICENSE("GPL"); 3195