1 /* 2 * Copyright (C) 2011-2012 Red Hat UK. 3 * 4 * This file is released under the GPL. 5 */ 6 7 #include "dm-thin-metadata.h" 8 #include "dm-bio-prison.h" 9 #include "dm.h" 10 11 #include <linux/device-mapper.h> 12 #include <linux/dm-io.h> 13 #include <linux/dm-kcopyd.h> 14 #include <linux/list.h> 15 #include <linux/init.h> 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 19 #define DM_MSG_PREFIX "thin" 20 21 /* 22 * Tunable constants 23 */ 24 #define ENDIO_HOOK_POOL_SIZE 1024 25 #define MAPPING_POOL_SIZE 1024 26 #define PRISON_CELLS 1024 27 #define COMMIT_PERIOD HZ 28 29 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, 30 "A percentage of time allocated for copy on write"); 31 32 /* 33 * The block size of the device holding pool data must be 34 * between 64KB and 1GB. 35 */ 36 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT) 37 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT) 38 39 /* 40 * Device id is restricted to 24 bits. 41 */ 42 #define MAX_DEV_ID ((1 << 24) - 1) 43 44 /* 45 * How do we handle breaking sharing of data blocks? 46 * ================================================= 47 * 48 * We use a standard copy-on-write btree to store the mappings for the 49 * devices (note I'm talking about copy-on-write of the metadata here, not 50 * the data). When you take an internal snapshot you clone the root node 51 * of the origin btree. After this there is no concept of an origin or a 52 * snapshot. They are just two device trees that happen to point to the 53 * same data blocks. 54 * 55 * When we get a write in we decide if it's to a shared data block using 56 * some timestamp magic. If it is, we have to break sharing. 57 * 58 * Let's say we write to a shared block in what was the origin. The 59 * steps are: 60 * 61 * i) plug io further to this physical block. (see bio_prison code). 62 * 63 * ii) quiesce any read io to that shared data block. Obviously 64 * including all devices that share this block. (see dm_deferred_set code) 65 * 66 * iii) copy the data block to a newly allocate block. This step can be 67 * missed out if the io covers the block. (schedule_copy). 68 * 69 * iv) insert the new mapping into the origin's btree 70 * (process_prepared_mapping). This act of inserting breaks some 71 * sharing of btree nodes between the two devices. Breaking sharing only 72 * effects the btree of that specific device. Btrees for the other 73 * devices that share the block never change. The btree for the origin 74 * device as it was after the last commit is untouched, ie. we're using 75 * persistent data structures in the functional programming sense. 76 * 77 * v) unplug io to this physical block, including the io that triggered 78 * the breaking of sharing. 79 * 80 * Steps (ii) and (iii) occur in parallel. 81 * 82 * The metadata _doesn't_ need to be committed before the io continues. We 83 * get away with this because the io is always written to a _new_ block. 84 * If there's a crash, then: 85 * 86 * - The origin mapping will point to the old origin block (the shared 87 * one). This will contain the data as it was before the io that triggered 88 * the breaking of sharing came in. 89 * 90 * - The snap mapping still points to the old block. As it would after 91 * the commit. 92 * 93 * The downside of this scheme is the timestamp magic isn't perfect, and 94 * will continue to think that data block in the snapshot device is shared 95 * even after the write to the origin has broken sharing. 
I suspect data 96 * blocks will typically be shared by many different devices, so we're 97 * breaking sharing n + 1 times, rather than n, where n is the number of 98 * devices that reference this data block. At the moment I think the 99 * benefits far, far outweigh the disadvantages. 100 */ 101 102 /*----------------------------------------------------------------*/ 103 104 /* 105 * Key building. 106 */ 107 static void build_data_key(struct dm_thin_device *td, 108 dm_block_t b, struct dm_cell_key *key) 109 { 110 key->virtual = 0; 111 key->dev = dm_thin_dev_id(td); 112 key->block = b; 113 } 114 115 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, 116 struct dm_cell_key *key) 117 { 118 key->virtual = 1; 119 key->dev = dm_thin_dev_id(td); 120 key->block = b; 121 } 122 123 /*----------------------------------------------------------------*/ 124 125 /* 126 * A pool device ties together a metadata device and a data device. It 127 * also provides the interface for creating and destroying internal 128 * devices. 129 */ 130 struct dm_thin_new_mapping; 131 132 /* 133 * The pool runs in 3 modes. Ordered in degraded order for comparisons. 134 */ 135 enum pool_mode { 136 PM_WRITE, /* metadata may be changed */ 137 PM_READ_ONLY, /* metadata may not be changed */ 138 PM_FAIL, /* all I/O fails */ 139 }; 140 141 struct pool_features { 142 enum pool_mode mode; 143 144 bool zero_new_blocks:1; 145 bool discard_enabled:1; 146 bool discard_passdown:1; 147 }; 148 149 struct thin_c; 150 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio); 151 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m); 152 153 struct pool { 154 struct list_head list; 155 struct dm_target *ti; /* Only set if a pool target is bound */ 156 157 struct mapped_device *pool_md; 158 struct block_device *md_dev; 159 struct dm_pool_metadata *pmd; 160 161 dm_block_t low_water_blocks; 162 uint32_t sectors_per_block; 163 int sectors_per_block_shift; 164 165 struct pool_features pf; 166 unsigned low_water_triggered:1; /* A dm event has been sent */ 167 unsigned no_free_space:1; /* A -ENOSPC warning has been issued */ 168 169 struct dm_bio_prison *prison; 170 struct dm_kcopyd_client *copier; 171 172 struct workqueue_struct *wq; 173 struct work_struct worker; 174 struct delayed_work waker; 175 176 unsigned long last_commit_jiffies; 177 unsigned ref_count; 178 179 spinlock_t lock; 180 struct bio_list deferred_bios; 181 struct bio_list deferred_flush_bios; 182 struct list_head prepared_mappings; 183 struct list_head prepared_discards; 184 185 struct bio_list retry_on_resume_list; 186 187 struct dm_deferred_set *shared_read_ds; 188 struct dm_deferred_set *all_io_ds; 189 190 struct dm_thin_new_mapping *next_mapping; 191 mempool_t *mapping_pool; 192 193 process_bio_fn process_bio; 194 process_bio_fn process_discard; 195 196 process_mapping_fn process_prepared_mapping; 197 process_mapping_fn process_prepared_discard; 198 }; 199 200 static enum pool_mode get_pool_mode(struct pool *pool); 201 static void set_pool_mode(struct pool *pool, enum pool_mode mode); 202 203 /* 204 * Target context for a pool. 
205 */ 206 struct pool_c { 207 struct dm_target *ti; 208 struct pool *pool; 209 struct dm_dev *data_dev; 210 struct dm_dev *metadata_dev; 211 struct dm_target_callbacks callbacks; 212 213 dm_block_t low_water_blocks; 214 struct pool_features requested_pf; /* Features requested during table load */ 215 struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */ 216 }; 217 218 /* 219 * Target context for a thin. 220 */ 221 struct thin_c { 222 struct dm_dev *pool_dev; 223 struct dm_dev *origin_dev; 224 dm_thin_id dev_id; 225 226 struct pool *pool; 227 struct dm_thin_device *td; 228 }; 229 230 /*----------------------------------------------------------------*/ 231 232 /* 233 * wake_worker() is used when new work is queued and when pool_resume is 234 * ready to continue deferred IO processing. 235 */ 236 static void wake_worker(struct pool *pool) 237 { 238 queue_work(pool->wq, &pool->worker); 239 } 240 241 /*----------------------------------------------------------------*/ 242 243 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, 244 struct dm_bio_prison_cell **cell_result) 245 { 246 int r; 247 struct dm_bio_prison_cell *cell_prealloc; 248 249 /* 250 * Allocate a cell from the prison's mempool. 251 * This might block but it can't fail. 252 */ 253 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO); 254 255 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); 256 if (r) 257 /* 258 * We reused an old cell; we can get rid of 259 * the new one. 260 */ 261 dm_bio_prison_free_cell(pool->prison, cell_prealloc); 262 263 return r; 264 } 265 266 static void cell_release(struct pool *pool, 267 struct dm_bio_prison_cell *cell, 268 struct bio_list *bios) 269 { 270 dm_cell_release(pool->prison, cell, bios); 271 dm_bio_prison_free_cell(pool->prison, cell); 272 } 273 274 static void cell_release_no_holder(struct pool *pool, 275 struct dm_bio_prison_cell *cell, 276 struct bio_list *bios) 277 { 278 dm_cell_release_no_holder(pool->prison, cell, bios); 279 dm_bio_prison_free_cell(pool->prison, cell); 280 } 281 282 static void cell_defer_no_holder_no_free(struct thin_c *tc, 283 struct dm_bio_prison_cell *cell) 284 { 285 struct pool *pool = tc->pool; 286 unsigned long flags; 287 288 spin_lock_irqsave(&pool->lock, flags); 289 dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios); 290 spin_unlock_irqrestore(&pool->lock, flags); 291 292 wake_worker(pool); 293 } 294 295 static void cell_error(struct pool *pool, 296 struct dm_bio_prison_cell *cell) 297 { 298 dm_cell_error(pool->prison, cell); 299 dm_bio_prison_free_cell(pool->prison, cell); 300 } 301 302 /*----------------------------------------------------------------*/ 303 304 /* 305 * A global list of pools that uses a struct mapped_device as a key. 
306 */ 307 static struct dm_thin_pool_table { 308 struct mutex mutex; 309 struct list_head pools; 310 } dm_thin_pool_table; 311 312 static void pool_table_init(void) 313 { 314 mutex_init(&dm_thin_pool_table.mutex); 315 INIT_LIST_HEAD(&dm_thin_pool_table.pools); 316 } 317 318 static void __pool_table_insert(struct pool *pool) 319 { 320 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex)); 321 list_add(&pool->list, &dm_thin_pool_table.pools); 322 } 323 324 static void __pool_table_remove(struct pool *pool) 325 { 326 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex)); 327 list_del(&pool->list); 328 } 329 330 static struct pool *__pool_table_lookup(struct mapped_device *md) 331 { 332 struct pool *pool = NULL, *tmp; 333 334 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex)); 335 336 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) { 337 if (tmp->pool_md == md) { 338 pool = tmp; 339 break; 340 } 341 } 342 343 return pool; 344 } 345 346 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev) 347 { 348 struct pool *pool = NULL, *tmp; 349 350 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex)); 351 352 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) { 353 if (tmp->md_dev == md_dev) { 354 pool = tmp; 355 break; 356 } 357 } 358 359 return pool; 360 } 361 362 /*----------------------------------------------------------------*/ 363 364 struct dm_thin_endio_hook { 365 struct thin_c *tc; 366 struct dm_deferred_entry *shared_read_entry; 367 struct dm_deferred_entry *all_io_entry; 368 struct dm_thin_new_mapping *overwrite_mapping; 369 }; 370 371 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) 372 { 373 struct bio *bio; 374 struct bio_list bios; 375 376 bio_list_init(&bios); 377 bio_list_merge(&bios, master); 378 bio_list_init(master); 379 380 while ((bio = bio_list_pop(&bios))) { 381 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 382 383 if (h->tc == tc) 384 bio_endio(bio, DM_ENDIO_REQUEUE); 385 else 386 bio_list_add(master, bio); 387 } 388 } 389 390 static void requeue_io(struct thin_c *tc) 391 { 392 struct pool *pool = tc->pool; 393 unsigned long flags; 394 395 spin_lock_irqsave(&pool->lock, flags); 396 __requeue_bio_list(tc, &pool->deferred_bios); 397 __requeue_bio_list(tc, &pool->retry_on_resume_list); 398 spin_unlock_irqrestore(&pool->lock, flags); 399 } 400 401 /* 402 * This section of code contains the logic for processing a thin device's IO. 403 * Much of the code depends on pool object resources (lists, workqueues, etc) 404 * but most is exclusively called from the thin target rather than the thin-pool 405 * target. 
406 */ 407 408 static bool block_size_is_power_of_two(struct pool *pool) 409 { 410 return pool->sectors_per_block_shift >= 0; 411 } 412 413 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) 414 { 415 struct pool *pool = tc->pool; 416 sector_t block_nr = bio->bi_sector; 417 418 if (block_size_is_power_of_two(pool)) 419 block_nr >>= pool->sectors_per_block_shift; 420 else 421 (void) sector_div(block_nr, pool->sectors_per_block); 422 423 return block_nr; 424 } 425 426 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) 427 { 428 struct pool *pool = tc->pool; 429 sector_t bi_sector = bio->bi_sector; 430 431 bio->bi_bdev = tc->pool_dev->bdev; 432 if (block_size_is_power_of_two(pool)) 433 bio->bi_sector = (block << pool->sectors_per_block_shift) | 434 (bi_sector & (pool->sectors_per_block - 1)); 435 else 436 bio->bi_sector = (block * pool->sectors_per_block) + 437 sector_div(bi_sector, pool->sectors_per_block); 438 } 439 440 static void remap_to_origin(struct thin_c *tc, struct bio *bio) 441 { 442 bio->bi_bdev = tc->origin_dev->bdev; 443 } 444 445 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) 446 { 447 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && 448 dm_thin_changed_this_transaction(tc->td); 449 } 450 451 static void inc_all_io_entry(struct pool *pool, struct bio *bio) 452 { 453 struct dm_thin_endio_hook *h; 454 455 if (bio->bi_rw & REQ_DISCARD) 456 return; 457 458 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 459 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); 460 } 461 462 static void issue(struct thin_c *tc, struct bio *bio) 463 { 464 struct pool *pool = tc->pool; 465 unsigned long flags; 466 467 if (!bio_triggers_commit(tc, bio)) { 468 generic_make_request(bio); 469 return; 470 } 471 472 /* 473 * Complete bio with an error if earlier I/O caused changes to 474 * the metadata that can't be committed e.g, due to I/O errors 475 * on the metadata device. 476 */ 477 if (dm_thin_aborted_changes(tc->td)) { 478 bio_io_error(bio); 479 return; 480 } 481 482 /* 483 * Batch together any bios that trigger commits and then issue a 484 * single commit for them in process_deferred_bios(). 485 */ 486 spin_lock_irqsave(&pool->lock, flags); 487 bio_list_add(&pool->deferred_flush_bios, bio); 488 spin_unlock_irqrestore(&pool->lock, flags); 489 } 490 491 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) 492 { 493 remap_to_origin(tc, bio); 494 issue(tc, bio); 495 } 496 497 static void remap_and_issue(struct thin_c *tc, struct bio *bio, 498 dm_block_t block) 499 { 500 remap(tc, bio, block); 501 issue(tc, bio); 502 } 503 504 /*----------------------------------------------------------------*/ 505 506 /* 507 * Bio endio functions. 508 */ 509 struct dm_thin_new_mapping { 510 struct list_head list; 511 512 unsigned quiesced:1; 513 unsigned prepared:1; 514 unsigned pass_discard:1; 515 516 struct thin_c *tc; 517 dm_block_t virt_block; 518 dm_block_t data_block; 519 struct dm_bio_prison_cell *cell, *cell2; 520 int err; 521 522 /* 523 * If the bio covers the whole area of a block then we can avoid 524 * zeroing or copying. Instead this bio is hooked. The bio will 525 * still be in the cell, so care has to be taken to avoid issuing 526 * the bio twice. 
527 */ 528 struct bio *bio; 529 bio_end_io_t *saved_bi_end_io; 530 }; 531 532 static void __maybe_add_mapping(struct dm_thin_new_mapping *m) 533 { 534 struct pool *pool = m->tc->pool; 535 536 if (m->quiesced && m->prepared) { 537 list_add(&m->list, &pool->prepared_mappings); 538 wake_worker(pool); 539 } 540 } 541 542 static void copy_complete(int read_err, unsigned long write_err, void *context) 543 { 544 unsigned long flags; 545 struct dm_thin_new_mapping *m = context; 546 struct pool *pool = m->tc->pool; 547 548 m->err = read_err || write_err ? -EIO : 0; 549 550 spin_lock_irqsave(&pool->lock, flags); 551 m->prepared = 1; 552 __maybe_add_mapping(m); 553 spin_unlock_irqrestore(&pool->lock, flags); 554 } 555 556 static void overwrite_endio(struct bio *bio, int err) 557 { 558 unsigned long flags; 559 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 560 struct dm_thin_new_mapping *m = h->overwrite_mapping; 561 struct pool *pool = m->tc->pool; 562 563 m->err = err; 564 565 spin_lock_irqsave(&pool->lock, flags); 566 m->prepared = 1; 567 __maybe_add_mapping(m); 568 spin_unlock_irqrestore(&pool->lock, flags); 569 } 570 571 /*----------------------------------------------------------------*/ 572 573 /* 574 * Workqueue. 575 */ 576 577 /* 578 * Prepared mapping jobs. 579 */ 580 581 /* 582 * This sends the bios in the cell back to the deferred_bios list. 583 */ 584 static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell) 585 { 586 struct pool *pool = tc->pool; 587 unsigned long flags; 588 589 spin_lock_irqsave(&pool->lock, flags); 590 cell_release(pool, cell, &pool->deferred_bios); 591 spin_unlock_irqrestore(&tc->pool->lock, flags); 592 593 wake_worker(pool); 594 } 595 596 /* 597 * Same as cell_defer above, except it omits the original holder of the cell. 598 */ 599 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell) 600 { 601 struct pool *pool = tc->pool; 602 unsigned long flags; 603 604 spin_lock_irqsave(&pool->lock, flags); 605 cell_release_no_holder(pool, cell, &pool->deferred_bios); 606 spin_unlock_irqrestore(&pool->lock, flags); 607 608 wake_worker(pool); 609 } 610 611 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) 612 { 613 if (m->bio) 614 m->bio->bi_end_io = m->saved_bi_end_io; 615 cell_error(m->tc->pool, m->cell); 616 list_del(&m->list); 617 mempool_free(m, m->tc->pool->mapping_pool); 618 } 619 620 static void process_prepared_mapping(struct dm_thin_new_mapping *m) 621 { 622 struct thin_c *tc = m->tc; 623 struct pool *pool = tc->pool; 624 struct bio *bio; 625 int r; 626 627 bio = m->bio; 628 if (bio) 629 bio->bi_end_io = m->saved_bi_end_io; 630 631 if (m->err) { 632 cell_error(pool, m->cell); 633 goto out; 634 } 635 636 /* 637 * Commit the prepared block into the mapping btree. 638 * Any I/O for this block arriving after this point will get 639 * remapped to it directly. 640 */ 641 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); 642 if (r) { 643 DMERR_LIMIT("dm_thin_insert_block() failed"); 644 cell_error(pool, m->cell); 645 goto out; 646 } 647 648 /* 649 * Release any bios held while the block was being provisioned. 650 * If we are processing a write bio that completely covers the block, 651 * we already processed it so can ignore it now when processing 652 * the bios in the cell. 
653 */ 654 if (bio) { 655 cell_defer_no_holder(tc, m->cell); 656 bio_endio(bio, 0); 657 } else 658 cell_defer(tc, m->cell); 659 660 out: 661 list_del(&m->list); 662 mempool_free(m, pool->mapping_pool); 663 } 664 665 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m) 666 { 667 struct thin_c *tc = m->tc; 668 669 bio_io_error(m->bio); 670 cell_defer_no_holder(tc, m->cell); 671 cell_defer_no_holder(tc, m->cell2); 672 mempool_free(m, tc->pool->mapping_pool); 673 } 674 675 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m) 676 { 677 struct thin_c *tc = m->tc; 678 679 inc_all_io_entry(tc->pool, m->bio); 680 cell_defer_no_holder(tc, m->cell); 681 cell_defer_no_holder(tc, m->cell2); 682 683 if (m->pass_discard) 684 remap_and_issue(tc, m->bio, m->data_block); 685 else 686 bio_endio(m->bio, 0); 687 688 mempool_free(m, tc->pool->mapping_pool); 689 } 690 691 static void process_prepared_discard(struct dm_thin_new_mapping *m) 692 { 693 int r; 694 struct thin_c *tc = m->tc; 695 696 r = dm_thin_remove_block(tc->td, m->virt_block); 697 if (r) 698 DMERR_LIMIT("dm_thin_remove_block() failed"); 699 700 process_prepared_discard_passdown(m); 701 } 702 703 static void process_prepared(struct pool *pool, struct list_head *head, 704 process_mapping_fn *fn) 705 { 706 unsigned long flags; 707 struct list_head maps; 708 struct dm_thin_new_mapping *m, *tmp; 709 710 INIT_LIST_HEAD(&maps); 711 spin_lock_irqsave(&pool->lock, flags); 712 list_splice_init(head, &maps); 713 spin_unlock_irqrestore(&pool->lock, flags); 714 715 list_for_each_entry_safe(m, tmp, &maps, list) 716 (*fn)(m); 717 } 718 719 /* 720 * Deferred bio jobs. 721 */ 722 static int io_overlaps_block(struct pool *pool, struct bio *bio) 723 { 724 return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); 725 } 726 727 static int io_overwrites_block(struct pool *pool, struct bio *bio) 728 { 729 return (bio_data_dir(bio) == WRITE) && 730 io_overlaps_block(pool, bio); 731 } 732 733 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save, 734 bio_end_io_t *fn) 735 { 736 *save = bio->bi_end_io; 737 bio->bi_end_io = fn; 738 } 739 740 static int ensure_next_mapping(struct pool *pool) 741 { 742 if (pool->next_mapping) 743 return 0; 744 745 pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC); 746 747 return pool->next_mapping ? 0 : -ENOMEM; 748 } 749 750 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) 751 { 752 struct dm_thin_new_mapping *r = pool->next_mapping; 753 754 BUG_ON(!pool->next_mapping); 755 756 pool->next_mapping = NULL; 757 758 return r; 759 } 760 761 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, 762 struct dm_dev *origin, dm_block_t data_origin, 763 dm_block_t data_dest, 764 struct dm_bio_prison_cell *cell, struct bio *bio) 765 { 766 int r; 767 struct pool *pool = tc->pool; 768 struct dm_thin_new_mapping *m = get_next_mapping(pool); 769 770 INIT_LIST_HEAD(&m->list); 771 m->quiesced = 0; 772 m->prepared = 0; 773 m->tc = tc; 774 m->virt_block = virt_block; 775 m->data_block = data_dest; 776 m->cell = cell; 777 m->err = 0; 778 m->bio = NULL; 779 780 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) 781 m->quiesced = 1; 782 783 /* 784 * IO to pool_dev remaps to the pool target's data_dev. 785 * 786 * If the whole block of data is being overwritten, we can issue the 787 * bio immediately. Otherwise we use kcopyd to clone the data first. 
788 */ 789 if (io_overwrites_block(pool, bio)) { 790 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 791 792 h->overwrite_mapping = m; 793 m->bio = bio; 794 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 795 inc_all_io_entry(pool, bio); 796 remap_and_issue(tc, bio, data_dest); 797 } else { 798 struct dm_io_region from, to; 799 800 from.bdev = origin->bdev; 801 from.sector = data_origin * pool->sectors_per_block; 802 from.count = pool->sectors_per_block; 803 804 to.bdev = tc->pool_dev->bdev; 805 to.sector = data_dest * pool->sectors_per_block; 806 to.count = pool->sectors_per_block; 807 808 r = dm_kcopyd_copy(pool->copier, &from, 1, &to, 809 0, copy_complete, m); 810 if (r < 0) { 811 mempool_free(m, pool->mapping_pool); 812 DMERR_LIMIT("dm_kcopyd_copy() failed"); 813 cell_error(pool, cell); 814 } 815 } 816 } 817 818 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, 819 dm_block_t data_origin, dm_block_t data_dest, 820 struct dm_bio_prison_cell *cell, struct bio *bio) 821 { 822 schedule_copy(tc, virt_block, tc->pool_dev, 823 data_origin, data_dest, cell, bio); 824 } 825 826 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, 827 dm_block_t data_dest, 828 struct dm_bio_prison_cell *cell, struct bio *bio) 829 { 830 schedule_copy(tc, virt_block, tc->origin_dev, 831 virt_block, data_dest, cell, bio); 832 } 833 834 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, 835 dm_block_t data_block, struct dm_bio_prison_cell *cell, 836 struct bio *bio) 837 { 838 struct pool *pool = tc->pool; 839 struct dm_thin_new_mapping *m = get_next_mapping(pool); 840 841 INIT_LIST_HEAD(&m->list); 842 m->quiesced = 1; 843 m->prepared = 0; 844 m->tc = tc; 845 m->virt_block = virt_block; 846 m->data_block = data_block; 847 m->cell = cell; 848 m->err = 0; 849 m->bio = NULL; 850 851 /* 852 * If the whole block of data is being overwritten or we are not 853 * zeroing pre-existing data, we can issue the bio immediately. 854 * Otherwise we use kcopyd to zero the data first. 855 */ 856 if (!pool->pf.zero_new_blocks) 857 process_prepared_mapping(m); 858 859 else if (io_overwrites_block(pool, bio)) { 860 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 861 862 h->overwrite_mapping = m; 863 m->bio = bio; 864 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 865 inc_all_io_entry(pool, bio); 866 remap_and_issue(tc, bio, data_block); 867 } else { 868 int r; 869 struct dm_io_region to; 870 871 to.bdev = tc->pool_dev->bdev; 872 to.sector = data_block * pool->sectors_per_block; 873 to.count = pool->sectors_per_block; 874 875 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m); 876 if (r < 0) { 877 mempool_free(m, pool->mapping_pool); 878 DMERR_LIMIT("dm_kcopyd_zero() failed"); 879 cell_error(pool, cell); 880 } 881 } 882 } 883 884 static int commit(struct pool *pool) 885 { 886 int r; 887 888 r = dm_pool_commit_metadata(pool->pmd); 889 if (r) 890 DMERR_LIMIT("commit failed: error = %d", r); 891 892 return r; 893 } 894 895 /* 896 * A non-zero return indicates read_only or fail_io mode. 897 * Many callers don't care about the return value. 
898 */ 899 static int commit_or_fallback(struct pool *pool) 900 { 901 int r; 902 903 if (get_pool_mode(pool) != PM_WRITE) 904 return -EINVAL; 905 906 r = commit(pool); 907 if (r) 908 set_pool_mode(pool, PM_READ_ONLY); 909 910 return r; 911 } 912 913 static int alloc_data_block(struct thin_c *tc, dm_block_t *result) 914 { 915 int r; 916 dm_block_t free_blocks; 917 unsigned long flags; 918 struct pool *pool = tc->pool; 919 920 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); 921 if (r) 922 return r; 923 924 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { 925 DMWARN("%s: reached low water mark for data device: sending event.", 926 dm_device_name(pool->pool_md)); 927 spin_lock_irqsave(&pool->lock, flags); 928 pool->low_water_triggered = 1; 929 spin_unlock_irqrestore(&pool->lock, flags); 930 dm_table_event(pool->ti->table); 931 } 932 933 if (!free_blocks) { 934 if (pool->no_free_space) 935 return -ENOSPC; 936 else { 937 /* 938 * Try to commit to see if that will free up some 939 * more space. 940 */ 941 (void) commit_or_fallback(pool); 942 943 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); 944 if (r) 945 return r; 946 947 /* 948 * If we still have no space we set a flag to avoid 949 * doing all this checking and return -ENOSPC. 950 */ 951 if (!free_blocks) { 952 DMWARN("%s: no free space available.", 953 dm_device_name(pool->pool_md)); 954 spin_lock_irqsave(&pool->lock, flags); 955 pool->no_free_space = 1; 956 spin_unlock_irqrestore(&pool->lock, flags); 957 return -ENOSPC; 958 } 959 } 960 } 961 962 r = dm_pool_alloc_data_block(pool->pmd, result); 963 if (r) 964 return r; 965 966 return 0; 967 } 968 969 /* 970 * If we have run out of space, queue bios until the device is 971 * resumed, presumably after having been reloaded with more space. 972 */ 973 static void retry_on_resume(struct bio *bio) 974 { 975 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 976 struct thin_c *tc = h->tc; 977 struct pool *pool = tc->pool; 978 unsigned long flags; 979 980 spin_lock_irqsave(&pool->lock, flags); 981 bio_list_add(&pool->retry_on_resume_list, bio); 982 spin_unlock_irqrestore(&pool->lock, flags); 983 } 984 985 static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell) 986 { 987 struct bio *bio; 988 struct bio_list bios; 989 990 bio_list_init(&bios); 991 cell_release(pool, cell, &bios); 992 993 while ((bio = bio_list_pop(&bios))) 994 retry_on_resume(bio); 995 } 996 997 static void process_discard(struct thin_c *tc, struct bio *bio) 998 { 999 int r; 1000 unsigned long flags; 1001 struct pool *pool = tc->pool; 1002 struct dm_bio_prison_cell *cell, *cell2; 1003 struct dm_cell_key key, key2; 1004 dm_block_t block = get_bio_block(tc, bio); 1005 struct dm_thin_lookup_result lookup_result; 1006 struct dm_thin_new_mapping *m; 1007 1008 build_virtual_key(tc->td, block, &key); 1009 if (bio_detain(tc->pool, &key, bio, &cell)) 1010 return; 1011 1012 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1013 switch (r) { 1014 case 0: 1015 /* 1016 * Check nobody is fiddling with this pool block. This can 1017 * happen if someone's in the process of breaking sharing 1018 * on this block. 1019 */ 1020 build_data_key(tc->td, lookup_result.block, &key2); 1021 if (bio_detain(tc->pool, &key2, bio, &cell2)) { 1022 cell_defer_no_holder(tc, cell); 1023 break; 1024 } 1025 1026 if (io_overlaps_block(pool, bio)) { 1027 /* 1028 * IO may still be going to the destination block. We must 1029 * quiesce before we can do the removal. 
1030 */ 1031 m = get_next_mapping(pool); 1032 m->tc = tc; 1033 m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown; 1034 m->virt_block = block; 1035 m->data_block = lookup_result.block; 1036 m->cell = cell; 1037 m->cell2 = cell2; 1038 m->err = 0; 1039 m->bio = bio; 1040 1041 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) { 1042 spin_lock_irqsave(&pool->lock, flags); 1043 list_add(&m->list, &pool->prepared_discards); 1044 spin_unlock_irqrestore(&pool->lock, flags); 1045 wake_worker(pool); 1046 } 1047 } else { 1048 inc_all_io_entry(pool, bio); 1049 cell_defer_no_holder(tc, cell); 1050 cell_defer_no_holder(tc, cell2); 1051 1052 /* 1053 * The DM core makes sure that the discard doesn't span 1054 * a block boundary. So we submit the discard of a 1055 * partial block appropriately. 1056 */ 1057 if ((!lookup_result.shared) && pool->pf.discard_passdown) 1058 remap_and_issue(tc, bio, lookup_result.block); 1059 else 1060 bio_endio(bio, 0); 1061 } 1062 break; 1063 1064 case -ENODATA: 1065 /* 1066 * It isn't provisioned, just forget it. 1067 */ 1068 cell_defer_no_holder(tc, cell); 1069 bio_endio(bio, 0); 1070 break; 1071 1072 default: 1073 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d", 1074 __func__, r); 1075 cell_defer_no_holder(tc, cell); 1076 bio_io_error(bio); 1077 break; 1078 } 1079 } 1080 1081 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, 1082 struct dm_cell_key *key, 1083 struct dm_thin_lookup_result *lookup_result, 1084 struct dm_bio_prison_cell *cell) 1085 { 1086 int r; 1087 dm_block_t data_block; 1088 1089 r = alloc_data_block(tc, &data_block); 1090 switch (r) { 1091 case 0: 1092 schedule_internal_copy(tc, block, lookup_result->block, 1093 data_block, cell, bio); 1094 break; 1095 1096 case -ENOSPC: 1097 no_space(tc->pool, cell); 1098 break; 1099 1100 default: 1101 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d", 1102 __func__, r); 1103 cell_error(tc->pool, cell); 1104 break; 1105 } 1106 } 1107 1108 static void process_shared_bio(struct thin_c *tc, struct bio *bio, 1109 dm_block_t block, 1110 struct dm_thin_lookup_result *lookup_result) 1111 { 1112 struct dm_bio_prison_cell *cell; 1113 struct pool *pool = tc->pool; 1114 struct dm_cell_key key; 1115 1116 /* 1117 * If cell is already occupied, then sharing is already in the process 1118 * of being broken so we have nothing further to do here. 1119 */ 1120 build_data_key(tc->td, lookup_result->block, &key); 1121 if (bio_detain(pool, &key, bio, &cell)) 1122 return; 1123 1124 if (bio_data_dir(bio) == WRITE && bio->bi_size) 1125 break_sharing(tc, bio, block, &key, lookup_result, cell); 1126 else { 1127 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1128 1129 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); 1130 inc_all_io_entry(pool, bio); 1131 cell_defer_no_holder(tc, cell); 1132 1133 remap_and_issue(tc, bio, lookup_result->block); 1134 } 1135 } 1136 1137 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, 1138 struct dm_bio_prison_cell *cell) 1139 { 1140 int r; 1141 dm_block_t data_block; 1142 struct pool *pool = tc->pool; 1143 1144 /* 1145 * Remap empty bios (flushes) immediately, without provisioning. 1146 */ 1147 if (!bio->bi_size) { 1148 inc_all_io_entry(pool, bio); 1149 cell_defer_no_holder(tc, cell); 1150 1151 remap_and_issue(tc, bio, 0); 1152 return; 1153 } 1154 1155 /* 1156 * Fill read bios with zeroes and complete them immediately. 
1157 */ 1158 if (bio_data_dir(bio) == READ) { 1159 zero_fill_bio(bio); 1160 cell_defer_no_holder(tc, cell); 1161 bio_endio(bio, 0); 1162 return; 1163 } 1164 1165 r = alloc_data_block(tc, &data_block); 1166 switch (r) { 1167 case 0: 1168 if (tc->origin_dev) 1169 schedule_external_copy(tc, block, data_block, cell, bio); 1170 else 1171 schedule_zero(tc, block, data_block, cell, bio); 1172 break; 1173 1174 case -ENOSPC: 1175 no_space(pool, cell); 1176 break; 1177 1178 default: 1179 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d", 1180 __func__, r); 1181 set_pool_mode(pool, PM_READ_ONLY); 1182 cell_error(pool, cell); 1183 break; 1184 } 1185 } 1186 1187 static void process_bio(struct thin_c *tc, struct bio *bio) 1188 { 1189 int r; 1190 struct pool *pool = tc->pool; 1191 dm_block_t block = get_bio_block(tc, bio); 1192 struct dm_bio_prison_cell *cell; 1193 struct dm_cell_key key; 1194 struct dm_thin_lookup_result lookup_result; 1195 1196 /* 1197 * If cell is already occupied, then the block is already 1198 * being provisioned so we have nothing further to do here. 1199 */ 1200 build_virtual_key(tc->td, block, &key); 1201 if (bio_detain(pool, &key, bio, &cell)) 1202 return; 1203 1204 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1205 switch (r) { 1206 case 0: 1207 if (lookup_result.shared) { 1208 process_shared_bio(tc, bio, block, &lookup_result); 1209 cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */ 1210 } else { 1211 inc_all_io_entry(pool, bio); 1212 cell_defer_no_holder(tc, cell); 1213 1214 remap_and_issue(tc, bio, lookup_result.block); 1215 } 1216 break; 1217 1218 case -ENODATA: 1219 if (bio_data_dir(bio) == READ && tc->origin_dev) { 1220 inc_all_io_entry(pool, bio); 1221 cell_defer_no_holder(tc, cell); 1222 1223 remap_to_origin_and_issue(tc, bio); 1224 } else 1225 provision_block(tc, bio, block, cell); 1226 break; 1227 1228 default: 1229 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d", 1230 __func__, r); 1231 cell_defer_no_holder(tc, cell); 1232 bio_io_error(bio); 1233 break; 1234 } 1235 } 1236 1237 static void process_bio_read_only(struct thin_c *tc, struct bio *bio) 1238 { 1239 int r; 1240 int rw = bio_data_dir(bio); 1241 dm_block_t block = get_bio_block(tc, bio); 1242 struct dm_thin_lookup_result lookup_result; 1243 1244 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1245 switch (r) { 1246 case 0: 1247 if (lookup_result.shared && (rw == WRITE) && bio->bi_size) 1248 bio_io_error(bio); 1249 else { 1250 inc_all_io_entry(tc->pool, bio); 1251 remap_and_issue(tc, bio, lookup_result.block); 1252 } 1253 break; 1254 1255 case -ENODATA: 1256 if (rw != READ) { 1257 bio_io_error(bio); 1258 break; 1259 } 1260 1261 if (tc->origin_dev) { 1262 inc_all_io_entry(tc->pool, bio); 1263 remap_to_origin_and_issue(tc, bio); 1264 break; 1265 } 1266 1267 zero_fill_bio(bio); 1268 bio_endio(bio, 0); 1269 break; 1270 1271 default: 1272 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d", 1273 __func__, r); 1274 bio_io_error(bio); 1275 break; 1276 } 1277 } 1278 1279 static void process_bio_fail(struct thin_c *tc, struct bio *bio) 1280 { 1281 bio_io_error(bio); 1282 } 1283 1284 /* 1285 * FIXME: should we also commit due to size of transaction, measured in 1286 * metadata blocks? 
1287 */ 1288 static int need_commit_due_to_time(struct pool *pool) 1289 { 1290 return jiffies < pool->last_commit_jiffies || 1291 jiffies > pool->last_commit_jiffies + COMMIT_PERIOD; 1292 } 1293 1294 static void process_deferred_bios(struct pool *pool) 1295 { 1296 unsigned long flags; 1297 struct bio *bio; 1298 struct bio_list bios; 1299 1300 bio_list_init(&bios); 1301 1302 spin_lock_irqsave(&pool->lock, flags); 1303 bio_list_merge(&bios, &pool->deferred_bios); 1304 bio_list_init(&pool->deferred_bios); 1305 spin_unlock_irqrestore(&pool->lock, flags); 1306 1307 while ((bio = bio_list_pop(&bios))) { 1308 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1309 struct thin_c *tc = h->tc; 1310 1311 /* 1312 * If we've got no free new_mapping structs, and processing 1313 * this bio might require one, we pause until there are some 1314 * prepared mappings to process. 1315 */ 1316 if (ensure_next_mapping(pool)) { 1317 spin_lock_irqsave(&pool->lock, flags); 1318 bio_list_merge(&pool->deferred_bios, &bios); 1319 spin_unlock_irqrestore(&pool->lock, flags); 1320 1321 break; 1322 } 1323 1324 if (bio->bi_rw & REQ_DISCARD) 1325 pool->process_discard(tc, bio); 1326 else 1327 pool->process_bio(tc, bio); 1328 } 1329 1330 /* 1331 * If there are any deferred flush bios, we must commit 1332 * the metadata before issuing them. 1333 */ 1334 bio_list_init(&bios); 1335 spin_lock_irqsave(&pool->lock, flags); 1336 bio_list_merge(&bios, &pool->deferred_flush_bios); 1337 bio_list_init(&pool->deferred_flush_bios); 1338 spin_unlock_irqrestore(&pool->lock, flags); 1339 1340 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) 1341 return; 1342 1343 if (commit_or_fallback(pool)) { 1344 while ((bio = bio_list_pop(&bios))) 1345 bio_io_error(bio); 1346 return; 1347 } 1348 pool->last_commit_jiffies = jiffies; 1349 1350 while ((bio = bio_list_pop(&bios))) 1351 generic_make_request(bio); 1352 } 1353 1354 static void do_worker(struct work_struct *ws) 1355 { 1356 struct pool *pool = container_of(ws, struct pool, worker); 1357 1358 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); 1359 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); 1360 process_deferred_bios(pool); 1361 } 1362 1363 /* 1364 * We want to commit periodically so that not too much 1365 * unwritten data builds up. 
1366 */ 1367 static void do_waker(struct work_struct *ws) 1368 { 1369 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker); 1370 wake_worker(pool); 1371 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); 1372 } 1373 1374 /*----------------------------------------------------------------*/ 1375 1376 static enum pool_mode get_pool_mode(struct pool *pool) 1377 { 1378 return pool->pf.mode; 1379 } 1380 1381 static void set_pool_mode(struct pool *pool, enum pool_mode mode) 1382 { 1383 int r; 1384 1385 pool->pf.mode = mode; 1386 1387 switch (mode) { 1388 case PM_FAIL: 1389 DMERR("switching pool to failure mode"); 1390 pool->process_bio = process_bio_fail; 1391 pool->process_discard = process_bio_fail; 1392 pool->process_prepared_mapping = process_prepared_mapping_fail; 1393 pool->process_prepared_discard = process_prepared_discard_fail; 1394 break; 1395 1396 case PM_READ_ONLY: 1397 DMERR("switching pool to read-only mode"); 1398 r = dm_pool_abort_metadata(pool->pmd); 1399 if (r) { 1400 DMERR("aborting transaction failed"); 1401 set_pool_mode(pool, PM_FAIL); 1402 } else { 1403 dm_pool_metadata_read_only(pool->pmd); 1404 pool->process_bio = process_bio_read_only; 1405 pool->process_discard = process_discard; 1406 pool->process_prepared_mapping = process_prepared_mapping_fail; 1407 pool->process_prepared_discard = process_prepared_discard_passdown; 1408 } 1409 break; 1410 1411 case PM_WRITE: 1412 pool->process_bio = process_bio; 1413 pool->process_discard = process_discard; 1414 pool->process_prepared_mapping = process_prepared_mapping; 1415 pool->process_prepared_discard = process_prepared_discard; 1416 break; 1417 } 1418 } 1419 1420 /*----------------------------------------------------------------*/ 1421 1422 /* 1423 * Mapping functions. 1424 */ 1425 1426 /* 1427 * Called only while mapping a thin bio to hand it over to the workqueue. 1428 */ 1429 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) 1430 { 1431 unsigned long flags; 1432 struct pool *pool = tc->pool; 1433 1434 spin_lock_irqsave(&pool->lock, flags); 1435 bio_list_add(&pool->deferred_bios, bio); 1436 spin_unlock_irqrestore(&pool->lock, flags); 1437 1438 wake_worker(pool); 1439 } 1440 1441 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) 1442 { 1443 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1444 1445 h->tc = tc; 1446 h->shared_read_entry = NULL; 1447 h->all_io_entry = NULL; 1448 h->overwrite_mapping = NULL; 1449 } 1450 1451 /* 1452 * Non-blocking function called from the thin target's map function. 1453 */ 1454 static int thin_bio_map(struct dm_target *ti, struct bio *bio) 1455 { 1456 int r; 1457 struct thin_c *tc = ti->private; 1458 dm_block_t block = get_bio_block(tc, bio); 1459 struct dm_thin_device *td = tc->td; 1460 struct dm_thin_lookup_result result; 1461 struct dm_bio_prison_cell cell1, cell2; 1462 struct dm_bio_prison_cell *cell_result; 1463 struct dm_cell_key key; 1464 1465 thin_hook_bio(tc, bio); 1466 1467 if (get_pool_mode(tc->pool) == PM_FAIL) { 1468 bio_io_error(bio); 1469 return DM_MAPIO_SUBMITTED; 1470 } 1471 1472 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) { 1473 thin_defer_bio(tc, bio); 1474 return DM_MAPIO_SUBMITTED; 1475 } 1476 1477 r = dm_thin_find_block(td, block, 0, &result); 1478 1479 /* 1480 * Note that we defer readahead too. 
1481 */ 1482 switch (r) { 1483 case 0: 1484 if (unlikely(result.shared)) { 1485 /* 1486 * We have a race condition here between the 1487 * result.shared value returned by the lookup and 1488 * snapshot creation, which may cause new 1489 * sharing. 1490 * 1491 * To avoid this always quiesce the origin before 1492 * taking the snap. You want to do this anyway to 1493 * ensure a consistent application view 1494 * (i.e. lockfs). 1495 * 1496 * More distant ancestors are irrelevant. The 1497 * shared flag will be set in their case. 1498 */ 1499 thin_defer_bio(tc, bio); 1500 return DM_MAPIO_SUBMITTED; 1501 } 1502 1503 build_virtual_key(tc->td, block, &key); 1504 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) 1505 return DM_MAPIO_SUBMITTED; 1506 1507 build_data_key(tc->td, result.block, &key); 1508 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) { 1509 cell_defer_no_holder_no_free(tc, &cell1); 1510 return DM_MAPIO_SUBMITTED; 1511 } 1512 1513 inc_all_io_entry(tc->pool, bio); 1514 cell_defer_no_holder_no_free(tc, &cell2); 1515 cell_defer_no_holder_no_free(tc, &cell1); 1516 1517 remap(tc, bio, result.block); 1518 return DM_MAPIO_REMAPPED; 1519 1520 case -ENODATA: 1521 if (get_pool_mode(tc->pool) == PM_READ_ONLY) { 1522 /* 1523 * This block isn't provisioned, and we have no way 1524 * of doing so. Just error it. 1525 */ 1526 bio_io_error(bio); 1527 return DM_MAPIO_SUBMITTED; 1528 } 1529 /* fall through */ 1530 1531 case -EWOULDBLOCK: 1532 /* 1533 * In future, the failed dm_thin_find_block above could 1534 * provide the hint to load the metadata into cache. 1535 */ 1536 thin_defer_bio(tc, bio); 1537 return DM_MAPIO_SUBMITTED; 1538 1539 default: 1540 /* 1541 * Must always call bio_io_error on failure. 1542 * dm_thin_find_block can fail with -EINVAL if the 1543 * pool is switched to fail-io mode. 1544 */ 1545 bio_io_error(bio); 1546 return DM_MAPIO_SUBMITTED; 1547 } 1548 } 1549 1550 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) 1551 { 1552 int r; 1553 unsigned long flags; 1554 struct pool_c *pt = container_of(cb, struct pool_c, callbacks); 1555 1556 spin_lock_irqsave(&pt->pool->lock, flags); 1557 r = !bio_list_empty(&pt->pool->retry_on_resume_list); 1558 spin_unlock_irqrestore(&pt->pool->lock, flags); 1559 1560 if (!r) { 1561 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); 1562 r = bdi_congested(&q->backing_dev_info, bdi_bits); 1563 } 1564 1565 return r; 1566 } 1567 1568 static void __requeue_bios(struct pool *pool) 1569 { 1570 bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list); 1571 bio_list_init(&pool->retry_on_resume_list); 1572 } 1573 1574 /*---------------------------------------------------------------- 1575 * Binding of control targets to a pool object 1576 *--------------------------------------------------------------*/ 1577 static bool data_dev_supports_discard(struct pool_c *pt) 1578 { 1579 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); 1580 1581 return q && blk_queue_discard(q); 1582 } 1583 1584 static bool is_factor(sector_t block_size, uint32_t n) 1585 { 1586 return !sector_div(block_size, n); 1587 } 1588 1589 /* 1590 * If discard_passdown was enabled verify that the data device 1591 * supports discards. Disable discard_passdown if not. 
1592 */ 1593 static void disable_passdown_if_not_supported(struct pool_c *pt) 1594 { 1595 struct pool *pool = pt->pool; 1596 struct block_device *data_bdev = pt->data_dev->bdev; 1597 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits; 1598 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT; 1599 const char *reason = NULL; 1600 char buf[BDEVNAME_SIZE]; 1601 1602 if (!pt->adjusted_pf.discard_passdown) 1603 return; 1604 1605 if (!data_dev_supports_discard(pt)) 1606 reason = "discard unsupported"; 1607 1608 else if (data_limits->max_discard_sectors < pool->sectors_per_block) 1609 reason = "max discard sectors smaller than a block"; 1610 1611 else if (data_limits->discard_granularity > block_size) 1612 reason = "discard granularity larger than a block"; 1613 1614 else if (!is_factor(block_size, data_limits->discard_granularity)) 1615 reason = "discard granularity not a factor of block size"; 1616 1617 if (reason) { 1618 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason); 1619 pt->adjusted_pf.discard_passdown = false; 1620 } 1621 } 1622 1623 static int bind_control_target(struct pool *pool, struct dm_target *ti) 1624 { 1625 struct pool_c *pt = ti->private; 1626 1627 /* 1628 * We want to make sure that degraded pools are never upgraded. 1629 */ 1630 enum pool_mode old_mode = pool->pf.mode; 1631 enum pool_mode new_mode = pt->adjusted_pf.mode; 1632 1633 if (old_mode > new_mode) 1634 new_mode = old_mode; 1635 1636 pool->ti = ti; 1637 pool->low_water_blocks = pt->low_water_blocks; 1638 pool->pf = pt->adjusted_pf; 1639 1640 set_pool_mode(pool, new_mode); 1641 1642 return 0; 1643 } 1644 1645 static void unbind_control_target(struct pool *pool, struct dm_target *ti) 1646 { 1647 if (pool->ti == ti) 1648 pool->ti = NULL; 1649 } 1650 1651 /*---------------------------------------------------------------- 1652 * Pool creation 1653 *--------------------------------------------------------------*/ 1654 /* Initialize pool features. */ 1655 static void pool_features_init(struct pool_features *pf) 1656 { 1657 pf->mode = PM_WRITE; 1658 pf->zero_new_blocks = true; 1659 pf->discard_enabled = true; 1660 pf->discard_passdown = true; 1661 } 1662 1663 static void __pool_destroy(struct pool *pool) 1664 { 1665 __pool_table_remove(pool); 1666 1667 if (dm_pool_metadata_close(pool->pmd) < 0) 1668 DMWARN("%s: dm_pool_metadata_close() failed.", __func__); 1669 1670 dm_bio_prison_destroy(pool->prison); 1671 dm_kcopyd_client_destroy(pool->copier); 1672 1673 if (pool->wq) 1674 destroy_workqueue(pool->wq); 1675 1676 if (pool->next_mapping) 1677 mempool_free(pool->next_mapping, pool->mapping_pool); 1678 mempool_destroy(pool->mapping_pool); 1679 dm_deferred_set_destroy(pool->shared_read_ds); 1680 dm_deferred_set_destroy(pool->all_io_ds); 1681 kfree(pool); 1682 } 1683 1684 static struct kmem_cache *_new_mapping_cache; 1685 1686 static struct pool *pool_create(struct mapped_device *pool_md, 1687 struct block_device *metadata_dev, 1688 unsigned long block_size, 1689 int read_only, char **error) 1690 { 1691 int r; 1692 void *err_p; 1693 struct pool *pool; 1694 struct dm_pool_metadata *pmd; 1695 bool format_device = read_only ? 
false : true; 1696 1697 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device); 1698 if (IS_ERR(pmd)) { 1699 *error = "Error creating metadata object"; 1700 return (struct pool *)pmd; 1701 } 1702 1703 pool = kmalloc(sizeof(*pool), GFP_KERNEL); 1704 if (!pool) { 1705 *error = "Error allocating memory for pool"; 1706 err_p = ERR_PTR(-ENOMEM); 1707 goto bad_pool; 1708 } 1709 1710 pool->pmd = pmd; 1711 pool->sectors_per_block = block_size; 1712 if (block_size & (block_size - 1)) 1713 pool->sectors_per_block_shift = -1; 1714 else 1715 pool->sectors_per_block_shift = __ffs(block_size); 1716 pool->low_water_blocks = 0; 1717 pool_features_init(&pool->pf); 1718 pool->prison = dm_bio_prison_create(PRISON_CELLS); 1719 if (!pool->prison) { 1720 *error = "Error creating pool's bio prison"; 1721 err_p = ERR_PTR(-ENOMEM); 1722 goto bad_prison; 1723 } 1724 1725 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); 1726 if (IS_ERR(pool->copier)) { 1727 r = PTR_ERR(pool->copier); 1728 *error = "Error creating pool's kcopyd client"; 1729 err_p = ERR_PTR(r); 1730 goto bad_kcopyd_client; 1731 } 1732 1733 /* 1734 * Create singlethreaded workqueue that will service all devices 1735 * that use this metadata. 1736 */ 1737 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); 1738 if (!pool->wq) { 1739 *error = "Error creating pool's workqueue"; 1740 err_p = ERR_PTR(-ENOMEM); 1741 goto bad_wq; 1742 } 1743 1744 INIT_WORK(&pool->worker, do_worker); 1745 INIT_DELAYED_WORK(&pool->waker, do_waker); 1746 spin_lock_init(&pool->lock); 1747 bio_list_init(&pool->deferred_bios); 1748 bio_list_init(&pool->deferred_flush_bios); 1749 INIT_LIST_HEAD(&pool->prepared_mappings); 1750 INIT_LIST_HEAD(&pool->prepared_discards); 1751 pool->low_water_triggered = 0; 1752 pool->no_free_space = 0; 1753 bio_list_init(&pool->retry_on_resume_list); 1754 1755 pool->shared_read_ds = dm_deferred_set_create(); 1756 if (!pool->shared_read_ds) { 1757 *error = "Error creating pool's shared read deferred set"; 1758 err_p = ERR_PTR(-ENOMEM); 1759 goto bad_shared_read_ds; 1760 } 1761 1762 pool->all_io_ds = dm_deferred_set_create(); 1763 if (!pool->all_io_ds) { 1764 *error = "Error creating pool's all io deferred set"; 1765 err_p = ERR_PTR(-ENOMEM); 1766 goto bad_all_io_ds; 1767 } 1768 1769 pool->next_mapping = NULL; 1770 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, 1771 _new_mapping_cache); 1772 if (!pool->mapping_pool) { 1773 *error = "Error creating pool's mapping mempool"; 1774 err_p = ERR_PTR(-ENOMEM); 1775 goto bad_mapping_pool; 1776 } 1777 1778 pool->ref_count = 1; 1779 pool->last_commit_jiffies = jiffies; 1780 pool->pool_md = pool_md; 1781 pool->md_dev = metadata_dev; 1782 __pool_table_insert(pool); 1783 1784 return pool; 1785 1786 bad_mapping_pool: 1787 dm_deferred_set_destroy(pool->all_io_ds); 1788 bad_all_io_ds: 1789 dm_deferred_set_destroy(pool->shared_read_ds); 1790 bad_shared_read_ds: 1791 destroy_workqueue(pool->wq); 1792 bad_wq: 1793 dm_kcopyd_client_destroy(pool->copier); 1794 bad_kcopyd_client: 1795 dm_bio_prison_destroy(pool->prison); 1796 bad_prison: 1797 kfree(pool); 1798 bad_pool: 1799 if (dm_pool_metadata_close(pmd)) 1800 DMWARN("%s: dm_pool_metadata_close() failed.", __func__); 1801 1802 return err_p; 1803 } 1804 1805 static void __pool_inc(struct pool *pool) 1806 { 1807 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex)); 1808 pool->ref_count++; 1809 } 1810 1811 static void __pool_dec(struct pool *pool) 1812 { 1813 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex)); 
1814 BUG_ON(!pool->ref_count); 1815 if (!--pool->ref_count) 1816 __pool_destroy(pool); 1817 } 1818 1819 static struct pool *__pool_find(struct mapped_device *pool_md, 1820 struct block_device *metadata_dev, 1821 unsigned long block_size, int read_only, 1822 char **error, int *created) 1823 { 1824 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev); 1825 1826 if (pool) { 1827 if (pool->pool_md != pool_md) { 1828 *error = "metadata device already in use by a pool"; 1829 return ERR_PTR(-EBUSY); 1830 } 1831 __pool_inc(pool); 1832 1833 } else { 1834 pool = __pool_table_lookup(pool_md); 1835 if (pool) { 1836 if (pool->md_dev != metadata_dev) { 1837 *error = "different pool cannot replace a pool"; 1838 return ERR_PTR(-EINVAL); 1839 } 1840 __pool_inc(pool); 1841 1842 } else { 1843 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error); 1844 *created = 1; 1845 } 1846 } 1847 1848 return pool; 1849 } 1850 1851 /*---------------------------------------------------------------- 1852 * Pool target methods 1853 *--------------------------------------------------------------*/ 1854 static void pool_dtr(struct dm_target *ti) 1855 { 1856 struct pool_c *pt = ti->private; 1857 1858 mutex_lock(&dm_thin_pool_table.mutex); 1859 1860 unbind_control_target(pt->pool, ti); 1861 __pool_dec(pt->pool); 1862 dm_put_device(ti, pt->metadata_dev); 1863 dm_put_device(ti, pt->data_dev); 1864 kfree(pt); 1865 1866 mutex_unlock(&dm_thin_pool_table.mutex); 1867 } 1868 1869 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, 1870 struct dm_target *ti) 1871 { 1872 int r; 1873 unsigned argc; 1874 const char *arg_name; 1875 1876 static struct dm_arg _args[] = { 1877 {0, 3, "Invalid number of pool feature arguments"}, 1878 }; 1879 1880 /* 1881 * No feature arguments supplied. 
1882 */ 1883 if (!as->argc) 1884 return 0; 1885 1886 r = dm_read_arg_group(_args, as, &argc, &ti->error); 1887 if (r) 1888 return -EINVAL; 1889 1890 while (argc && !r) { 1891 arg_name = dm_shift_arg(as); 1892 argc--; 1893 1894 if (!strcasecmp(arg_name, "skip_block_zeroing")) 1895 pf->zero_new_blocks = false; 1896 1897 else if (!strcasecmp(arg_name, "ignore_discard")) 1898 pf->discard_enabled = false; 1899 1900 else if (!strcasecmp(arg_name, "no_discard_passdown")) 1901 pf->discard_passdown = false; 1902 1903 else if (!strcasecmp(arg_name, "read_only")) 1904 pf->mode = PM_READ_ONLY; 1905 1906 else { 1907 ti->error = "Unrecognised pool feature requested"; 1908 r = -EINVAL; 1909 break; 1910 } 1911 } 1912 1913 return r; 1914 } 1915 1916 static void metadata_low_callback(void *context) 1917 { 1918 struct pool *pool = context; 1919 1920 DMWARN("%s: reached low water mark for metadata device: sending event.", 1921 dm_device_name(pool->pool_md)); 1922 1923 dm_table_event(pool->ti->table); 1924 } 1925 1926 static sector_t get_metadata_dev_size(struct block_device *bdev) 1927 { 1928 sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; 1929 char buffer[BDEVNAME_SIZE]; 1930 1931 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) { 1932 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", 1933 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS); 1934 metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING; 1935 } 1936 1937 return metadata_dev_size; 1938 } 1939 1940 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev) 1941 { 1942 sector_t metadata_dev_size = get_metadata_dev_size(bdev); 1943 1944 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); 1945 1946 return metadata_dev_size; 1947 } 1948 1949 /* 1950 * When a metadata threshold is crossed a dm event is triggered, and 1951 * userland should respond by growing the metadata device. We could let 1952 * userland set the threshold, like we do with the data threshold, but I'm 1953 * not sure they know enough to do this well. 1954 */ 1955 static dm_block_t calc_metadata_threshold(struct pool_c *pt) 1956 { 1957 /* 1958 * 4M is ample for all ops with the possible exception of thin 1959 * device deletion which is harmless if it fails (just retry the 1960 * delete after you've grown the device). 1961 */ 1962 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4; 1963 return min((dm_block_t)1024ULL /* 4M */, quarter); 1964 } 1965 1966 /* 1967 * thin-pool <metadata dev> <data dev> 1968 * <data block size (sectors)> 1969 * <low water mark (blocks)> 1970 * [<#feature args> [<arg>]*] 1971 * 1972 * Optional feature arguments are: 1973 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks. 1974 * ignore_discard: disable discard 1975 * no_discard_passdown: don't pass discards down to the data device 1976 */ 1977 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) 1978 { 1979 int r, pool_created = 0; 1980 struct pool_c *pt; 1981 struct pool *pool; 1982 struct pool_features pf; 1983 struct dm_arg_set as; 1984 struct dm_dev *data_dev; 1985 unsigned long block_size; 1986 dm_block_t low_water_blocks; 1987 struct dm_dev *metadata_dev; 1988 fmode_t metadata_mode; 1989 1990 /* 1991 * FIXME Remove validation from scope of lock. 
static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r, pool_created = 0;
	struct pool_c *pt;
	struct pool *pool;
	struct pool_features pf;
	struct dm_arg_set as;
	struct dm_dev *data_dev;
	unsigned long block_size;
	dm_block_t low_water_blocks;
	struct dm_dev *metadata_dev;
	fmode_t metadata_mode;

	/*
	 * FIXME Remove validation from scope of lock.
	 */
	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc < 4) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	as.argc = argc;
	as.argv = argv;

	/*
	 * Set default pool features.
	 */
	pool_features_init(&pf);

	dm_consume_args(&as, 4);
	r = parse_pool_features(&as, &pf, ti);
	if (r)
		goto out_unlock;

	metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
	r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
	if (r) {
		ti->error = "Error opening metadata block device";
		goto out_unlock;
	}

	/*
	 * Run for the side-effect of possibly issuing a warning if the
	 * device is too big.
	 */
	(void) get_metadata_dev_size(metadata_dev->bdev);

	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
	if (r) {
		ti->error = "Error getting data device";
		goto out_metadata;
	}

	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		ti->error = "Invalid block size";
		r = -EINVAL;
		goto out;
	}
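
	/*
	 * Added commentary (not in the original): the check above requires
	 * the data block size (argv[2], expressed in 512-byte sectors) to lie
	 * between DATA_DEV_BLOCK_SIZE_MIN_SECTORS and
	 * DATA_DEV_BLOCK_SIZE_MAX_SECTORS and, because of the final mask, to
	 * be a multiple of the minimum.
	 */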

	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
		ti->error = "Invalid low water mark";
		r = -EINVAL;
		goto out;
	}

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt) {
		r = -ENOMEM;
		goto out;
	}

	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
	if (IS_ERR(pool)) {
		r = PTR_ERR(pool);
		goto out_free_pt;
	}

	/*
	 * 'pool_created' reflects whether this is the first table load.
	 * Top level discard support is not allowed to be changed after
	 * initial load.  This would require a pool reload to trigger thin
	 * device changes.
	 */
	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
		ti->error = "Discard support cannot be disabled once enabled";
		r = -EINVAL;
		goto out_flags_changed;
	}

	pt->pool = pool;
	pt->ti = ti;
	pt->metadata_dev = metadata_dev;
	pt->data_dev = data_dev;
	pt->low_water_blocks = low_water_blocks;
	pt->adjusted_pf = pt->requested_pf = pf;
	ti->num_flush_bios = 1;

	/*
	 * Only need to enable discards if the pool should pass
	 * them down to the data device.  The thin device's discard
	 * processing will cause mappings to be removed from the btree.
	 */
	if (pf.discard_enabled && pf.discard_passdown) {
		ti->num_discard_bios = 1;

		/*
		 * Setting 'discards_supported' circumvents the normal
		 * stacking of discard limits (this keeps the pool and
		 * thin devices' discard limits consistent).
		 */
		ti->discards_supported = true;
		ti->discard_zeroes_data_unsupported = true;
	}
	ti->private = pt;

	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
						calc_metadata_threshold(pt),
						metadata_low_callback,
						pool);
	if (r)
		goto out_free_pt;

	pt->callbacks.congested_fn = pool_is_congested;
	dm_table_add_target_callbacks(ti->table, &pt->callbacks);

	mutex_unlock(&dm_thin_pool_table.mutex);

	return 0;

out_flags_changed:
	__pool_dec(pool);
out_free_pt:
	kfree(pt);
out:
	dm_put_device(ti, data_dev);
out_metadata:
	dm_put_device(ti, metadata_dev);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int pool_map(struct dm_target *ti, struct bio *bio)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	/*
	 * As this is a singleton target, ti->begin is always zero.
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio->bi_bdev = pt->data_dev->bdev;
	r = DM_MAPIO_REMAPPED;
	spin_unlock_irqrestore(&pool->lock, flags);

	return r;
}

static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t data_size = ti->len;
	dm_block_t sb_data_size;

	*need_commit = false;

	(void) sector_div(data_size, pool->sectors_per_block);

	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
	if (r) {
		DMERR("failed to retrieve data device size");
		return r;
	}

	if (data_size < sb_data_size) {
		DMERR("pool target (%llu blocks) too small: expected %llu",
		      (unsigned long long)data_size, sb_data_size);
		return -EINVAL;

	} else if (data_size > sb_data_size) {
		r = dm_pool_resize_data_dev(pool->pmd, data_size);
		if (r) {
			DMERR("failed to resize data device");
			set_pool_mode(pool, PM_READ_ONLY);
			return r;
		}

		*need_commit = true;
	}

	return 0;
}

static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	dm_block_t metadata_dev_size, sb_metadata_dev_size;

	*need_commit = false;

	metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);

	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
	if (r) {
		DMERR("failed to retrieve metadata device size");
		return r;
	}

	if (metadata_dev_size < sb_metadata_dev_size) {
		DMERR("metadata device (%llu blocks) too small: expected %llu",
		      metadata_dev_size, sb_metadata_dev_size);
		return -EINVAL;

	} else if (metadata_dev_size > sb_metadata_dev_size) {
		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
		if (r) {
			DMERR("failed to resize metadata device");
			return r;
		}

		*need_commit = true;
	}

	return 0;
}

/*
 * Retrieves the number of blocks of the data device from
 * the superblock and compares it to the actual device size,
 * thus resizing the data device in case it has grown.
 *
 * This both copes with opening preallocated data devices in the ctr
 * being followed by a resume
 * -and-
 * calling the resume method individually after userspace has
 * grown the data device in reaction to a table event.
 */
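
/*
 * Illustrative note (added commentary, not from the original source): the
 * usual sequence is that userspace grows the data device, reloads the pool
 * table and resumes it; pool_preresume() below then sees the new size in
 * blocks exceed what the superblock records and calls
 * dm_pool_resize_data_dev() before committing.
 */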
static int pool_preresume(struct dm_target *ti)
{
	int r;
	bool need_commit1, need_commit2;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	/*
	 * Take control of the pool object.
	 */
	r = bind_control_target(pool, ti);
	if (r)
		return r;

	r = maybe_resize_data_dev(ti, &need_commit1);
	if (r)
		return r;

	r = maybe_resize_metadata_dev(ti, &need_commit2);
	if (r)
		return r;

	if (need_commit1 || need_commit2)
		(void) commit_or_fallback(pool);

	return 0;
}

static void pool_resume(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	pool->low_water_triggered = 0;
	pool->no_free_space = 0;
	__requeue_bios(pool);
	spin_unlock_irqrestore(&pool->lock, flags);

	do_waker(&pool->waker.work);
}

static void pool_postsuspend(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	cancel_delayed_work(&pool->waker);
	flush_workqueue(pool->wq);
	(void) commit_or_fallback(pool);
}

static int check_arg_count(unsigned argc, unsigned args_required)
{
	if (argc != args_required) {
		DMWARN("Message received with %u arguments instead of %u.",
		       argc, args_required);
		return -EINVAL;
	}

	return 0;
}

static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
{
	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
	    *dev_id <= MAX_DEV_ID)
		return 0;

	if (warning)
		DMWARN("Message received with invalid device id: %s", arg);

	return -EINVAL;
}

static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_thin(pool->pmd, dev_id);
	if (r) {
		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
		       argv[1]);
		return r;
	}

	return 0;
}

static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	dm_thin_id origin_dev_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = read_dev_id(argv[2], &origin_dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
	if (r) {
		DMWARN("Creation of new snapshot %s of device %s failed.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}
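
/*
 * For illustration only (not part of the original source): userspace drives
 * these handlers with "dmsetup message" on the pool device, e.g.
 *
 *   dmsetup message /dev/mapper/pool 0 create_thin 0
 *   dmsetup message /dev/mapper/pool 0 create_snap 1 0
 *
 * where the device ids are invented; the second message snapshots thin
 * device 0 as device 1.
 */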
static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
	if (r)
		DMWARN("Deletion of thin device %s failed.", argv[1]);

	return r;
}

static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id old_id, new_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
		return -EINVAL;
	}

	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
		return -EINVAL;
	}

	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
	if (r) {
		DMWARN("Failed to change transaction id from %s to %s.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	(void) commit_or_fallback(pool);

	r = dm_pool_reserve_metadata_snap(pool->pmd);
	if (r)
		DMWARN("reserve_metadata_snap message failed.");

	return r;
}

static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	r = dm_pool_release_metadata_snap(pool->pmd);
	if (r)
		DMWARN("release_metadata_snap message failed.");

	return r;
}

/*
 * Messages supported:
 *   create_thin <dev_id>
 *   create_snap <dev_id> <origin_id>
 *   delete <dev_id>
 *   set_transaction_id <current_trans_id> <new_trans_id>
 *   reserve_metadata_snap
 *   release_metadata_snap
 */
static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	if (!strcasecmp(argv[0], "create_thin"))
		r = process_create_thin_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "create_snap"))
		r = process_create_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "delete"))
		r = process_delete_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "set_transaction_id"))
		r = process_set_transaction_id_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
		r = process_reserve_metadata_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "release_metadata_snap"))
		r = process_release_metadata_snap_mesg(argc, argv, pool);

	else
		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);

	if (!r)
		(void) commit_or_fallback(pool);

	return r;
}

static void emit_flags(struct pool_features *pf, char *result,
		       unsigned sz, unsigned maxlen)
{
	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
		!pf->discard_passdown + (pf->mode == PM_READ_ONLY);
	DMEMIT("%u ", count);

	if (!pf->zero_new_blocks)
		DMEMIT("skip_block_zeroing ");

	if (!pf->discard_enabled)
		DMEMIT("ignore_discard ");

	if (!pf->discard_passdown)
		DMEMIT("no_discard_passdown ");

	if (pf->mode == PM_READ_ONLY)
		DMEMIT("read_only ");
}
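
/*
 * Added example (not in the original): a pool loaded with the feature
 * arguments "2 skip_block_zeroing read_only" is re-emitted here as
 * "2 skip_block_zeroing read_only ", so the table line reported by
 * STATUSTYPE_TABLE below round-trips through the constructor.
 */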
/*
 * Status line is:
 *    <transaction id> <used metadata blocks>/<total metadata blocks>
 *    <used data blocks>/<total data blocks> <held metadata root>
 */
static void pool_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	unsigned sz = 0;
	uint64_t transaction_id;
	dm_block_t nr_free_blocks_data;
	dm_block_t nr_free_blocks_metadata;
	dm_block_t nr_blocks_data;
	dm_block_t nr_blocks_metadata;
	dm_block_t held_root;
	char buf[BDEVNAME_SIZE];
	char buf2[BDEVNAME_SIZE];
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_pool_mode(pool) == PM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit_or_fallback(pool);

		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
		if (r) {
			DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
			goto err;
		}

		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
			goto err;
		}

		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
		if (r) {
			DMERR("dm_pool_get_metadata_dev_size returned %d", r);
			goto err;
		}

		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
		if (r) {
			DMERR("dm_pool_get_free_block_count returned %d", r);
			goto err;
		}

		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
		if (r) {
			DMERR("dm_pool_get_data_dev_size returned %d", r);
			goto err;
		}

		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
		if (r) {
			DMERR("dm_pool_get_metadata_snap returned %d", r);
			goto err;
		}

		DMEMIT("%llu %llu/%llu %llu/%llu ",
		       (unsigned long long)transaction_id,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
		       (unsigned long long)nr_blocks_data);

		if (held_root)
			DMEMIT("%llu ", held_root);
		else
			DMEMIT("- ");

		if (pool->pf.mode == PM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		if (!pool->pf.discard_enabled)
			DMEMIT("ignore_discard");
		else if (pool->pf.discard_passdown)
			DMEMIT("discard_passdown");
		else
			DMEMIT("no_discard_passdown");

		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s %lu %llu ",
		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
		       (unsigned long)pool->sectors_per_block,
		       (unsigned long long)pt->low_water_blocks);
		emit_flags(&pt->requested_pf, result, sz, maxlen);
		break;
	}
	return;

err:
	DMEMIT("Error");
}
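
/*
 * Example output (figures invented for illustration, not from the original
 * source): a healthy read-write pool passing discards down might report
 *
 *   0 288/4096 10240/409600 - rw discard_passdown
 *
 * i.e. transaction 0, 288 of 4096 metadata blocks used, 10240 of 409600
 * data blocks used and no held metadata root.
 */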
static int pool_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct pool_c *pt = ti->private;

	return fn(ti, pt->data_dev, 0, ti->len, data);
}

static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		      struct bio_vec *biovec, int max_size)
{
	struct pool_c *pt = ti->private;
	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = pt->data_dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
{
	struct pool *pool = pt->pool;
	struct queue_limits *data_limits;

	limits->max_discard_sectors = pool->sectors_per_block;

	/*
	 * discard_granularity is just a hint, and not enforced.
	 */
	if (pt->adjusted_pf.discard_passdown) {
		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
		limits->discard_granularity = data_limits->discard_granularity;
	} else
		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
}

static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	blk_limits_io_min(limits, 0);
	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);

	/*
	 * pt->adjusted_pf is a staging area for the actual features to use.
	 * They get transferred to the live pool in bind_control_target()
	 * called from pool_preresume().
	 */
	if (!pt->adjusted_pf.discard_enabled)
		return;

	disable_passdown_if_not_supported(pt);

	set_discard_limits(pt, limits);
}

static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
	.version = {1, 8, 0},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
	.map = pool_map,
	.postsuspend = pool_postsuspend,
	.preresume = pool_preresume,
	.resume = pool_resume,
	.message = pool_message,
	.status = pool_status,
	.merge = pool_merge,
	.iterate_devices = pool_iterate_devices,
	.io_hints = pool_io_hints,
};

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
static void thin_dtr(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
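
/*
 * Purely illustrative (names and sizes invented, not part of the original
 * source): a 1GiB thin volume with internal id 0 could be activated with a
 * table line such as
 *
 *   0 2097152 thin /dev/mapper/pool 0
 *
 * once a "create_thin 0" message has been sent to the pool.
 */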
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct thin_c *tc;
	struct dm_dev *pool_dev, *origin_dev;
	struct mapped_device *pool_md;

	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc != 2 && argc != 3) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		ti->error = "Out of memory";
		r = -ENOMEM;
		goto out_unlock;
	}

	if (argc == 3) {
		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
		if (r) {
			ti->error = "Error opening origin device";
			goto bad_origin_dev;
		}
		tc->origin_dev = origin_dev;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
	if (r) {
		ti->error = "Error opening pool device";
		goto bad_pool_dev;
	}
	tc->pool_dev = pool_dev;

	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
		ti->error = "Invalid device id";
		r = -EINVAL;
		goto bad_common;
	}

	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
	if (!pool_md) {
		ti->error = "Couldn't get pool mapped device";
		r = -EINVAL;
		goto bad_common;
	}

	tc->pool = __pool_table_lookup(pool_md);
	if (!tc->pool) {
		ti->error = "Couldn't find pool object";
		r = -EINVAL;
		goto bad_pool_lookup;
	}
	__pool_inc(tc->pool);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		ti->error = "Couldn't open thin device, Pool is in fail mode";
		r = -EINVAL;
		goto bad_thin_open;
	}

	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
	if (r) {
		ti->error = "Couldn't open thin internal device";
		goto bad_thin_open;
	}

	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
	if (r)
		goto bad_thin_open;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
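
	/*
	 * Added commentary (not in the original): per_bio_data_size reserves
	 * room in every bio for the dm_thin_endio_hook that thin_endio()
	 * below retrieves with dm_per_bio_data().
	 */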

	/* In case the pool supports discards, pass them on. */
	if (tc->pool->pf.discard_enabled) {
		ti->discards_supported = true;
		ti->num_discard_bios = 1;
		ti->discard_zeroes_data_unsupported = true;
		/* Discard bios must be split on a block boundary */
		ti->split_discard_bios = true;
	}

	dm_put(pool_md);

	mutex_unlock(&dm_thin_pool_table.mutex);

	return 0;

bad_thin_open:
	__pool_dec(tc->pool);
bad_pool_lookup:
	dm_put(pool_md);
bad_common:
	dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
	kfree(tc);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int thin_map(struct dm_target *ti, struct bio *bio)
{
	bio->bi_sector = dm_target_offset(ti, bio->bi_sector);

	return thin_bio_map(ti, bio);
}

static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct list_head work;
	struct dm_thin_new_mapping *m, *tmp;
	struct pool *pool = h->tc->pool;

	if (h->shared_read_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->shared_read_entry, &work);

		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list) {
			list_del(&m->list);
			m->quiesced = 1;
			__maybe_add_mapping(m);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (h->all_io_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->all_io_entry, &work);
		if (!list_empty(&work)) {
			spin_lock_irqsave(&pool->lock, flags);
			list_for_each_entry_safe(m, tmp, &work, list)
				list_add(&m->list, &pool->prepared_discards);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_worker(pool);
		}
	}

	return 0;
}

static void thin_postsuspend(struct dm_target *ti)
{
	if (dm_noflush_suspending(ti))
		requeue_io((struct thin_c *)ti->private);
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
static void thin_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	ssize_t sz = 0;
	dm_block_t mapped, highest;
	char buf[BDEVNAME_SIZE];
	struct thin_c *tc = ti->private;

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		DMEMIT("Fail");
		return;
	}

	if (!tc->td)
		DMEMIT("-");
	else {
		switch (type) {
		case STATUSTYPE_INFO:
			r = dm_thin_get_mapped_count(tc->td, &mapped);
			if (r) {
				DMERR("dm_thin_get_mapped_count returned %d", r);
				goto err;
			}

			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
			if (r < 0) {
				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
				goto err;
			}

			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
			if (r)
				DMEMIT("%llu", ((highest + 1) *
						tc->pool->sectors_per_block) - 1);
			else
				DMEMIT("-");
			break;

		case STATUSTYPE_TABLE:
			DMEMIT("%s %lu",
			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
			       (unsigned long) tc->dev_id);
			if (tc->origin_dev)
				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
			break;
		}
	}

	return;

err:
	DMEMIT("Error");
}
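
/*
 * Example (numbers invented for illustration): with 512-sector data blocks,
 * a thin device with 2000 mapped blocks whose highest mapped block is 3999
 * reports
 *
 *   1024000 2047999
 *
 * while a device with nothing mapped reports "0 -" (assuming
 * dm_thin_get_highest_mapped_block() returns 0 in that case).
 */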
static int thin_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	sector_t blocks;
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	/*
	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
	 * we follow a more convoluted path through to the pool's target.
	 */
	if (!pool->ti)
		return 0;	/* nothing is bound */

	blocks = pool->ti->len;
	(void) sector_div(blocks, pool->sectors_per_block);
	if (blocks)
		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

	return 0;
}

static struct target_type thin_target = {
	.name = "thin",
	.version = {1, 8, 0},
	.module = THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
	.map = thin_map,
	.end_io = thin_endio,
	.postsuspend = thin_postsuspend,
	.status = thin_status,
	.iterate_devices = thin_iterate_devices,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
	int r;

	pool_table_init();

	r = dm_register_target(&thin_target);
	if (r)
		return r;

	r = dm_register_target(&pool_target);
	if (r)
		goto bad_pool_target;

	r = -ENOMEM;

	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
	if (!_new_mapping_cache)
		goto bad_new_mapping_cache;

	return 0;

bad_new_mapping_cache:
	dm_unregister_target(&pool_target);
bad_pool_target:
	dm_unregister_target(&thin_target);

	return r;
}

static void dm_thin_exit(void)
{
	dm_unregister_target(&thin_target);
	dm_unregister_target(&pool_target);

	kmem_cache_destroy(_new_mapping_cache);
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");