/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct target_io *)bio->bi_private)->info;
	return NULL;
}

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1

struct mapped_device {
	struct rw_semaphore io_lock;
	struct semaphore suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	/*
	 * Event handling.
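	 * event_callback() bumps 'event_nr' and wakes 'eventq';
	 * dm_wait_event() sleeps there until the count changes.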
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *frozen_bdev;
};

#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;

static struct bio_set *dm_set;

static int __init local_init(void)
{
	int r;

	dm_set = bioset_create(16, 16, 4);
	if (!dm_set)
		return -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	bioset_free(dm_set);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}

int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_interface_init,
};

void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_get(md);
	return 0;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}

static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

/*
 * Add the bio to the list of deferred io.
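 * Returns 0 if the bio was deferred, or 1 if DMF_BLOCK_IO was cleared
 * in the meantime and the caller must retry mapping it itself.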
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0;		/* deferred successfully */
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (atomic_dec_and_test(&io->md->pending))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}

static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;

		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
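	 * If r > 0 the clone has been remapped, so we dispatch it with
	 * generic_make_request().  If r < 0 we error the original io and
	 * release the clone.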
	 */
	atomic_inc(&tio->io->io_count);
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0)
		/* the bio has been remapped so dispatch it */
		generic_make_request(clone);

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, r);
		bio_put(clone);
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Create two copy bios to deal with io that has
		 * been split across a target.
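		 * The first clone covers the part of the bvec that fits in
		 * the current target; the second covers the remainder, which
		 * is handed to the next target in the table.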
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;

		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset, max);
		__map_bio(ti, clone, tio);

		ci->sector += max;
		ci->sector_count -= max;
		ti = dm_table_find_target(ci->map, ci->sector);

		len = to_sector(bv->bv_len) - max;
		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset + to_bytes(max), len);
		tio = alloc_tio(ci->md);
		tio->io = ci->io;
		tio->ti = ti;
		memset(&tio->info, 0, sizeof(tio->info));
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx++;
	}
}

/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	atomic_inc(&md->pending);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	struct mapped_device *md = q->queuedata;

	down_read(&md->io_lock);

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(map);
		dm_table_put(map);
	}

	return ret;
}

static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
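 * All idr operations below are serialised by _minor_lock.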
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);

static void free_minor(unsigned int minor)
{
	down(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	up(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, unsigned int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	down(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, md, minor, &m);
	if (r) {
		goto out;
	}

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	up(&_minor_lock);
	return r;
}

static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
	int r;
	unsigned int m;

	down(&_minor_lock);

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new(&_minor_idr, md, &m);
	if (r) {
		goto out;
	}

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	up(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ?
		    specific_minor(md, minor) : next_free_minor(md, &minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

bad4:
	mempool_destroy(md->tio_pool);
bad3:
	mempool_destroy(md->io_pool);
bad2:
	blk_put_queue(md->queue);
	free_minor(minor);
bad1:
	kfree(md);
	return NULL;
}

static void free_dev(struct mapped_device *md)
{
	free_minor(md->disk->first_minor);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	put_disk(md->disk);
	blk_put_queue(md->queue);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	down(&md->frozen_bdev->bd_inode->i_sem);
	i_size_write(md->frozen_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	up(&md->frozen_bdev->bd_inode->i_sem);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);
	__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
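 * If 'persistent' is set the caller-supplied minor is reserved via
 * specific_minor(); otherwise next_free_minor() picks one from the idr.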
 */
static int create_aux(unsigned int minor, int persistent,
		      struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor, persistent);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

int dm_create(struct mapped_device **result)
{
	return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
	return create_aux(minor, 1, result);
}

void *dm_get_mdptr(dev_t dev)
{
	struct mapped_device *md;
	void *mdptr = NULL;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	down(&_minor_lock);

	md = idr_find(&_minor_idr, minor);

	if (md && (dm_disk(md)->first_minor == minor))
		mdptr = md->interface_ptr;

	up(&_minor_lock);

	return mdptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

void dm_put(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);

	if (atomic_dec_and_test(&md->holders)) {
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		free_dev(md);
	}

	dm_table_put(map);
}

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}

/*
 * Swap in a new table (destroying the old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	down(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	up(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r = -ENOMEM;

	md->frozen_bdev = bdget_disk(md->disk, 0);
	if (!md->frozen_bdev) {
		DMWARN("bdget failed in lock_fs");
		goto out;
	}

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->frozen_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		goto out_bdput;
	}

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.  We'll bdput
	 * in unlock_fs
	 */
	return 0;

out_bdput:
	bdput(md->frozen_bdev);
	md->frozen_sb = NULL;
	md->frozen_bdev = NULL;
out:
	return r;
}

static void unlock_fs(struct mapped_device *md)
{
	thaw_bdev(md->frozen_bdev, md->frozen_sb);
	bdput(md->frozen_bdev);

	md->frozen_sb = NULL;
	md->frozen_bdev = NULL;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
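 *
 * The sequence below is roughly: lock_fs() freezes the filesystem,
 * DMF_BLOCK_IO makes dm_request() defer new bios, and we then wait for
 * md->pending to reach zero before marking the device DMF_SUSPENDED.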
 */
int dm_suspend(struct mapped_device *md)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	int r = -EINVAL;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out;

	map = dm_get_table(md);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* Flush I/O to the device. */
	r = lock_fs(md);
	if (r)
		goto out;

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		up_write(&md->io_lock);
		unlock_fs(md);
		clear_bit(DMF_BLOCK_IO, &md->flags);
		goto out;
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct bio *def;
	struct dm_table *map = NULL;

	down(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	dm_table_resume_targets(map);

	down_write(&md->io_lock);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->io_lock);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
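 * i.e. hold a dm_get() reference for as long as you use the returned disk.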
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
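
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * external interface above is normally driven by the dm-ioctl layer.
 * Assuming the declarations in dm.h, a caller would typically do
 * something like:
 *
 *	struct mapped_device *md;
 *	struct dm_table *t;		// built and populated elsewhere
 *
 *	dm_create(&md);			// allocate device and minor
 *	dm_suspend(md);			// block and flush io
 *	dm_swap_table(md, t);		// bind the new table
 *	dm_resume(md);			// replay deferred io
 *	...
 *	dm_put(md);			// last put frees the device
 */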