/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}
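
/*
 * Illustrative sketch (not part of this file): dm_get_mapinfo() is
 * exported so a target can recover its per-io map_info from a bio,
 * e.g. in an end_io path.  Assuming map_info carries a driver-private
 * pointer, usage would look roughly like:
 *
 *	union map_info *info = dm_get_mapinfo(bio);
 *	if (info)
 *		my_data = info->ptr;
 */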

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

struct mapped_device {
	struct rw_semaphore io_lock;
	struct semaphore suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;

static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	_major = 0;

	DMINFO("cleaned up");
}

int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_interface_init,
};

void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = inode->i_bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

	/* We don't really need this lock, but we do need 'inode'. */
	unlock_kernel();

	md = inode->i_bdev->bd_disk->private_data;

	map = dm_get_table(md);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, inode, file, cmd, arg);

out:
	dm_table_put(map);

	lock_kernel();
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}

static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

	disk_stat_add(dm_disk(md), ticks[rw], duration);

	return !pending;
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0;	/* deferred successfully */
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
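
/*
 * A minimal sketch (illustrative, not part of this file) of the
 * reference-counting pattern the comment above asks every caller to
 * follow: take a counted reference with dm_get_table() and drop it
 * with dm_table_put() once done.
 *
 *	struct dm_table *t = dm_get_table(md);
 *
 *	if (t) {
 *		... inspect or use the table ...
 *		dm_table_put(t);
 *	}
 */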

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	bio_put(bio);
	free_tio(md, tio);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
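
/*
 * Worked example (illustrative only): with split_io a power of two, the
 * expression above rounds offset up to the next split_io boundary and
 * returns the distance to it.  E.g. for offset = 3 and ti->split_io = 8:
 *
 *	boundary = ((3 + 8) & ~(8 - 1)) - 3
 *	         = (11 & ~7) - 3
 *	         = 8 - 3
 *	         = 5
 *
 * so at most 5 sectors are issued before the next 8-sector boundary.
 */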

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
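
/*
 * Illustrative sketch (not part of this file, and only an assumption
 * about how a target might look): a trivial map function that remaps a
 * bio onto an underlying device and lets __map_bio() dispatch it by
 * returning DM_MAPIO_REMAPPED.  Returning 0 would instead mean the
 * target has taken ownership of the bio itself.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *map_context)
 *	{
 *		struct example_ctx *c = ti->private;
 *
 *		bio->bi_bdev = c->dev->bdev;
 *		bio->bi_sector = c->start + (bio->bi_sector - ti->begin);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */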

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct dm_target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}
}

/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(map);
		dm_table_put(map);
	}

	return ret;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(struct mapped_device *md, int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad4:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad3:
	mempool_destroy(md->io_pool);
bad2:
	blk_cleanup_queue(md->queue);
bad1_free_minor:
	free_minor(minor);
bad1:
	module_put(THIS_MODULE);
bad0:
	kfree(md);
	return NULL;
}

static void free_dev(struct mapped_device *md)
{
	int minor = md->disk->first_minor;

	if (md->suspended_bdev) {
		thaw_bdev(md->suspended_bdev, NULL);
		bdput(md->suspended_bdev);
	}
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}
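
/*
 * Minimal usage sketch (illustrative, not part of this file): creating a
 * device and dropping the initial holder reference again.  alloc_dev()
 * sets md->holders to 1, so dm_put() is the matching release.
 *
 *	struct mapped_device *md;
 *	int r = dm_create(DM_ANY_MINOR, &md);
 *
 *	if (!r) {
 *		... bind a table, use the device ...
 *		dm_put(md);
 *	}
 */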

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (dm_disk(md)->first_minor != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	down(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	up(&md->suspend_lock);
	return r;
}
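
/*
 * Illustrative sketch (not part of this file) of how a caller such as
 * the ioctl interface is expected to drive a table swap: the device is
 * suspended first, the new table is bound, and the device is resumed,
 * which replays any bios that were deferred in the meantime.
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (!r)
 *		r = dm_swap_table(md, new_table);
 *	if (!r)
 *		r = dm_resume(md);
 */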

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct bio *def;
	int r = -EINVAL;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out_unlock;

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto flush_and_out;
		}
	}

	/*
	 * Flush I/O to the device.
	 * noflush supersedes do_lockfs, because lock_fs() needs to flush I/Os.
	 */
	if (do_lockfs && !noflush) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush) {
		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);
	}

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		clear_bit(DMF_BLOCK_IO, &md->flags);
		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

flush_and_out:
	if (r && noflush) {
		/*
		 * Because there may already be I/Os in the pushback list,
		 * flush them before returning.
		 */
		down_write(&md->io_lock);

		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);

		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
	}

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	up(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct bio *def;
	struct dm_table *map = NULL;

	down(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	down_write(&md->io_lock);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->io_lock);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
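
/*
 * Illustrative sketch (not part of this file): a caller waiting for the
 * next table event.  The event counter is sampled first, and
 * dm_wait_event() returns once the counter has moved past that value;
 * a nonzero return means the wait was interrupted by a signal.
 *
 *	uint32_t ev = dm_get_event_nr(md);
 *
 *	... trigger or wait for a change ...
 *
 *	if (dm_wait_event(md, ev))
 *		return -ERESTARTSYS;
 */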

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");