/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

struct mapped_device {
	struct rw_semaphore io_lock;
	struct semaphore suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;

static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	r = dm_uevent_init();
	if (r) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		dm_uevent_exit();
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_interface_init,
};

void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = inode->i_bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

	/* We don't really need this lock, but we do need 'inode'. */
	unlock_kernel();

	md = inode->i_bdev->bd_disk->private_data;

	map = dm_get_table(md);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, inode, file, cmd, arg);

out:
	dm_table_put(map);

	lock_kernel();
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}

static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

	disk_stat_add(dm_disk(md), ticks[rw], duration);

	return !pending;
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	bio_put(bio);
	free_tio(md, tio);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct dm_target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}
}

/*
 * Split the bio into several clones.
 */
static int __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map))
		return -EIO;

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);

	return 0;
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	r = __split_bio(md, bio);
	up_read(&md->io_lock);

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(struct mapped_device *md, int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r) {
		goto out;
	}

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;
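
	/*
	 * Error path: undo the allocations above in reverse order.
	 */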
bad4:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad3:
	mempool_destroy(md->io_pool);
bad2:
	blk_cleanup_queue(md->queue);
bad1_free_minor:
	free_minor(minor);
bad1:
	module_put(THIS_MODULE);
bad0:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = md->disk->first_minor;

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &md->disk->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (dm_disk(md)->first_minor != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		if (__split_bio(md, c))
			bio_io_error(c);
		c = n;
	}
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	down(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	up(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct bio *def;
	int r = -EINVAL;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out_unlock;

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto flush_and_out;
		}
	}

	/*
	 * Flush I/O to the device.
	 * noflush supersedes do_lockfs, because lock_fs() needs to flush I/Os.
	 */
	if (do_lockfs && !noflush) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush) {
		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);
	}

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		clear_bit(DMF_BLOCK_IO, &md->flags);
		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

flush_and_out:
	if (r && noflush) {
		/*
		 * Because there may already be I/Os in the pushback list,
		 * flush them before returning.
		 */
		down_write(&md->io_lock);

		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);

		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
	}

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	up(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct bio *def;
	struct dm_table *map = NULL;

	down(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	down_write(&md->io_lock);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->io_lock);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");