/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

/*
 * Work processed by per-device workqueue.
 */
struct dm_wq_req {
	enum {
		DM_WQ_FLUSH_DEFERRED,
	} type;
	struct work_struct work;
	struct mapped_device *md;
	void *context;
};

struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_tio_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
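
/*
 * (editorial note) dm_blk_open() refuses new opens once DMF_DELETING (or
 * DMF_FREEING) is set, and dm_lock_for_deletion() refuses to set
 * DMF_DELETING while dm_open_count() is non-zero.  Both decisions are made
 * under _minor_lock, so an open cannot race with deletion.
 */
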
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
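
/*
 * (editorial example) Typical caller pattern for dm_get_table():
 *
 *	struct dm_table *map = dm_get_table(md);
 *
 *	if (map) {
 *		... use the table ...
 *		dm_table_put(map);
 *	}
 */
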
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		end_io_acct(io);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	bio_put(bio);
	free_tio(md, tio);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
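
/*
 * (editorial example) With ti->split_io = 64 (a power-of-two chunk size in
 * sectors) and offset = 100, the next chunk boundary is at sector 128, so
 * max_io_len() caps len at 128 - 100 = 28 sectors; io crossing the boundary
 * is split by the caller.
 */
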
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones.
 */
static int __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map))
		return -EIO;

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);

	return 0;
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	r = __split_bio(md, bio);
	up_read(&md->io_lock);

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	atomic_inc(&md->pending);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	if (!atomic_dec_return(&md->pending))
		/* nudge anyone waiting on suspend queue */
		wake_up(&md->wait);

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;
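
/*
 * (editorial note) The minor-allocation helpers above first park the
 * MINOR_ALLOCED placeholder in the IDR; alloc_dev() below swaps in the real
 * mapped_device with idr_replace() only once it is fully initialised, so a
 * concurrent lookup never sees a half-constructed device.
 */
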
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}
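
/*
 * (editorial note) Lifecycle of a mapped_device: dm_create()/alloc_dev()
 * returns it with md->holders set to 1; further references are taken with
 * dm_get() and dropped with dm_put().  When the last holder calls dm_put(),
 * the table is unbound and free_dev() tears the device down.
 */
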
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md)
{
	int r = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	return r;
}

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md)
{
	struct bio *c;

	while ((c = bio_list_pop(&md->deferred))) {
		if (__split_bio(md, c))
			bio_io_error(c);
	}

	clear_bit(DMF_BLOCK_IO, &md->flags);
}

static void __merge_pushback_list(struct mapped_device *md)
{
	unsigned long flags;

	spin_lock_irqsave(&md->pushback_lock, flags);
	clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	bio_list_merge_head(&md->deferred, &md->pushback);
	bio_list_init(&md->pushback);
	spin_unlock_irqrestore(&md->pushback_lock, flags);
}

static void dm_wq_work(struct work_struct *work)
{
	struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);
	struct mapped_device *md = req->md;

	down_write(&md->io_lock);
	switch (req->type) {
	case DM_WQ_FLUSH_DEFERRED:
		__flush_deferred_io(md);
		break;
	default:
		DMERR("dm_wq_work: unrecognised work type %d", req->type);
		BUG();
	}
	up_write(&md->io_lock);
}

static void dm_wq_queue(struct mapped_device *md, int type, void *context,
			struct dm_wq_req *req)
{
	req->type = type;
	req->md = md;
	req->context = context;
	INIT_WORK(&req->work, dm_wq_work);
	queue_work(md->wq, &req->work);
}

static void dm_queue_flush(struct mapped_device *md, int type, void *context)
{
	struct dm_wq_req req;

	dm_wq_queue(md, type, context, &req);
	flush_workqueue(md->wq);
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Wait for the already-mapped ios to complete.
	 */
	r = dm_wait_for_completion(md);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush)
		__merge_pushback_list(md);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
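
/*
 * (editorial note) The event helpers above support a sample-then-wait
 * pattern: a caller samples dm_get_event_nr(), examines the current state,
 * then blocks in dm_wait_event() until event_nr advances past the sampled
 * value; event_callback() bumps event_nr and wakes md->eventq.
 */
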
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");