1 /* 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm.h" 9 #include "dm-uevent.h" 10 11 #include <linux/init.h> 12 #include <linux/module.h> 13 #include <linux/mutex.h> 14 #include <linux/moduleparam.h> 15 #include <linux/blkpg.h> 16 #include <linux/bio.h> 17 #include <linux/mempool.h> 18 #include <linux/slab.h> 19 #include <linux/idr.h> 20 #include <linux/hdreg.h> 21 #include <linux/delay.h> 22 23 #include <trace/events/block.h> 24 25 #define DM_MSG_PREFIX "core" 26 27 #ifdef CONFIG_PRINTK 28 /* 29 * ratelimit state to be used in DMXXX_LIMIT(). 30 */ 31 DEFINE_RATELIMIT_STATE(dm_ratelimit_state, 32 DEFAULT_RATELIMIT_INTERVAL, 33 DEFAULT_RATELIMIT_BURST); 34 EXPORT_SYMBOL(dm_ratelimit_state); 35 #endif 36 37 /* 38 * Cookies are numeric values sent with CHANGE and REMOVE 39 * uevents while resuming, removing or renaming the device. 40 */ 41 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 42 #define DM_COOKIE_LENGTH 24 43 44 static const char *_name = DM_NAME; 45 46 static unsigned int major = 0; 47 static unsigned int _major = 0; 48 49 static DEFINE_IDR(_minor_idr); 50 51 static DEFINE_SPINLOCK(_minor_lock); 52 53 static void do_deferred_remove(struct work_struct *w); 54 55 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 56 57 static struct workqueue_struct *deferred_remove_workqueue; 58 59 /* 60 * For bio-based dm. 61 * One of these is allocated per bio. 62 */ 63 struct dm_io { 64 struct mapped_device *md; 65 int error; 66 atomic_t io_count; 67 struct bio *bio; 68 unsigned long start_time; 69 spinlock_t endio_lock; 70 struct dm_stats_aux stats_aux; 71 }; 72 73 /* 74 * For request-based dm. 75 * One of these is allocated per request. 76 */ 77 struct dm_rq_target_io { 78 struct mapped_device *md; 79 struct dm_target *ti; 80 struct request *orig, clone; 81 int error; 82 union map_info info; 83 }; 84 85 /* 86 * For request-based dm - the bio clones we allocate are embedded in these 87 * structs. 88 * 89 * We allocate these with bio_alloc_bioset, using the front_pad parameter when 90 * the bioset is created - this means the bio has to come at the end of the 91 * struct. 92 */ 93 struct dm_rq_clone_bio_info { 94 struct bio *orig; 95 struct dm_rq_target_io *tio; 96 struct bio clone; 97 }; 98 99 union map_info *dm_get_rq_mapinfo(struct request *rq) 100 { 101 if (rq && rq->end_io_data) 102 return &((struct dm_rq_target_io *)rq->end_io_data)->info; 103 return NULL; 104 } 105 EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo); 106 107 #define MINOR_ALLOCED ((void *)-1) 108 109 /* 110 * Bits for the md->flags field. 111 */ 112 #define DMF_BLOCK_IO_FOR_SUSPEND 0 113 #define DMF_SUSPENDED 1 114 #define DMF_FROZEN 2 115 #define DMF_FREEING 3 116 #define DMF_DELETING 4 117 #define DMF_NOFLUSH_SUSPENDING 5 118 #define DMF_MERGE_IS_OPTIONAL 6 119 #define DMF_DEFERRED_REMOVE 7 120 121 /* 122 * A dummy definition to make RCU happy. 123 * struct dm_table should never be dereferenced in this file. 124 */ 125 struct dm_table { 126 int undefined__; 127 }; 128 129 /* 130 * Work processed by per-device workqueue. 131 */ 132 struct mapped_device { 133 struct srcu_struct io_barrier; 134 struct mutex suspend_lock; 135 atomic_t holders; 136 atomic_t open_count; 137 138 /* 139 * The current mapping. 140 * Use dm_get_live_table{_fast} or take suspend_lock for 141 * dereference. 
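	 *
	 * Readers take an SRCU read lock via dm_get_live_table() (or plain
	 * RCU via dm_get_live_table_fast()) and pair it with the matching
	 * dm_put_live_table*() call, e.g. (sketch):
	 *
	 *	int srcu_idx;
	 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	 *	if (map) {
	 *		... read-only use of the table ...
	 *	}
	 *	dm_put_live_table(md, srcu_idx);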
142 */ 143 struct dm_table *map; 144 145 unsigned long flags; 146 147 struct request_queue *queue; 148 unsigned type; 149 /* Protect queue and type against concurrent access. */ 150 struct mutex type_lock; 151 152 struct target_type *immutable_target_type; 153 154 struct gendisk *disk; 155 char name[16]; 156 157 void *interface_ptr; 158 159 /* 160 * A list of ios that arrived while we were suspended. 161 */ 162 atomic_t pending[2]; 163 wait_queue_head_t wait; 164 struct work_struct work; 165 struct bio_list deferred; 166 spinlock_t deferred_lock; 167 168 /* 169 * Processing queue (flush) 170 */ 171 struct workqueue_struct *wq; 172 173 /* 174 * io objects are allocated from here. 175 */ 176 mempool_t *io_pool; 177 178 struct bio_set *bs; 179 180 /* 181 * Event handling. 182 */ 183 atomic_t event_nr; 184 wait_queue_head_t eventq; 185 atomic_t uevent_seq; 186 struct list_head uevent_list; 187 spinlock_t uevent_lock; /* Protect access to uevent_list */ 188 189 /* 190 * freeze/thaw support require holding onto a super block 191 */ 192 struct super_block *frozen_sb; 193 struct block_device *bdev; 194 195 /* forced geometry settings */ 196 struct hd_geometry geometry; 197 198 /* kobject and completion */ 199 struct dm_kobject_holder kobj_holder; 200 201 /* zero-length flush that will be cloned and submitted to targets */ 202 struct bio flush_bio; 203 204 struct dm_stats stats; 205 }; 206 207 /* 208 * For mempools pre-allocation at the table loading time. 209 */ 210 struct dm_md_mempools { 211 mempool_t *io_pool; 212 struct bio_set *bs; 213 }; 214 215 #define RESERVED_BIO_BASED_IOS 16 216 #define RESERVED_REQUEST_BASED_IOS 256 217 #define RESERVED_MAX_IOS 1024 218 static struct kmem_cache *_io_cache; 219 static struct kmem_cache *_rq_tio_cache; 220 221 /* 222 * Bio-based DM's mempools' reserved IOs set by the user. 223 */ 224 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 225 226 /* 227 * Request-based DM's mempools' reserved IOs set by the user. 
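 * A value of 0 selects the built-in default and anything above
 * RESERVED_MAX_IOS is clamped; see __dm_get_reserved_ios() below.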
228 */ 229 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS; 230 231 static unsigned __dm_get_reserved_ios(unsigned *reserved_ios, 232 unsigned def, unsigned max) 233 { 234 unsigned ios = ACCESS_ONCE(*reserved_ios); 235 unsigned modified_ios = 0; 236 237 if (!ios) 238 modified_ios = def; 239 else if (ios > max) 240 modified_ios = max; 241 242 if (modified_ios) { 243 (void)cmpxchg(reserved_ios, ios, modified_ios); 244 ios = modified_ios; 245 } 246 247 return ios; 248 } 249 250 unsigned dm_get_reserved_bio_based_ios(void) 251 { 252 return __dm_get_reserved_ios(&reserved_bio_based_ios, 253 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS); 254 } 255 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 256 257 unsigned dm_get_reserved_rq_based_ios(void) 258 { 259 return __dm_get_reserved_ios(&reserved_rq_based_ios, 260 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS); 261 } 262 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios); 263 264 static int __init local_init(void) 265 { 266 int r = -ENOMEM; 267 268 /* allocate a slab for the dm_ios */ 269 _io_cache = KMEM_CACHE(dm_io, 0); 270 if (!_io_cache) 271 return r; 272 273 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); 274 if (!_rq_tio_cache) 275 goto out_free_io_cache; 276 277 r = dm_uevent_init(); 278 if (r) 279 goto out_free_rq_tio_cache; 280 281 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 282 if (!deferred_remove_workqueue) { 283 r = -ENOMEM; 284 goto out_uevent_exit; 285 } 286 287 _major = major; 288 r = register_blkdev(_major, _name); 289 if (r < 0) 290 goto out_free_workqueue; 291 292 if (!_major) 293 _major = r; 294 295 return 0; 296 297 out_free_workqueue: 298 destroy_workqueue(deferred_remove_workqueue); 299 out_uevent_exit: 300 dm_uevent_exit(); 301 out_free_rq_tio_cache: 302 kmem_cache_destroy(_rq_tio_cache); 303 out_free_io_cache: 304 kmem_cache_destroy(_io_cache); 305 306 return r; 307 } 308 309 static void local_exit(void) 310 { 311 flush_scheduled_work(); 312 destroy_workqueue(deferred_remove_workqueue); 313 314 kmem_cache_destroy(_rq_tio_cache); 315 kmem_cache_destroy(_io_cache); 316 unregister_blkdev(_major, _name); 317 dm_uevent_exit(); 318 319 _major = 0; 320 321 DMINFO("cleaned up"); 322 } 323 324 static int (*_inits[])(void) __initdata = { 325 local_init, 326 dm_target_init, 327 dm_linear_init, 328 dm_stripe_init, 329 dm_io_init, 330 dm_kcopyd_init, 331 dm_interface_init, 332 dm_statistics_init, 333 }; 334 335 static void (*_exits[])(void) = { 336 local_exit, 337 dm_target_exit, 338 dm_linear_exit, 339 dm_stripe_exit, 340 dm_io_exit, 341 dm_kcopyd_exit, 342 dm_interface_exit, 343 dm_statistics_exit, 344 }; 345 346 static int __init dm_init(void) 347 { 348 const int count = ARRAY_SIZE(_inits); 349 350 int r, i; 351 352 for (i = 0; i < count; i++) { 353 r = _inits[i](); 354 if (r) 355 goto bad; 356 } 357 358 return 0; 359 360 bad: 361 while (i--) 362 _exits[i](); 363 364 return r; 365 } 366 367 static void __exit dm_exit(void) 368 { 369 int i = ARRAY_SIZE(_exits); 370 371 while (i--) 372 _exits[i](); 373 374 /* 375 * Should be empty by this point. 
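	 * Every mapped_device removes its minor from _minor_idr via
	 * free_dev() -> free_minor(), and each device holds a module
	 * reference, so nothing can still be registered here.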
376 */ 377 idr_destroy(&_minor_idr); 378 } 379 380 /* 381 * Block device functions 382 */ 383 int dm_deleting_md(struct mapped_device *md) 384 { 385 return test_bit(DMF_DELETING, &md->flags); 386 } 387 388 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 389 { 390 struct mapped_device *md; 391 392 spin_lock(&_minor_lock); 393 394 md = bdev->bd_disk->private_data; 395 if (!md) 396 goto out; 397 398 if (test_bit(DMF_FREEING, &md->flags) || 399 dm_deleting_md(md)) { 400 md = NULL; 401 goto out; 402 } 403 404 dm_get(md); 405 atomic_inc(&md->open_count); 406 407 out: 408 spin_unlock(&_minor_lock); 409 410 return md ? 0 : -ENXIO; 411 } 412 413 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 414 { 415 struct mapped_device *md = disk->private_data; 416 417 spin_lock(&_minor_lock); 418 419 if (atomic_dec_and_test(&md->open_count) && 420 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 421 queue_work(deferred_remove_workqueue, &deferred_remove_work); 422 423 dm_put(md); 424 425 spin_unlock(&_minor_lock); 426 } 427 428 int dm_open_count(struct mapped_device *md) 429 { 430 return atomic_read(&md->open_count); 431 } 432 433 /* 434 * Guarantees nothing is using the device before it's deleted. 435 */ 436 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 437 { 438 int r = 0; 439 440 spin_lock(&_minor_lock); 441 442 if (dm_open_count(md)) { 443 r = -EBUSY; 444 if (mark_deferred) 445 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 446 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 447 r = -EEXIST; 448 else 449 set_bit(DMF_DELETING, &md->flags); 450 451 spin_unlock(&_minor_lock); 452 453 return r; 454 } 455 456 int dm_cancel_deferred_remove(struct mapped_device *md) 457 { 458 int r = 0; 459 460 spin_lock(&_minor_lock); 461 462 if (test_bit(DMF_DELETING, &md->flags)) 463 r = -EBUSY; 464 else 465 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 466 467 spin_unlock(&_minor_lock); 468 469 return r; 470 } 471 472 static void do_deferred_remove(struct work_struct *w) 473 { 474 dm_deferred_remove(); 475 } 476 477 sector_t dm_get_size(struct mapped_device *md) 478 { 479 return get_capacity(md->disk); 480 } 481 482 struct request_queue *dm_get_md_queue(struct mapped_device *md) 483 { 484 return md->queue; 485 } 486 487 struct dm_stats *dm_get_stats(struct mapped_device *md) 488 { 489 return &md->stats; 490 } 491 492 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 493 { 494 struct mapped_device *md = bdev->bd_disk->private_data; 495 496 return dm_get_geometry(md, geo); 497 } 498 499 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 500 unsigned int cmd, unsigned long arg) 501 { 502 struct mapped_device *md = bdev->bd_disk->private_data; 503 int srcu_idx; 504 struct dm_table *map; 505 struct dm_target *tgt; 506 int r = -ENOTTY; 507 508 retry: 509 map = dm_get_live_table(md, &srcu_idx); 510 511 if (!map || !dm_table_get_size(map)) 512 goto out; 513 514 /* We only support devices that have a single target */ 515 if (dm_table_get_num_targets(map) != 1) 516 goto out; 517 518 tgt = dm_table_get_target(map, 0); 519 520 if (dm_suspended_md(md)) { 521 r = -EAGAIN; 522 goto out; 523 } 524 525 if (tgt->type->ioctl) 526 r = tgt->type->ioctl(tgt, cmd, arg); 527 528 out: 529 dm_put_live_table(md, srcu_idx); 530 531 if (r == -ENOTCONN) { 532 msleep(10); 533 goto retry; 534 } 535 536 return r; 537 } 538 539 static struct dm_io *alloc_io(struct mapped_device *md) 540 { 541 return mempool_alloc(md->io_pool, 
GFP_NOIO); 542 } 543 544 static void free_io(struct mapped_device *md, struct dm_io *io) 545 { 546 mempool_free(io, md->io_pool); 547 } 548 549 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) 550 { 551 bio_put(&tio->clone); 552 } 553 554 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, 555 gfp_t gfp_mask) 556 { 557 return mempool_alloc(md->io_pool, gfp_mask); 558 } 559 560 static void free_rq_tio(struct dm_rq_target_io *tio) 561 { 562 mempool_free(tio, tio->md->io_pool); 563 } 564 565 static int md_in_flight(struct mapped_device *md) 566 { 567 return atomic_read(&md->pending[READ]) + 568 atomic_read(&md->pending[WRITE]); 569 } 570 571 static void start_io_acct(struct dm_io *io) 572 { 573 struct mapped_device *md = io->md; 574 struct bio *bio = io->bio; 575 int cpu; 576 int rw = bio_data_dir(bio); 577 578 io->start_time = jiffies; 579 580 cpu = part_stat_lock(); 581 part_round_stats(cpu, &dm_disk(md)->part0); 582 part_stat_unlock(); 583 atomic_set(&dm_disk(md)->part0.in_flight[rw], 584 atomic_inc_return(&md->pending[rw])); 585 586 if (unlikely(dm_stats_used(&md->stats))) 587 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, 588 bio_sectors(bio), false, 0, &io->stats_aux); 589 } 590 591 static void end_io_acct(struct dm_io *io) 592 { 593 struct mapped_device *md = io->md; 594 struct bio *bio = io->bio; 595 unsigned long duration = jiffies - io->start_time; 596 int pending, cpu; 597 int rw = bio_data_dir(bio); 598 599 cpu = part_stat_lock(); 600 part_round_stats(cpu, &dm_disk(md)->part0); 601 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration); 602 part_stat_unlock(); 603 604 if (unlikely(dm_stats_used(&md->stats))) 605 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, 606 bio_sectors(bio), true, duration, &io->stats_aux); 607 608 /* 609 * After this is decremented the bio must not be touched if it is 610 * a flush. 611 */ 612 pending = atomic_dec_return(&md->pending[rw]); 613 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); 614 pending += atomic_read(&md->pending[rw^0x1]); 615 616 /* nudge anyone waiting on suspend queue */ 617 if (!pending) 618 wake_up(&md->wait); 619 } 620 621 /* 622 * Add the bio to the list of deferred io. 623 */ 624 static void queue_io(struct mapped_device *md, struct bio *bio) 625 { 626 unsigned long flags; 627 628 spin_lock_irqsave(&md->deferred_lock, flags); 629 bio_list_add(&md->deferred, bio); 630 spin_unlock_irqrestore(&md->deferred_lock, flags); 631 queue_work(md->wq, &md->work); 632 } 633 634 /* 635 * Everyone (including functions in this file), should use this 636 * function to access the md->map field, and make sure they call 637 * dm_put_live_table() when finished. 638 */ 639 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 640 { 641 *srcu_idx = srcu_read_lock(&md->io_barrier); 642 643 return srcu_dereference(md->map, &md->io_barrier); 644 } 645 646 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 647 { 648 srcu_read_unlock(&md->io_barrier, srcu_idx); 649 } 650 651 void dm_sync_table(struct mapped_device *md) 652 { 653 synchronize_srcu(&md->io_barrier); 654 synchronize_rcu_expedited(); 655 } 656 657 /* 658 * A fast alternative to dm_get_live_table/dm_put_live_table. 659 * The caller must not block between these two functions. 
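 *
 * Plain rcu_read_lock()/rcu_read_unlock() is used here instead of SRCU,
 * which is why sleeping is not allowed, e.g. (sketch):
 *
 *	struct dm_table *map = dm_get_live_table_fast(md);
 *	if (map)
 *		... non-blocking use of the table ...
 *	dm_put_live_table_fast(md);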
660 */ 661 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 662 { 663 rcu_read_lock(); 664 return rcu_dereference(md->map); 665 } 666 667 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 668 { 669 rcu_read_unlock(); 670 } 671 672 /* 673 * Get the geometry associated with a dm device 674 */ 675 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 676 { 677 *geo = md->geometry; 678 679 return 0; 680 } 681 682 /* 683 * Set the geometry of a device. 684 */ 685 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 686 { 687 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 688 689 if (geo->start > sz) { 690 DMWARN("Start sector is beyond the geometry limits."); 691 return -EINVAL; 692 } 693 694 md->geometry = *geo; 695 696 return 0; 697 } 698 699 /*----------------------------------------------------------------- 700 * CRUD START: 701 * A more elegant soln is in the works that uses the queue 702 * merge fn, unfortunately there are a couple of changes to 703 * the block layer that I want to make for this. So in the 704 * interests of getting something for people to use I give 705 * you this clearly demarcated crap. 706 *---------------------------------------------------------------*/ 707 708 static int __noflush_suspending(struct mapped_device *md) 709 { 710 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 711 } 712 713 /* 714 * Decrements the number of outstanding ios that a bio has been 715 * cloned into, completing the original io if necc. 716 */ 717 static void dec_pending(struct dm_io *io, int error) 718 { 719 unsigned long flags; 720 int io_error; 721 struct bio *bio; 722 struct mapped_device *md = io->md; 723 724 /* Push-back supersedes any I/O errors */ 725 if (unlikely(error)) { 726 spin_lock_irqsave(&io->endio_lock, flags); 727 if (!(io->error > 0 && __noflush_suspending(md))) 728 io->error = error; 729 spin_unlock_irqrestore(&io->endio_lock, flags); 730 } 731 732 if (atomic_dec_and_test(&io->io_count)) { 733 if (io->error == DM_ENDIO_REQUEUE) { 734 /* 735 * Target requested pushing back the I/O. 736 */ 737 spin_lock_irqsave(&md->deferred_lock, flags); 738 if (__noflush_suspending(md)) 739 bio_list_add_head(&md->deferred, io->bio); 740 else 741 /* noflush suspend was interrupted. */ 742 io->error = -EIO; 743 spin_unlock_irqrestore(&md->deferred_lock, flags); 744 } 745 746 io_error = io->error; 747 bio = io->bio; 748 end_io_acct(io); 749 free_io(md, io); 750 751 if (io_error == DM_ENDIO_REQUEUE) 752 return; 753 754 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { 755 /* 756 * Preflush done for flush with data, reissue 757 * without REQ_FLUSH. 
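			 * (queue_io() defers the bio to md->deferred and
			 * dm_wq_work() resubmits it as ordinary data I/O.)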
758 */ 759 bio->bi_rw &= ~REQ_FLUSH; 760 queue_io(md, bio); 761 } else { 762 /* done with normal IO or empty flush */ 763 trace_block_bio_complete(md->queue, bio, io_error); 764 bio_endio(bio, io_error); 765 } 766 } 767 } 768 769 static void disable_write_same(struct mapped_device *md) 770 { 771 struct queue_limits *limits = dm_get_queue_limits(md); 772 773 /* device doesn't really support WRITE SAME, disable it */ 774 limits->max_write_same_sectors = 0; 775 } 776 777 static void clone_endio(struct bio *bio, int error) 778 { 779 int r = 0; 780 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 781 struct dm_io *io = tio->io; 782 struct mapped_device *md = tio->io->md; 783 dm_endio_fn endio = tio->ti->type->end_io; 784 785 if (!bio_flagged(bio, BIO_UPTODATE) && !error) 786 error = -EIO; 787 788 if (endio) { 789 r = endio(tio->ti, bio, error); 790 if (r < 0 || r == DM_ENDIO_REQUEUE) 791 /* 792 * error and requeue request are handled 793 * in dec_pending(). 794 */ 795 error = r; 796 else if (r == DM_ENDIO_INCOMPLETE) 797 /* The target will handle the io */ 798 return; 799 else if (r) { 800 DMWARN("unimplemented target endio return value: %d", r); 801 BUG(); 802 } 803 } 804 805 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) && 806 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) 807 disable_write_same(md); 808 809 free_tio(md, tio); 810 dec_pending(io, error); 811 } 812 813 /* 814 * Partial completion handling for request-based dm 815 */ 816 static void end_clone_bio(struct bio *clone, int error) 817 { 818 struct dm_rq_clone_bio_info *info = 819 container_of(clone, struct dm_rq_clone_bio_info, clone); 820 struct dm_rq_target_io *tio = info->tio; 821 struct bio *bio = info->orig; 822 unsigned int nr_bytes = info->orig->bi_iter.bi_size; 823 824 bio_put(clone); 825 826 if (tio->error) 827 /* 828 * An error has already been detected on the request. 829 * Once error occurred, just let clone->end_io() handle 830 * the remainder. 831 */ 832 return; 833 else if (error) { 834 /* 835 * Don't notice the error to the upper layer yet. 836 * The error handling decision is made by the target driver, 837 * when the request is completed. 838 */ 839 tio->error = error; 840 return; 841 } 842 843 /* 844 * I/O for the bio successfully completed. 845 * Notice the data completion to the upper layer. 846 */ 847 848 /* 849 * bios are processed from the head of the list. 850 * So the completing bio should always be rq->bio. 851 * If it's not, something wrong is happening. 852 */ 853 if (tio->orig->bio != bio) 854 DMERR("bio completion is going in the middle of the request"); 855 856 /* 857 * Update the original request. 858 * Do not use blk_end_request() here, because it may complete 859 * the original request before the clone, and break the ordering. 860 */ 861 blk_update_request(tio->orig, 0, nr_bytes); 862 } 863 864 /* 865 * Don't touch any member of the md after calling this function because 866 * the md may be freed in dm_put() at the end of this function. 867 * Or do dm_get() before calling this function and dm_put() later. 868 */ 869 static void rq_completed(struct mapped_device *md, int rw, int run_queue) 870 { 871 atomic_dec(&md->pending[rw]); 872 873 /* nudge anyone waiting on suspend queue */ 874 if (!md_in_flight(md)) 875 wake_up(&md->wait); 876 877 /* 878 * Run this off this callpath, as drivers could invoke end_io while 879 * inside their request_fn (and holding the queue lock). 
Calling 880 * back into ->request_fn() could deadlock attempting to grab the 881 * queue lock again. 882 */ 883 if (run_queue) 884 blk_run_queue_async(md->queue); 885 886 /* 887 * dm_put() must be at the end of this function. See the comment above 888 */ 889 dm_put(md); 890 } 891 892 static void free_rq_clone(struct request *clone) 893 { 894 struct dm_rq_target_io *tio = clone->end_io_data; 895 896 blk_rq_unprep_clone(clone); 897 free_rq_tio(tio); 898 } 899 900 /* 901 * Complete the clone and the original request. 902 * Must be called without queue lock. 903 */ 904 static void dm_end_request(struct request *clone, int error) 905 { 906 int rw = rq_data_dir(clone); 907 struct dm_rq_target_io *tio = clone->end_io_data; 908 struct mapped_device *md = tio->md; 909 struct request *rq = tio->orig; 910 911 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 912 rq->errors = clone->errors; 913 rq->resid_len = clone->resid_len; 914 915 if (rq->sense) 916 /* 917 * We are using the sense buffer of the original 918 * request. 919 * So setting the length of the sense data is enough. 920 */ 921 rq->sense_len = clone->sense_len; 922 } 923 924 free_rq_clone(clone); 925 blk_end_request_all(rq, error); 926 rq_completed(md, rw, true); 927 } 928 929 static void dm_unprep_request(struct request *rq) 930 { 931 struct request *clone = rq->special; 932 933 rq->special = NULL; 934 rq->cmd_flags &= ~REQ_DONTPREP; 935 936 free_rq_clone(clone); 937 } 938 939 /* 940 * Requeue the original request of a clone. 941 */ 942 void dm_requeue_unmapped_request(struct request *clone) 943 { 944 int rw = rq_data_dir(clone); 945 struct dm_rq_target_io *tio = clone->end_io_data; 946 struct mapped_device *md = tio->md; 947 struct request *rq = tio->orig; 948 struct request_queue *q = rq->q; 949 unsigned long flags; 950 951 dm_unprep_request(rq); 952 953 spin_lock_irqsave(q->queue_lock, flags); 954 blk_requeue_request(q, rq); 955 spin_unlock_irqrestore(q->queue_lock, flags); 956 957 rq_completed(md, rw, 0); 958 } 959 EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request); 960 961 static void __stop_queue(struct request_queue *q) 962 { 963 blk_stop_queue(q); 964 } 965 966 static void stop_queue(struct request_queue *q) 967 { 968 unsigned long flags; 969 970 spin_lock_irqsave(q->queue_lock, flags); 971 __stop_queue(q); 972 spin_unlock_irqrestore(q->queue_lock, flags); 973 } 974 975 static void __start_queue(struct request_queue *q) 976 { 977 if (blk_queue_stopped(q)) 978 blk_start_queue(q); 979 } 980 981 static void start_queue(struct request_queue *q) 982 { 983 unsigned long flags; 984 985 spin_lock_irqsave(q->queue_lock, flags); 986 __start_queue(q); 987 spin_unlock_irqrestore(q->queue_lock, flags); 988 } 989 990 static void dm_done(struct request *clone, int error, bool mapped) 991 { 992 int r = error; 993 struct dm_rq_target_io *tio = clone->end_io_data; 994 dm_request_endio_fn rq_end_io = NULL; 995 996 if (tio->ti) { 997 rq_end_io = tio->ti->type->rq_end_io; 998 999 if (mapped && rq_end_io) 1000 r = rq_end_io(tio->ti, clone, error, &tio->info); 1001 } 1002 1003 if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && 1004 !clone->q->limits.max_write_same_sectors)) 1005 disable_write_same(tio->md); 1006 1007 if (r <= 0) 1008 /* The target wants to complete the I/O */ 1009 dm_end_request(clone, r); 1010 else if (r == DM_ENDIO_INCOMPLETE) 1011 /* The target will handle the I/O */ 1012 return; 1013 else if (r == DM_ENDIO_REQUEUE) 1014 /* The target wants to requeue the I/O */ 1015 dm_requeue_unmapped_request(clone); 1016 else { 1017 
DMWARN("unimplemented target endio return value: %d", r); 1018 BUG(); 1019 } 1020 } 1021 1022 /* 1023 * Request completion handler for request-based dm 1024 */ 1025 static void dm_softirq_done(struct request *rq) 1026 { 1027 bool mapped = true; 1028 struct request *clone = rq->completion_data; 1029 struct dm_rq_target_io *tio = clone->end_io_data; 1030 1031 if (rq->cmd_flags & REQ_FAILED) 1032 mapped = false; 1033 1034 dm_done(clone, tio->error, mapped); 1035 } 1036 1037 /* 1038 * Complete the clone and the original request with the error status 1039 * through softirq context. 1040 */ 1041 static void dm_complete_request(struct request *clone, int error) 1042 { 1043 struct dm_rq_target_io *tio = clone->end_io_data; 1044 struct request *rq = tio->orig; 1045 1046 tio->error = error; 1047 rq->completion_data = clone; 1048 blk_complete_request(rq); 1049 } 1050 1051 /* 1052 * Complete the not-mapped clone and the original request with the error status 1053 * through softirq context. 1054 * Target's rq_end_io() function isn't called. 1055 * This may be used when the target's map_rq() function fails. 1056 */ 1057 void dm_kill_unmapped_request(struct request *clone, int error) 1058 { 1059 struct dm_rq_target_io *tio = clone->end_io_data; 1060 struct request *rq = tio->orig; 1061 1062 rq->cmd_flags |= REQ_FAILED; 1063 dm_complete_request(clone, error); 1064 } 1065 EXPORT_SYMBOL_GPL(dm_kill_unmapped_request); 1066 1067 /* 1068 * Called with the queue lock held 1069 */ 1070 static void end_clone_request(struct request *clone, int error) 1071 { 1072 /* 1073 * For just cleaning up the information of the queue in which 1074 * the clone was dispatched. 1075 * The clone is *NOT* freed actually here because it is alloced from 1076 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags. 1077 */ 1078 __blk_put_request(clone->q, clone); 1079 1080 /* 1081 * Actual request completion is done in a softirq context which doesn't 1082 * hold the queue lock. Otherwise, deadlock could occur because: 1083 * - another request may be submitted by the upper level driver 1084 * of the stacking during the completion 1085 * - the submission which requires queue lock may be done 1086 * against this queue 1087 */ 1088 dm_complete_request(clone, error); 1089 } 1090 1091 /* 1092 * Return maximum size of I/O possible at the supplied sector up to the current 1093 * target boundary. 1094 */ 1095 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 1096 { 1097 sector_t target_offset = dm_target_offset(ti, sector); 1098 1099 return ti->len - target_offset; 1100 } 1101 1102 static sector_t max_io_len(sector_t sector, struct dm_target *ti) 1103 { 1104 sector_t len = max_io_len_target_boundary(sector, ti); 1105 sector_t offset, max_len; 1106 1107 /* 1108 * Does the target need to split even further? 
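	 * E.g. (illustrative numbers): with ti->max_io_len == 128 and a
	 * sector that is 200 sectors into the target, the next split
	 * boundary is at 256, so at most 56 sectors may be issued here.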
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_FLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a subsequent bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_rw & REQ_FLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
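	 *
	 * Return values from ->map() handled below:
	 *   DM_MAPIO_SUBMITTED (0) - the target owns the clone now
	 *   DM_MAPIO_REMAPPED      - dispatch the clone to its new device
	 *   DM_MAPIO_REQUEUE / <0  - drop the clone and let dec_pending()
	 *                            record the error or requeue the io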
1194 */ 1195 atomic_inc(&tio->io->io_count); 1196 sector = clone->bi_iter.bi_sector; 1197 r = ti->type->map(ti, clone); 1198 if (r == DM_MAPIO_REMAPPED) { 1199 /* the bio has been remapped so dispatch it */ 1200 1201 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, 1202 tio->io->bio->bi_bdev->bd_dev, sector); 1203 1204 generic_make_request(clone); 1205 } else if (r < 0 || r == DM_MAPIO_REQUEUE) { 1206 /* error the io and bail out, or requeue it if needed */ 1207 md = tio->io->md; 1208 dec_pending(tio->io, r); 1209 free_tio(md, tio); 1210 } else if (r) { 1211 DMWARN("unimplemented target map return value: %d", r); 1212 BUG(); 1213 } 1214 } 1215 1216 struct clone_info { 1217 struct mapped_device *md; 1218 struct dm_table *map; 1219 struct bio *bio; 1220 struct dm_io *io; 1221 sector_t sector; 1222 unsigned sector_count; 1223 }; 1224 1225 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1226 { 1227 bio->bi_iter.bi_sector = sector; 1228 bio->bi_iter.bi_size = to_bytes(len); 1229 } 1230 1231 /* 1232 * Creates a bio that consists of range of complete bvecs. 1233 */ 1234 static void clone_bio(struct dm_target_io *tio, struct bio *bio, 1235 sector_t sector, unsigned len) 1236 { 1237 struct bio *clone = &tio->clone; 1238 1239 __bio_clone_fast(clone, bio); 1240 1241 if (bio_integrity(bio)) 1242 bio_integrity_clone(clone, bio, GFP_NOIO); 1243 1244 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1245 clone->bi_iter.bi_size = to_bytes(len); 1246 1247 if (bio_integrity(bio)) 1248 bio_integrity_trim(clone, 0, len); 1249 } 1250 1251 static struct dm_target_io *alloc_tio(struct clone_info *ci, 1252 struct dm_target *ti, int nr_iovecs, 1253 unsigned target_bio_nr) 1254 { 1255 struct dm_target_io *tio; 1256 struct bio *clone; 1257 1258 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs); 1259 tio = container_of(clone, struct dm_target_io, clone); 1260 1261 tio->io = ci->io; 1262 tio->ti = ti; 1263 tio->target_bio_nr = target_bio_nr; 1264 1265 return tio; 1266 } 1267 1268 static void __clone_and_map_simple_bio(struct clone_info *ci, 1269 struct dm_target *ti, 1270 unsigned target_bio_nr, unsigned *len) 1271 { 1272 struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr); 1273 struct bio *clone = &tio->clone; 1274 1275 tio->len_ptr = len; 1276 1277 /* 1278 * Discard requests require the bio's inline iovecs be initialized. 1279 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush 1280 * and discard, so no need for concern about wasted bvec allocations. 
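	 * (__bio_clone_fast() below shares ci->bio's bvecs; only the start
	 * sector and size are adjusted when a length is supplied.)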
1281 */ 1282 __bio_clone_fast(clone, ci->bio); 1283 if (len) 1284 bio_setup_sector(clone, ci->sector, *len); 1285 1286 __map_bio(tio); 1287 } 1288 1289 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1290 unsigned num_bios, unsigned *len) 1291 { 1292 unsigned target_bio_nr; 1293 1294 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 1295 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 1296 } 1297 1298 static int __send_empty_flush(struct clone_info *ci) 1299 { 1300 unsigned target_nr = 0; 1301 struct dm_target *ti; 1302 1303 BUG_ON(bio_has_data(ci->bio)); 1304 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1305 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1306 1307 return 0; 1308 } 1309 1310 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1311 sector_t sector, unsigned *len) 1312 { 1313 struct bio *bio = ci->bio; 1314 struct dm_target_io *tio; 1315 unsigned target_bio_nr; 1316 unsigned num_target_bios = 1; 1317 1318 /* 1319 * Does the target want to receive duplicate copies of the bio? 1320 */ 1321 if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1322 num_target_bios = ti->num_write_bios(ti, bio); 1323 1324 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 1325 tio = alloc_tio(ci, ti, 0, target_bio_nr); 1326 tio->len_ptr = len; 1327 clone_bio(tio, bio, sector, *len); 1328 __map_bio(tio); 1329 } 1330 } 1331 1332 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 1333 1334 static unsigned get_num_discard_bios(struct dm_target *ti) 1335 { 1336 return ti->num_discard_bios; 1337 } 1338 1339 static unsigned get_num_write_same_bios(struct dm_target *ti) 1340 { 1341 return ti->num_write_same_bios; 1342 } 1343 1344 typedef bool (*is_split_required_fn)(struct dm_target *ti); 1345 1346 static bool is_split_required_for_discard(struct dm_target *ti) 1347 { 1348 return ti->split_discard_bios; 1349 } 1350 1351 static int __send_changing_extent_only(struct clone_info *ci, 1352 get_num_bios_fn get_num_bios, 1353 is_split_required_fn is_split_required) 1354 { 1355 struct dm_target *ti; 1356 unsigned len; 1357 unsigned num_bios; 1358 1359 do { 1360 ti = dm_table_find_target(ci->map, ci->sector); 1361 if (!dm_target_is_valid(ti)) 1362 return -EIO; 1363 1364 /* 1365 * Even though the device advertised support for this type of 1366 * request, that does not mean every target supports it, and 1367 * reconfiguration might also have changed that since the 1368 * check was performed. 1369 */ 1370 num_bios = get_num_bios ? get_num_bios(ti) : 0; 1371 if (!num_bios) 1372 return -EOPNOTSUPP; 1373 1374 if (is_split_required && !is_split_required(ti)) 1375 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1376 else 1377 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 1378 1379 __send_duplicate_bios(ci, ti, num_bios, &len); 1380 1381 ci->sector += len; 1382 } while (ci->sector_count -= len); 1383 1384 return 0; 1385 } 1386 1387 static int __send_discard(struct clone_info *ci) 1388 { 1389 return __send_changing_extent_only(ci, get_num_discard_bios, 1390 is_split_required_for_discard); 1391 } 1392 1393 static int __send_write_same(struct clone_info *ci) 1394 { 1395 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 1396 } 1397 1398 /* 1399 * Select the correct strategy for processing a non-flush bio. 
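 *
 * REQ_DISCARD and REQ_WRITE_SAME bios are handled by
 * __send_changing_extent_only(), which issues ti->num_*_bios duplicates
 * per target; everything else is cloned with __clone_and_map_data_bio()
 * in chunks bounded by max_io_len().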
1400 */ 1401 static int __split_and_process_non_flush(struct clone_info *ci) 1402 { 1403 struct bio *bio = ci->bio; 1404 struct dm_target *ti; 1405 unsigned len; 1406 1407 if (unlikely(bio->bi_rw & REQ_DISCARD)) 1408 return __send_discard(ci); 1409 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1410 return __send_write_same(ci); 1411 1412 ti = dm_table_find_target(ci->map, ci->sector); 1413 if (!dm_target_is_valid(ti)) 1414 return -EIO; 1415 1416 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1417 1418 __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1419 1420 ci->sector += len; 1421 ci->sector_count -= len; 1422 1423 return 0; 1424 } 1425 1426 /* 1427 * Entry point to split a bio into clones and submit them to the targets. 1428 */ 1429 static void __split_and_process_bio(struct mapped_device *md, 1430 struct dm_table *map, struct bio *bio) 1431 { 1432 struct clone_info ci; 1433 int error = 0; 1434 1435 if (unlikely(!map)) { 1436 bio_io_error(bio); 1437 return; 1438 } 1439 1440 ci.map = map; 1441 ci.md = md; 1442 ci.io = alloc_io(md); 1443 ci.io->error = 0; 1444 atomic_set(&ci.io->io_count, 1); 1445 ci.io->bio = bio; 1446 ci.io->md = md; 1447 spin_lock_init(&ci.io->endio_lock); 1448 ci.sector = bio->bi_iter.bi_sector; 1449 1450 start_io_acct(ci.io); 1451 1452 if (bio->bi_rw & REQ_FLUSH) { 1453 ci.bio = &ci.md->flush_bio; 1454 ci.sector_count = 0; 1455 error = __send_empty_flush(&ci); 1456 /* dec_pending submits any data associated with flush */ 1457 } else { 1458 ci.bio = bio; 1459 ci.sector_count = bio_sectors(bio); 1460 while (ci.sector_count && !error) 1461 error = __split_and_process_non_flush(&ci); 1462 } 1463 1464 /* drop the extra reference count */ 1465 dec_pending(ci.io, error); 1466 } 1467 /*----------------------------------------------------------------- 1468 * CRUD END 1469 *---------------------------------------------------------------*/ 1470 1471 static int dm_merge_bvec(struct request_queue *q, 1472 struct bvec_merge_data *bvm, 1473 struct bio_vec *biovec) 1474 { 1475 struct mapped_device *md = q->queuedata; 1476 struct dm_table *map = dm_get_live_table_fast(md); 1477 struct dm_target *ti; 1478 sector_t max_sectors; 1479 int max_size = 0; 1480 1481 if (unlikely(!map)) 1482 goto out; 1483 1484 ti = dm_table_find_target(map, bvm->bi_sector); 1485 if (!dm_target_is_valid(ti)) 1486 goto out; 1487 1488 /* 1489 * Find maximum amount of I/O that won't need splitting 1490 */ 1491 max_sectors = min(max_io_len(bvm->bi_sector, ti), 1492 (sector_t) BIO_MAX_SECTORS); 1493 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1494 if (max_size < 0) 1495 max_size = 0; 1496 1497 /* 1498 * merge_bvec_fn() returns number of bytes 1499 * it can accept at this offset 1500 * max is precomputed maximal io size 1501 */ 1502 if (max_size && ti->type->merge) 1503 max_size = ti->type->merge(ti, bvm, biovec, max_size); 1504 /* 1505 * If the target doesn't support merge method and some of the devices 1506 * provided their merge_bvec method (we know this by looking at 1507 * queue_max_hw_sectors), then we can't allow bios with multiple vector 1508 * entries. So always set max_size to 0, and the code below allows 1509 * just one page. 
1510 */ 1511 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 1512 max_size = 0; 1513 1514 out: 1515 dm_put_live_table_fast(md); 1516 /* 1517 * Always allow an entire first page 1518 */ 1519 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 1520 max_size = biovec->bv_len; 1521 1522 return max_size; 1523 } 1524 1525 /* 1526 * The request function that just remaps the bio built up by 1527 * dm_merge_bvec. 1528 */ 1529 static void _dm_request(struct request_queue *q, struct bio *bio) 1530 { 1531 int rw = bio_data_dir(bio); 1532 struct mapped_device *md = q->queuedata; 1533 int cpu; 1534 int srcu_idx; 1535 struct dm_table *map; 1536 1537 map = dm_get_live_table(md, &srcu_idx); 1538 1539 cpu = part_stat_lock(); 1540 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]); 1541 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); 1542 part_stat_unlock(); 1543 1544 /* if we're suspended, we have to queue this io for later */ 1545 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1546 dm_put_live_table(md, srcu_idx); 1547 1548 if (bio_rw(bio) != READA) 1549 queue_io(md, bio); 1550 else 1551 bio_io_error(bio); 1552 return; 1553 } 1554 1555 __split_and_process_bio(md, map, bio); 1556 dm_put_live_table(md, srcu_idx); 1557 return; 1558 } 1559 1560 int dm_request_based(struct mapped_device *md) 1561 { 1562 return blk_queue_stackable(md->queue); 1563 } 1564 1565 static void dm_request(struct request_queue *q, struct bio *bio) 1566 { 1567 struct mapped_device *md = q->queuedata; 1568 1569 if (dm_request_based(md)) 1570 blk_queue_bio(q, bio); 1571 else 1572 _dm_request(q, bio); 1573 } 1574 1575 void dm_dispatch_request(struct request *rq) 1576 { 1577 int r; 1578 1579 if (blk_queue_io_stat(rq->q)) 1580 rq->cmd_flags |= REQ_IO_STAT; 1581 1582 rq->start_time = jiffies; 1583 r = blk_insert_cloned_request(rq->q, rq); 1584 if (r) 1585 dm_complete_request(rq, r); 1586 } 1587 EXPORT_SYMBOL_GPL(dm_dispatch_request); 1588 1589 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1590 void *data) 1591 { 1592 struct dm_rq_target_io *tio = data; 1593 struct dm_rq_clone_bio_info *info = 1594 container_of(bio, struct dm_rq_clone_bio_info, clone); 1595 1596 info->orig = bio_orig; 1597 info->tio = tio; 1598 bio->bi_end_io = end_clone_bio; 1599 1600 return 0; 1601 } 1602 1603 static int setup_clone(struct request *clone, struct request *rq, 1604 struct dm_rq_target_io *tio) 1605 { 1606 int r; 1607 1608 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, 1609 dm_rq_bio_constructor, tio); 1610 if (r) 1611 return r; 1612 1613 clone->cmd = rq->cmd; 1614 clone->cmd_len = rq->cmd_len; 1615 clone->sense = rq->sense; 1616 clone->end_io = end_clone_request; 1617 clone->end_io_data = tio; 1618 1619 return 0; 1620 } 1621 1622 static struct request *clone_rq(struct request *rq, struct mapped_device *md, 1623 gfp_t gfp_mask) 1624 { 1625 struct request *clone; 1626 struct dm_rq_target_io *tio; 1627 1628 tio = alloc_rq_tio(md, gfp_mask); 1629 if (!tio) 1630 return NULL; 1631 1632 tio->md = md; 1633 tio->ti = NULL; 1634 tio->orig = rq; 1635 tio->error = 0; 1636 memset(&tio->info, 0, sizeof(tio->info)); 1637 1638 clone = &tio->clone; 1639 if (setup_clone(clone, rq, tio)) { 1640 /* -ENOMEM */ 1641 free_rq_tio(tio); 1642 return NULL; 1643 } 1644 1645 return clone; 1646 } 1647 1648 /* 1649 * Called with the queue lock held. 
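 *
 * Allocates a clone with clone_rq() and stashes it in rq->special.
 * Returns BLKPREP_OK on success, BLKPREP_DEFER if the clone cannot be
 * allocated yet, or BLKPREP_KILL if rq->special is already in use.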
1650 */ 1651 static int dm_prep_fn(struct request_queue *q, struct request *rq) 1652 { 1653 struct mapped_device *md = q->queuedata; 1654 struct request *clone; 1655 1656 if (unlikely(rq->special)) { 1657 DMWARN("Already has something in rq->special."); 1658 return BLKPREP_KILL; 1659 } 1660 1661 clone = clone_rq(rq, md, GFP_ATOMIC); 1662 if (!clone) 1663 return BLKPREP_DEFER; 1664 1665 rq->special = clone; 1666 rq->cmd_flags |= REQ_DONTPREP; 1667 1668 return BLKPREP_OK; 1669 } 1670 1671 /* 1672 * Returns: 1673 * 0 : the request has been processed (not requeued) 1674 * !0 : the request has been requeued 1675 */ 1676 static int map_request(struct dm_target *ti, struct request *clone, 1677 struct mapped_device *md) 1678 { 1679 int r, requeued = 0; 1680 struct dm_rq_target_io *tio = clone->end_io_data; 1681 1682 tio->ti = ti; 1683 r = ti->type->map_rq(ti, clone, &tio->info); 1684 switch (r) { 1685 case DM_MAPIO_SUBMITTED: 1686 /* The target has taken the I/O to submit by itself later */ 1687 break; 1688 case DM_MAPIO_REMAPPED: 1689 /* The target has remapped the I/O so dispatch it */ 1690 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 1691 blk_rq_pos(tio->orig)); 1692 dm_dispatch_request(clone); 1693 break; 1694 case DM_MAPIO_REQUEUE: 1695 /* The target wants to requeue the I/O */ 1696 dm_requeue_unmapped_request(clone); 1697 requeued = 1; 1698 break; 1699 default: 1700 if (r > 0) { 1701 DMWARN("unimplemented target map return value: %d", r); 1702 BUG(); 1703 } 1704 1705 /* The target wants to complete the I/O */ 1706 dm_kill_unmapped_request(clone, r); 1707 break; 1708 } 1709 1710 return requeued; 1711 } 1712 1713 static struct request *dm_start_request(struct mapped_device *md, struct request *orig) 1714 { 1715 struct request *clone; 1716 1717 blk_start_request(orig); 1718 clone = orig->special; 1719 atomic_inc(&md->pending[rq_data_dir(clone)]); 1720 1721 /* 1722 * Hold the md reference here for the in-flight I/O. 1723 * We can't rely on the reference count by device opener, 1724 * because the device may be closed during the request completion 1725 * when all bios are completed. 1726 * See the comment in rq_completed() too. 1727 */ 1728 dm_get(md); 1729 1730 return clone; 1731 } 1732 1733 /* 1734 * q->request_fn for request-based dm. 1735 * Called with the queue lock held. 1736 */ 1737 static void dm_request_fn(struct request_queue *q) 1738 { 1739 struct mapped_device *md = q->queuedata; 1740 int srcu_idx; 1741 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 1742 struct dm_target *ti; 1743 struct request *rq, *clone; 1744 sector_t pos; 1745 1746 /* 1747 * For suspend, check blk_queue_stopped() and increment 1748 * ->pending within a single queue_lock not to increment the 1749 * number of in-flight I/Os after the queue is stopped in 1750 * dm_suspend(). 
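	 *
	 * The loop below peeks at the next request, looks up the target
	 * (flushes are mapped through sector 0), kills requests that fall
	 * beyond the end of the device, delays if the target is busy, and
	 * otherwise maps the clone with the queue lock dropped.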
1751 */ 1752 while (!blk_queue_stopped(q)) { 1753 rq = blk_peek_request(q); 1754 if (!rq) 1755 goto delay_and_out; 1756 1757 /* always use block 0 to find the target for flushes for now */ 1758 pos = 0; 1759 if (!(rq->cmd_flags & REQ_FLUSH)) 1760 pos = blk_rq_pos(rq); 1761 1762 ti = dm_table_find_target(map, pos); 1763 if (!dm_target_is_valid(ti)) { 1764 /* 1765 * Must perform setup, that dm_done() requires, 1766 * before calling dm_kill_unmapped_request 1767 */ 1768 DMERR_LIMIT("request attempted access beyond the end of device"); 1769 clone = dm_start_request(md, rq); 1770 dm_kill_unmapped_request(clone, -EIO); 1771 continue; 1772 } 1773 1774 if (ti->type->busy && ti->type->busy(ti)) 1775 goto delay_and_out; 1776 1777 clone = dm_start_request(md, rq); 1778 1779 spin_unlock(q->queue_lock); 1780 if (map_request(ti, clone, md)) 1781 goto requeued; 1782 1783 BUG_ON(!irqs_disabled()); 1784 spin_lock(q->queue_lock); 1785 } 1786 1787 goto out; 1788 1789 requeued: 1790 BUG_ON(!irqs_disabled()); 1791 spin_lock(q->queue_lock); 1792 1793 delay_and_out: 1794 blk_delay_queue(q, HZ / 10); 1795 out: 1796 dm_put_live_table(md, srcu_idx); 1797 } 1798 1799 int dm_underlying_device_busy(struct request_queue *q) 1800 { 1801 return blk_lld_busy(q); 1802 } 1803 EXPORT_SYMBOL_GPL(dm_underlying_device_busy); 1804 1805 static int dm_lld_busy(struct request_queue *q) 1806 { 1807 int r; 1808 struct mapped_device *md = q->queuedata; 1809 struct dm_table *map = dm_get_live_table_fast(md); 1810 1811 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) 1812 r = 1; 1813 else 1814 r = dm_table_any_busy_target(map); 1815 1816 dm_put_live_table_fast(md); 1817 1818 return r; 1819 } 1820 1821 static int dm_any_congested(void *congested_data, int bdi_bits) 1822 { 1823 int r = bdi_bits; 1824 struct mapped_device *md = congested_data; 1825 struct dm_table *map; 1826 1827 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1828 map = dm_get_live_table_fast(md); 1829 if (map) { 1830 /* 1831 * Request-based dm cares about only own queue for 1832 * the query about congestion status of request_queue 1833 */ 1834 if (dm_request_based(md)) 1835 r = md->queue->backing_dev_info.state & 1836 bdi_bits; 1837 else 1838 r = dm_table_any_congested(map, bdi_bits); 1839 } 1840 dm_put_live_table_fast(md); 1841 } 1842 1843 return r; 1844 } 1845 1846 /*----------------------------------------------------------------- 1847 * An IDR is used to keep track of allocated minor numbers. 1848 *---------------------------------------------------------------*/ 1849 static void free_minor(int minor) 1850 { 1851 spin_lock(&_minor_lock); 1852 idr_remove(&_minor_idr, minor); 1853 spin_unlock(&_minor_lock); 1854 } 1855 1856 /* 1857 * See if the device with a specific minor # is free. 1858 */ 1859 static int specific_minor(int minor) 1860 { 1861 int r; 1862 1863 if (minor >= (1 << MINORBITS)) 1864 return -EINVAL; 1865 1866 idr_preload(GFP_KERNEL); 1867 spin_lock(&_minor_lock); 1868 1869 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 1870 1871 spin_unlock(&_minor_lock); 1872 idr_preload_end(); 1873 if (r < 0) 1874 return r == -ENOSPC ? 
-EBUSY : r; 1875 return 0; 1876 } 1877 1878 static int next_free_minor(int *minor) 1879 { 1880 int r; 1881 1882 idr_preload(GFP_KERNEL); 1883 spin_lock(&_minor_lock); 1884 1885 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 1886 1887 spin_unlock(&_minor_lock); 1888 idr_preload_end(); 1889 if (r < 0) 1890 return r; 1891 *minor = r; 1892 return 0; 1893 } 1894 1895 static const struct block_device_operations dm_blk_dops; 1896 1897 static void dm_wq_work(struct work_struct *work); 1898 1899 static void dm_init_md_queue(struct mapped_device *md) 1900 { 1901 /* 1902 * Request-based dm devices cannot be stacked on top of bio-based dm 1903 * devices. The type of this dm device has not been decided yet. 1904 * The type is decided at the first table loading time. 1905 * To prevent problematic device stacking, clear the queue flag 1906 * for request stacking support until then. 1907 * 1908 * This queue is new, so no concurrency on the queue_flags. 1909 */ 1910 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 1911 1912 md->queue->queuedata = md; 1913 md->queue->backing_dev_info.congested_fn = dm_any_congested; 1914 md->queue->backing_dev_info.congested_data = md; 1915 blk_queue_make_request(md->queue, dm_request); 1916 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 1917 blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1918 } 1919 1920 /* 1921 * Allocate and initialise a blank device with a given minor. 1922 */ 1923 static struct mapped_device *alloc_dev(int minor) 1924 { 1925 int r; 1926 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 1927 void *old_md; 1928 1929 if (!md) { 1930 DMWARN("unable to allocate device, out of memory."); 1931 return NULL; 1932 } 1933 1934 if (!try_module_get(THIS_MODULE)) 1935 goto bad_module_get; 1936 1937 /* get a minor number for the dev */ 1938 if (minor == DM_ANY_MINOR) 1939 r = next_free_minor(&minor); 1940 else 1941 r = specific_minor(minor); 1942 if (r < 0) 1943 goto bad_minor; 1944 1945 r = init_srcu_struct(&md->io_barrier); 1946 if (r < 0) 1947 goto bad_io_barrier; 1948 1949 md->type = DM_TYPE_NONE; 1950 mutex_init(&md->suspend_lock); 1951 mutex_init(&md->type_lock); 1952 spin_lock_init(&md->deferred_lock); 1953 atomic_set(&md->holders, 1); 1954 atomic_set(&md->open_count, 0); 1955 atomic_set(&md->event_nr, 0); 1956 atomic_set(&md->uevent_seq, 0); 1957 INIT_LIST_HEAD(&md->uevent_list); 1958 spin_lock_init(&md->uevent_lock); 1959 1960 md->queue = blk_alloc_queue(GFP_KERNEL); 1961 if (!md->queue) 1962 goto bad_queue; 1963 1964 dm_init_md_queue(md); 1965 1966 md->disk = alloc_disk(1); 1967 if (!md->disk) 1968 goto bad_disk; 1969 1970 atomic_set(&md->pending[0], 0); 1971 atomic_set(&md->pending[1], 0); 1972 init_waitqueue_head(&md->wait); 1973 INIT_WORK(&md->work, dm_wq_work); 1974 init_waitqueue_head(&md->eventq); 1975 init_completion(&md->kobj_holder.completion); 1976 1977 md->disk->major = _major; 1978 md->disk->first_minor = minor; 1979 md->disk->fops = &dm_blk_dops; 1980 md->disk->queue = md->queue; 1981 md->disk->private_data = md; 1982 sprintf(md->disk->disk_name, "dm-%d", minor); 1983 add_disk(md->disk); 1984 format_dev_t(md->name, MKDEV(_major, minor)); 1985 1986 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1987 if (!md->wq) 1988 goto bad_thread; 1989 1990 md->bdev = bdget_disk(md->disk, 0); 1991 if (!md->bdev) 1992 goto bad_bdev; 1993 1994 bio_init(&md->flush_bio); 1995 md->flush_bio.bi_bdev = md->bdev; 1996 md->flush_bio.bi_rw = WRITE_FLUSH; 1997 1998 dm_stats_init(&md->stats); 1999 2000 /* 
Populate the mapping, nobody knows we exist yet */ 2001 spin_lock(&_minor_lock); 2002 old_md = idr_replace(&_minor_idr, md, minor); 2003 spin_unlock(&_minor_lock); 2004 2005 BUG_ON(old_md != MINOR_ALLOCED); 2006 2007 return md; 2008 2009 bad_bdev: 2010 destroy_workqueue(md->wq); 2011 bad_thread: 2012 del_gendisk(md->disk); 2013 put_disk(md->disk); 2014 bad_disk: 2015 blk_cleanup_queue(md->queue); 2016 bad_queue: 2017 cleanup_srcu_struct(&md->io_barrier); 2018 bad_io_barrier: 2019 free_minor(minor); 2020 bad_minor: 2021 module_put(THIS_MODULE); 2022 bad_module_get: 2023 kfree(md); 2024 return NULL; 2025 } 2026 2027 static void unlock_fs(struct mapped_device *md); 2028 2029 static void free_dev(struct mapped_device *md) 2030 { 2031 int minor = MINOR(disk_devt(md->disk)); 2032 2033 unlock_fs(md); 2034 bdput(md->bdev); 2035 destroy_workqueue(md->wq); 2036 if (md->io_pool) 2037 mempool_destroy(md->io_pool); 2038 if (md->bs) 2039 bioset_free(md->bs); 2040 blk_integrity_unregister(md->disk); 2041 del_gendisk(md->disk); 2042 cleanup_srcu_struct(&md->io_barrier); 2043 free_minor(minor); 2044 2045 spin_lock(&_minor_lock); 2046 md->disk->private_data = NULL; 2047 spin_unlock(&_minor_lock); 2048 2049 put_disk(md->disk); 2050 blk_cleanup_queue(md->queue); 2051 dm_stats_cleanup(&md->stats); 2052 module_put(THIS_MODULE); 2053 kfree(md); 2054 } 2055 2056 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 2057 { 2058 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2059 2060 if (md->io_pool && md->bs) { 2061 /* The md already has necessary mempools. */ 2062 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { 2063 /* 2064 * Reload bioset because front_pad may have changed 2065 * because a different table was loaded. 2066 */ 2067 bioset_free(md->bs); 2068 md->bs = p->bs; 2069 p->bs = NULL; 2070 } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) { 2071 /* 2072 * There's no need to reload with request-based dm 2073 * because the size of front_pad doesn't change. 2074 * Note for future: If you are to reload bioset, 2075 * prep-ed requests in the queue may refer 2076 * to bio from the old bioset, so you must walk 2077 * through the queue to unprep. 2078 */ 2079 } 2080 goto out; 2081 } 2082 2083 BUG_ON(!p || md->io_pool || md->bs); 2084 2085 md->io_pool = p->io_pool; 2086 p->io_pool = NULL; 2087 md->bs = p->bs; 2088 p->bs = NULL; 2089 2090 out: 2091 /* mempool bind completed, now no need any mempools in the table */ 2092 dm_table_free_md_mempools(t); 2093 } 2094 2095 /* 2096 * Bind a table to the device. 2097 */ 2098 static void event_callback(void *context) 2099 { 2100 unsigned long flags; 2101 LIST_HEAD(uevents); 2102 struct mapped_device *md = (struct mapped_device *) context; 2103 2104 spin_lock_irqsave(&md->uevent_lock, flags); 2105 list_splice_init(&md->uevent_list, &uevents); 2106 spin_unlock_irqrestore(&md->uevent_lock, flags); 2107 2108 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2109 2110 atomic_inc(&md->event_nr); 2111 wake_up(&md->eventq); 2112 } 2113 2114 /* 2115 * Protected by md->suspend_lock obtained by dm_swap_table(). 2116 */ 2117 static void __set_size(struct mapped_device *md, sector_t size) 2118 { 2119 set_capacity(md->disk, size); 2120 2121 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 2122 } 2123 2124 /* 2125 * Return 1 if the queue has a compulsory merge_bvec_fn function. 
2126 * 2127 * If this function returns 0, then the device is either a non-dm 2128 * device without a merge_bvec_fn, or it is a dm device that is 2129 * able to split any bios it receives that are too big. 2130 */ 2131 int dm_queue_merge_is_compulsory(struct request_queue *q) 2132 { 2133 struct mapped_device *dev_md; 2134 2135 if (!q->merge_bvec_fn) 2136 return 0; 2137 2138 if (q->make_request_fn == dm_request) { 2139 dev_md = q->queuedata; 2140 if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags)) 2141 return 0; 2142 } 2143 2144 return 1; 2145 } 2146 2147 static int dm_device_merge_is_compulsory(struct dm_target *ti, 2148 struct dm_dev *dev, sector_t start, 2149 sector_t len, void *data) 2150 { 2151 struct block_device *bdev = dev->bdev; 2152 struct request_queue *q = bdev_get_queue(bdev); 2153 2154 return dm_queue_merge_is_compulsory(q); 2155 } 2156 2157 /* 2158 * Return 1 if it is acceptable to ignore merge_bvec_fn based 2159 * on the properties of the underlying devices. 2160 */ 2161 static int dm_table_merge_is_optional(struct dm_table *table) 2162 { 2163 unsigned i = 0; 2164 struct dm_target *ti; 2165 2166 while (i < dm_table_get_num_targets(table)) { 2167 ti = dm_table_get_target(table, i++); 2168 2169 if (ti->type->iterate_devices && 2170 ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL)) 2171 return 0; 2172 } 2173 2174 return 1; 2175 } 2176 2177 /* 2178 * Returns old map, which caller must destroy. 2179 */ 2180 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2181 struct queue_limits *limits) 2182 { 2183 struct dm_table *old_map; 2184 struct request_queue *q = md->queue; 2185 sector_t size; 2186 int merge_is_optional; 2187 2188 size = dm_table_get_size(t); 2189 2190 /* 2191 * Wipe any geometry if the size of the table changed. 2192 */ 2193 if (size != dm_get_size(md)) 2194 memset(&md->geometry, 0, sizeof(md->geometry)); 2195 2196 __set_size(md, size); 2197 2198 dm_table_event_callback(t, event_callback, md); 2199 2200 /* 2201 * The queue hasn't been stopped yet, if the old table type wasn't 2202 * for request-based during suspension. So stop it to prevent 2203 * I/O mapping before resume. 2204 * This must be done before setting the queue restrictions, 2205 * because request-based dm may be run just after the setting. 2206 */ 2207 if (dm_table_request_based(t) && !blk_queue_stopped(q)) 2208 stop_queue(q); 2209 2210 __bind_mempools(md, t); 2211 2212 merge_is_optional = dm_table_merge_is_optional(t); 2213 2214 old_map = md->map; 2215 rcu_assign_pointer(md->map, t); 2216 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2217 2218 dm_table_set_restrictions(t, q, limits); 2219 if (merge_is_optional) 2220 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 2221 else 2222 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 2223 dm_sync_table(md); 2224 2225 return old_map; 2226 } 2227 2228 /* 2229 * Returns unbound table for the caller to free. 2230 */ 2231 static struct dm_table *__unbind(struct mapped_device *md) 2232 { 2233 struct dm_table *map = md->map; 2234 2235 if (!map) 2236 return NULL; 2237 2238 dm_table_event_callback(map, NULL, NULL); 2239 RCU_INIT_POINTER(md->map, NULL); 2240 dm_sync_table(md); 2241 2242 return map; 2243 } 2244 2245 /* 2246 * Constructor for a new device. 
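 * Allocates a mapped_device for the requested minor (or DM_ANY_MINOR for
 * the next free one), registers it with sysfs and returns it in *result;
 * returns -ENXIO if the device cannot be allocated.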

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	if (md->queue->elevator)
		return 1;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return 0;

	md->queue = q;
	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);

	elv_register_queue(md->queue);

	return 1;
}

/*
 * Set up the DM device's queue based on md's type.
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");
		return -EINVAL;
	}

	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
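
/*
 * Illustrative sketch only (not compiled into the driver; the helper name is
 * made up): dm_get_md() takes a reference on the device it finds, and every
 * successful lookup must be balanced by dm_put().
 */
#if 0
static void md_lookup_sketch(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return;	/* no such device, or it is being deleted/freed */

	/* ... md->holders keeps the mapped_device alive here ... */

	dm_put(md);
}
#endif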

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	map = dm_get_live_table(md, &srcu_idx);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}

	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device
	 * after its state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further I/O gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = md->map;

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out_unlock;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio.  This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we wait for the SRCU grace period (synchronize_srcu).  To prevent
	 * any process from reentering __split_and_process_bio from dm_request
	 * and to quiesce the thread (dm_wq_work), we set
	 * DMF_BLOCK_IO_FOR_SUSPEND and call flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		goto out_unlock; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now.  There is no request-processing activity.  All new
	 * requests are being added to md->deferred list.
	 */

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))
		goto out;

	map = md->map;
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm queues the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	r = 0;
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}
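
/*
 * Illustrative sketch only (not compiled into the driver; the helper name is
 * made up): a table is normally replaced by suspending the device so that
 * in-flight I/O is flushed and new I/O is deferred, swapping the table,
 * destroying whatever table came back, and then resuming.  Error handling
 * is omitted.
 */
#if 0
static int table_swap_sketch(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old_map;

	if (!dm_suspended_md(md))
		dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);

	old_map = dm_swap_table(md, t);
	if (IS_ERR(old_map))
		return PTR_ERR(old_map);
	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);
}
#endif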

/*
 * Internal suspend/resume works like userspace-driven suspend.  It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 *
 * Internal suspend holds md->suspend_lock, which prevents interaction with
 * userspace-driven suspend.
 */

void dm_internal_suspend(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}

void dm_internal_resume(struct mapped_device *md)
{
	if (dm_suspended_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
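
/*
 * Illustrative sketch only (not compiled into the driver; the helper name is
 * made up): an event consumer samples the counter first and waits on it
 * afterwards, so events raised in between are not lost.
 */
#if 0
static int event_wait_sketch(struct mapped_device *md)
{
	uint32_t event_nr = dm_get_event_nr(md);

	/* ... hand event_nr to the caller, do other work ... */

	/* returns 0 once event_nr has moved on, -ERESTARTSYS on a signal */
	return dm_wait_event(md, event_nr);
}
#endif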

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	struct kmem_cache *cachep;
	unsigned int pool_size;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	if (type == DM_TYPE_BIO_BASED) {
		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
	} else if (type == DM_TYPE_REQUEST_BASED) {
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_bio_data_size is not used.  See __bind_mempools(). */
		WARN_ON(per_bio_data_size != 0);
	} else
		goto out;

	pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
	if (!pools->io_pool)
		goto out;

	pools->bs = bioset_create(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
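
/*
 * Illustrative sketch only (not compiled into the driver; the helper name is
 * made up): for bio-based devices dm_alloc_md_mempools() sizes front_pad as
 * roundup(per_bio_data_size, __alignof__(struct dm_target_io)) +
 * offsetof(struct dm_target_io, clone), so every clone bio allocated from
 * md->bs is preceded in memory by its dm_target_io, which is itself preceded
 * by the target's per-bio data.  Walking back from the clone recovers both.
 */
#if 0
static void front_pad_layout_sketch(struct bio *clone, size_t per_bio_data_size)
{
	struct dm_target_io *tio = container_of(clone, struct dm_target_io, clone);
	void *per_bio_data = (char *)clone -
		offsetof(struct dm_target_io, clone) - per_bio_data_size;

	(void)tio;
	(void)per_bio_data;
}
#endif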