/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
90 */ 91 #define DM_IO_MAGIC 5191977 92 struct dm_io { 93 unsigned magic; 94 struct mapped_device *md; 95 blk_status_t status; 96 atomic_t io_count; 97 struct bio *orig_bio; 98 unsigned long start_time; 99 spinlock_t endio_lock; 100 struct dm_stats_aux stats_aux; 101 /* last member of dm_target_io is 'struct bio' */ 102 struct dm_target_io tio; 103 }; 104 105 void *dm_per_bio_data(struct bio *bio, size_t data_size) 106 { 107 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 108 if (!tio->inside_dm_io) 109 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; 110 return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size; 111 } 112 EXPORT_SYMBOL_GPL(dm_per_bio_data); 113 114 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) 115 { 116 struct dm_io *io = (struct dm_io *)((char *)data + data_size); 117 if (io->magic == DM_IO_MAGIC) 118 return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone)); 119 BUG_ON(io->magic != DM_TIO_MAGIC); 120 return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone)); 121 } 122 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); 123 124 unsigned dm_bio_get_target_bio_nr(const struct bio *bio) 125 { 126 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; 127 } 128 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); 129 130 #define MINOR_ALLOCED ((void *)-1) 131 132 /* 133 * Bits for the md->flags field. 134 */ 135 #define DMF_BLOCK_IO_FOR_SUSPEND 0 136 #define DMF_SUSPENDED 1 137 #define DMF_FROZEN 2 138 #define DMF_FREEING 3 139 #define DMF_DELETING 4 140 #define DMF_NOFLUSH_SUSPENDING 5 141 #define DMF_DEFERRED_REMOVE 6 142 #define DMF_SUSPENDED_INTERNALLY 7 143 144 #define DM_NUMA_NODE NUMA_NO_NODE 145 static int dm_numa_node = DM_NUMA_NODE; 146 147 /* 148 * For mempools pre-allocation at the table loading time. 149 */ 150 struct dm_md_mempools { 151 struct bio_set *bs; 152 struct bio_set *io_bs; 153 }; 154 155 struct table_device { 156 struct list_head list; 157 refcount_t count; 158 struct dm_dev dm_dev; 159 }; 160 161 static struct kmem_cache *_rq_tio_cache; 162 static struct kmem_cache *_rq_cache; 163 164 /* 165 * Bio-based DM's mempools' reserved IOs set by the user. 
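/*
 * Illustrative sketch, not part of the original file: how a target typically
 * consumes the two accessors above.  example_ctx, example_per_bio_data and
 * example_map() are hypothetical.  A real target declares its per-bio payload
 * size in its constructor (ti->per_io_data_size) so that the bio_sets reserve
 * the extra front_pad, then recovers the payload from any clone bio with
 * dm_per_bio_data().
 */
struct example_ctx {
	struct dm_dev *dev;		/* underlying device, from dm_get_device() */
};

struct example_per_bio_data {
	unsigned long issue_jiffies;	/* lives in the bio_set front_pad */
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_ctx *ec = ti->private;
	struct example_per_bio_data *pb =
		dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

	pb->issue_jiffies = jiffies;
	bio_set_dev(bio, ec->dev->bdev);
	return DM_MAPIO_REMAPPED;	/* core dispatches the remapped clone */
}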
166 */ 167 #define RESERVED_BIO_BASED_IOS 16 168 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 169 170 static int __dm_get_module_param_int(int *module_param, int min, int max) 171 { 172 int param = READ_ONCE(*module_param); 173 int modified_param = 0; 174 bool modified = true; 175 176 if (param < min) 177 modified_param = min; 178 else if (param > max) 179 modified_param = max; 180 else 181 modified = false; 182 183 if (modified) { 184 (void)cmpxchg(module_param, param, modified_param); 185 param = modified_param; 186 } 187 188 return param; 189 } 190 191 unsigned __dm_get_module_param(unsigned *module_param, 192 unsigned def, unsigned max) 193 { 194 unsigned param = READ_ONCE(*module_param); 195 unsigned modified_param = 0; 196 197 if (!param) 198 modified_param = def; 199 else if (param > max) 200 modified_param = max; 201 202 if (modified_param) { 203 (void)cmpxchg(module_param, param, modified_param); 204 param = modified_param; 205 } 206 207 return param; 208 } 209 210 unsigned dm_get_reserved_bio_based_ios(void) 211 { 212 return __dm_get_module_param(&reserved_bio_based_ios, 213 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); 214 } 215 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 216 217 static unsigned dm_get_numa_node(void) 218 { 219 return __dm_get_module_param_int(&dm_numa_node, 220 DM_NUMA_NODE, num_online_nodes() - 1); 221 } 222 223 static int __init local_init(void) 224 { 225 int r = -ENOMEM; 226 227 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); 228 if (!_rq_tio_cache) 229 return r; 230 231 _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request), 232 __alignof__(struct request), 0, NULL); 233 if (!_rq_cache) 234 goto out_free_rq_tio_cache; 235 236 r = dm_uevent_init(); 237 if (r) 238 goto out_free_rq_cache; 239 240 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 241 if (!deferred_remove_workqueue) { 242 r = -ENOMEM; 243 goto out_uevent_exit; 244 } 245 246 _major = major; 247 r = register_blkdev(_major, _name); 248 if (r < 0) 249 goto out_free_workqueue; 250 251 if (!_major) 252 _major = r; 253 254 return 0; 255 256 out_free_workqueue: 257 destroy_workqueue(deferred_remove_workqueue); 258 out_uevent_exit: 259 dm_uevent_exit(); 260 out_free_rq_cache: 261 kmem_cache_destroy(_rq_cache); 262 out_free_rq_tio_cache: 263 kmem_cache_destroy(_rq_tio_cache); 264 265 return r; 266 } 267 268 static void local_exit(void) 269 { 270 flush_scheduled_work(); 271 destroy_workqueue(deferred_remove_workqueue); 272 273 kmem_cache_destroy(_rq_cache); 274 kmem_cache_destroy(_rq_tio_cache); 275 unregister_blkdev(_major, _name); 276 dm_uevent_exit(); 277 278 _major = 0; 279 280 DMINFO("cleaned up"); 281 } 282 283 static int (*_inits[])(void) __initdata = { 284 local_init, 285 dm_target_init, 286 dm_linear_init, 287 dm_stripe_init, 288 dm_io_init, 289 dm_kcopyd_init, 290 dm_interface_init, 291 dm_statistics_init, 292 }; 293 294 static void (*_exits[])(void) = { 295 local_exit, 296 dm_target_exit, 297 dm_linear_exit, 298 dm_stripe_exit, 299 dm_io_exit, 300 dm_kcopyd_exit, 301 dm_interface_exit, 302 dm_statistics_exit, 303 }; 304 305 static int __init dm_init(void) 306 { 307 const int count = ARRAY_SIZE(_inits); 308 309 int r, i; 310 311 for (i = 0; i < count; i++) { 312 r = _inits[i](); 313 if (r) 314 goto bad; 315 } 316 317 return 0; 318 319 bad: 320 while (i--) 321 _exits[i](); 322 323 return r; 324 } 325 326 static void __exit dm_exit(void) 327 { 328 int i = ARRAY_SIZE(_exits); 329 330 while (i--) 331 _exits[i](); 332 333 /* 334 * 
Should be empty by this point. 335 */ 336 idr_destroy(&_minor_idr); 337 } 338 339 /* 340 * Block device functions 341 */ 342 int dm_deleting_md(struct mapped_device *md) 343 { 344 return test_bit(DMF_DELETING, &md->flags); 345 } 346 347 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 348 { 349 struct mapped_device *md; 350 351 spin_lock(&_minor_lock); 352 353 md = bdev->bd_disk->private_data; 354 if (!md) 355 goto out; 356 357 if (test_bit(DMF_FREEING, &md->flags) || 358 dm_deleting_md(md)) { 359 md = NULL; 360 goto out; 361 } 362 363 dm_get(md); 364 atomic_inc(&md->open_count); 365 out: 366 spin_unlock(&_minor_lock); 367 368 return md ? 0 : -ENXIO; 369 } 370 371 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 372 { 373 struct mapped_device *md; 374 375 spin_lock(&_minor_lock); 376 377 md = disk->private_data; 378 if (WARN_ON(!md)) 379 goto out; 380 381 if (atomic_dec_and_test(&md->open_count) && 382 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 383 queue_work(deferred_remove_workqueue, &deferred_remove_work); 384 385 dm_put(md); 386 out: 387 spin_unlock(&_minor_lock); 388 } 389 390 int dm_open_count(struct mapped_device *md) 391 { 392 return atomic_read(&md->open_count); 393 } 394 395 /* 396 * Guarantees nothing is using the device before it's deleted. 397 */ 398 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 399 { 400 int r = 0; 401 402 spin_lock(&_minor_lock); 403 404 if (dm_open_count(md)) { 405 r = -EBUSY; 406 if (mark_deferred) 407 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 408 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 409 r = -EEXIST; 410 else 411 set_bit(DMF_DELETING, &md->flags); 412 413 spin_unlock(&_minor_lock); 414 415 return r; 416 } 417 418 int dm_cancel_deferred_remove(struct mapped_device *md) 419 { 420 int r = 0; 421 422 spin_lock(&_minor_lock); 423 424 if (test_bit(DMF_DELETING, &md->flags)) 425 r = -EBUSY; 426 else 427 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 428 429 spin_unlock(&_minor_lock); 430 431 return r; 432 } 433 434 static void do_deferred_remove(struct work_struct *w) 435 { 436 dm_deferred_remove(); 437 } 438 439 sector_t dm_get_size(struct mapped_device *md) 440 { 441 return get_capacity(md->disk); 442 } 443 444 struct request_queue *dm_get_md_queue(struct mapped_device *md) 445 { 446 return md->queue; 447 } 448 449 struct dm_stats *dm_get_stats(struct mapped_device *md) 450 { 451 return &md->stats; 452 } 453 454 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 455 { 456 struct mapped_device *md = bdev->bd_disk->private_data; 457 458 return dm_get_geometry(md, geo); 459 } 460 461 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, 462 struct block_device **bdev) 463 __acquires(md->io_barrier) 464 { 465 struct dm_target *tgt; 466 struct dm_table *map; 467 int r; 468 469 retry: 470 r = -ENOTTY; 471 map = dm_get_live_table(md, srcu_idx); 472 if (!map || !dm_table_get_size(map)) 473 return r; 474 475 /* We only support devices that have a single target */ 476 if (dm_table_get_num_targets(map) != 1) 477 return r; 478 479 tgt = dm_table_get_target(map, 0); 480 if (!tgt->type->prepare_ioctl) 481 return r; 482 483 if (dm_suspended_md(md)) 484 return -EAGAIN; 485 486 r = tgt->type->prepare_ioctl(tgt, bdev); 487 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 488 dm_put_live_table(md, *srcu_idx); 489 msleep(10); 490 goto retry; 491 } 492 493 return r; 494 } 495 496 static void dm_unprepare_ioctl(struct mapped_device 
*md, int srcu_idx) 497 __releases(md->io_barrier) 498 { 499 dm_put_live_table(md, srcu_idx); 500 } 501 502 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 503 unsigned int cmd, unsigned long arg) 504 { 505 struct mapped_device *md = bdev->bd_disk->private_data; 506 int r, srcu_idx; 507 508 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 509 if (r < 0) 510 goto out; 511 512 if (r > 0) { 513 /* 514 * Target determined this ioctl is being issued against a 515 * subset of the parent bdev; require extra privileges. 516 */ 517 if (!capable(CAP_SYS_RAWIO)) { 518 DMWARN_LIMIT( 519 "%s: sending ioctl %x to DM device without required privilege.", 520 current->comm, cmd); 521 r = -ENOIOCTLCMD; 522 goto out; 523 } 524 } 525 526 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 527 out: 528 dm_unprepare_ioctl(md, srcu_idx); 529 return r; 530 } 531 532 static void start_io_acct(struct dm_io *io); 533 534 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) 535 { 536 struct dm_io *io; 537 struct dm_target_io *tio; 538 struct bio *clone; 539 540 clone = bio_alloc_bioset(GFP_NOIO, 0, md->io_bs); 541 if (!clone) 542 return NULL; 543 544 tio = container_of(clone, struct dm_target_io, clone); 545 tio->inside_dm_io = true; 546 tio->io = NULL; 547 548 io = container_of(tio, struct dm_io, tio); 549 io->magic = DM_IO_MAGIC; 550 io->status = 0; 551 atomic_set(&io->io_count, 1); 552 io->orig_bio = bio; 553 io->md = md; 554 spin_lock_init(&io->endio_lock); 555 556 start_io_acct(io); 557 558 return io; 559 } 560 561 static void free_io(struct mapped_device *md, struct dm_io *io) 562 { 563 bio_put(&io->tio.clone); 564 } 565 566 static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti, 567 unsigned target_bio_nr, gfp_t gfp_mask) 568 { 569 struct dm_target_io *tio; 570 571 if (!ci->io->tio.io) { 572 /* the dm_target_io embedded in ci->io is available */ 573 tio = &ci->io->tio; 574 } else { 575 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, ci->io->md->bs); 576 if (!clone) 577 return NULL; 578 579 tio = container_of(clone, struct dm_target_io, clone); 580 tio->inside_dm_io = false; 581 } 582 583 tio->magic = DM_TIO_MAGIC; 584 tio->io = ci->io; 585 tio->ti = ti; 586 tio->target_bio_nr = target_bio_nr; 587 588 return tio; 589 } 590 591 static void free_tio(struct dm_target_io *tio) 592 { 593 if (tio->inside_dm_io) 594 return; 595 bio_put(&tio->clone); 596 } 597 598 int md_in_flight(struct mapped_device *md) 599 { 600 return atomic_read(&md->pending[READ]) + 601 atomic_read(&md->pending[WRITE]); 602 } 603 604 static void start_io_acct(struct dm_io *io) 605 { 606 struct mapped_device *md = io->md; 607 struct bio *bio = io->orig_bio; 608 int rw = bio_data_dir(bio); 609 610 io->start_time = jiffies; 611 612 generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0); 613 614 atomic_set(&dm_disk(md)->part0.in_flight[rw], 615 atomic_inc_return(&md->pending[rw])); 616 617 if (unlikely(dm_stats_used(&md->stats))) 618 dm_stats_account_io(&md->stats, bio_data_dir(bio), 619 bio->bi_iter.bi_sector, bio_sectors(bio), 620 false, 0, &io->stats_aux); 621 } 622 623 static void end_io_acct(struct dm_io *io) 624 { 625 struct mapped_device *md = io->md; 626 struct bio *bio = io->orig_bio; 627 unsigned long duration = jiffies - io->start_time; 628 int pending; 629 int rw = bio_data_dir(bio); 630 631 generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time); 632 633 if (unlikely(dm_stats_used(&md->stats))) 634 dm_stats_account_io(&md->stats, 
bio_data_dir(bio), 635 bio->bi_iter.bi_sector, bio_sectors(bio), 636 true, duration, &io->stats_aux); 637 638 /* 639 * After this is decremented the bio must not be touched if it is 640 * a flush. 641 */ 642 pending = atomic_dec_return(&md->pending[rw]); 643 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); 644 pending += atomic_read(&md->pending[rw^0x1]); 645 646 /* nudge anyone waiting on suspend queue */ 647 if (!pending) 648 wake_up(&md->wait); 649 } 650 651 /* 652 * Add the bio to the list of deferred io. 653 */ 654 static void queue_io(struct mapped_device *md, struct bio *bio) 655 { 656 unsigned long flags; 657 658 spin_lock_irqsave(&md->deferred_lock, flags); 659 bio_list_add(&md->deferred, bio); 660 spin_unlock_irqrestore(&md->deferred_lock, flags); 661 queue_work(md->wq, &md->work); 662 } 663 664 /* 665 * Everyone (including functions in this file), should use this 666 * function to access the md->map field, and make sure they call 667 * dm_put_live_table() when finished. 668 */ 669 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 670 { 671 *srcu_idx = srcu_read_lock(&md->io_barrier); 672 673 return srcu_dereference(md->map, &md->io_barrier); 674 } 675 676 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 677 { 678 srcu_read_unlock(&md->io_barrier, srcu_idx); 679 } 680 681 void dm_sync_table(struct mapped_device *md) 682 { 683 synchronize_srcu(&md->io_barrier); 684 synchronize_rcu_expedited(); 685 } 686 687 /* 688 * A fast alternative to dm_get_live_table/dm_put_live_table. 689 * The caller must not block between these two functions. 690 */ 691 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 692 { 693 rcu_read_lock(); 694 return rcu_dereference(md->map); 695 } 696 697 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 698 { 699 rcu_read_unlock(); 700 } 701 702 static char *_dm_claim_ptr = "I belong to device-mapper"; 703 704 /* 705 * Open a table device so we can use it as a map destination. 706 */ 707 static int open_table_device(struct table_device *td, dev_t dev, 708 struct mapped_device *md) 709 { 710 struct block_device *bdev; 711 712 int r; 713 714 BUG_ON(td->dm_dev.bdev); 715 716 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); 717 if (IS_ERR(bdev)) 718 return PTR_ERR(bdev); 719 720 r = bd_link_disk_holder(bdev, dm_disk(md)); 721 if (r) { 722 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 723 return r; 724 } 725 726 td->dm_dev.bdev = bdev; 727 td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 728 return 0; 729 } 730 731 /* 732 * Close a table device that we've been using. 
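/*
 * Illustrative sketch, not part of the original file: the access pattern
 * prescribed for dm_get_live_table()/dm_put_live_table() above.
 * example_count_targets() is hypothetical; the point is that the SRCU read
 * side taken by dm_get_live_table() keeps the table alive, even across
 * sleeps, until the matching dm_put_live_table().
 */
static unsigned example_count_targets(struct mapped_device *md)
{
	struct dm_table *map;
	unsigned nr = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		nr = dm_table_get_num_targets(map);	/* may sleep before the put */
	dm_put_live_table(md, srcu_idx);

	return nr;
}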
733 */ 734 static void close_table_device(struct table_device *td, struct mapped_device *md) 735 { 736 if (!td->dm_dev.bdev) 737 return; 738 739 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 740 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 741 put_dax(td->dm_dev.dax_dev); 742 td->dm_dev.bdev = NULL; 743 td->dm_dev.dax_dev = NULL; 744 } 745 746 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 747 fmode_t mode) { 748 struct table_device *td; 749 750 list_for_each_entry(td, l, list) 751 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 752 return td; 753 754 return NULL; 755 } 756 757 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 758 struct dm_dev **result) { 759 int r; 760 struct table_device *td; 761 762 mutex_lock(&md->table_devices_lock); 763 td = find_table_device(&md->table_devices, dev, mode); 764 if (!td) { 765 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 766 if (!td) { 767 mutex_unlock(&md->table_devices_lock); 768 return -ENOMEM; 769 } 770 771 td->dm_dev.mode = mode; 772 td->dm_dev.bdev = NULL; 773 774 if ((r = open_table_device(td, dev, md))) { 775 mutex_unlock(&md->table_devices_lock); 776 kfree(td); 777 return r; 778 } 779 780 format_dev_t(td->dm_dev.name, dev); 781 782 refcount_set(&td->count, 1); 783 list_add(&td->list, &md->table_devices); 784 } else { 785 refcount_inc(&td->count); 786 } 787 mutex_unlock(&md->table_devices_lock); 788 789 *result = &td->dm_dev; 790 return 0; 791 } 792 EXPORT_SYMBOL_GPL(dm_get_table_device); 793 794 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 795 { 796 struct table_device *td = container_of(d, struct table_device, dm_dev); 797 798 mutex_lock(&md->table_devices_lock); 799 if (refcount_dec_and_test(&td->count)) { 800 close_table_device(td, md); 801 list_del(&td->list); 802 kfree(td); 803 } 804 mutex_unlock(&md->table_devices_lock); 805 } 806 EXPORT_SYMBOL(dm_put_table_device); 807 808 static void free_table_devices(struct list_head *devices) 809 { 810 struct list_head *tmp, *next; 811 812 list_for_each_safe(tmp, next, devices) { 813 struct table_device *td = list_entry(tmp, struct table_device, list); 814 815 DMWARN("dm_destroy: %s still exists with %d references", 816 td->dm_dev.name, refcount_read(&td->count)); 817 kfree(td); 818 } 819 } 820 821 /* 822 * Get the geometry associated with a dm device 823 */ 824 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 825 { 826 *geo = md->geometry; 827 828 return 0; 829 } 830 831 /* 832 * Set the geometry of a device. 833 */ 834 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 835 { 836 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 837 838 if (geo->start > sz) { 839 DMWARN("Start sector is beyond the geometry limits."); 840 return -EINVAL; 841 } 842 843 md->geometry = *geo; 844 845 return 0; 846 } 847 848 static int __noflush_suspending(struct mapped_device *md) 849 { 850 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 851 } 852 853 /* 854 * Decrements the number of outstanding ios that a bio has been 855 * cloned into, completing the original io if necc. 
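/*
 * Illustrative sketch, not part of the original file: targets do not normally
 * call dm_get_table_device()/dm_put_table_device() directly; they go through
 * dm_get_device()/dm_put_device() from their constructor and destructor,
 * which resolve the path and then take a reference on the (dev_t, mode) pair
 * tracked in md->table_devices above.  example_ctr_get_dev() is hypothetical
 * and reuses the example_ctx from the earlier sketch.
 */
static int example_ctr_get_dev(struct dm_target *ti, const char *path)
{
	struct example_ctx *ec = ti->private;

	return dm_get_device(ti, path, dm_table_get_mode(ti->table), &ec->dev);
}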
856 */ 857 static void dec_pending(struct dm_io *io, blk_status_t error) 858 { 859 unsigned long flags; 860 blk_status_t io_error; 861 struct bio *bio; 862 struct mapped_device *md = io->md; 863 864 /* Push-back supersedes any I/O errors */ 865 if (unlikely(error)) { 866 spin_lock_irqsave(&io->endio_lock, flags); 867 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) 868 io->status = error; 869 spin_unlock_irqrestore(&io->endio_lock, flags); 870 } 871 872 if (atomic_dec_and_test(&io->io_count)) { 873 if (io->status == BLK_STS_DM_REQUEUE) { 874 /* 875 * Target requested pushing back the I/O. 876 */ 877 spin_lock_irqsave(&md->deferred_lock, flags); 878 if (__noflush_suspending(md)) 879 /* NOTE early return due to BLK_STS_DM_REQUEUE below */ 880 bio_list_add_head(&md->deferred, io->orig_bio); 881 else 882 /* noflush suspend was interrupted. */ 883 io->status = BLK_STS_IOERR; 884 spin_unlock_irqrestore(&md->deferred_lock, flags); 885 } 886 887 io_error = io->status; 888 bio = io->orig_bio; 889 end_io_acct(io); 890 free_io(md, io); 891 892 if (io_error == BLK_STS_DM_REQUEUE) 893 return; 894 895 if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { 896 /* 897 * Preflush done for flush with data, reissue 898 * without REQ_PREFLUSH. 899 */ 900 bio->bi_opf &= ~REQ_PREFLUSH; 901 queue_io(md, bio); 902 } else { 903 /* done with normal IO or empty flush */ 904 if (io_error) 905 bio->bi_status = io_error; 906 bio_endio(bio); 907 } 908 } 909 } 910 911 void disable_write_same(struct mapped_device *md) 912 { 913 struct queue_limits *limits = dm_get_queue_limits(md); 914 915 /* device doesn't really support WRITE SAME, disable it */ 916 limits->max_write_same_sectors = 0; 917 } 918 919 void disable_write_zeroes(struct mapped_device *md) 920 { 921 struct queue_limits *limits = dm_get_queue_limits(md); 922 923 /* device doesn't really support WRITE ZEROES, disable it */ 924 limits->max_write_zeroes_sectors = 0; 925 } 926 927 static void clone_endio(struct bio *bio) 928 { 929 blk_status_t error = bio->bi_status; 930 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 931 struct dm_io *io = tio->io; 932 struct mapped_device *md = tio->io->md; 933 dm_endio_fn endio = tio->ti->type->end_io; 934 935 if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { 936 if (bio_op(bio) == REQ_OP_WRITE_SAME && 937 !bio->bi_disk->queue->limits.max_write_same_sectors) 938 disable_write_same(md); 939 if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 940 !bio->bi_disk->queue->limits.max_write_zeroes_sectors) 941 disable_write_zeroes(md); 942 } 943 944 if (endio) { 945 int r = endio(tio->ti, bio, &error); 946 switch (r) { 947 case DM_ENDIO_REQUEUE: 948 error = BLK_STS_DM_REQUEUE; 949 /*FALLTHRU*/ 950 case DM_ENDIO_DONE: 951 break; 952 case DM_ENDIO_INCOMPLETE: 953 /* The target will handle the io */ 954 return; 955 default: 956 DMWARN("unimplemented target endio return value: %d", r); 957 BUG(); 958 } 959 } 960 961 free_tio(tio); 962 dec_pending(io, error); 963 } 964 965 /* 966 * Return maximum size of I/O possible at the supplied sector up to the current 967 * target boundary. 
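/*
 * Illustrative sketch, not part of the original file: what the DM_ENDIO_*
 * values consumed by clone_endio() above mean from the target's side.
 * example_end_io() is hypothetical and reuses the per-bio payload from the
 * earlier sketch.
 */
static int example_end_io(struct dm_target *ti, struct bio *bio,
			  blk_status_t *error)
{
	struct example_per_bio_data *pb =
		dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

	pr_debug("bio completed after %lu jiffies\n",
		 jiffies - pb->issue_jiffies);

	/*
	 * DM_ENDIO_DONE: core completes the original bio with *error.
	 * DM_ENDIO_REQUEUE: core rewrites *error to BLK_STS_DM_REQUEUE.
	 * DM_ENDIO_INCOMPLETE: target keeps the bio and will end it itself.
	 */
	return DM_ENDIO_DONE;
}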
968 */ 969 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 970 { 971 sector_t target_offset = dm_target_offset(ti, sector); 972 973 return ti->len - target_offset; 974 } 975 976 static sector_t max_io_len(sector_t sector, struct dm_target *ti) 977 { 978 sector_t len = max_io_len_target_boundary(sector, ti); 979 sector_t offset, max_len; 980 981 /* 982 * Does the target need to split even further? 983 */ 984 if (ti->max_io_len) { 985 offset = dm_target_offset(ti, sector); 986 if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 987 max_len = sector_div(offset, ti->max_io_len); 988 else 989 max_len = offset & (ti->max_io_len - 1); 990 max_len = ti->max_io_len - max_len; 991 992 if (len > max_len) 993 len = max_len; 994 } 995 996 return len; 997 } 998 999 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1000 { 1001 if (len > UINT_MAX) { 1002 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1003 (unsigned long long)len, UINT_MAX); 1004 ti->error = "Maximum size of target IO is too large"; 1005 return -EINVAL; 1006 } 1007 1008 /* 1009 * BIO based queue uses its own splitting. When multipage bvecs 1010 * is switched on, size of the incoming bio may be too big to 1011 * be handled in some targets, such as crypt. 1012 * 1013 * When these targets are ready for the big bio, we can remove 1014 * the limit. 1015 */ 1016 ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE); 1017 1018 return 0; 1019 } 1020 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1021 1022 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, 1023 sector_t sector, int *srcu_idx) 1024 { 1025 struct dm_table *map; 1026 struct dm_target *ti; 1027 1028 map = dm_get_live_table(md, srcu_idx); 1029 if (!map) 1030 return NULL; 1031 1032 ti = dm_table_find_target(map, sector); 1033 if (!dm_target_is_valid(ti)) 1034 return NULL; 1035 1036 return ti; 1037 } 1038 1039 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 1040 long nr_pages, void **kaddr, pfn_t *pfn) 1041 { 1042 struct mapped_device *md = dax_get_private(dax_dev); 1043 sector_t sector = pgoff * PAGE_SECTORS; 1044 struct dm_target *ti; 1045 long len, ret = -EIO; 1046 int srcu_idx; 1047 1048 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1049 1050 if (!ti) 1051 goto out; 1052 if (!ti->type->direct_access) 1053 goto out; 1054 len = max_io_len(sector, ti) / PAGE_SECTORS; 1055 if (len < 1) 1056 goto out; 1057 nr_pages = min(len, nr_pages); 1058 if (ti->type->direct_access) 1059 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); 1060 1061 out: 1062 dm_put_live_table(md, srcu_idx); 1063 1064 return ret; 1065 } 1066 1067 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, 1068 void *addr, size_t bytes, struct iov_iter *i) 1069 { 1070 struct mapped_device *md = dax_get_private(dax_dev); 1071 sector_t sector = pgoff * PAGE_SECTORS; 1072 struct dm_target *ti; 1073 long ret = 0; 1074 int srcu_idx; 1075 1076 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1077 1078 if (!ti) 1079 goto out; 1080 if (!ti->type->dax_copy_from_iter) { 1081 ret = copy_from_iter(addr, bytes, i); 1082 goto out; 1083 } 1084 ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); 1085 out: 1086 dm_put_live_table(md, srcu_idx); 1087 1088 return ret; 1089 } 1090 1091 /* 1092 * A target may call dm_accept_partial_bio only from the map routine. It is 1093 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET. 
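/*
 * Illustrative sketch, not part of the original file: a constructor capping
 * the I/O size its map function will ever see, via dm_set_target_max_io_len()
 * above.  EXAMPLE_CHUNK_SECTORS and example_chunked_ctr() are hypothetical;
 * chunked targets such as dm-stripe bound their I/O to the chunk size in
 * essentially this way.
 */
#define EXAMPLE_CHUNK_SECTORS 128	/* 64KiB chunks */

static int example_chunked_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;

	r = dm_set_target_max_io_len(ti, EXAMPLE_CHUNK_SECTORS);
	if (r)
		return r;		/* ti->error has already been set */

	/* parse argv, dm_get_device(), set up ti->private, etc. */
	return 0;
}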
1094 * 1095 * dm_accept_partial_bio informs the dm that the target only wants to process 1096 * additional n_sectors sectors of the bio and the rest of the data should be 1097 * sent in a next bio. 1098 * 1099 * A diagram that explains the arithmetics: 1100 * +--------------------+---------------+-------+ 1101 * | 1 | 2 | 3 | 1102 * +--------------------+---------------+-------+ 1103 * 1104 * <-------------- *tio->len_ptr ---------------> 1105 * <------- bi_size -------> 1106 * <-- n_sectors --> 1107 * 1108 * Region 1 was already iterated over with bio_advance or similar function. 1109 * (it may be empty if the target doesn't use bio_advance) 1110 * Region 2 is the remaining bio size that the target wants to process. 1111 * (it may be empty if region 1 is non-empty, although there is no reason 1112 * to make it empty) 1113 * The target requires that region 3 is to be sent in the next bio. 1114 * 1115 * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 1116 * the partially processed part (the sum of regions 1+2) must be the same for all 1117 * copies of the bio. 1118 */ 1119 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 1120 { 1121 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1122 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 1123 BUG_ON(bio->bi_opf & REQ_PREFLUSH); 1124 BUG_ON(bi_size > *tio->len_ptr); 1125 BUG_ON(n_sectors > bi_size); 1126 *tio->len_ptr -= bi_size - n_sectors; 1127 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1128 } 1129 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1130 1131 /* 1132 * The zone descriptors obtained with a zone report indicate 1133 * zone positions within the target device. The zone descriptors 1134 * must be remapped to match their position within the dm device. 1135 * A target may call dm_remap_zone_report after completion of a 1136 * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained 1137 * from the target device mapping to the dm device. 1138 */ 1139 void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) 1140 { 1141 #ifdef CONFIG_BLK_DEV_ZONED 1142 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1143 struct bio *report_bio = tio->io->orig_bio; 1144 struct blk_zone_report_hdr *hdr = NULL; 1145 struct blk_zone *zone; 1146 unsigned int nr_rep = 0; 1147 unsigned int ofst; 1148 struct bio_vec bvec; 1149 struct bvec_iter iter; 1150 void *addr; 1151 1152 if (bio->bi_status) 1153 return; 1154 1155 /* 1156 * Remap the start sector of the reported zones. For sequential zones, 1157 * also remap the write pointer position. 
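/*
 * Illustrative sketch, not part of the original file: a map function that only
 * wants to handle the bio up to the next EXAMPLE_CHUNK_SECTORS boundary and
 * lets the core resubmit the rest (region 3 in the diagram above).  All
 * example_* names are hypothetical; targets such as dm-snapshot use
 * dm_accept_partial_bio() in a similar way when a bio straddles chunks that
 * must be handled one at a time.
 */
static int example_chunked_map(struct dm_target *ti, struct bio *bio)
{
	struct example_ctx *ec = ti->private;
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
	unsigned remaining = EXAMPLE_CHUNK_SECTORS -
			     (unsigned)(offset & (EXAMPLE_CHUNK_SECTORS - 1));

	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);	/* shrink to the boundary */

	bio_set_dev(bio, ec->dev->bdev);
	return DM_MAPIO_REMAPPED;
}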
1158 */ 1159 bio_for_each_segment(bvec, report_bio, iter) { 1160 addr = kmap_atomic(bvec.bv_page); 1161 1162 /* Remember the report header in the first page */ 1163 if (!hdr) { 1164 hdr = addr; 1165 ofst = sizeof(struct blk_zone_report_hdr); 1166 } else 1167 ofst = 0; 1168 1169 /* Set zones start sector */ 1170 while (hdr->nr_zones && ofst < bvec.bv_len) { 1171 zone = addr + ofst; 1172 if (zone->start >= start + ti->len) { 1173 hdr->nr_zones = 0; 1174 break; 1175 } 1176 zone->start = zone->start + ti->begin - start; 1177 if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { 1178 if (zone->cond == BLK_ZONE_COND_FULL) 1179 zone->wp = zone->start + zone->len; 1180 else if (zone->cond == BLK_ZONE_COND_EMPTY) 1181 zone->wp = zone->start; 1182 else 1183 zone->wp = zone->wp + ti->begin - start; 1184 } 1185 ofst += sizeof(struct blk_zone); 1186 hdr->nr_zones--; 1187 nr_rep++; 1188 } 1189 1190 if (addr != hdr) 1191 kunmap_atomic(addr); 1192 1193 if (!hdr->nr_zones) 1194 break; 1195 } 1196 1197 if (hdr) { 1198 hdr->nr_zones = nr_rep; 1199 kunmap_atomic(hdr); 1200 } 1201 1202 bio_advance(report_bio, report_bio->bi_iter.bi_size); 1203 1204 #else /* !CONFIG_BLK_DEV_ZONED */ 1205 bio->bi_status = BLK_STS_NOTSUPP; 1206 #endif 1207 } 1208 EXPORT_SYMBOL_GPL(dm_remap_zone_report); 1209 1210 static blk_qc_t __map_bio(struct dm_target_io *tio) 1211 { 1212 int r; 1213 sector_t sector; 1214 struct bio *clone = &tio->clone; 1215 struct dm_io *io = tio->io; 1216 struct mapped_device *md = io->md; 1217 struct dm_target *ti = tio->ti; 1218 blk_qc_t ret = BLK_QC_T_NONE; 1219 1220 clone->bi_end_io = clone_endio; 1221 1222 /* 1223 * Map the clone. If r == 0 we don't need to do 1224 * anything, the target has assumed ownership of 1225 * this io. 1226 */ 1227 atomic_inc(&io->io_count); 1228 sector = clone->bi_iter.bi_sector; 1229 1230 r = ti->type->map(ti, clone); 1231 switch (r) { 1232 case DM_MAPIO_SUBMITTED: 1233 break; 1234 case DM_MAPIO_REMAPPED: 1235 /* the bio has been remapped so dispatch it */ 1236 trace_block_bio_remap(clone->bi_disk->queue, clone, 1237 bio_dev(io->orig_bio), sector); 1238 if (md->type == DM_TYPE_NVME_BIO_BASED) 1239 ret = direct_make_request(clone); 1240 else 1241 ret = generic_make_request(clone); 1242 break; 1243 case DM_MAPIO_KILL: 1244 free_tio(tio); 1245 dec_pending(io, BLK_STS_IOERR); 1246 break; 1247 case DM_MAPIO_REQUEUE: 1248 free_tio(tio); 1249 dec_pending(io, BLK_STS_DM_REQUEUE); 1250 break; 1251 default: 1252 DMWARN("unimplemented target map return value: %d", r); 1253 BUG(); 1254 } 1255 1256 return ret; 1257 } 1258 1259 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1260 { 1261 bio->bi_iter.bi_sector = sector; 1262 bio->bi_iter.bi_size = to_bytes(len); 1263 } 1264 1265 /* 1266 * Creates a bio that consists of range of complete bvecs. 
1267 */ 1268 static int clone_bio(struct dm_target_io *tio, struct bio *bio, 1269 sector_t sector, unsigned len) 1270 { 1271 struct bio *clone = &tio->clone; 1272 1273 __bio_clone_fast(clone, bio); 1274 1275 if (unlikely(bio_integrity(bio) != NULL)) { 1276 int r; 1277 1278 if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1279 !dm_target_passes_integrity(tio->ti->type))) { 1280 DMWARN("%s: the target %s doesn't support integrity data.", 1281 dm_device_name(tio->io->md), 1282 tio->ti->type->name); 1283 return -EIO; 1284 } 1285 1286 r = bio_integrity_clone(clone, bio, GFP_NOIO); 1287 if (r < 0) 1288 return r; 1289 } 1290 1291 if (bio_op(bio) != REQ_OP_ZONE_REPORT) 1292 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1293 clone->bi_iter.bi_size = to_bytes(len); 1294 1295 if (unlikely(bio_integrity(bio) != NULL)) 1296 bio_integrity_trim(clone); 1297 1298 return 0; 1299 } 1300 1301 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1302 struct dm_target *ti, unsigned num_bios) 1303 { 1304 struct dm_target_io *tio; 1305 int try; 1306 1307 if (!num_bios) 1308 return; 1309 1310 if (num_bios == 1) { 1311 tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1312 bio_list_add(blist, &tio->clone); 1313 return; 1314 } 1315 1316 for (try = 0; try < 2; try++) { 1317 int bio_nr; 1318 struct bio *bio; 1319 1320 if (try) 1321 mutex_lock(&ci->io->md->table_devices_lock); 1322 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1323 tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT); 1324 if (!tio) 1325 break; 1326 1327 bio_list_add(blist, &tio->clone); 1328 } 1329 if (try) 1330 mutex_unlock(&ci->io->md->table_devices_lock); 1331 if (bio_nr == num_bios) 1332 return; 1333 1334 while ((bio = bio_list_pop(blist))) { 1335 tio = container_of(bio, struct dm_target_io, clone); 1336 free_tio(tio); 1337 } 1338 } 1339 } 1340 1341 static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1342 struct dm_target_io *tio, unsigned *len) 1343 { 1344 struct bio *clone = &tio->clone; 1345 1346 tio->len_ptr = len; 1347 1348 __bio_clone_fast(clone, ci->bio); 1349 if (len) 1350 bio_setup_sector(clone, ci->sector, *len); 1351 1352 return __map_bio(tio); 1353 } 1354 1355 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1356 unsigned num_bios, unsigned *len) 1357 { 1358 struct bio_list blist = BIO_EMPTY_LIST; 1359 struct bio *bio; 1360 struct dm_target_io *tio; 1361 1362 alloc_multiple_bios(&blist, ci, ti, num_bios); 1363 1364 while ((bio = bio_list_pop(&blist))) { 1365 tio = container_of(bio, struct dm_target_io, clone); 1366 (void) __clone_and_map_simple_bio(ci, tio, len); 1367 } 1368 } 1369 1370 static int __send_empty_flush(struct clone_info *ci) 1371 { 1372 unsigned target_nr = 0; 1373 struct dm_target *ti; 1374 1375 BUG_ON(bio_has_data(ci->bio)); 1376 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1377 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1378 1379 return 0; 1380 } 1381 1382 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1383 sector_t sector, unsigned *len) 1384 { 1385 struct bio *bio = ci->bio; 1386 struct dm_target_io *tio; 1387 int r; 1388 1389 tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1390 tio->len_ptr = len; 1391 r = clone_bio(tio, bio, sector, *len); 1392 if (r < 0) { 1393 free_tio(tio); 1394 return r; 1395 } 1396 (void) __map_bio(tio); 1397 1398 return 0; 1399 } 1400 1401 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 1402 1403 static unsigned get_num_discard_bios(struct 
dm_target *ti) 1404 { 1405 return ti->num_discard_bios; 1406 } 1407 1408 static unsigned get_num_secure_erase_bios(struct dm_target *ti) 1409 { 1410 return ti->num_secure_erase_bios; 1411 } 1412 1413 static unsigned get_num_write_same_bios(struct dm_target *ti) 1414 { 1415 return ti->num_write_same_bios; 1416 } 1417 1418 static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1419 { 1420 return ti->num_write_zeroes_bios; 1421 } 1422 1423 typedef bool (*is_split_required_fn)(struct dm_target *ti); 1424 1425 static bool is_split_required_for_discard(struct dm_target *ti) 1426 { 1427 return ti->split_discard_bios; 1428 } 1429 1430 static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 1431 get_num_bios_fn get_num_bios, 1432 is_split_required_fn is_split_required) 1433 { 1434 unsigned len; 1435 unsigned num_bios; 1436 1437 /* 1438 * Even though the device advertised support for this type of 1439 * request, that does not mean every target supports it, and 1440 * reconfiguration might also have changed that since the 1441 * check was performed. 1442 */ 1443 num_bios = get_num_bios ? get_num_bios(ti) : 0; 1444 if (!num_bios) 1445 return -EOPNOTSUPP; 1446 1447 if (is_split_required && !is_split_required(ti)) 1448 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1449 else 1450 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 1451 1452 __send_duplicate_bios(ci, ti, num_bios, &len); 1453 1454 ci->sector += len; 1455 ci->sector_count -= len; 1456 1457 return 0; 1458 } 1459 1460 static int __send_discard(struct clone_info *ci, struct dm_target *ti) 1461 { 1462 return __send_changing_extent_only(ci, ti, get_num_discard_bios, 1463 is_split_required_for_discard); 1464 } 1465 1466 static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) 1467 { 1468 return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios, NULL); 1469 } 1470 1471 static int __send_write_same(struct clone_info *ci, struct dm_target *ti) 1472 { 1473 return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL); 1474 } 1475 1476 static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) 1477 { 1478 return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL); 1479 } 1480 1481 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 1482 int *result) 1483 { 1484 struct bio *bio = ci->bio; 1485 1486 if (bio_op(bio) == REQ_OP_DISCARD) 1487 *result = __send_discard(ci, ti); 1488 else if (bio_op(bio) == REQ_OP_SECURE_ERASE) 1489 *result = __send_secure_erase(ci, ti); 1490 else if (bio_op(bio) == REQ_OP_WRITE_SAME) 1491 *result = __send_write_same(ci, ti); 1492 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) 1493 *result = __send_write_zeroes(ci, ti); 1494 else 1495 return false; 1496 1497 return true; 1498 } 1499 1500 /* 1501 * Select the correct strategy for processing a non-flush bio. 
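/*
 * Illustrative sketch, not part of the original file: the counters read by the
 * get_num_*_bios() helpers above are simply set by the target's constructor.
 * A target that passes these operations straight through to one underlying
 * device would advertise them like this (hypothetical helper, continuing the
 * example target from the earlier sketches).
 */
static void example_ctr_advertise(struct dm_target *ti)
{
	ti->num_flush_bios = 1;		/* receive one clone per empty flush */
	ti->num_discard_bios = 1;	/* otherwise __send_discard() => -EOPNOTSUPP */
	ti->num_secure_erase_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->split_discard_bios = false;	/* discards need not be split at max_io_len */
}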
1502 */ 1503 static int __split_and_process_non_flush(struct clone_info *ci) 1504 { 1505 struct bio *bio = ci->bio; 1506 struct dm_target *ti; 1507 unsigned len; 1508 int r; 1509 1510 ti = dm_table_find_target(ci->map, ci->sector); 1511 if (!dm_target_is_valid(ti)) 1512 return -EIO; 1513 1514 if (unlikely(__process_abnormal_io(ci, ti, &r))) 1515 return r; 1516 1517 if (bio_op(bio) == REQ_OP_ZONE_REPORT) 1518 len = ci->sector_count; 1519 else 1520 len = min_t(sector_t, max_io_len(ci->sector, ti), 1521 ci->sector_count); 1522 1523 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1524 if (r < 0) 1525 return r; 1526 1527 ci->sector += len; 1528 ci->sector_count -= len; 1529 1530 return 0; 1531 } 1532 1533 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1534 struct dm_table *map, struct bio *bio) 1535 { 1536 ci->map = map; 1537 ci->io = alloc_io(md, bio); 1538 ci->sector = bio->bi_iter.bi_sector; 1539 } 1540 1541 /* 1542 * Entry point to split a bio into clones and submit them to the targets. 1543 */ 1544 static blk_qc_t __split_and_process_bio(struct mapped_device *md, 1545 struct dm_table *map, struct bio *bio) 1546 { 1547 struct clone_info ci; 1548 blk_qc_t ret = BLK_QC_T_NONE; 1549 int error = 0; 1550 1551 if (unlikely(!map)) { 1552 bio_io_error(bio); 1553 return ret; 1554 } 1555 1556 init_clone_info(&ci, md, map, bio); 1557 1558 if (bio->bi_opf & REQ_PREFLUSH) { 1559 ci.bio = &ci.io->md->flush_bio; 1560 ci.sector_count = 0; 1561 error = __send_empty_flush(&ci); 1562 /* dec_pending submits any data associated with flush */ 1563 } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { 1564 ci.bio = bio; 1565 ci.sector_count = 0; 1566 error = __split_and_process_non_flush(&ci); 1567 } else { 1568 ci.bio = bio; 1569 ci.sector_count = bio_sectors(bio); 1570 while (ci.sector_count && !error) { 1571 error = __split_and_process_non_flush(&ci); 1572 if (current->bio_list && ci.sector_count && !error) { 1573 /* 1574 * Remainder must be passed to generic_make_request() 1575 * so that it gets handled *after* bios already submitted 1576 * have been completely processed. 1577 * We take a clone of the original to store in 1578 * ci.io->orig_bio to be used by end_io_acct() and 1579 * for dec_pending to use for completion handling. 1580 * As this path is not used for REQ_OP_ZONE_REPORT, 1581 * the usage of io->orig_bio in dm_remap_zone_report() 1582 * won't be affected by this reassignment. 1583 */ 1584 struct bio *b = bio_clone_bioset(bio, GFP_NOIO, 1585 md->queue->bio_split); 1586 ci.io->orig_bio = b; 1587 bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9); 1588 bio_chain(b, bio); 1589 ret = generic_make_request(bio); 1590 break; 1591 } 1592 } 1593 } 1594 1595 /* drop the extra reference count */ 1596 dec_pending(ci.io, errno_to_blk_status(error)); 1597 return ret; 1598 } 1599 1600 /* 1601 * Optimized variant of __split_and_process_bio that leverages the 1602 * fact that targets that use it do _not_ have a need to split bios. 
1603 */ 1604 static blk_qc_t __process_bio(struct mapped_device *md, 1605 struct dm_table *map, struct bio *bio) 1606 { 1607 struct clone_info ci; 1608 blk_qc_t ret = BLK_QC_T_NONE; 1609 int error = 0; 1610 1611 if (unlikely(!map)) { 1612 bio_io_error(bio); 1613 return ret; 1614 } 1615 1616 init_clone_info(&ci, md, map, bio); 1617 1618 if (bio->bi_opf & REQ_PREFLUSH) { 1619 ci.bio = &ci.io->md->flush_bio; 1620 ci.sector_count = 0; 1621 error = __send_empty_flush(&ci); 1622 /* dec_pending submits any data associated with flush */ 1623 } else { 1624 struct dm_target *ti = md->immutable_target; 1625 struct dm_target_io *tio; 1626 1627 /* 1628 * Defend against IO still getting in during teardown 1629 * - as was seen for a time with nvme-fcloop 1630 */ 1631 if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) { 1632 error = -EIO; 1633 goto out; 1634 } 1635 1636 ci.bio = bio; 1637 ci.sector_count = bio_sectors(bio); 1638 if (unlikely(__process_abnormal_io(&ci, ti, &error))) 1639 goto out; 1640 1641 tio = alloc_tio(&ci, ti, 0, GFP_NOIO); 1642 ret = __clone_and_map_simple_bio(&ci, tio, NULL); 1643 } 1644 out: 1645 /* drop the extra reference count */ 1646 dec_pending(ci.io, errno_to_blk_status(error)); 1647 return ret; 1648 } 1649 1650 typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *); 1651 1652 static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio, 1653 process_bio_fn process_bio) 1654 { 1655 struct mapped_device *md = q->queuedata; 1656 blk_qc_t ret = BLK_QC_T_NONE; 1657 int srcu_idx; 1658 struct dm_table *map; 1659 1660 map = dm_get_live_table(md, &srcu_idx); 1661 1662 /* if we're suspended, we have to queue this io for later */ 1663 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1664 dm_put_live_table(md, srcu_idx); 1665 1666 if (!(bio->bi_opf & REQ_RAHEAD)) 1667 queue_io(md, bio); 1668 else 1669 bio_io_error(bio); 1670 return ret; 1671 } 1672 1673 ret = process_bio(md, map, bio); 1674 1675 dm_put_live_table(md, srcu_idx); 1676 return ret; 1677 } 1678 1679 /* 1680 * The request function that remaps the bio to one target and 1681 * splits off any remainder. 1682 */ 1683 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1684 { 1685 return __dm_make_request(q, bio, __split_and_process_bio); 1686 } 1687 1688 static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio) 1689 { 1690 return __dm_make_request(q, bio, __process_bio); 1691 } 1692 1693 static int dm_any_congested(void *congested_data, int bdi_bits) 1694 { 1695 int r = bdi_bits; 1696 struct mapped_device *md = congested_data; 1697 struct dm_table *map; 1698 1699 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1700 if (dm_request_based(md)) { 1701 /* 1702 * With request-based DM we only need to check the 1703 * top-level queue for congestion. 1704 */ 1705 r = md->queue->backing_dev_info->wb.state & bdi_bits; 1706 } else { 1707 map = dm_get_live_table_fast(md); 1708 if (map) 1709 r = dm_table_any_congested(map, bdi_bits); 1710 dm_put_live_table_fast(md); 1711 } 1712 } 1713 1714 return r; 1715 } 1716 1717 /*----------------------------------------------------------------- 1718 * An IDR is used to keep track of allocated minor numbers. 
1719 *---------------------------------------------------------------*/ 1720 static void free_minor(int minor) 1721 { 1722 spin_lock(&_minor_lock); 1723 idr_remove(&_minor_idr, minor); 1724 spin_unlock(&_minor_lock); 1725 } 1726 1727 /* 1728 * See if the device with a specific minor # is free. 1729 */ 1730 static int specific_minor(int minor) 1731 { 1732 int r; 1733 1734 if (minor >= (1 << MINORBITS)) 1735 return -EINVAL; 1736 1737 idr_preload(GFP_KERNEL); 1738 spin_lock(&_minor_lock); 1739 1740 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 1741 1742 spin_unlock(&_minor_lock); 1743 idr_preload_end(); 1744 if (r < 0) 1745 return r == -ENOSPC ? -EBUSY : r; 1746 return 0; 1747 } 1748 1749 static int next_free_minor(int *minor) 1750 { 1751 int r; 1752 1753 idr_preload(GFP_KERNEL); 1754 spin_lock(&_minor_lock); 1755 1756 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 1757 1758 spin_unlock(&_minor_lock); 1759 idr_preload_end(); 1760 if (r < 0) 1761 return r; 1762 *minor = r; 1763 return 0; 1764 } 1765 1766 static const struct block_device_operations dm_blk_dops; 1767 static const struct dax_operations dm_dax_ops; 1768 1769 static void dm_wq_work(struct work_struct *work); 1770 1771 static void dm_init_normal_md_queue(struct mapped_device *md) 1772 { 1773 md->use_blk_mq = false; 1774 1775 /* 1776 * Initialize aspects of queue that aren't relevant for blk-mq 1777 */ 1778 md->queue->backing_dev_info->congested_fn = dm_any_congested; 1779 } 1780 1781 static void cleanup_mapped_device(struct mapped_device *md) 1782 { 1783 if (md->wq) 1784 destroy_workqueue(md->wq); 1785 if (md->kworker_task) 1786 kthread_stop(md->kworker_task); 1787 if (md->bs) 1788 bioset_free(md->bs); 1789 if (md->io_bs) 1790 bioset_free(md->io_bs); 1791 1792 if (md->dax_dev) { 1793 kill_dax(md->dax_dev); 1794 put_dax(md->dax_dev); 1795 md->dax_dev = NULL; 1796 } 1797 1798 if (md->disk) { 1799 spin_lock(&_minor_lock); 1800 md->disk->private_data = NULL; 1801 spin_unlock(&_minor_lock); 1802 del_gendisk(md->disk); 1803 put_disk(md->disk); 1804 } 1805 1806 if (md->queue) 1807 blk_cleanup_queue(md->queue); 1808 1809 cleanup_srcu_struct(&md->io_barrier); 1810 1811 if (md->bdev) { 1812 bdput(md->bdev); 1813 md->bdev = NULL; 1814 } 1815 1816 mutex_destroy(&md->suspend_lock); 1817 mutex_destroy(&md->type_lock); 1818 mutex_destroy(&md->table_devices_lock); 1819 1820 dm_mq_cleanup_mapped_device(md); 1821 } 1822 1823 /* 1824 * Allocate and initialise a blank device with a given minor. 
1825 */ 1826 static struct mapped_device *alloc_dev(int minor) 1827 { 1828 int r, numa_node_id = dm_get_numa_node(); 1829 struct dax_device *dax_dev; 1830 struct mapped_device *md; 1831 void *old_md; 1832 1833 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 1834 if (!md) { 1835 DMWARN("unable to allocate device, out of memory."); 1836 return NULL; 1837 } 1838 1839 if (!try_module_get(THIS_MODULE)) 1840 goto bad_module_get; 1841 1842 /* get a minor number for the dev */ 1843 if (minor == DM_ANY_MINOR) 1844 r = next_free_minor(&minor); 1845 else 1846 r = specific_minor(minor); 1847 if (r < 0) 1848 goto bad_minor; 1849 1850 r = init_srcu_struct(&md->io_barrier); 1851 if (r < 0) 1852 goto bad_io_barrier; 1853 1854 md->numa_node_id = numa_node_id; 1855 md->use_blk_mq = dm_use_blk_mq_default(); 1856 md->init_tio_pdu = false; 1857 md->type = DM_TYPE_NONE; 1858 mutex_init(&md->suspend_lock); 1859 mutex_init(&md->type_lock); 1860 mutex_init(&md->table_devices_lock); 1861 spin_lock_init(&md->deferred_lock); 1862 atomic_set(&md->holders, 1); 1863 atomic_set(&md->open_count, 0); 1864 atomic_set(&md->event_nr, 0); 1865 atomic_set(&md->uevent_seq, 0); 1866 INIT_LIST_HEAD(&md->uevent_list); 1867 INIT_LIST_HEAD(&md->table_devices); 1868 spin_lock_init(&md->uevent_lock); 1869 1870 md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL); 1871 if (!md->queue) 1872 goto bad; 1873 md->queue->queuedata = md; 1874 md->queue->backing_dev_info->congested_data = md; 1875 1876 md->disk = alloc_disk_node(1, md->numa_node_id); 1877 if (!md->disk) 1878 goto bad; 1879 1880 atomic_set(&md->pending[0], 0); 1881 atomic_set(&md->pending[1], 0); 1882 init_waitqueue_head(&md->wait); 1883 INIT_WORK(&md->work, dm_wq_work); 1884 init_waitqueue_head(&md->eventq); 1885 init_completion(&md->kobj_holder.completion); 1886 md->kworker_task = NULL; 1887 1888 md->disk->major = _major; 1889 md->disk->first_minor = minor; 1890 md->disk->fops = &dm_blk_dops; 1891 md->disk->queue = md->queue; 1892 md->disk->private_data = md; 1893 sprintf(md->disk->disk_name, "dm-%d", minor); 1894 1895 dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); 1896 if (!dax_dev) 1897 goto bad; 1898 md->dax_dev = dax_dev; 1899 1900 add_disk_no_queue_reg(md->disk); 1901 format_dev_t(md->name, MKDEV(_major, minor)); 1902 1903 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1904 if (!md->wq) 1905 goto bad; 1906 1907 md->bdev = bdget_disk(md->disk, 0); 1908 if (!md->bdev) 1909 goto bad; 1910 1911 bio_init(&md->flush_bio, NULL, 0); 1912 bio_set_dev(&md->flush_bio, md->bdev); 1913 md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1914 1915 dm_stats_init(&md->stats); 1916 1917 /* Populate the mapping, nobody knows we exist yet */ 1918 spin_lock(&_minor_lock); 1919 old_md = idr_replace(&_minor_idr, md, minor); 1920 spin_unlock(&_minor_lock); 1921 1922 BUG_ON(old_md != MINOR_ALLOCED); 1923 1924 return md; 1925 1926 bad: 1927 cleanup_mapped_device(md); 1928 bad_io_barrier: 1929 free_minor(minor); 1930 bad_minor: 1931 module_put(THIS_MODULE); 1932 bad_module_get: 1933 kvfree(md); 1934 return NULL; 1935 } 1936 1937 static void unlock_fs(struct mapped_device *md); 1938 1939 static void free_dev(struct mapped_device *md) 1940 { 1941 int minor = MINOR(disk_devt(md->disk)); 1942 1943 unlock_fs(md); 1944 1945 cleanup_mapped_device(md); 1946 1947 free_table_devices(&md->table_devices); 1948 dm_stats_cleanup(&md->stats); 1949 free_minor(minor); 1950 1951 module_put(THIS_MODULE); 1952 kvfree(md); 1953 } 1954 1955 static void 
__bind_mempools(struct mapped_device *md, struct dm_table *t) 1956 { 1957 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1958 1959 if (dm_table_bio_based(t)) { 1960 /* 1961 * The md may already have mempools that need changing. 1962 * If so, reload bioset because front_pad may have changed 1963 * because a different table was loaded. 1964 */ 1965 if (md->bs) { 1966 bioset_free(md->bs); 1967 md->bs = NULL; 1968 } 1969 if (md->io_bs) { 1970 bioset_free(md->io_bs); 1971 md->io_bs = NULL; 1972 } 1973 1974 } else if (md->bs) { 1975 /* 1976 * There's no need to reload with request-based dm 1977 * because the size of front_pad doesn't change. 1978 * Note for future: If you are to reload bioset, 1979 * prep-ed requests in the queue may refer 1980 * to bio from the old bioset, so you must walk 1981 * through the queue to unprep. 1982 */ 1983 goto out; 1984 } 1985 1986 BUG_ON(!p || md->bs || md->io_bs); 1987 1988 md->bs = p->bs; 1989 p->bs = NULL; 1990 md->io_bs = p->io_bs; 1991 p->io_bs = NULL; 1992 out: 1993 /* mempool bind completed, no longer need any mempools in the table */ 1994 dm_table_free_md_mempools(t); 1995 } 1996 1997 /* 1998 * Bind a table to the device. 1999 */ 2000 static void event_callback(void *context) 2001 { 2002 unsigned long flags; 2003 LIST_HEAD(uevents); 2004 struct mapped_device *md = (struct mapped_device *) context; 2005 2006 spin_lock_irqsave(&md->uevent_lock, flags); 2007 list_splice_init(&md->uevent_list, &uevents); 2008 spin_unlock_irqrestore(&md->uevent_lock, flags); 2009 2010 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2011 2012 atomic_inc(&md->event_nr); 2013 wake_up(&md->eventq); 2014 dm_issue_global_event(); 2015 } 2016 2017 /* 2018 * Protected by md->suspend_lock obtained by dm_swap_table(). 2019 */ 2020 static void __set_size(struct mapped_device *md, sector_t size) 2021 { 2022 lockdep_assert_held(&md->suspend_lock); 2023 2024 set_capacity(md->disk, size); 2025 2026 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 2027 } 2028 2029 /* 2030 * Returns old map, which caller must destroy. 2031 */ 2032 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2033 struct queue_limits *limits) 2034 { 2035 struct dm_table *old_map; 2036 struct request_queue *q = md->queue; 2037 bool request_based = dm_table_request_based(t); 2038 sector_t size; 2039 2040 lockdep_assert_held(&md->suspend_lock); 2041 2042 size = dm_table_get_size(t); 2043 2044 /* 2045 * Wipe any geometry if the size of the table changed. 2046 */ 2047 if (size != dm_get_size(md)) 2048 memset(&md->geometry, 0, sizeof(md->geometry)); 2049 2050 __set_size(md, size); 2051 2052 dm_table_event_callback(t, event_callback, md); 2053 2054 /* 2055 * The queue hasn't been stopped yet, if the old table type wasn't 2056 * for request-based during suspension. So stop it to prevent 2057 * I/O mapping before resume. 2058 * This must be done before setting the queue restrictions, 2059 * because request-based dm may be run just after the setting. 2060 */ 2061 if (request_based) 2062 dm_stop_queue(q); 2063 2064 if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { 2065 /* 2066 * Leverage the fact that request-based DM targets and 2067 * NVMe bio based targets are immutable singletons 2068 * - used to optimize both dm_request_fn and dm_mq_queue_rq; 2069 * and __process_bio. 
2070 */ 2071 md->immutable_target = dm_table_get_immutable_target(t); 2072 } 2073 2074 __bind_mempools(md, t); 2075 2076 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2077 rcu_assign_pointer(md->map, (void *)t); 2078 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2079 2080 dm_table_set_restrictions(t, q, limits); 2081 if (old_map) 2082 dm_sync_table(md); 2083 2084 return old_map; 2085 } 2086 2087 /* 2088 * Returns unbound table for the caller to free. 2089 */ 2090 static struct dm_table *__unbind(struct mapped_device *md) 2091 { 2092 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2093 2094 if (!map) 2095 return NULL; 2096 2097 dm_table_event_callback(map, NULL, NULL); 2098 RCU_INIT_POINTER(md->map, NULL); 2099 dm_sync_table(md); 2100 2101 return map; 2102 } 2103 2104 /* 2105 * Constructor for a new device. 2106 */ 2107 int dm_create(int minor, struct mapped_device **result) 2108 { 2109 int r; 2110 struct mapped_device *md; 2111 2112 md = alloc_dev(minor); 2113 if (!md) 2114 return -ENXIO; 2115 2116 r = dm_sysfs_init(md); 2117 if (r) { 2118 free_dev(md); 2119 return r; 2120 } 2121 2122 *result = md; 2123 return 0; 2124 } 2125 2126 /* 2127 * Functions to manage md->type. 2128 * All are required to hold md->type_lock. 2129 */ 2130 void dm_lock_md_type(struct mapped_device *md) 2131 { 2132 mutex_lock(&md->type_lock); 2133 } 2134 2135 void dm_unlock_md_type(struct mapped_device *md) 2136 { 2137 mutex_unlock(&md->type_lock); 2138 } 2139 2140 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2141 { 2142 BUG_ON(!mutex_is_locked(&md->type_lock)); 2143 md->type = type; 2144 } 2145 2146 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2147 { 2148 return md->type; 2149 } 2150 2151 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2152 { 2153 return md->immutable_target_type; 2154 } 2155 2156 /* 2157 * The queue_limits are only valid as long as you have a reference 2158 * count on 'md'. 
2159 */ 2160 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2161 { 2162 BUG_ON(!atomic_read(&md->holders)); 2163 return &md->queue->limits; 2164 } 2165 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2166 2167 /* 2168 * Setup the DM device's queue based on md's type 2169 */ 2170 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2171 { 2172 int r; 2173 struct queue_limits limits; 2174 enum dm_queue_mode type = dm_get_md_type(md); 2175 2176 switch (type) { 2177 case DM_TYPE_REQUEST_BASED: 2178 dm_init_normal_md_queue(md); 2179 r = dm_old_init_request_queue(md, t); 2180 if (r) { 2181 DMERR("Cannot initialize queue for request-based mapped device"); 2182 return r; 2183 } 2184 break; 2185 case DM_TYPE_MQ_REQUEST_BASED: 2186 r = dm_mq_init_request_queue(md, t); 2187 if (r) { 2188 DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2189 return r; 2190 } 2191 break; 2192 case DM_TYPE_BIO_BASED: 2193 case DM_TYPE_DAX_BIO_BASED: 2194 dm_init_normal_md_queue(md); 2195 blk_queue_make_request(md->queue, dm_make_request); 2196 break; 2197 case DM_TYPE_NVME_BIO_BASED: 2198 dm_init_normal_md_queue(md); 2199 blk_queue_make_request(md->queue, dm_make_request_nvme); 2200 break; 2201 case DM_TYPE_NONE: 2202 WARN_ON_ONCE(true); 2203 break; 2204 } 2205 2206 r = dm_calculate_queue_limits(t, &limits); 2207 if (r) { 2208 DMERR("Cannot calculate initial queue limits"); 2209 return r; 2210 } 2211 dm_table_set_restrictions(t, md->queue, &limits); 2212 blk_register_queue(md->disk); 2213 2214 return 0; 2215 } 2216 2217 struct mapped_device *dm_get_md(dev_t dev) 2218 { 2219 struct mapped_device *md; 2220 unsigned minor = MINOR(dev); 2221 2222 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2223 return NULL; 2224 2225 spin_lock(&_minor_lock); 2226 2227 md = idr_find(&_minor_idr, minor); 2228 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2229 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2230 md = NULL; 2231 goto out; 2232 } 2233 dm_get(md); 2234 out: 2235 spin_unlock(&_minor_lock); 2236 2237 return md; 2238 } 2239 EXPORT_SYMBOL_GPL(dm_get_md); 2240 2241 void *dm_get_mdptr(struct mapped_device *md) 2242 { 2243 return md->interface_ptr; 2244 } 2245 2246 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2247 { 2248 md->interface_ptr = ptr; 2249 } 2250 2251 void dm_get(struct mapped_device *md) 2252 { 2253 atomic_inc(&md->holders); 2254 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2255 } 2256 2257 int dm_hold(struct mapped_device *md) 2258 { 2259 spin_lock(&_minor_lock); 2260 if (test_bit(DMF_FREEING, &md->flags)) { 2261 spin_unlock(&_minor_lock); 2262 return -EBUSY; 2263 } 2264 dm_get(md); 2265 spin_unlock(&_minor_lock); 2266 return 0; 2267 } 2268 EXPORT_SYMBOL_GPL(dm_hold); 2269 2270 const char *dm_device_name(struct mapped_device *md) 2271 { 2272 return md->name; 2273 } 2274 EXPORT_SYMBOL_GPL(dm_device_name); 2275 2276 static void __dm_destroy(struct mapped_device *md, bool wait) 2277 { 2278 struct dm_table *map; 2279 int srcu_idx; 2280 2281 might_sleep(); 2282 2283 spin_lock(&_minor_lock); 2284 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2285 set_bit(DMF_FREEING, &md->flags); 2286 spin_unlock(&_minor_lock); 2287 2288 blk_set_queue_dying(md->queue); 2289 2290 if (dm_request_based(md) && md->kworker_task) 2291 kthread_flush_worker(&md->kworker); 2292 2293 /* 2294 * Take suspend_lock so that presuspend and postsuspend methods 2295 * do not race with internal suspend. 
2296 */ 2297 mutex_lock(&md->suspend_lock); 2298 map = dm_get_live_table(md, &srcu_idx); 2299 if (!dm_suspended_md(md)) { 2300 dm_table_presuspend_targets(map); 2301 dm_table_postsuspend_targets(map); 2302 } 2303 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2304 dm_put_live_table(md, srcu_idx); 2305 mutex_unlock(&md->suspend_lock); 2306 2307 /* 2308 * Rare, but there may be I/O requests still going to complete, 2309 * for example. Wait for all references to disappear. 2310 * No one should increment the reference count of the mapped_device, 2311 * after the mapped_device state becomes DMF_FREEING. 2312 */ 2313 if (wait) 2314 while (atomic_read(&md->holders)) 2315 msleep(1); 2316 else if (atomic_read(&md->holders)) 2317 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2318 dm_device_name(md), atomic_read(&md->holders)); 2319 2320 dm_sysfs_exit(md); 2321 dm_table_destroy(__unbind(md)); 2322 free_dev(md); 2323 } 2324 2325 void dm_destroy(struct mapped_device *md) 2326 { 2327 __dm_destroy(md, true); 2328 } 2329 2330 void dm_destroy_immediate(struct mapped_device *md) 2331 { 2332 __dm_destroy(md, false); 2333 } 2334 2335 void dm_put(struct mapped_device *md) 2336 { 2337 atomic_dec(&md->holders); 2338 } 2339 EXPORT_SYMBOL_GPL(dm_put); 2340 2341 static int dm_wait_for_completion(struct mapped_device *md, long task_state) 2342 { 2343 int r = 0; 2344 DEFINE_WAIT(wait); 2345 2346 while (1) { 2347 prepare_to_wait(&md->wait, &wait, task_state); 2348 2349 if (!md_in_flight(md)) 2350 break; 2351 2352 if (signal_pending_state(task_state, current)) { 2353 r = -EINTR; 2354 break; 2355 } 2356 2357 io_schedule(); 2358 } 2359 finish_wait(&md->wait, &wait); 2360 2361 return r; 2362 } 2363 2364 /* 2365 * Process the deferred bios 2366 */ 2367 static void dm_wq_work(struct work_struct *work) 2368 { 2369 struct mapped_device *md = container_of(work, struct mapped_device, 2370 work); 2371 struct bio *c; 2372 int srcu_idx; 2373 struct dm_table *map; 2374 2375 map = dm_get_live_table(md, &srcu_idx); 2376 2377 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2378 spin_lock_irq(&md->deferred_lock); 2379 c = bio_list_pop(&md->deferred); 2380 spin_unlock_irq(&md->deferred_lock); 2381 2382 if (!c) 2383 break; 2384 2385 if (dm_request_based(md)) 2386 generic_make_request(c); 2387 else 2388 __split_and_process_bio(md, map, c); 2389 } 2390 2391 dm_put_live_table(md, srcu_idx); 2392 } 2393 2394 static void dm_queue_flush(struct mapped_device *md) 2395 { 2396 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2397 smp_mb__after_atomic(); 2398 queue_work(md->wq, &md->work); 2399 } 2400 2401 /* 2402 * Swap in a new table, returning the old one for the caller to destroy. 2403 */ 2404 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2405 { 2406 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2407 struct queue_limits limits; 2408 int r; 2409 2410 mutex_lock(&md->suspend_lock); 2411 2412 /* device must be suspended */ 2413 if (!dm_suspended_md(md)) 2414 goto out; 2415 2416 /* 2417 * If the new table has no data devices, retain the existing limits. 2418 * This helps multipath with queue_if_no_path if all paths disappear, 2419 * then new I/O is queued based on these limits, and then some paths 2420 * reappear. 
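 *
 * For context, a much simplified, illustrative sketch of how a table
 * swap is typically driven around this function:
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (IS_ERR(old_map))
 *		goto err;			// e.g. -EINVAL: not suspended
 *	dm_table_destroy(old_map);		// tolerates NULL (no old map)
 *	dm_resume(md);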
*/
2422 if (dm_table_has_no_data_devices(table)) {
2423 live_map = dm_get_live_table_fast(md);
2424 if (live_map)
2425 limits = md->queue->limits;
2426 dm_put_live_table_fast(md);
2427 }
2428
2429 if (!live_map) {
2430 r = dm_calculate_queue_limits(table, &limits);
2431 if (r) {
2432 map = ERR_PTR(r);
2433 goto out;
2434 }
2435 }
2436
2437 map = __bind(md, table, &limits);
2438 dm_issue_global_event();
2439
2440 out:
2441 mutex_unlock(&md->suspend_lock);
2442 return map;
2443 }
2444
2445 /*
2446 * Functions to lock and unlock any filesystem running on the
2447 * device.
2448 */
2449 static int lock_fs(struct mapped_device *md)
2450 {
2451 int r;
2452
2453 WARN_ON(md->frozen_sb);
2454
2455 md->frozen_sb = freeze_bdev(md->bdev);
2456 if (IS_ERR(md->frozen_sb)) {
2457 r = PTR_ERR(md->frozen_sb);
2458 md->frozen_sb = NULL;
2459 return r;
2460 }
2461
2462 set_bit(DMF_FROZEN, &md->flags);
2463
2464 return 0;
2465 }
2466
2467 static void unlock_fs(struct mapped_device *md)
2468 {
2469 if (!test_bit(DMF_FROZEN, &md->flags))
2470 return;
2471
2472 thaw_bdev(md->bdev, md->frozen_sb);
2473 md->frozen_sb = NULL;
2474 clear_bit(DMF_FROZEN, &md->flags);
2475 }
2476
2477 /*
2478 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2479 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2480 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2481 *
2482 * If __dm_suspend returns 0, the device is completely quiescent
2483 * now. There is no request-processing activity. All new requests
2484 * are being added to md->deferred list.
2485 */
2486 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2487 unsigned suspend_flags, long task_state,
2488 int dmf_suspended_flag)
2489 {
2490 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2491 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2492 int r;
2493
2494 lockdep_assert_held(&md->suspend_lock);
2495
2496 /*
2497 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2498 * This flag is cleared before dm_suspend returns.
2499 */
2500 if (noflush)
2501 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2502 else
2503 pr_debug("%s: suspending with flush\n", dm_device_name(md));
2504
2505 /*
2506 * This gets reverted if there's an error later and the targets
2507 * provide the .presuspend_undo hook.
2508 */
2509 dm_table_presuspend_targets(map);
2510
2511 /*
2512 * Flush I/O to the device.
2513 * Any I/O submitted after lock_fs() may not be flushed.
2514 * noflush takes precedence over do_lockfs.
2515 * (lock_fs() flushes I/Os and waits for them to complete.)
2516 */
2517 if (!noflush && do_lockfs) {
2518 r = lock_fs(md);
2519 if (r) {
2520 dm_table_presuspend_undo_targets(map);
2521 return r;
2522 }
2523 }
2524
2525 /*
2526 * Here we must make sure that no processes are submitting requests
2527 * to target drivers i.e. no one may be executing
2528 * __split_and_process_bio. This is called from dm_request and
2529 * dm_wq_work.
2530 *
2531 * To get all processes out of __split_and_process_bio in dm_request,
2532 * we take the write lock. To prevent any process from reentering
2533 * __split_and_process_bio from dm_request and quiesce the thread
2534 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2535 * flush_workqueue(md->wq).
2536 */
2537 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2538 if (map)
2539 synchronize_srcu(&md->io_barrier);
2540
2541 /*
2542 * Stop md->queue before flushing md->wq in case request-based
2543 * dm defers requests to md->wq from md->queue.
2544 */ 2545 if (dm_request_based(md)) { 2546 dm_stop_queue(md->queue); 2547 if (md->kworker_task) 2548 kthread_flush_worker(&md->kworker); 2549 } 2550 2551 flush_workqueue(md->wq); 2552 2553 /* 2554 * At this point no more requests are entering target request routines. 2555 * We call dm_wait_for_completion to wait for all existing requests 2556 * to finish. 2557 */ 2558 r = dm_wait_for_completion(md, task_state); 2559 if (!r) 2560 set_bit(dmf_suspended_flag, &md->flags); 2561 2562 if (noflush) 2563 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2564 if (map) 2565 synchronize_srcu(&md->io_barrier); 2566 2567 /* were we interrupted ? */ 2568 if (r < 0) { 2569 dm_queue_flush(md); 2570 2571 if (dm_request_based(md)) 2572 dm_start_queue(md->queue); 2573 2574 unlock_fs(md); 2575 dm_table_presuspend_undo_targets(map); 2576 /* pushback list is already flushed, so skip flush */ 2577 } 2578 2579 return r; 2580 } 2581 2582 /* 2583 * We need to be able to change a mapping table under a mounted 2584 * filesystem. For example we might want to move some data in 2585 * the background. Before the table can be swapped with 2586 * dm_bind_table, dm_suspend must be called to flush any in 2587 * flight bios and ensure that any further io gets deferred. 2588 */ 2589 /* 2590 * Suspend mechanism in request-based dm. 2591 * 2592 * 1. Flush all I/Os by lock_fs() if needed. 2593 * 2. Stop dispatching any I/O by stopping the request_queue. 2594 * 3. Wait for all in-flight I/Os to be completed or requeued. 2595 * 2596 * To abort suspend, start the request_queue. 2597 */ 2598 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2599 { 2600 struct dm_table *map = NULL; 2601 int r = 0; 2602 2603 retry: 2604 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2605 2606 if (dm_suspended_md(md)) { 2607 r = -EINVAL; 2608 goto out_unlock; 2609 } 2610 2611 if (dm_suspended_internally_md(md)) { 2612 /* already internally suspended, wait for internal resume */ 2613 mutex_unlock(&md->suspend_lock); 2614 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2615 if (r) 2616 return r; 2617 goto retry; 2618 } 2619 2620 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2621 2622 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2623 if (r) 2624 goto out_unlock; 2625 2626 dm_table_postsuspend_targets(map); 2627 2628 out_unlock: 2629 mutex_unlock(&md->suspend_lock); 2630 return r; 2631 } 2632 2633 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 2634 { 2635 if (map) { 2636 int r = dm_table_resume_targets(map); 2637 if (r) 2638 return r; 2639 } 2640 2641 dm_queue_flush(md); 2642 2643 /* 2644 * Flushing deferred I/Os must be done after targets are resumed 2645 * so that mapping of targets can work correctly. 2646 * Request-based dm is queueing the deferred I/Os in its request_queue. 
2647 */ 2648 if (dm_request_based(md)) 2649 dm_start_queue(md->queue); 2650 2651 unlock_fs(md); 2652 2653 return 0; 2654 } 2655 2656 int dm_resume(struct mapped_device *md) 2657 { 2658 int r; 2659 struct dm_table *map = NULL; 2660 2661 retry: 2662 r = -EINVAL; 2663 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2664 2665 if (!dm_suspended_md(md)) 2666 goto out; 2667 2668 if (dm_suspended_internally_md(md)) { 2669 /* already internally suspended, wait for internal resume */ 2670 mutex_unlock(&md->suspend_lock); 2671 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2672 if (r) 2673 return r; 2674 goto retry; 2675 } 2676 2677 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2678 if (!map || !dm_table_get_size(map)) 2679 goto out; 2680 2681 r = __dm_resume(md, map); 2682 if (r) 2683 goto out; 2684 2685 clear_bit(DMF_SUSPENDED, &md->flags); 2686 out: 2687 mutex_unlock(&md->suspend_lock); 2688 2689 return r; 2690 } 2691 2692 /* 2693 * Internal suspend/resume works like userspace-driven suspend. It waits 2694 * until all bios finish and prevents issuing new bios to the target drivers. 2695 * It may be used only from the kernel. 2696 */ 2697 2698 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2699 { 2700 struct dm_table *map = NULL; 2701 2702 lockdep_assert_held(&md->suspend_lock); 2703 2704 if (md->internal_suspend_count++) 2705 return; /* nested internal suspend */ 2706 2707 if (dm_suspended_md(md)) { 2708 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2709 return; /* nest suspend */ 2710 } 2711 2712 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2713 2714 /* 2715 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2716 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2717 * would require changing .presuspend to return an error -- avoid this 2718 * until there is a need for more elaborate variants of internal suspend. 2719 */ 2720 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2721 DMF_SUSPENDED_INTERNALLY); 2722 2723 dm_table_postsuspend_targets(map); 2724 } 2725 2726 static void __dm_internal_resume(struct mapped_device *md) 2727 { 2728 BUG_ON(!md->internal_suspend_count); 2729 2730 if (--md->internal_suspend_count) 2731 return; /* resume from nested internal suspend */ 2732 2733 if (dm_suspended_md(md)) 2734 goto done; /* resume from nested suspend */ 2735 2736 /* 2737 * NOTE: existing callers don't need to call dm_table_resume_targets 2738 * (which may fail -- so best to avoid it for now by passing NULL map) 2739 */ 2740 (void) __dm_resume(md, NULL); 2741 2742 done: 2743 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2744 smp_mb__after_atomic(); 2745 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2746 } 2747 2748 void dm_internal_suspend_noflush(struct mapped_device *md) 2749 { 2750 mutex_lock(&md->suspend_lock); 2751 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2752 mutex_unlock(&md->suspend_lock); 2753 } 2754 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2755 2756 void dm_internal_resume(struct mapped_device *md) 2757 { 2758 mutex_lock(&md->suspend_lock); 2759 __dm_internal_resume(md); 2760 mutex_unlock(&md->suspend_lock); 2761 } 2762 EXPORT_SYMBOL_GPL(dm_internal_resume); 2763 2764 /* 2765 * Fast variants of internal suspend/resume hold md->suspend_lock, 2766 * which prevents interaction with userspace-driven suspend. 
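 *
 * They are intended to be used as a strictly paired bracket around a
 * short critical section, e.g. (illustrative sketch):
 *
 *	dm_internal_suspend_fast(md);	// takes md->suspend_lock
 *	// ... touch state that must not race with in-flight I/O ...
 *	dm_internal_resume_fast(md);	// drops md->suspend_lock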
2767 */ 2768 2769 void dm_internal_suspend_fast(struct mapped_device *md) 2770 { 2771 mutex_lock(&md->suspend_lock); 2772 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2773 return; 2774 2775 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2776 synchronize_srcu(&md->io_barrier); 2777 flush_workqueue(md->wq); 2778 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2779 } 2780 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2781 2782 void dm_internal_resume_fast(struct mapped_device *md) 2783 { 2784 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2785 goto done; 2786 2787 dm_queue_flush(md); 2788 2789 done: 2790 mutex_unlock(&md->suspend_lock); 2791 } 2792 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2793 2794 /*----------------------------------------------------------------- 2795 * Event notification. 2796 *---------------------------------------------------------------*/ 2797 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 2798 unsigned cookie) 2799 { 2800 char udev_cookie[DM_COOKIE_LENGTH]; 2801 char *envp[] = { udev_cookie, NULL }; 2802 2803 if (!cookie) 2804 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 2805 else { 2806 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 2807 DM_COOKIE_ENV_VAR_NAME, cookie); 2808 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 2809 action, envp); 2810 } 2811 } 2812 2813 uint32_t dm_next_uevent_seq(struct mapped_device *md) 2814 { 2815 return atomic_add_return(1, &md->uevent_seq); 2816 } 2817 2818 uint32_t dm_get_event_nr(struct mapped_device *md) 2819 { 2820 return atomic_read(&md->event_nr); 2821 } 2822 2823 int dm_wait_event(struct mapped_device *md, int event_nr) 2824 { 2825 return wait_event_interruptible(md->eventq, 2826 (event_nr != atomic_read(&md->event_nr))); 2827 } 2828 2829 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 2830 { 2831 unsigned long flags; 2832 2833 spin_lock_irqsave(&md->uevent_lock, flags); 2834 list_add(elist, &md->uevent_list); 2835 spin_unlock_irqrestore(&md->uevent_lock, flags); 2836 } 2837 2838 /* 2839 * The gendisk is only valid as long as you have a reference 2840 * count on 'md'. 
2841 */ 2842 struct gendisk *dm_disk(struct mapped_device *md) 2843 { 2844 return md->disk; 2845 } 2846 EXPORT_SYMBOL_GPL(dm_disk); 2847 2848 struct kobject *dm_kobject(struct mapped_device *md) 2849 { 2850 return &md->kobj_holder.kobj; 2851 } 2852 2853 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2854 { 2855 struct mapped_device *md; 2856 2857 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2858 2859 spin_lock(&_minor_lock); 2860 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2861 md = NULL; 2862 goto out; 2863 } 2864 dm_get(md); 2865 out: 2866 spin_unlock(&_minor_lock); 2867 2868 return md; 2869 } 2870 2871 int dm_suspended_md(struct mapped_device *md) 2872 { 2873 return test_bit(DMF_SUSPENDED, &md->flags); 2874 } 2875 2876 int dm_suspended_internally_md(struct mapped_device *md) 2877 { 2878 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2879 } 2880 2881 int dm_test_deferred_remove_flag(struct mapped_device *md) 2882 { 2883 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 2884 } 2885 2886 int dm_suspended(struct dm_target *ti) 2887 { 2888 return dm_suspended_md(dm_table_get_md(ti->table)); 2889 } 2890 EXPORT_SYMBOL_GPL(dm_suspended); 2891 2892 int dm_noflush_suspending(struct dm_target *ti) 2893 { 2894 return __noflush_suspending(dm_table_get_md(ti->table)); 2895 } 2896 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 2897 2898 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 2899 unsigned integrity, unsigned per_io_data_size, 2900 unsigned min_pool_size) 2901 { 2902 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 2903 unsigned int pool_size = 0; 2904 unsigned int front_pad, io_front_pad; 2905 2906 if (!pools) 2907 return NULL; 2908 2909 switch (type) { 2910 case DM_TYPE_BIO_BASED: 2911 case DM_TYPE_DAX_BIO_BASED: 2912 case DM_TYPE_NVME_BIO_BASED: 2913 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 2914 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2915 io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); 2916 pools->io_bs = bioset_create(pool_size, io_front_pad, 0); 2917 if (!pools->io_bs) 2918 goto out; 2919 if (integrity && bioset_integrity_create(pools->io_bs, pool_size)) 2920 goto out; 2921 break; 2922 case DM_TYPE_REQUEST_BASED: 2923 case DM_TYPE_MQ_REQUEST_BASED: 2924 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 2925 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2926 /* per_io_data_size is used for blk-mq pdu at queue allocation */ 2927 break; 2928 default: 2929 BUG(); 2930 } 2931 2932 pools->bs = bioset_create(pool_size, front_pad, 0); 2933 if (!pools->bs) 2934 goto out; 2935 2936 if (integrity && bioset_integrity_create(pools->bs, pool_size)) 2937 goto out; 2938 2939 return pools; 2940 2941 out: 2942 dm_free_md_mempools(pools); 2943 2944 return NULL; 2945 } 2946 2947 void dm_free_md_mempools(struct dm_md_mempools *pools) 2948 { 2949 if (!pools) 2950 return; 2951 2952 if (pools->bs) 2953 bioset_free(pools->bs); 2954 if (pools->io_bs) 2955 bioset_free(pools->io_bs); 2956 2957 kfree(pools); 2958 } 2959 2960 struct dm_pr { 2961 u64 old_key; 2962 u64 new_key; 2963 u32 flags; 2964 bool fail_early; 2965 }; 2966 2967 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 2968 void *data) 2969 { 2970 struct mapped_device *md = bdev->bd_disk->private_data; 2971 struct 
dm_table *table; 2972 struct dm_target *ti; 2973 int ret = -ENOTTY, srcu_idx; 2974 2975 table = dm_get_live_table(md, &srcu_idx); 2976 if (!table || !dm_table_get_size(table)) 2977 goto out; 2978 2979 /* We only support devices that have a single target */ 2980 if (dm_table_get_num_targets(table) != 1) 2981 goto out; 2982 ti = dm_table_get_target(table, 0); 2983 2984 ret = -EINVAL; 2985 if (!ti->type->iterate_devices) 2986 goto out; 2987 2988 ret = ti->type->iterate_devices(ti, fn, data); 2989 out: 2990 dm_put_live_table(md, srcu_idx); 2991 return ret; 2992 } 2993 2994 /* 2995 * For register / unregister we need to manually call out to every path. 2996 */ 2997 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 2998 sector_t start, sector_t len, void *data) 2999 { 3000 struct dm_pr *pr = data; 3001 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3002 3003 if (!ops || !ops->pr_register) 3004 return -EOPNOTSUPP; 3005 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 3006 } 3007 3008 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 3009 u32 flags) 3010 { 3011 struct dm_pr pr = { 3012 .old_key = old_key, 3013 .new_key = new_key, 3014 .flags = flags, 3015 .fail_early = true, 3016 }; 3017 int ret; 3018 3019 ret = dm_call_pr(bdev, __dm_pr_register, &pr); 3020 if (ret && new_key) { 3021 /* unregister all paths if we failed to register any path */ 3022 pr.old_key = new_key; 3023 pr.new_key = 0; 3024 pr.flags = 0; 3025 pr.fail_early = false; 3026 dm_call_pr(bdev, __dm_pr_register, &pr); 3027 } 3028 3029 return ret; 3030 } 3031 3032 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 3033 u32 flags) 3034 { 3035 struct mapped_device *md = bdev->bd_disk->private_data; 3036 const struct pr_ops *ops; 3037 int r, srcu_idx; 3038 3039 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3040 if (r < 0) 3041 goto out; 3042 3043 ops = bdev->bd_disk->fops->pr_ops; 3044 if (ops && ops->pr_reserve) 3045 r = ops->pr_reserve(bdev, key, type, flags); 3046 else 3047 r = -EOPNOTSUPP; 3048 out: 3049 dm_unprepare_ioctl(md, srcu_idx); 3050 return r; 3051 } 3052 3053 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 3054 { 3055 struct mapped_device *md = bdev->bd_disk->private_data; 3056 const struct pr_ops *ops; 3057 int r, srcu_idx; 3058 3059 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3060 if (r < 0) 3061 goto out; 3062 3063 ops = bdev->bd_disk->fops->pr_ops; 3064 if (ops && ops->pr_release) 3065 r = ops->pr_release(bdev, key, type); 3066 else 3067 r = -EOPNOTSUPP; 3068 out: 3069 dm_unprepare_ioctl(md, srcu_idx); 3070 return r; 3071 } 3072 3073 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 3074 enum pr_type type, bool abort) 3075 { 3076 struct mapped_device *md = bdev->bd_disk->private_data; 3077 const struct pr_ops *ops; 3078 int r, srcu_idx; 3079 3080 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3081 if (r < 0) 3082 goto out; 3083 3084 ops = bdev->bd_disk->fops->pr_ops; 3085 if (ops && ops->pr_preempt) 3086 r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 3087 else 3088 r = -EOPNOTSUPP; 3089 out: 3090 dm_unprepare_ioctl(md, srcu_idx); 3091 return r; 3092 } 3093 3094 static int dm_pr_clear(struct block_device *bdev, u64 key) 3095 { 3096 struct mapped_device *md = bdev->bd_disk->private_data; 3097 const struct pr_ops *ops; 3098 int r, srcu_idx; 3099 3100 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3101 if (r < 0) 3102 goto out; 3103 3104 ops = 
bdev->bd_disk->fops->pr_ops; 3105 if (ops && ops->pr_clear) 3106 r = ops->pr_clear(bdev, key); 3107 else 3108 r = -EOPNOTSUPP; 3109 out: 3110 dm_unprepare_ioctl(md, srcu_idx); 3111 return r; 3112 } 3113 3114 static const struct pr_ops dm_pr_ops = { 3115 .pr_register = dm_pr_register, 3116 .pr_reserve = dm_pr_reserve, 3117 .pr_release = dm_pr_release, 3118 .pr_preempt = dm_pr_preempt, 3119 .pr_clear = dm_pr_clear, 3120 }; 3121 3122 static const struct block_device_operations dm_blk_dops = { 3123 .open = dm_blk_open, 3124 .release = dm_blk_close, 3125 .ioctl = dm_blk_ioctl, 3126 .getgeo = dm_blk_getgeo, 3127 .pr_ops = &dm_pr_ops, 3128 .owner = THIS_MODULE 3129 }; 3130 3131 static const struct dax_operations dm_dax_ops = { 3132 .direct_access = dm_dax_direct_access, 3133 .copy_from_iter = dm_dax_copy_from_iter, 3134 }; 3135 3136 /* 3137 * module hooks 3138 */ 3139 module_init(dm_init); 3140 module_exit(dm_exit); 3141 3142 module_param(major, uint, 0); 3143 MODULE_PARM_DESC(major, "The major number of the device mapper"); 3144 3145 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3146 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3147 3148 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3149 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3150 3151 MODULE_DESCRIPTION(DM_NAME " driver"); 3152 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3153 MODULE_LICENSE("GPL"); 3154
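
/*
 * The persistent-reservation hooks above are reached through the block
 * layer's generic PR ioctls.  A minimal user-space sketch (assumptions:
 * the mapped device node is /dev/dm-0 and error handling is omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pr.h>
 *
 *	int fd = open("/dev/dm-0", O_RDWR);
 *
 *	struct pr_registration reg = { .new_key = 0xabcd, .flags = 0 };
 *	ioctl(fd, IOC_PR_REGISTER, &reg);	// routed to dm_pr_register()
 *
 *	struct pr_reservation rsv = { .key = 0xabcd, .type = PR_WRITE_EXCLUSIVE };
 *	ioctl(fd, IOC_PR_RESERVE, &rsv);	// routed to dm_pr_reserve()
 */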