1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 5 * 6 * This file is released under the GPL. 7 */ 8 9 #include "dm-core.h" 10 #include "dm-rq.h" 11 #include "dm-uevent.h" 12 #include "dm-ima.h" 13 14 #include <linux/init.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/sched/mm.h> 18 #include <linux/sched/signal.h> 19 #include <linux/blkpg.h> 20 #include <linux/bio.h> 21 #include <linux/mempool.h> 22 #include <linux/dax.h> 23 #include <linux/slab.h> 24 #include <linux/idr.h> 25 #include <linux/uio.h> 26 #include <linux/hdreg.h> 27 #include <linux/delay.h> 28 #include <linux/wait.h> 29 #include <linux/pr.h> 30 #include <linux/refcount.h> 31 #include <linux/part_stat.h> 32 #include <linux/blk-crypto.h> 33 #include <linux/blk-crypto-profile.h> 34 35 #define DM_MSG_PREFIX "core" 36 37 /* 38 * Cookies are numeric values sent with CHANGE and REMOVE 39 * uevents while resuming, removing or renaming the device. 40 */ 41 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 42 #define DM_COOKIE_LENGTH 24 43 44 /* 45 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying 46 * dm_io into one list, and reuse bio->bi_private as the list head. Before 47 * ending this fs bio, we will recover its ->bi_private. 48 */ 49 #define REQ_DM_POLL_LIST REQ_DRV 50 51 static const char *_name = DM_NAME; 52 53 static unsigned int major; 54 static unsigned int _major; 55 56 static DEFINE_IDR(_minor_idr); 57 58 static DEFINE_SPINLOCK(_minor_lock); 59 60 static void do_deferred_remove(struct work_struct *w); 61 62 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 63 64 static struct workqueue_struct *deferred_remove_workqueue; 65 66 atomic_t dm_global_event_nr = ATOMIC_INIT(0); 67 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); 68 69 void dm_issue_global_event(void) 70 { 71 atomic_inc(&dm_global_event_nr); 72 wake_up(&dm_global_eventq); 73 } 74 75 DEFINE_STATIC_KEY_FALSE(stats_enabled); 76 DEFINE_STATIC_KEY_FALSE(swap_bios_enabled); 77 DEFINE_STATIC_KEY_FALSE(zoned_enabled); 78 79 /* 80 * One of these is allocated (on-stack) per original bio. 
81 */ 82 struct clone_info { 83 struct dm_table *map; 84 struct bio *bio; 85 struct dm_io *io; 86 sector_t sector; 87 unsigned int sector_count; 88 bool is_abnormal_io:1; 89 bool submit_as_polled:1; 90 }; 91 92 static inline struct dm_target_io *clone_to_tio(struct bio *clone) 93 { 94 return container_of(clone, struct dm_target_io, clone); 95 } 96 97 void *dm_per_bio_data(struct bio *bio, size_t data_size) 98 { 99 if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO)) 100 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; 101 return (char *)bio - DM_IO_BIO_OFFSET - data_size; 102 } 103 EXPORT_SYMBOL_GPL(dm_per_bio_data); 104 105 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) 106 { 107 struct dm_io *io = (struct dm_io *)((char *)data + data_size); 108 109 if (io->magic == DM_IO_MAGIC) 110 return (struct bio *)((char *)io + DM_IO_BIO_OFFSET); 111 BUG_ON(io->magic != DM_TIO_MAGIC); 112 return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET); 113 } 114 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); 115 116 unsigned int dm_bio_get_target_bio_nr(const struct bio *bio) 117 { 118 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; 119 } 120 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); 121 122 #define MINOR_ALLOCED ((void *)-1) 123 124 #define DM_NUMA_NODE NUMA_NO_NODE 125 static int dm_numa_node = DM_NUMA_NODE; 126 127 #define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) 128 static int swap_bios = DEFAULT_SWAP_BIOS; 129 static int get_swap_bios(void) 130 { 131 int latch = READ_ONCE(swap_bios); 132 133 if (unlikely(latch <= 0)) 134 latch = DEFAULT_SWAP_BIOS; 135 return latch; 136 } 137 138 struct table_device { 139 struct list_head list; 140 refcount_t count; 141 struct dm_dev dm_dev; 142 }; 143 144 /* 145 * Bio-based DM's mempools' reserved IOs set by the user. 
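 *
 * Illustrative sketch (not part of this file): roughly speaking, per-bio data
 * requested via ti->per_io_data_size is carved out of the front-pad of the
 * clone bios allocated from these mempools, which is why dm_per_bio_data()
 * above is plain pointer arithmetic. A hypothetical target (all example_*
 * names are made up; ti->per_io_data_size, dm_per_bio_data() and
 * DM_ENDIO_DONE are real interfaces) might use it like this:
 *
 *	struct example_per_bio {
 *		sector_t orig_sector;
 *		unsigned long start_jiffies;
 *	};
 *
 *	// in .ctr: ti->per_io_data_size = sizeof(struct example_per_bio);
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_per_bio *pb = dm_per_bio_data(bio, sizeof(*pb));
 *
 *		pb->orig_sector = bio->bi_iter.bi_sector;
 *		pb->start_jiffies = jiffies;
 *
 *		bio_set_dev(bio, example_underlying_bdev(ti));
 *		bio->bi_iter.bi_sector += example_data_offset(ti);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  blk_status_t *error)
 *	{
 *		struct example_per_bio *pb = dm_per_bio_data(bio, sizeof(*pb));
 *
 *		example_account_latency(ti, pb->orig_sector,
 *					jiffies - pb->start_jiffies);
 *		return DM_ENDIO_DONE;
 *	}
 *
 * dm_bio_from_per_bio_data() is the inverse, for code paths (e.g. a work item
 * embedded in the per-bio data) that only have the data pointer.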
146 */ 147 #define RESERVED_BIO_BASED_IOS 16 148 static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 149 150 static int __dm_get_module_param_int(int *module_param, int min, int max) 151 { 152 int param = READ_ONCE(*module_param); 153 int modified_param = 0; 154 bool modified = true; 155 156 if (param < min) 157 modified_param = min; 158 else if (param > max) 159 modified_param = max; 160 else 161 modified = false; 162 163 if (modified) { 164 (void)cmpxchg(module_param, param, modified_param); 165 param = modified_param; 166 } 167 168 return param; 169 } 170 171 unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max) 172 { 173 unsigned int param = READ_ONCE(*module_param); 174 unsigned int modified_param = 0; 175 176 if (!param) 177 modified_param = def; 178 else if (param > max) 179 modified_param = max; 180 181 if (modified_param) { 182 (void)cmpxchg(module_param, param, modified_param); 183 param = modified_param; 184 } 185 186 return param; 187 } 188 189 unsigned int dm_get_reserved_bio_based_ios(void) 190 { 191 return __dm_get_module_param(&reserved_bio_based_ios, 192 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); 193 } 194 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 195 196 static unsigned int dm_get_numa_node(void) 197 { 198 return __dm_get_module_param_int(&dm_numa_node, 199 DM_NUMA_NODE, num_online_nodes() - 1); 200 } 201 202 static int __init local_init(void) 203 { 204 int r; 205 206 r = dm_uevent_init(); 207 if (r) 208 return r; 209 210 deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0); 211 if (!deferred_remove_workqueue) { 212 r = -ENOMEM; 213 goto out_uevent_exit; 214 } 215 216 _major = major; 217 r = register_blkdev(_major, _name); 218 if (r < 0) 219 goto out_free_workqueue; 220 221 if (!_major) 222 _major = r; 223 224 return 0; 225 226 out_free_workqueue: 227 destroy_workqueue(deferred_remove_workqueue); 228 out_uevent_exit: 229 dm_uevent_exit(); 230 231 return r; 232 } 233 234 static void local_exit(void) 235 { 236 destroy_workqueue(deferred_remove_workqueue); 237 238 unregister_blkdev(_major, _name); 239 dm_uevent_exit(); 240 241 _major = 0; 242 243 DMINFO("cleaned up"); 244 } 245 246 static int (*_inits[])(void) __initdata = { 247 local_init, 248 dm_target_init, 249 dm_linear_init, 250 dm_stripe_init, 251 dm_io_init, 252 dm_kcopyd_init, 253 dm_interface_init, 254 dm_statistics_init, 255 }; 256 257 static void (*_exits[])(void) = { 258 local_exit, 259 dm_target_exit, 260 dm_linear_exit, 261 dm_stripe_exit, 262 dm_io_exit, 263 dm_kcopyd_exit, 264 dm_interface_exit, 265 dm_statistics_exit, 266 }; 267 268 static int __init dm_init(void) 269 { 270 const int count = ARRAY_SIZE(_inits); 271 int r, i; 272 273 #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) 274 DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled." 275 " Duplicate IMA measurements will not be recorded in the IMA log."); 276 #endif 277 278 for (i = 0; i < count; i++) { 279 r = _inits[i](); 280 if (r) 281 goto bad; 282 } 283 284 return 0; 285 bad: 286 while (i--) 287 _exits[i](); 288 289 return r; 290 } 291 292 static void __exit dm_exit(void) 293 { 294 int i = ARRAY_SIZE(_exits); 295 296 while (i--) 297 _exits[i](); 298 299 /* 300 * Should be empty by this point. 
301 */ 302 idr_destroy(&_minor_idr); 303 } 304 305 /* 306 * Block device functions 307 */ 308 int dm_deleting_md(struct mapped_device *md) 309 { 310 return test_bit(DMF_DELETING, &md->flags); 311 } 312 313 static int dm_blk_open(struct gendisk *disk, blk_mode_t mode) 314 { 315 struct mapped_device *md; 316 317 spin_lock(&_minor_lock); 318 319 md = disk->private_data; 320 if (!md) 321 goto out; 322 323 if (test_bit(DMF_FREEING, &md->flags) || 324 dm_deleting_md(md)) { 325 md = NULL; 326 goto out; 327 } 328 329 dm_get(md); 330 atomic_inc(&md->open_count); 331 out: 332 spin_unlock(&_minor_lock); 333 334 return md ? 0 : -ENXIO; 335 } 336 337 static void dm_blk_close(struct gendisk *disk) 338 { 339 struct mapped_device *md; 340 341 spin_lock(&_minor_lock); 342 343 md = disk->private_data; 344 if (WARN_ON(!md)) 345 goto out; 346 347 if (atomic_dec_and_test(&md->open_count) && 348 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 349 queue_work(deferred_remove_workqueue, &deferred_remove_work); 350 351 dm_put(md); 352 out: 353 spin_unlock(&_minor_lock); 354 } 355 356 int dm_open_count(struct mapped_device *md) 357 { 358 return atomic_read(&md->open_count); 359 } 360 361 /* 362 * Guarantees nothing is using the device before it's deleted. 363 */ 364 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 365 { 366 int r = 0; 367 368 spin_lock(&_minor_lock); 369 370 if (dm_open_count(md)) { 371 r = -EBUSY; 372 if (mark_deferred) 373 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 374 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 375 r = -EEXIST; 376 else 377 set_bit(DMF_DELETING, &md->flags); 378 379 spin_unlock(&_minor_lock); 380 381 return r; 382 } 383 384 int dm_cancel_deferred_remove(struct mapped_device *md) 385 { 386 int r = 0; 387 388 spin_lock(&_minor_lock); 389 390 if (test_bit(DMF_DELETING, &md->flags)) 391 r = -EBUSY; 392 else 393 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 394 395 spin_unlock(&_minor_lock); 396 397 return r; 398 } 399 400 static void do_deferred_remove(struct work_struct *w) 401 { 402 dm_deferred_remove(); 403 } 404 405 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 406 { 407 struct mapped_device *md = bdev->bd_disk->private_data; 408 409 return dm_get_geometry(md, geo); 410 } 411 412 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, 413 struct block_device **bdev) 414 { 415 struct dm_target *ti; 416 struct dm_table *map; 417 int r; 418 419 retry: 420 r = -ENOTTY; 421 map = dm_get_live_table(md, srcu_idx); 422 if (!map || !dm_table_get_size(map)) 423 return r; 424 425 /* We only support devices that have a single target */ 426 if (map->num_targets != 1) 427 return r; 428 429 ti = dm_table_get_target(map, 0); 430 if (!ti->type->prepare_ioctl) 431 return r; 432 433 if (dm_suspended_md(md)) 434 return -EAGAIN; 435 436 r = ti->type->prepare_ioctl(ti, bdev); 437 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 438 dm_put_live_table(md, *srcu_idx); 439 fsleep(10000); 440 goto retry; 441 } 442 443 return r; 444 } 445 446 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) 447 { 448 dm_put_live_table(md, srcu_idx); 449 } 450 451 static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode, 452 unsigned int cmd, unsigned long arg) 453 { 454 struct mapped_device *md = bdev->bd_disk->private_data; 455 int r, srcu_idx; 456 457 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 458 if (r < 0) 459 goto out; 460 461 if (r > 0) { 462 /* 463 * Target determined 
this ioctl is being issued against a 464 * subset of the parent bdev; require extra privileges. 465 */ 466 if (!capable(CAP_SYS_RAWIO)) { 467 DMDEBUG_LIMIT( 468 "%s: sending ioctl %x to DM device without required privilege.", 469 current->comm, cmd); 470 r = -ENOIOCTLCMD; 471 goto out; 472 } 473 } 474 475 if (!bdev->bd_disk->fops->ioctl) 476 r = -ENOTTY; 477 else 478 r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); 479 out: 480 dm_unprepare_ioctl(md, srcu_idx); 481 return r; 482 } 483 484 u64 dm_start_time_ns_from_clone(struct bio *bio) 485 { 486 return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time); 487 } 488 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); 489 490 static inline bool bio_is_flush_with_data(struct bio *bio) 491 { 492 return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size); 493 } 494 495 static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio) 496 { 497 /* 498 * If REQ_PREFLUSH set, don't account payload, it will be 499 * submitted (and accounted) after this flush completes. 500 */ 501 if (bio_is_flush_with_data(bio)) 502 return 0; 503 if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT))) 504 return io->sectors; 505 return bio_sectors(bio); 506 } 507 508 static void dm_io_acct(struct dm_io *io, bool end) 509 { 510 struct bio *bio = io->orig_bio; 511 512 if (dm_io_flagged(io, DM_IO_BLK_STAT)) { 513 if (!end) 514 bdev_start_io_acct(bio->bi_bdev, bio_op(bio), 515 io->start_time); 516 else 517 bdev_end_io_acct(bio->bi_bdev, bio_op(bio), 518 dm_io_sectors(io, bio), 519 io->start_time); 520 } 521 522 if (static_branch_unlikely(&stats_enabled) && 523 unlikely(dm_stats_used(&io->md->stats))) { 524 sector_t sector; 525 526 if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT))) 527 sector = bio_end_sector(bio) - io->sector_offset; 528 else 529 sector = bio->bi_iter.bi_sector; 530 531 dm_stats_account_io(&io->md->stats, bio_data_dir(bio), 532 sector, dm_io_sectors(io, bio), 533 end, io->start_time, &io->stats_aux); 534 } 535 } 536 537 static void __dm_start_io_acct(struct dm_io *io) 538 { 539 dm_io_acct(io, false); 540 } 541 542 static void dm_start_io_acct(struct dm_io *io, struct bio *clone) 543 { 544 /* 545 * Ensure IO accounting is only ever started once. 546 */ 547 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) 548 return; 549 550 /* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. 
*/ 551 if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) { 552 dm_io_set_flag(io, DM_IO_ACCOUNTED); 553 } else { 554 unsigned long flags; 555 /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */ 556 spin_lock_irqsave(&io->lock, flags); 557 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) { 558 spin_unlock_irqrestore(&io->lock, flags); 559 return; 560 } 561 dm_io_set_flag(io, DM_IO_ACCOUNTED); 562 spin_unlock_irqrestore(&io->lock, flags); 563 } 564 565 __dm_start_io_acct(io); 566 } 567 568 static void dm_end_io_acct(struct dm_io *io) 569 { 570 dm_io_acct(io, true); 571 } 572 573 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask) 574 { 575 struct dm_io *io; 576 struct dm_target_io *tio; 577 struct bio *clone; 578 579 clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); 580 if (unlikely(!clone)) 581 return NULL; 582 tio = clone_to_tio(clone); 583 tio->flags = 0; 584 dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); 585 tio->io = NULL; 586 587 io = container_of(tio, struct dm_io, tio); 588 io->magic = DM_IO_MAGIC; 589 io->status = BLK_STS_OK; 590 591 /* one ref is for submission, the other is for completion */ 592 atomic_set(&io->io_count, 2); 593 this_cpu_inc(*md->pending_io); 594 io->orig_bio = bio; 595 io->md = md; 596 spin_lock_init(&io->lock); 597 io->start_time = jiffies; 598 io->flags = 0; 599 if (blk_queue_io_stat(md->queue)) 600 dm_io_set_flag(io, DM_IO_BLK_STAT); 601 602 if (static_branch_unlikely(&stats_enabled) && 603 unlikely(dm_stats_used(&md->stats))) 604 dm_stats_record_start(&md->stats, &io->stats_aux); 605 606 return io; 607 } 608 609 static void free_io(struct dm_io *io) 610 { 611 bio_put(&io->tio.clone); 612 } 613 614 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, 615 unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask) 616 { 617 struct mapped_device *md = ci->io->md; 618 struct dm_target_io *tio; 619 struct bio *clone; 620 621 if (!ci->io->tio.io) { 622 /* the dm_target_io embedded in ci->io is available */ 623 tio = &ci->io->tio; 624 /* alloc_io() already initialized embedded clone */ 625 clone = &tio->clone; 626 } else { 627 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, 628 &md->mempools->bs); 629 if (!clone) 630 return NULL; 631 632 /* REQ_DM_POLL_LIST shouldn't be inherited */ 633 clone->bi_opf &= ~REQ_DM_POLL_LIST; 634 635 tio = clone_to_tio(clone); 636 tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */ 637 } 638 639 tio->magic = DM_TIO_MAGIC; 640 tio->io = ci->io; 641 tio->ti = ti; 642 tio->target_bio_nr = target_bio_nr; 643 tio->len_ptr = len; 644 tio->old_sector = 0; 645 646 /* Set default bdev, but target must bio_set_dev() before issuing IO */ 647 clone->bi_bdev = md->disk->part0; 648 if (unlikely(ti->needs_bio_set_dev)) 649 bio_set_dev(clone, md->disk->part0); 650 651 if (len) { 652 clone->bi_iter.bi_size = to_bytes(*len); 653 if (bio_integrity(clone)) 654 bio_integrity_trim(clone); 655 } 656 657 return clone; 658 } 659 660 static void free_tio(struct bio *clone) 661 { 662 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) 663 return; 664 bio_put(clone); 665 } 666 667 /* 668 * Add the bio to the list of deferred io. 
669 */ 670 static void queue_io(struct mapped_device *md, struct bio *bio) 671 { 672 unsigned long flags; 673 674 spin_lock_irqsave(&md->deferred_lock, flags); 675 bio_list_add(&md->deferred, bio); 676 spin_unlock_irqrestore(&md->deferred_lock, flags); 677 queue_work(md->wq, &md->work); 678 } 679 680 /* 681 * Everyone (including functions in this file), should use this 682 * function to access the md->map field, and make sure they call 683 * dm_put_live_table() when finished. 684 */ 685 struct dm_table *dm_get_live_table(struct mapped_device *md, 686 int *srcu_idx) __acquires(md->io_barrier) 687 { 688 *srcu_idx = srcu_read_lock(&md->io_barrier); 689 690 return srcu_dereference(md->map, &md->io_barrier); 691 } 692 693 void dm_put_live_table(struct mapped_device *md, 694 int srcu_idx) __releases(md->io_barrier) 695 { 696 srcu_read_unlock(&md->io_barrier, srcu_idx); 697 } 698 699 void dm_sync_table(struct mapped_device *md) 700 { 701 synchronize_srcu(&md->io_barrier); 702 synchronize_rcu_expedited(); 703 } 704 705 /* 706 * A fast alternative to dm_get_live_table/dm_put_live_table. 707 * The caller must not block between these two functions. 708 */ 709 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 710 { 711 rcu_read_lock(); 712 return rcu_dereference(md->map); 713 } 714 715 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 716 { 717 rcu_read_unlock(); 718 } 719 720 static char *_dm_claim_ptr = "I belong to device-mapper"; 721 722 /* 723 * Open a table device so we can use it as a map destination. 724 */ 725 static struct table_device *open_table_device(struct mapped_device *md, 726 dev_t dev, blk_mode_t mode) 727 { 728 struct table_device *td; 729 struct file *bdev_file; 730 struct block_device *bdev; 731 u64 part_off; 732 int r; 733 734 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 735 if (!td) 736 return ERR_PTR(-ENOMEM); 737 refcount_set(&td->count, 1); 738 739 bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL); 740 if (IS_ERR(bdev_file)) { 741 r = PTR_ERR(bdev_file); 742 goto out_free_td; 743 } 744 745 bdev = file_bdev(bdev_file); 746 747 /* 748 * We can be called before the dm disk is added. In that case we can't 749 * register the holder relation here. It will be done once add_disk was 750 * called. 751 */ 752 if (md->disk->slave_dir) { 753 r = bd_link_disk_holder(bdev, md->disk); 754 if (r) 755 goto out_blkdev_put; 756 } 757 758 td->dm_dev.mode = mode; 759 td->dm_dev.bdev = bdev; 760 td->dm_dev.bdev_file = bdev_file; 761 td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, 762 NULL, NULL); 763 format_dev_t(td->dm_dev.name, dev); 764 list_add(&td->list, &md->table_devices); 765 return td; 766 767 out_blkdev_put: 768 __fput_sync(bdev_file); 769 out_free_td: 770 kfree(td); 771 return ERR_PTR(r); 772 } 773 774 /* 775 * Close a table device that we've been using. 
776 */ 777 static void close_table_device(struct table_device *td, struct mapped_device *md) 778 { 779 if (md->disk->slave_dir) 780 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); 781 782 /* Leverage async fput() if DMF_DEFERRED_REMOVE set */ 783 if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 784 fput(td->dm_dev.bdev_file); 785 else 786 __fput_sync(td->dm_dev.bdev_file); 787 788 put_dax(td->dm_dev.dax_dev); 789 list_del(&td->list); 790 kfree(td); 791 } 792 793 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 794 blk_mode_t mode) 795 { 796 struct table_device *td; 797 798 list_for_each_entry(td, l, list) 799 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 800 return td; 801 802 return NULL; 803 } 804 805 int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, 806 struct dm_dev **result) 807 { 808 struct table_device *td; 809 810 mutex_lock(&md->table_devices_lock); 811 td = find_table_device(&md->table_devices, dev, mode); 812 if (!td) { 813 td = open_table_device(md, dev, mode); 814 if (IS_ERR(td)) { 815 mutex_unlock(&md->table_devices_lock); 816 return PTR_ERR(td); 817 } 818 } else { 819 refcount_inc(&td->count); 820 } 821 mutex_unlock(&md->table_devices_lock); 822 823 *result = &td->dm_dev; 824 return 0; 825 } 826 827 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 828 { 829 struct table_device *td = container_of(d, struct table_device, dm_dev); 830 831 mutex_lock(&md->table_devices_lock); 832 if (refcount_dec_and_test(&td->count)) 833 close_table_device(td, md); 834 mutex_unlock(&md->table_devices_lock); 835 } 836 837 /* 838 * Get the geometry associated with a dm device 839 */ 840 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 841 { 842 *geo = md->geometry; 843 844 return 0; 845 } 846 847 /* 848 * Set the geometry of a device. 849 */ 850 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 851 { 852 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 853 854 if (geo->start > sz) { 855 DMERR("Start sector is beyond the geometry limits."); 856 return -EINVAL; 857 } 858 859 md->geometry = *geo; 860 861 return 0; 862 } 863 864 static int __noflush_suspending(struct mapped_device *md) 865 { 866 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 867 } 868 869 static void dm_requeue_add_io(struct dm_io *io, bool first_stage) 870 { 871 struct mapped_device *md = io->md; 872 873 if (first_stage) { 874 struct dm_io *next = md->requeue_list; 875 876 md->requeue_list = io; 877 io->next = next; 878 } else { 879 bio_list_add_head(&md->deferred, io->orig_bio); 880 } 881 } 882 883 static void dm_kick_requeue(struct mapped_device *md, bool first_stage) 884 { 885 if (first_stage) 886 queue_work(md->wq, &md->requeue_work); 887 else 888 queue_work(md->wq, &md->work); 889 } 890 891 /* 892 * Return true if the dm_io's original bio is requeued. 893 * io->status is updated with error if requeue disallowed. 
894 */ 895 static bool dm_handle_requeue(struct dm_io *io, bool first_stage) 896 { 897 struct bio *bio = io->orig_bio; 898 bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE); 899 bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) && 900 (bio->bi_opf & REQ_POLLED)); 901 struct mapped_device *md = io->md; 902 bool requeued = false; 903 904 if (handle_requeue || handle_polled_eagain) { 905 unsigned long flags; 906 907 if (bio->bi_opf & REQ_POLLED) { 908 /* 909 * Upper layer won't help us poll split bio 910 * (io->orig_bio may only reflect a subset of the 911 * pre-split original) so clear REQ_POLLED. 912 */ 913 bio_clear_polled(bio); 914 } 915 916 /* 917 * Target requested pushing back the I/O or 918 * polled IO hit BLK_STS_AGAIN. 919 */ 920 spin_lock_irqsave(&md->deferred_lock, flags); 921 if ((__noflush_suspending(md) && 922 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) || 923 handle_polled_eagain || first_stage) { 924 dm_requeue_add_io(io, first_stage); 925 requeued = true; 926 } else { 927 /* 928 * noflush suspend was interrupted or this is 929 * a write to a zoned target. 930 */ 931 io->status = BLK_STS_IOERR; 932 } 933 spin_unlock_irqrestore(&md->deferred_lock, flags); 934 } 935 936 if (requeued) 937 dm_kick_requeue(md, first_stage); 938 939 return requeued; 940 } 941 942 static void __dm_io_complete(struct dm_io *io, bool first_stage) 943 { 944 struct bio *bio = io->orig_bio; 945 struct mapped_device *md = io->md; 946 blk_status_t io_error; 947 bool requeued; 948 949 requeued = dm_handle_requeue(io, first_stage); 950 if (requeued && first_stage) 951 return; 952 953 io_error = io->status; 954 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) 955 dm_end_io_acct(io); 956 else if (!io_error) { 957 /* 958 * Must handle target that DM_MAPIO_SUBMITTED only to 959 * then bio_endio() rather than dm_submit_bio_remap() 960 */ 961 __dm_start_io_acct(io); 962 dm_end_io_acct(io); 963 } 964 free_io(io); 965 smp_wmb(); 966 this_cpu_dec(*md->pending_io); 967 968 /* nudge anyone waiting on suspend queue */ 969 if (unlikely(wq_has_sleeper(&md->wait))) 970 wake_up(&md->wait); 971 972 /* Return early if the original bio was requeued */ 973 if (requeued) 974 return; 975 976 if (bio_is_flush_with_data(bio)) { 977 /* 978 * Preflush done for flush with data, reissue 979 * without REQ_PREFLUSH. 980 */ 981 bio->bi_opf &= ~REQ_PREFLUSH; 982 queue_io(md, bio); 983 } else { 984 /* done with normal IO or empty flush */ 985 if (io_error) 986 bio->bi_status = io_error; 987 bio_endio(bio); 988 } 989 } 990 991 static void dm_wq_requeue_work(struct work_struct *work) 992 { 993 struct mapped_device *md = container_of(work, struct mapped_device, 994 requeue_work); 995 unsigned long flags; 996 struct dm_io *io; 997 998 /* reuse deferred lock to simplify dm_handle_requeue */ 999 spin_lock_irqsave(&md->deferred_lock, flags); 1000 io = md->requeue_list; 1001 md->requeue_list = NULL; 1002 spin_unlock_irqrestore(&md->deferred_lock, flags); 1003 1004 while (io) { 1005 struct dm_io *next = io->next; 1006 1007 dm_io_rewind(io, &md->disk->bio_split); 1008 1009 io->next = NULL; 1010 __dm_io_complete(io, false); 1011 io = next; 1012 cond_resched(); 1013 } 1014 } 1015 1016 /* 1017 * Two staged requeue: 1018 * 1019 * 1) io->orig_bio points to the real original bio, and the part mapped to 1020 * this io must be requeued, instead of other parts of the original bio. 1021 * 1022 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io. 
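 *
 * Illustrative walkthrough (a sketch, not exhaustive): assume a split dm_io
 * whose target returned DM_ENDIO_REQUEUE while a noflush suspend is in
 * progress. The path through the helpers below is roughly:
 *
 *	clone_endio()                      error = BLK_STS_DM_REQUEUE
 *	  dm_io_dec_pending()              last reference drops
 *	    dm_io_complete()               DM_IO_WAS_SPLIT, so first stage
 *	      dm_handle_requeue(io, true)  io linked on md->requeue_list,
 *	                                   md->requeue_work kicked
 *	dm_wq_requeue_work()
 *	  dm_io_rewind()                   rebuild the pre-split orig_bio
 *	  __dm_io_complete(io, false)      second stage: orig_bio goes onto
 *	                                   md->deferred for later resubmission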
1023 */ 1024 static void dm_io_complete(struct dm_io *io) 1025 { 1026 bool first_requeue; 1027 1028 /* 1029 * Only dm_io that has been split needs two stage requeue, otherwise 1030 * we may run into long bio clone chain during suspend and OOM could 1031 * be triggered. 1032 * 1033 * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they 1034 * also aren't handled via the first stage requeue. 1035 */ 1036 if (dm_io_flagged(io, DM_IO_WAS_SPLIT)) 1037 first_requeue = true; 1038 else 1039 first_requeue = false; 1040 1041 __dm_io_complete(io, first_requeue); 1042 } 1043 1044 /* 1045 * Decrements the number of outstanding ios that a bio has been 1046 * cloned into, completing the original io if necc. 1047 */ 1048 static inline void __dm_io_dec_pending(struct dm_io *io) 1049 { 1050 if (atomic_dec_and_test(&io->io_count)) 1051 dm_io_complete(io); 1052 } 1053 1054 static void dm_io_set_error(struct dm_io *io, blk_status_t error) 1055 { 1056 unsigned long flags; 1057 1058 /* Push-back supersedes any I/O errors */ 1059 spin_lock_irqsave(&io->lock, flags); 1060 if (!(io->status == BLK_STS_DM_REQUEUE && 1061 __noflush_suspending(io->md))) { 1062 io->status = error; 1063 } 1064 spin_unlock_irqrestore(&io->lock, flags); 1065 } 1066 1067 static void dm_io_dec_pending(struct dm_io *io, blk_status_t error) 1068 { 1069 if (unlikely(error)) 1070 dm_io_set_error(io, error); 1071 1072 __dm_io_dec_pending(io); 1073 } 1074 1075 /* 1076 * The queue_limits are only valid as long as you have a reference 1077 * count on 'md'. But _not_ imposing verification to avoid atomic_read(), 1078 */ 1079 static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 1080 { 1081 return &md->queue->limits; 1082 } 1083 1084 void disable_discard(struct mapped_device *md) 1085 { 1086 struct queue_limits *limits = dm_get_queue_limits(md); 1087 1088 /* device doesn't really support DISCARD, disable it */ 1089 limits->max_hw_discard_sectors = 0; 1090 } 1091 1092 void disable_write_zeroes(struct mapped_device *md) 1093 { 1094 struct queue_limits *limits = dm_get_queue_limits(md); 1095 1096 /* device doesn't really support WRITE ZEROES, disable it */ 1097 limits->max_write_zeroes_sectors = 0; 1098 } 1099 1100 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) 1101 { 1102 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); 1103 } 1104 1105 static void clone_endio(struct bio *bio) 1106 { 1107 blk_status_t error = bio->bi_status; 1108 struct dm_target_io *tio = clone_to_tio(bio); 1109 struct dm_target *ti = tio->ti; 1110 dm_endio_fn endio = ti->type->end_io; 1111 struct dm_io *io = tio->io; 1112 struct mapped_device *md = io->md; 1113 1114 if (unlikely(error == BLK_STS_TARGET)) { 1115 if (bio_op(bio) == REQ_OP_DISCARD && 1116 !bdev_max_discard_sectors(bio->bi_bdev)) 1117 disable_discard(md); 1118 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 1119 !bdev_write_zeroes_sectors(bio->bi_bdev)) 1120 disable_write_zeroes(md); 1121 } 1122 1123 if (static_branch_unlikely(&zoned_enabled) && 1124 unlikely(bdev_is_zoned(bio->bi_bdev))) 1125 dm_zone_endio(io, bio); 1126 1127 if (endio) { 1128 int r = endio(ti, bio, &error); 1129 1130 switch (r) { 1131 case DM_ENDIO_REQUEUE: 1132 if (static_branch_unlikely(&zoned_enabled)) { 1133 /* 1134 * Requeuing writes to a sequential zone of a zoned 1135 * target will break the sequential write pattern: 1136 * fail such IO. 
1137 */ 1138 if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) 1139 error = BLK_STS_IOERR; 1140 else 1141 error = BLK_STS_DM_REQUEUE; 1142 } else 1143 error = BLK_STS_DM_REQUEUE; 1144 fallthrough; 1145 case DM_ENDIO_DONE: 1146 break; 1147 case DM_ENDIO_INCOMPLETE: 1148 /* The target will handle the io */ 1149 return; 1150 default: 1151 DMCRIT("unimplemented target endio return value: %d", r); 1152 BUG(); 1153 } 1154 } 1155 1156 if (static_branch_unlikely(&swap_bios_enabled) && 1157 unlikely(swap_bios_limit(ti, bio))) 1158 up(&md->swap_bios_semaphore); 1159 1160 free_tio(bio); 1161 dm_io_dec_pending(io, error); 1162 } 1163 1164 /* 1165 * Return maximum size of I/O possible at the supplied sector up to the current 1166 * target boundary. 1167 */ 1168 static inline sector_t max_io_len_target_boundary(struct dm_target *ti, 1169 sector_t target_offset) 1170 { 1171 return ti->len - target_offset; 1172 } 1173 1174 static sector_t __max_io_len(struct dm_target *ti, sector_t sector, 1175 unsigned int max_granularity, 1176 unsigned int max_sectors) 1177 { 1178 sector_t target_offset = dm_target_offset(ti, sector); 1179 sector_t len = max_io_len_target_boundary(ti, target_offset); 1180 1181 /* 1182 * Does the target need to split IO even further? 1183 * - varied (per target) IO splitting is a tenet of DM; this 1184 * explains why stacked chunk_sectors based splitting via 1185 * bio_split_to_limits() isn't possible here. 1186 */ 1187 if (!max_granularity) 1188 return len; 1189 return min_t(sector_t, len, 1190 min(max_sectors ? : queue_max_sectors(ti->table->md->queue), 1191 blk_boundary_sectors_left(target_offset, max_granularity))); 1192 } 1193 1194 static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) 1195 { 1196 return __max_io_len(ti, sector, ti->max_io_len, 0); 1197 } 1198 1199 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1200 { 1201 if (len > UINT_MAX) { 1202 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1203 (unsigned long long)len, UINT_MAX); 1204 ti->error = "Maximum size of target IO is too large"; 1205 return -EINVAL; 1206 } 1207 1208 ti->max_io_len = (uint32_t) len; 1209 1210 return 0; 1211 } 1212 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1213 1214 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, 1215 sector_t sector, int *srcu_idx) 1216 __acquires(md->io_barrier) 1217 { 1218 struct dm_table *map; 1219 struct dm_target *ti; 1220 1221 map = dm_get_live_table(md, srcu_idx); 1222 if (!map) 1223 return NULL; 1224 1225 ti = dm_table_find_target(map, sector); 1226 if (!ti) 1227 return NULL; 1228 1229 return ti; 1230 } 1231 1232 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 1233 long nr_pages, enum dax_access_mode mode, void **kaddr, 1234 pfn_t *pfn) 1235 { 1236 struct mapped_device *md = dax_get_private(dax_dev); 1237 sector_t sector = pgoff * PAGE_SECTORS; 1238 struct dm_target *ti; 1239 long len, ret = -EIO; 1240 int srcu_idx; 1241 1242 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1243 1244 if (!ti) 1245 goto out; 1246 if (!ti->type->direct_access) 1247 goto out; 1248 len = max_io_len(ti, sector) / PAGE_SECTORS; 1249 if (len < 1) 1250 goto out; 1251 nr_pages = min(len, nr_pages); 1252 ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn); 1253 1254 out: 1255 dm_put_live_table(md, srcu_idx); 1256 1257 return ret; 1258 } 1259 1260 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, 1261 size_t nr_pages) 1262 { 1263 struct 
mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int srcu_idx;
	long ret = 0;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti || !ti->type->dax_recovery_write)
		goto out;

	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
 * __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs DM that the target only wants to process an
 * additional n_sectors sectors of the bio, and that the rest of the data
 * should be sent in a subsequent bio.
 *
 * A diagram that explains the arithmetic:
 *	+--------------------+---------------+-------+
 *	|         1          |       2       |   3   |
 *	+--------------------+---------------+-------+
 *
 *	<-------------- *tio->len_ptr --------------->
 *	                     <----- bio_sectors ----->
 *	                     <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
{
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_io *io = tio->io;
	unsigned int bio_sectors = bio_sectors(bio);

	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bio_sectors > *tio->len_ptr);
	BUG_ON(n_sectors > bio_sectors);

	*tio->len_ptr -= bio_sectors - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;

	/*
	 * __split_and_process_bio() may have already saved mapped part
	 * for accounting but it is being reduced so update accordingly.
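	 *
	 * Illustrative usage sketch (not part of this file; every name below
	 * except dm_accept_partial_bio() is made up): a target whose ->map
	 * can only handle I/O within one "chunk" might trim the clone before
	 * remapping it:
	 *
	 *	static int example_map(struct dm_target *ti, struct bio *bio)
	 *	{
	 *		unsigned int remaining = example_sectors_to_chunk_end(ti, bio);
	 *
	 *		if (bio_sectors(bio) > remaining)
	 *			dm_accept_partial_bio(bio, remaining);
	 *
	 *		bio_set_dev(bio, example_underlying_bdev(ti));
	 *		bio->bi_iter.bi_sector = example_remap(ti, bio->bi_iter.bi_sector);
	 *		return DM_MAPIO_REMAPPED;
	 *	}
	 *
	 * DM core then resubmits the tail that was not accepted (region 3 in
	 * the diagram above) as a new bio, which passes through ->map again.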
1354 */ 1355 dm_io_set_flag(io, DM_IO_WAS_SPLIT); 1356 io->sectors = n_sectors; 1357 io->sector_offset = bio_sectors(io->orig_bio); 1358 } 1359 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1360 1361 /* 1362 * @clone: clone bio that DM core passed to target's .map function 1363 * @tgt_clone: clone of @clone bio that target needs submitted 1364 * 1365 * Targets should use this interface to submit bios they take 1366 * ownership of when returning DM_MAPIO_SUBMITTED. 1367 * 1368 * Target should also enable ti->accounts_remapped_io 1369 */ 1370 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) 1371 { 1372 struct dm_target_io *tio = clone_to_tio(clone); 1373 struct dm_io *io = tio->io; 1374 1375 /* establish bio that will get submitted */ 1376 if (!tgt_clone) 1377 tgt_clone = clone; 1378 1379 /* 1380 * Account io->origin_bio to DM dev on behalf of target 1381 * that took ownership of IO with DM_MAPIO_SUBMITTED. 1382 */ 1383 dm_start_io_acct(io, clone); 1384 1385 trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk), 1386 tio->old_sector); 1387 submit_bio_noacct(tgt_clone); 1388 } 1389 EXPORT_SYMBOL_GPL(dm_submit_bio_remap); 1390 1391 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) 1392 { 1393 mutex_lock(&md->swap_bios_lock); 1394 while (latch < md->swap_bios) { 1395 cond_resched(); 1396 down(&md->swap_bios_semaphore); 1397 md->swap_bios--; 1398 } 1399 while (latch > md->swap_bios) { 1400 cond_resched(); 1401 up(&md->swap_bios_semaphore); 1402 md->swap_bios++; 1403 } 1404 mutex_unlock(&md->swap_bios_lock); 1405 } 1406 1407 static void __map_bio(struct bio *clone) 1408 { 1409 struct dm_target_io *tio = clone_to_tio(clone); 1410 struct dm_target *ti = tio->ti; 1411 struct dm_io *io = tio->io; 1412 struct mapped_device *md = io->md; 1413 int r; 1414 1415 clone->bi_end_io = clone_endio; 1416 1417 /* 1418 * Map the clone. 1419 */ 1420 tio->old_sector = clone->bi_iter.bi_sector; 1421 1422 if (static_branch_unlikely(&swap_bios_enabled) && 1423 unlikely(swap_bios_limit(ti, clone))) { 1424 int latch = get_swap_bios(); 1425 1426 if (unlikely(latch != md->swap_bios)) 1427 __set_swap_bios_limit(md, latch); 1428 down(&md->swap_bios_semaphore); 1429 } 1430 1431 if (likely(ti->type->map == linear_map)) 1432 r = linear_map(ti, clone); 1433 else if (ti->type->map == stripe_map) 1434 r = stripe_map(ti, clone); 1435 else 1436 r = ti->type->map(ti, clone); 1437 1438 switch (r) { 1439 case DM_MAPIO_SUBMITTED: 1440 /* target has assumed ownership of this io */ 1441 if (!ti->accounts_remapped_io) 1442 dm_start_io_acct(io, clone); 1443 break; 1444 case DM_MAPIO_REMAPPED: 1445 dm_submit_bio_remap(clone, NULL); 1446 break; 1447 case DM_MAPIO_KILL: 1448 case DM_MAPIO_REQUEUE: 1449 if (static_branch_unlikely(&swap_bios_enabled) && 1450 unlikely(swap_bios_limit(ti, clone))) 1451 up(&md->swap_bios_semaphore); 1452 free_tio(clone); 1453 if (r == DM_MAPIO_KILL) 1454 dm_io_dec_pending(io, BLK_STS_IOERR); 1455 else 1456 dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); 1457 break; 1458 default: 1459 DMCRIT("unimplemented target map return value: %d", r); 1460 BUG(); 1461 } 1462 } 1463 1464 static void setup_split_accounting(struct clone_info *ci, unsigned int len) 1465 { 1466 struct dm_io *io = ci->io; 1467 1468 if (ci->sector_count > len) { 1469 /* 1470 * Split needed, save the mapped part for accounting. 1471 * NOTE: dm_accept_partial_bio() will update accordingly. 
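		 *
		 * Illustrative sketch (not part of this file), for
		 * dm_submit_bio_remap() documented above: a hypothetical target
		 * that defers the clone to a worker before remapping it (names
		 * other than dm_submit_bio_remap() and accounts_remapped_io are
		 * made up):
		 *
		 *	// in .ctr: ti->accounts_remapped_io = true;
		 *
		 *	static int example_map(struct dm_target *ti, struct bio *bio)
		 *	{
		 *		// stash @bio (the clone DM core passed in) and wake a worker
		 *		example_defer_bio(ti->private, bio);
		 *		return DM_MAPIO_SUBMITTED;
		 *	}
		 *
		 *	// later, from the worker, once the destination is known:
		 *	static void example_issue_bio(struct bio *bio,
		 *				      struct block_device *bdev,
		 *				      sector_t sector)
		 *	{
		 *		bio_set_dev(bio, bdev);
		 *		bio->bi_iter.bi_sector = sector;
		 *		dm_submit_bio_remap(bio, NULL);
		 *	}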
1472 */ 1473 dm_io_set_flag(io, DM_IO_WAS_SPLIT); 1474 io->sectors = len; 1475 io->sector_offset = bio_sectors(ci->bio); 1476 } 1477 } 1478 1479 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1480 struct dm_target *ti, unsigned int num_bios, 1481 unsigned *len, gfp_t gfp_flag) 1482 { 1483 struct bio *bio; 1484 int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1; 1485 1486 for (; try < 2; try++) { 1487 int bio_nr; 1488 1489 if (try && num_bios > 1) 1490 mutex_lock(&ci->io->md->table_devices_lock); 1491 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1492 bio = alloc_tio(ci, ti, bio_nr, len, 1493 try ? GFP_NOIO : GFP_NOWAIT); 1494 if (!bio) 1495 break; 1496 1497 bio_list_add(blist, bio); 1498 } 1499 if (try && num_bios > 1) 1500 mutex_unlock(&ci->io->md->table_devices_lock); 1501 if (bio_nr == num_bios) 1502 return; 1503 1504 while ((bio = bio_list_pop(blist))) 1505 free_tio(bio); 1506 } 1507 } 1508 1509 static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1510 unsigned int num_bios, unsigned int *len, 1511 gfp_t gfp_flag) 1512 { 1513 struct bio_list blist = BIO_EMPTY_LIST; 1514 struct bio *clone; 1515 unsigned int ret = 0; 1516 1517 if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */ 1518 return 0; 1519 1520 /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ 1521 if (len) 1522 setup_split_accounting(ci, *len); 1523 1524 /* 1525 * Using alloc_multiple_bios(), even if num_bios is 1, to consistently 1526 * support allocating using GFP_NOWAIT with GFP_NOIO fallback. 1527 */ 1528 alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag); 1529 while ((clone = bio_list_pop(&blist))) { 1530 if (num_bios > 1) 1531 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); 1532 __map_bio(clone); 1533 ret += 1; 1534 } 1535 1536 return ret; 1537 } 1538 1539 static void __send_empty_flush(struct clone_info *ci) 1540 { 1541 struct dm_table *t = ci->map; 1542 struct bio flush_bio; 1543 1544 /* 1545 * Use an on-stack bio for this, it's safe since we don't 1546 * need to reference it after submit. It's just used as 1547 * the basis for the clone(s). 
1548 */ 1549 bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, 1550 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); 1551 1552 ci->bio = &flush_bio; 1553 ci->sector_count = 0; 1554 ci->io->tio.clone.bi_iter.bi_size = 0; 1555 1556 for (unsigned int i = 0; i < t->num_targets; i++) { 1557 unsigned int bios; 1558 struct dm_target *ti = dm_table_get_target(t, i); 1559 1560 if (unlikely(ti->num_flush_bios == 0)) 1561 continue; 1562 1563 atomic_add(ti->num_flush_bios, &ci->io->io_count); 1564 bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, 1565 NULL, GFP_NOWAIT); 1566 atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count); 1567 } 1568 1569 /* 1570 * alloc_io() takes one extra reference for submission, so the 1571 * reference won't reach 0 without the following subtraction 1572 */ 1573 atomic_sub(1, &ci->io->io_count); 1574 1575 bio_uninit(ci->bio); 1576 } 1577 1578 static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti, 1579 unsigned int num_bios, unsigned int max_granularity, 1580 unsigned int max_sectors) 1581 { 1582 unsigned int len, bios; 1583 1584 len = min_t(sector_t, ci->sector_count, 1585 __max_io_len(ti, ci->sector, max_granularity, max_sectors)); 1586 1587 atomic_add(num_bios, &ci->io->io_count); 1588 bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO); 1589 /* 1590 * alloc_io() takes one extra reference for submission, so the 1591 * reference won't reach 0 without the following (+1) subtraction 1592 */ 1593 atomic_sub(num_bios - bios + 1, &ci->io->io_count); 1594 1595 ci->sector += len; 1596 ci->sector_count -= len; 1597 } 1598 1599 static bool is_abnormal_io(struct bio *bio) 1600 { 1601 switch (bio_op(bio)) { 1602 case REQ_OP_READ: 1603 case REQ_OP_WRITE: 1604 case REQ_OP_FLUSH: 1605 return false; 1606 case REQ_OP_DISCARD: 1607 case REQ_OP_SECURE_ERASE: 1608 case REQ_OP_WRITE_ZEROES: 1609 case REQ_OP_ZONE_RESET_ALL: 1610 return true; 1611 default: 1612 return false; 1613 } 1614 } 1615 1616 static blk_status_t __process_abnormal_io(struct clone_info *ci, 1617 struct dm_target *ti) 1618 { 1619 unsigned int num_bios = 0; 1620 unsigned int max_granularity = 0; 1621 unsigned int max_sectors = 0; 1622 struct queue_limits *limits = dm_get_queue_limits(ti->table->md); 1623 1624 switch (bio_op(ci->bio)) { 1625 case REQ_OP_DISCARD: 1626 num_bios = ti->num_discard_bios; 1627 max_sectors = limits->max_discard_sectors; 1628 if (ti->max_discard_granularity) 1629 max_granularity = max_sectors; 1630 break; 1631 case REQ_OP_SECURE_ERASE: 1632 num_bios = ti->num_secure_erase_bios; 1633 max_sectors = limits->max_secure_erase_sectors; 1634 if (ti->max_secure_erase_granularity) 1635 max_granularity = max_sectors; 1636 break; 1637 case REQ_OP_WRITE_ZEROES: 1638 num_bios = ti->num_write_zeroes_bios; 1639 max_sectors = limits->max_write_zeroes_sectors; 1640 if (ti->max_write_zeroes_granularity) 1641 max_granularity = max_sectors; 1642 break; 1643 default: 1644 break; 1645 } 1646 1647 /* 1648 * Even though the device advertised support for this type of 1649 * request, that does not mean every target supports it, and 1650 * reconfiguration might also have changed that since the 1651 * check was performed. 
1652 */ 1653 if (unlikely(!num_bios)) 1654 return BLK_STS_NOTSUPP; 1655 1656 __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors); 1657 1658 return BLK_STS_OK; 1659 } 1660 1661 /* 1662 * Reuse ->bi_private as dm_io list head for storing all dm_io instances 1663 * associated with this bio, and this bio's bi_private needs to be 1664 * stored in dm_io->data before the reuse. 1665 * 1666 * bio->bi_private is owned by fs or upper layer, so block layer won't 1667 * touch it after splitting. Meantime it won't be changed by anyone after 1668 * bio is submitted. So this reuse is safe. 1669 */ 1670 static inline struct dm_io **dm_poll_list_head(struct bio *bio) 1671 { 1672 return (struct dm_io **)&bio->bi_private; 1673 } 1674 1675 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io) 1676 { 1677 struct dm_io **head = dm_poll_list_head(bio); 1678 1679 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) { 1680 bio->bi_opf |= REQ_DM_POLL_LIST; 1681 /* 1682 * Save .bi_private into dm_io, so that we can reuse 1683 * .bi_private as dm_io list head for storing dm_io list 1684 */ 1685 io->data = bio->bi_private; 1686 1687 /* tell block layer to poll for completion */ 1688 bio->bi_cookie = ~BLK_QC_T_NONE; 1689 1690 io->next = NULL; 1691 } else { 1692 /* 1693 * bio recursed due to split, reuse original poll list, 1694 * and save bio->bi_private too. 1695 */ 1696 io->data = (*head)->data; 1697 io->next = *head; 1698 } 1699 1700 *head = io; 1701 } 1702 1703 /* 1704 * Select the correct strategy for processing a non-flush bio. 1705 */ 1706 static blk_status_t __split_and_process_bio(struct clone_info *ci) 1707 { 1708 struct bio *clone; 1709 struct dm_target *ti; 1710 unsigned int len; 1711 1712 ti = dm_table_find_target(ci->map, ci->sector); 1713 if (unlikely(!ti)) 1714 return BLK_STS_IOERR; 1715 1716 if (unlikely(ci->is_abnormal_io)) 1717 return __process_abnormal_io(ci, ti); 1718 1719 /* 1720 * Only support bio polling for normal IO, and the target io is 1721 * exactly inside the dm_io instance (verified in dm_poll_dm_io) 1722 */ 1723 ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); 1724 1725 len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); 1726 setup_split_accounting(ci, len); 1727 1728 if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) { 1729 if (unlikely(!dm_target_supports_nowait(ti->type))) 1730 return BLK_STS_NOTSUPP; 1731 1732 clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); 1733 if (unlikely(!clone)) 1734 return BLK_STS_AGAIN; 1735 } else { 1736 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); 1737 } 1738 __map_bio(clone); 1739 1740 ci->sector += len; 1741 ci->sector_count -= len; 1742 1743 return BLK_STS_OK; 1744 } 1745 1746 static void init_clone_info(struct clone_info *ci, struct dm_io *io, 1747 struct dm_table *map, struct bio *bio, bool is_abnormal) 1748 { 1749 ci->map = map; 1750 ci->io = io; 1751 ci->bio = bio; 1752 ci->is_abnormal_io = is_abnormal; 1753 ci->submit_as_polled = false; 1754 ci->sector = bio->bi_iter.bi_sector; 1755 ci->sector_count = bio_sectors(bio); 1756 1757 /* Shouldn't happen but sector_count was being set to 0 so... */ 1758 if (static_branch_unlikely(&zoned_enabled) && 1759 WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count)) 1760 ci->sector_count = 0; 1761 } 1762 1763 #ifdef CONFIG_BLK_DEV_ZONED 1764 static inline bool dm_zone_bio_needs_split(struct mapped_device *md, 1765 struct bio *bio) 1766 { 1767 /* 1768 * For mapped device that need zone append emulation, we must 1769 * split any large BIO that straddles zone boundaries. 
1770 */ 1771 return dm_emulate_zone_append(md) && bio_straddles_zones(bio) && 1772 !bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING); 1773 } 1774 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) 1775 { 1776 return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0); 1777 } 1778 1779 static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci, 1780 struct dm_target *ti) 1781 { 1782 struct bio_list blist = BIO_EMPTY_LIST; 1783 struct mapped_device *md = ci->io->md; 1784 unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors; 1785 unsigned long *need_reset; 1786 unsigned int i, nr_zones, nr_reset; 1787 unsigned int num_bios = 0; 1788 blk_status_t sts = BLK_STS_OK; 1789 sector_t sector = ti->begin; 1790 struct bio *clone; 1791 int ret; 1792 1793 nr_zones = ti->len >> ilog2(zone_sectors); 1794 need_reset = bitmap_zalloc(nr_zones, GFP_NOIO); 1795 if (!need_reset) 1796 return BLK_STS_RESOURCE; 1797 1798 ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin, 1799 nr_zones, need_reset); 1800 if (ret) { 1801 sts = BLK_STS_IOERR; 1802 goto free_bitmap; 1803 } 1804 1805 /* If we have no zone to reset, we are done. */ 1806 nr_reset = bitmap_weight(need_reset, nr_zones); 1807 if (!nr_reset) 1808 goto free_bitmap; 1809 1810 atomic_add(nr_zones, &ci->io->io_count); 1811 1812 for (i = 0; i < nr_zones; i++) { 1813 1814 if (!test_bit(i, need_reset)) { 1815 sector += zone_sectors; 1816 continue; 1817 } 1818 1819 if (bio_list_empty(&blist)) { 1820 /* This may take a while, so be nice to others */ 1821 if (num_bios) 1822 cond_resched(); 1823 1824 /* 1825 * We may need to reset thousands of zones, so let's 1826 * not go crazy with the clone allocation. 1827 */ 1828 alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32), 1829 NULL, GFP_NOIO); 1830 } 1831 1832 /* Get a clone and change it to a regular reset operation. */ 1833 clone = bio_list_pop(&blist); 1834 clone->bi_opf &= ~REQ_OP_MASK; 1835 clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC; 1836 clone->bi_iter.bi_sector = sector; 1837 clone->bi_iter.bi_size = 0; 1838 __map_bio(clone); 1839 1840 sector += zone_sectors; 1841 num_bios++; 1842 nr_reset--; 1843 } 1844 1845 WARN_ON_ONCE(!bio_list_empty(&blist)); 1846 atomic_sub(nr_zones - num_bios, &ci->io->io_count); 1847 ci->sector_count = 0; 1848 1849 free_bitmap: 1850 bitmap_free(need_reset); 1851 1852 return sts; 1853 } 1854 1855 static void __send_zone_reset_all_native(struct clone_info *ci, 1856 struct dm_target *ti) 1857 { 1858 unsigned int bios; 1859 1860 atomic_add(1, &ci->io->io_count); 1861 bios = __send_duplicate_bios(ci, ti, 1, NULL, GFP_NOIO); 1862 atomic_sub(1 - bios, &ci->io->io_count); 1863 1864 ci->sector_count = 0; 1865 } 1866 1867 static blk_status_t __send_zone_reset_all(struct clone_info *ci) 1868 { 1869 struct dm_table *t = ci->map; 1870 blk_status_t sts = BLK_STS_OK; 1871 1872 for (unsigned int i = 0; i < t->num_targets; i++) { 1873 struct dm_target *ti = dm_table_get_target(t, i); 1874 1875 if (ti->zone_reset_all_supported) { 1876 __send_zone_reset_all_native(ci, ti); 1877 continue; 1878 } 1879 1880 sts = __send_zone_reset_all_emulated(ci, ti); 1881 if (sts != BLK_STS_OK) 1882 break; 1883 } 1884 1885 /* Release the reference that alloc_io() took for submission. 
*/ 1886 atomic_sub(1, &ci->io->io_count); 1887 1888 return sts; 1889 } 1890 1891 #else 1892 static inline bool dm_zone_bio_needs_split(struct mapped_device *md, 1893 struct bio *bio) 1894 { 1895 return false; 1896 } 1897 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) 1898 { 1899 return false; 1900 } 1901 static blk_status_t __send_zone_reset_all(struct clone_info *ci) 1902 { 1903 return BLK_STS_NOTSUPP; 1904 } 1905 #endif 1906 1907 /* 1908 * Entry point to split a bio into clones and submit them to the targets. 1909 */ 1910 static void dm_split_and_process_bio(struct mapped_device *md, 1911 struct dm_table *map, struct bio *bio) 1912 { 1913 struct clone_info ci; 1914 struct dm_io *io; 1915 blk_status_t error = BLK_STS_OK; 1916 bool is_abnormal, need_split; 1917 1918 is_abnormal = is_abnormal_io(bio); 1919 if (static_branch_unlikely(&zoned_enabled)) { 1920 /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. */ 1921 need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) && 1922 (is_abnormal || dm_zone_bio_needs_split(md, bio)); 1923 } else { 1924 need_split = is_abnormal; 1925 } 1926 1927 if (unlikely(need_split)) { 1928 /* 1929 * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc) 1930 * otherwise associated queue_limits won't be imposed. 1931 * Also split the BIO for mapped devices needing zone append 1932 * emulation to ensure that the BIO does not cross zone 1933 * boundaries. 1934 */ 1935 bio = bio_split_to_limits(bio); 1936 if (!bio) 1937 return; 1938 } 1939 1940 /* 1941 * Use the block layer zone write plugging for mapped devices that 1942 * need zone append emulation (e.g. dm-crypt). 1943 */ 1944 if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio)) 1945 return; 1946 1947 /* Only support nowait for normal IO */ 1948 if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) { 1949 io = alloc_io(md, bio, GFP_NOWAIT); 1950 if (unlikely(!io)) { 1951 /* Unable to do anything without dm_io. */ 1952 bio_wouldblock_error(bio); 1953 return; 1954 } 1955 } else { 1956 io = alloc_io(md, bio, GFP_NOIO); 1957 } 1958 init_clone_info(&ci, io, map, bio, is_abnormal); 1959 1960 if (bio->bi_opf & REQ_PREFLUSH) { 1961 __send_empty_flush(&ci); 1962 /* dm_io_complete submits any data associated with flush */ 1963 goto out; 1964 } 1965 1966 if (static_branch_unlikely(&zoned_enabled) && 1967 (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) { 1968 error = __send_zone_reset_all(&ci); 1969 goto out; 1970 } 1971 1972 error = __split_and_process_bio(&ci); 1973 if (error || !ci.sector_count) 1974 goto out; 1975 /* 1976 * Remainder must be passed to submit_bio_noacct() so it gets handled 1977 * *after* bios already submitted have been completely processed. 1978 */ 1979 bio_trim(bio, io->sectors, ci.sector_count); 1980 trace_block_split(bio, bio->bi_iter.bi_sector); 1981 bio_inc_remaining(bio); 1982 submit_bio_noacct(bio); 1983 out: 1984 /* 1985 * Drop the extra reference count for non-POLLED bio, and hold one 1986 * reference for POLLED bio, which will be released in dm_poll_bio 1987 * 1988 * Add every dm_io instance into the dm_io list head which is stored 1989 * in bio->bi_private, so that dm_poll_bio can poll them all. 
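	 *
	 * Illustrative sketch (assumption: the original bio was split into
	 * three dm_io instances io1..io3, queued in that order by
	 * dm_queue_poll_io()):
	 *
	 *	bio->bi_private --> io3 --next--> io2 --next--> io1 --next--> NULL
	 *
	 * Each io->data holds the original bio->bi_private, which dm_poll_bio()
	 * restores before the bio can be completed.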
1990 */ 1991 if (error || !ci.submit_as_polled) { 1992 /* 1993 * In case of submission failure, the extra reference for 1994 * submitting io isn't consumed yet 1995 */ 1996 if (error) 1997 atomic_dec(&io->io_count); 1998 dm_io_dec_pending(io, error); 1999 } else 2000 dm_queue_poll_io(bio, io); 2001 } 2002 2003 static void dm_submit_bio(struct bio *bio) 2004 { 2005 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; 2006 int srcu_idx; 2007 struct dm_table *map; 2008 2009 map = dm_get_live_table(md, &srcu_idx); 2010 2011 /* If suspended, or map not yet available, queue this IO for later */ 2012 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || 2013 unlikely(!map)) { 2014 if (bio->bi_opf & REQ_NOWAIT) 2015 bio_wouldblock_error(bio); 2016 else if (bio->bi_opf & REQ_RAHEAD) 2017 bio_io_error(bio); 2018 else 2019 queue_io(md, bio); 2020 goto out; 2021 } 2022 2023 dm_split_and_process_bio(md, map, bio); 2024 out: 2025 dm_put_live_table(md, srcu_idx); 2026 } 2027 2028 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, 2029 unsigned int flags) 2030 { 2031 WARN_ON_ONCE(!dm_tio_is_normal(&io->tio)); 2032 2033 /* don't poll if the mapped io is done */ 2034 if (atomic_read(&io->io_count) > 1) 2035 bio_poll(&io->tio.clone, iob, flags); 2036 2037 /* bio_poll holds the last reference */ 2038 return atomic_read(&io->io_count) == 1; 2039 } 2040 2041 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob, 2042 unsigned int flags) 2043 { 2044 struct dm_io **head = dm_poll_list_head(bio); 2045 struct dm_io *list = *head; 2046 struct dm_io *tmp = NULL; 2047 struct dm_io *curr, *next; 2048 2049 /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */ 2050 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) 2051 return 0; 2052 2053 WARN_ON_ONCE(!list); 2054 2055 /* 2056 * Restore .bi_private before possibly completing dm_io. 2057 * 2058 * bio_poll() is only possible once @bio has been completely 2059 * submitted via submit_bio_noacct()'s depth-first submission. 2060 * So there is no dm_queue_poll_io() race associated with 2061 * clearing REQ_DM_POLL_LIST here. 2062 */ 2063 bio->bi_opf &= ~REQ_DM_POLL_LIST; 2064 bio->bi_private = list->data; 2065 2066 for (curr = list, next = curr->next; curr; curr = next, next = 2067 curr ? curr->next : NULL) { 2068 if (dm_poll_dm_io(curr, iob, flags)) { 2069 /* 2070 * clone_endio() has already occurred, so no 2071 * error handling is needed here. 2072 */ 2073 __dm_io_dec_pending(curr); 2074 } else { 2075 curr->next = tmp; 2076 tmp = curr; 2077 } 2078 } 2079 2080 /* Not done? */ 2081 if (tmp) { 2082 bio->bi_opf |= REQ_DM_POLL_LIST; 2083 /* Reset bio->bi_private to dm_io list head */ 2084 *head = tmp; 2085 return 0; 2086 } 2087 return 1; 2088 } 2089 2090 /* 2091 *--------------------------------------------------------------- 2092 * An IDR is used to keep track of allocated minor numbers. 2093 *--------------------------------------------------------------- 2094 */ 2095 static void free_minor(int minor) 2096 { 2097 spin_lock(&_minor_lock); 2098 idr_remove(&_minor_idr, minor); 2099 spin_unlock(&_minor_lock); 2100 } 2101 2102 /* 2103 * See if the device with a specific minor # is free. 
2104 */ 2105 static int specific_minor(int minor) 2106 { 2107 int r; 2108 2109 if (minor >= (1 << MINORBITS)) 2110 return -EINVAL; 2111 2112 idr_preload(GFP_KERNEL); 2113 spin_lock(&_minor_lock); 2114 2115 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 2116 2117 spin_unlock(&_minor_lock); 2118 idr_preload_end(); 2119 if (r < 0) 2120 return r == -ENOSPC ? -EBUSY : r; 2121 return 0; 2122 } 2123 2124 static int next_free_minor(int *minor) 2125 { 2126 int r; 2127 2128 idr_preload(GFP_KERNEL); 2129 spin_lock(&_minor_lock); 2130 2131 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 2132 2133 spin_unlock(&_minor_lock); 2134 idr_preload_end(); 2135 if (r < 0) 2136 return r; 2137 *minor = r; 2138 return 0; 2139 } 2140 2141 static const struct block_device_operations dm_blk_dops; 2142 static const struct block_device_operations dm_rq_blk_dops; 2143 static const struct dax_operations dm_dax_ops; 2144 2145 static void dm_wq_work(struct work_struct *work); 2146 2147 #ifdef CONFIG_BLK_INLINE_ENCRYPTION 2148 static void dm_queue_destroy_crypto_profile(struct request_queue *q) 2149 { 2150 dm_destroy_crypto_profile(q->crypto_profile); 2151 } 2152 2153 #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 2154 2155 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) 2156 { 2157 } 2158 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ 2159 2160 static void cleanup_mapped_device(struct mapped_device *md) 2161 { 2162 if (md->wq) 2163 destroy_workqueue(md->wq); 2164 dm_free_md_mempools(md->mempools); 2165 2166 if (md->dax_dev) { 2167 dax_remove_host(md->disk); 2168 kill_dax(md->dax_dev); 2169 put_dax(md->dax_dev); 2170 md->dax_dev = NULL; 2171 } 2172 2173 if (md->disk) { 2174 spin_lock(&_minor_lock); 2175 md->disk->private_data = NULL; 2176 spin_unlock(&_minor_lock); 2177 if (dm_get_md_type(md) != DM_TYPE_NONE) { 2178 struct table_device *td; 2179 2180 dm_sysfs_exit(md); 2181 list_for_each_entry(td, &md->table_devices, list) { 2182 bd_unlink_disk_holder(td->dm_dev.bdev, 2183 md->disk); 2184 } 2185 2186 /* 2187 * Hold lock to make sure del_gendisk() won't concurrent 2188 * with open/close_table_device(). 2189 */ 2190 mutex_lock(&md->table_devices_lock); 2191 del_gendisk(md->disk); 2192 mutex_unlock(&md->table_devices_lock); 2193 } 2194 dm_queue_destroy_crypto_profile(md->queue); 2195 put_disk(md->disk); 2196 } 2197 2198 if (md->pending_io) { 2199 free_percpu(md->pending_io); 2200 md->pending_io = NULL; 2201 } 2202 2203 cleanup_srcu_struct(&md->io_barrier); 2204 2205 mutex_destroy(&md->suspend_lock); 2206 mutex_destroy(&md->type_lock); 2207 mutex_destroy(&md->table_devices_lock); 2208 mutex_destroy(&md->swap_bios_lock); 2209 2210 dm_mq_cleanup_mapped_device(md); 2211 } 2212 2213 /* 2214 * Allocate and initialise a blank device with a given minor. 
2215 */ 2216 static struct mapped_device *alloc_dev(int minor) 2217 { 2218 int r, numa_node_id = dm_get_numa_node(); 2219 struct dax_device *dax_dev; 2220 struct mapped_device *md; 2221 void *old_md; 2222 2223 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 2224 if (!md) { 2225 DMERR("unable to allocate device, out of memory."); 2226 return NULL; 2227 } 2228 2229 if (!try_module_get(THIS_MODULE)) 2230 goto bad_module_get; 2231 2232 /* get a minor number for the dev */ 2233 if (minor == DM_ANY_MINOR) 2234 r = next_free_minor(&minor); 2235 else 2236 r = specific_minor(minor); 2237 if (r < 0) 2238 goto bad_minor; 2239 2240 r = init_srcu_struct(&md->io_barrier); 2241 if (r < 0) 2242 goto bad_io_barrier; 2243 2244 md->numa_node_id = numa_node_id; 2245 md->init_tio_pdu = false; 2246 md->type = DM_TYPE_NONE; 2247 mutex_init(&md->suspend_lock); 2248 mutex_init(&md->type_lock); 2249 mutex_init(&md->table_devices_lock); 2250 spin_lock_init(&md->deferred_lock); 2251 atomic_set(&md->holders, 1); 2252 atomic_set(&md->open_count, 0); 2253 atomic_set(&md->event_nr, 0); 2254 atomic_set(&md->uevent_seq, 0); 2255 INIT_LIST_HEAD(&md->uevent_list); 2256 INIT_LIST_HEAD(&md->table_devices); 2257 spin_lock_init(&md->uevent_lock); 2258 2259 /* 2260 * default to bio-based until DM table is loaded and md->type 2261 * established. If request-based table is loaded: blk-mq will 2262 * override accordingly. 2263 */ 2264 md->disk = blk_alloc_disk(NULL, md->numa_node_id); 2265 if (IS_ERR(md->disk)) 2266 goto bad; 2267 md->queue = md->disk->queue; 2268 2269 init_waitqueue_head(&md->wait); 2270 INIT_WORK(&md->work, dm_wq_work); 2271 INIT_WORK(&md->requeue_work, dm_wq_requeue_work); 2272 init_waitqueue_head(&md->eventq); 2273 init_completion(&md->kobj_holder.completion); 2274 2275 md->requeue_list = NULL; 2276 md->swap_bios = get_swap_bios(); 2277 sema_init(&md->swap_bios_semaphore, md->swap_bios); 2278 mutex_init(&md->swap_bios_lock); 2279 2280 md->disk->major = _major; 2281 md->disk->first_minor = minor; 2282 md->disk->minors = 1; 2283 md->disk->flags |= GENHD_FL_NO_PART; 2284 md->disk->fops = &dm_blk_dops; 2285 md->disk->private_data = md; 2286 sprintf(md->disk->disk_name, "dm-%d", minor); 2287 2288 dax_dev = alloc_dax(md, &dm_dax_ops); 2289 if (IS_ERR(dax_dev)) { 2290 if (PTR_ERR(dax_dev) != -EOPNOTSUPP) 2291 goto bad; 2292 } else { 2293 set_dax_nocache(dax_dev); 2294 set_dax_nomc(dax_dev); 2295 md->dax_dev = dax_dev; 2296 if (dax_add_host(dax_dev, md->disk)) 2297 goto bad; 2298 } 2299 2300 format_dev_t(md->name, MKDEV(_major, minor)); 2301 2302 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); 2303 if (!md->wq) 2304 goto bad; 2305 2306 md->pending_io = alloc_percpu(unsigned long); 2307 if (!md->pending_io) 2308 goto bad; 2309 2310 r = dm_stats_init(&md->stats); 2311 if (r < 0) 2312 goto bad; 2313 2314 /* Populate the mapping, nobody knows we exist yet */ 2315 spin_lock(&_minor_lock); 2316 old_md = idr_replace(&_minor_idr, md, minor); 2317 spin_unlock(&_minor_lock); 2318 2319 BUG_ON(old_md != MINOR_ALLOCED); 2320 2321 return md; 2322 2323 bad: 2324 cleanup_mapped_device(md); 2325 bad_io_barrier: 2326 free_minor(minor); 2327 bad_minor: 2328 module_put(THIS_MODULE); 2329 bad_module_get: 2330 kvfree(md); 2331 return NULL; 2332 } 2333 2334 static void unlock_fs(struct mapped_device *md); 2335 2336 static void free_dev(struct mapped_device *md) 2337 { 2338 int minor = MINOR(disk_devt(md->disk)); 2339 2340 unlock_fs(md); 2341 2342 cleanup_mapped_device(md); 2343 2344 
WARN_ON_ONCE(!list_empty(&md->table_devices)); 2345 dm_stats_cleanup(&md->stats); 2346 free_minor(minor); 2347 2348 module_put(THIS_MODULE); 2349 kvfree(md); 2350 } 2351 2352 /* 2353 * Bind a table to the device. 2354 */ 2355 static void event_callback(void *context) 2356 { 2357 unsigned long flags; 2358 LIST_HEAD(uevents); 2359 struct mapped_device *md = context; 2360 2361 spin_lock_irqsave(&md->uevent_lock, flags); 2362 list_splice_init(&md->uevent_list, &uevents); 2363 spin_unlock_irqrestore(&md->uevent_lock, flags); 2364 2365 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2366 2367 atomic_inc(&md->event_nr); 2368 wake_up(&md->eventq); 2369 dm_issue_global_event(); 2370 } 2371 2372 /* 2373 * Returns old map, which caller must destroy. 2374 */ 2375 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2376 struct queue_limits *limits) 2377 { 2378 struct dm_table *old_map; 2379 sector_t size; 2380 int ret; 2381 2382 lockdep_assert_held(&md->suspend_lock); 2383 2384 size = dm_table_get_size(t); 2385 2386 /* 2387 * Wipe any geometry if the size of the table changed. 2388 */ 2389 if (size != dm_get_size(md)) 2390 memset(&md->geometry, 0, sizeof(md->geometry)); 2391 2392 set_capacity(md->disk, size); 2393 2394 dm_table_event_callback(t, event_callback, md); 2395 2396 if (dm_table_request_based(t)) { 2397 /* 2398 * Leverage the fact that request-based DM targets are 2399 * immutable singletons - used to optimize dm_mq_queue_rq. 2400 */ 2401 md->immutable_target = dm_table_get_immutable_target(t); 2402 2403 /* 2404 * There is no need to reload with request-based dm because the 2405 * size of front_pad doesn't change. 2406 * 2407 * Note for future: If you are to reload bioset, prep-ed 2408 * requests in the queue may refer to bio from the old bioset, 2409 * so you must walk through the queue to unprep. 2410 */ 2411 if (!md->mempools) { 2412 md->mempools = t->mempools; 2413 t->mempools = NULL; 2414 } 2415 } else { 2416 /* 2417 * The md may already have mempools that need changing. 2418 * If so, reload bioset because front_pad may have changed 2419 * because a different table was loaded. 2420 */ 2421 dm_free_md_mempools(md->mempools); 2422 md->mempools = t->mempools; 2423 t->mempools = NULL; 2424 } 2425 2426 ret = dm_table_set_restrictions(t, md->queue, limits); 2427 if (ret) { 2428 old_map = ERR_PTR(ret); 2429 goto out; 2430 } 2431 2432 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2433 rcu_assign_pointer(md->map, (void *)t); 2434 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2435 2436 if (old_map) 2437 dm_sync_table(md); 2438 out: 2439 return old_map; 2440 } 2441 2442 /* 2443 * Returns unbound table for the caller to free. 2444 */ 2445 static struct dm_table *__unbind(struct mapped_device *md) 2446 { 2447 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2448 2449 if (!map) 2450 return NULL; 2451 2452 dm_table_event_callback(map, NULL, NULL); 2453 RCU_INIT_POINTER(md->map, NULL); 2454 dm_sync_table(md); 2455 2456 return map; 2457 } 2458 2459 /* 2460 * Constructor for a new device. 2461 */ 2462 int dm_create(int minor, struct mapped_device **result) 2463 { 2464 struct mapped_device *md; 2465 2466 md = alloc_dev(minor); 2467 if (!md) 2468 return -ENXIO; 2469 2470 dm_ima_reset_data(md); 2471 2472 *result = md; 2473 return 0; 2474 } 2475 2476 /* 2477 * Functions to manage md->type. 2478 * All are required to hold md->type_lock. 
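 *
 * A typical caller pattern, as an illustrative sketch only (the real
 * callers live in dm-ioctl.c):
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);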
*/
2480 void dm_lock_md_type(struct mapped_device *md)
2481 {
2482 	mutex_lock(&md->type_lock);
2483 }
2484 
2485 void dm_unlock_md_type(struct mapped_device *md)
2486 {
2487 	mutex_unlock(&md->type_lock);
2488 }
2489 
2490 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2491 {
2492 	BUG_ON(!mutex_is_locked(&md->type_lock));
2493 	md->type = type;
2494 }
2495 
2496 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2497 {
2498 	return md->type;
2499 }
2500 
2501 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2502 {
2503 	return md->immutable_target_type;
2504 }
2505 
2506 /*
2507  * Set up the DM device's queue based on md's type
2508  */
2509 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2510 {
2511 	enum dm_queue_mode type = dm_table_get_type(t);
2512 	struct queue_limits limits;
2513 	struct table_device *td;
2514 	int r;
2515 
2516 	WARN_ON_ONCE(type == DM_TYPE_NONE);
2517 
2518 	if (type == DM_TYPE_REQUEST_BASED) {
2519 		md->disk->fops = &dm_rq_blk_dops;
2520 		r = dm_mq_init_request_queue(md, t);
2521 		if (r) {
2522 			DMERR("Cannot initialize queue for request-based dm mapped device");
2523 			return r;
2524 		}
2525 	}
2526 
2527 	r = dm_calculate_queue_limits(t, &limits);
2528 	if (r) {
2529 		DMERR("Cannot calculate initial queue limits");
2530 		return r;
2531 	}
2532 	r = dm_table_set_restrictions(t, md->queue, &limits);
2533 	if (r)
2534 		return r;
2535 
2536 	/*
2537 	 * Hold the lock to make sure add_disk() and del_gendisk() won't run
2538 	 * concurrently with open_table_device() and close_table_device().
2539 	 */
2540 	mutex_lock(&md->table_devices_lock);
2541 	r = add_disk(md->disk);
2542 	mutex_unlock(&md->table_devices_lock);
2543 	if (r)
2544 		return r;
2545 
2546 	/*
2547 	 * Register the holder relationship for devices added before the disk
2548 	 * was live.
2549 */ 2550 list_for_each_entry(td, &md->table_devices, list) { 2551 r = bd_link_disk_holder(td->dm_dev.bdev, md->disk); 2552 if (r) 2553 goto out_undo_holders; 2554 } 2555 2556 r = dm_sysfs_init(md); 2557 if (r) 2558 goto out_undo_holders; 2559 2560 md->type = type; 2561 return 0; 2562 2563 out_undo_holders: 2564 list_for_each_entry_continue_reverse(td, &md->table_devices, list) 2565 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); 2566 mutex_lock(&md->table_devices_lock); 2567 del_gendisk(md->disk); 2568 mutex_unlock(&md->table_devices_lock); 2569 return r; 2570 } 2571 2572 struct mapped_device *dm_get_md(dev_t dev) 2573 { 2574 struct mapped_device *md; 2575 unsigned int minor = MINOR(dev); 2576 2577 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2578 return NULL; 2579 2580 spin_lock(&_minor_lock); 2581 2582 md = idr_find(&_minor_idr, minor); 2583 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2584 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2585 md = NULL; 2586 goto out; 2587 } 2588 dm_get(md); 2589 out: 2590 spin_unlock(&_minor_lock); 2591 2592 return md; 2593 } 2594 EXPORT_SYMBOL_GPL(dm_get_md); 2595 2596 void *dm_get_mdptr(struct mapped_device *md) 2597 { 2598 return md->interface_ptr; 2599 } 2600 2601 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2602 { 2603 md->interface_ptr = ptr; 2604 } 2605 2606 void dm_get(struct mapped_device *md) 2607 { 2608 atomic_inc(&md->holders); 2609 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2610 } 2611 2612 int dm_hold(struct mapped_device *md) 2613 { 2614 spin_lock(&_minor_lock); 2615 if (test_bit(DMF_FREEING, &md->flags)) { 2616 spin_unlock(&_minor_lock); 2617 return -EBUSY; 2618 } 2619 dm_get(md); 2620 spin_unlock(&_minor_lock); 2621 return 0; 2622 } 2623 EXPORT_SYMBOL_GPL(dm_hold); 2624 2625 const char *dm_device_name(struct mapped_device *md) 2626 { 2627 return md->name; 2628 } 2629 EXPORT_SYMBOL_GPL(dm_device_name); 2630 2631 static void __dm_destroy(struct mapped_device *md, bool wait) 2632 { 2633 struct dm_table *map; 2634 int srcu_idx; 2635 2636 might_sleep(); 2637 2638 spin_lock(&_minor_lock); 2639 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2640 set_bit(DMF_FREEING, &md->flags); 2641 spin_unlock(&_minor_lock); 2642 2643 blk_mark_disk_dead(md->disk); 2644 2645 /* 2646 * Take suspend_lock so that presuspend and postsuspend methods 2647 * do not race with internal suspend. 2648 */ 2649 mutex_lock(&md->suspend_lock); 2650 map = dm_get_live_table(md, &srcu_idx); 2651 if (!dm_suspended_md(md)) { 2652 dm_table_presuspend_targets(map); 2653 set_bit(DMF_SUSPENDED, &md->flags); 2654 set_bit(DMF_POST_SUSPENDING, &md->flags); 2655 dm_table_postsuspend_targets(map); 2656 } 2657 /* dm_put_live_table must be before fsleep, otherwise deadlock is possible */ 2658 dm_put_live_table(md, srcu_idx); 2659 mutex_unlock(&md->suspend_lock); 2660 2661 /* 2662 * Rare, but there may be I/O requests still going to complete, 2663 * for example. Wait for all references to disappear. 2664 * No one should increment the reference count of the mapped_device, 2665 * after the mapped_device state becomes DMF_FREEING. 2666 */ 2667 if (wait) 2668 while (atomic_read(&md->holders)) 2669 fsleep(1000); 2670 else if (atomic_read(&md->holders)) 2671 DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", 2672 dm_device_name(md), atomic_read(&md->holders)); 2673 2674 dm_table_destroy(__unbind(md)); 2675 free_dev(md); 2676 } 2677 2678 void dm_destroy(struct mapped_device *md) 2679 { 2680 __dm_destroy(md, true); 2681 } 2682 2683 void dm_destroy_immediate(struct mapped_device *md) 2684 { 2685 __dm_destroy(md, false); 2686 } 2687 2688 void dm_put(struct mapped_device *md) 2689 { 2690 atomic_dec(&md->holders); 2691 } 2692 EXPORT_SYMBOL_GPL(dm_put); 2693 2694 static bool dm_in_flight_bios(struct mapped_device *md) 2695 { 2696 int cpu; 2697 unsigned long sum = 0; 2698 2699 for_each_possible_cpu(cpu) 2700 sum += *per_cpu_ptr(md->pending_io, cpu); 2701 2702 return sum != 0; 2703 } 2704 2705 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) 2706 { 2707 int r = 0; 2708 DEFINE_WAIT(wait); 2709 2710 while (true) { 2711 prepare_to_wait(&md->wait, &wait, task_state); 2712 2713 if (!dm_in_flight_bios(md)) 2714 break; 2715 2716 if (signal_pending_state(task_state, current)) { 2717 r = -EINTR; 2718 break; 2719 } 2720 2721 io_schedule(); 2722 } 2723 finish_wait(&md->wait, &wait); 2724 2725 smp_rmb(); 2726 2727 return r; 2728 } 2729 2730 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) 2731 { 2732 int r = 0; 2733 2734 if (!queue_is_mq(md->queue)) 2735 return dm_wait_for_bios_completion(md, task_state); 2736 2737 while (true) { 2738 if (!blk_mq_queue_inflight(md->queue)) 2739 break; 2740 2741 if (signal_pending_state(task_state, current)) { 2742 r = -EINTR; 2743 break; 2744 } 2745 2746 fsleep(5000); 2747 } 2748 2749 return r; 2750 } 2751 2752 /* 2753 * Process the deferred bios 2754 */ 2755 static void dm_wq_work(struct work_struct *work) 2756 { 2757 struct mapped_device *md = container_of(work, struct mapped_device, work); 2758 struct bio *bio; 2759 2760 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2761 spin_lock_irq(&md->deferred_lock); 2762 bio = bio_list_pop(&md->deferred); 2763 spin_unlock_irq(&md->deferred_lock); 2764 2765 if (!bio) 2766 break; 2767 2768 submit_bio_noacct(bio); 2769 cond_resched(); 2770 } 2771 } 2772 2773 static void dm_queue_flush(struct mapped_device *md) 2774 { 2775 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2776 smp_mb__after_atomic(); 2777 queue_work(md->wq, &md->work); 2778 } 2779 2780 /* 2781 * Swap in a new table, returning the old one for the caller to destroy. 2782 */ 2783 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2784 { 2785 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2786 struct queue_limits limits; 2787 int r; 2788 2789 mutex_lock(&md->suspend_lock); 2790 2791 /* device must be suspended */ 2792 if (!dm_suspended_md(md)) 2793 goto out; 2794 2795 /* 2796 * If the new table has no data devices, retain the existing limits. 2797 * This helps multipath with queue_if_no_path if all paths disappear, 2798 * then new I/O is queued based on these limits, and then some paths 2799 * reappear. 
2800 */ 2801 if (dm_table_has_no_data_devices(table)) { 2802 live_map = dm_get_live_table_fast(md); 2803 if (live_map) 2804 limits = md->queue->limits; 2805 dm_put_live_table_fast(md); 2806 } 2807 2808 if (!live_map) { 2809 r = dm_calculate_queue_limits(table, &limits); 2810 if (r) { 2811 map = ERR_PTR(r); 2812 goto out; 2813 } 2814 } 2815 2816 map = __bind(md, table, &limits); 2817 dm_issue_global_event(); 2818 2819 out: 2820 mutex_unlock(&md->suspend_lock); 2821 return map; 2822 } 2823 2824 /* 2825 * Functions to lock and unlock any filesystem running on the 2826 * device. 2827 */ 2828 static int lock_fs(struct mapped_device *md) 2829 { 2830 int r; 2831 2832 WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2833 2834 r = bdev_freeze(md->disk->part0); 2835 if (!r) 2836 set_bit(DMF_FROZEN, &md->flags); 2837 return r; 2838 } 2839 2840 static void unlock_fs(struct mapped_device *md) 2841 { 2842 if (!test_bit(DMF_FROZEN, &md->flags)) 2843 return; 2844 bdev_thaw(md->disk->part0); 2845 clear_bit(DMF_FROZEN, &md->flags); 2846 } 2847 2848 /* 2849 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2850 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2851 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2852 * 2853 * If __dm_suspend returns 0, the device is completely quiescent 2854 * now. There is no request-processing activity. All new requests 2855 * are being added to md->deferred list. 2856 */ 2857 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2858 unsigned int suspend_flags, unsigned int task_state, 2859 int dmf_suspended_flag) 2860 { 2861 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2862 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2863 int r; 2864 2865 lockdep_assert_held(&md->suspend_lock); 2866 2867 /* 2868 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2869 * This flag is cleared before dm_suspend returns. 2870 */ 2871 if (noflush) 2872 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2873 else 2874 DMDEBUG("%s: suspending with flush", dm_device_name(md)); 2875 2876 /* 2877 * This gets reverted if there's an error later and the targets 2878 * provide the .presuspend_undo hook. 2879 */ 2880 dm_table_presuspend_targets(map); 2881 2882 /* 2883 * Flush I/O to the device. 2884 * Any I/O submitted after lock_fs() may not be flushed. 2885 * noflush takes precedence over do_lockfs. 2886 * (lock_fs() flushes I/Os and waits for them to complete.) 2887 */ 2888 if (!noflush && do_lockfs) { 2889 r = lock_fs(md); 2890 if (r) { 2891 dm_table_presuspend_undo_targets(map); 2892 return r; 2893 } 2894 } 2895 2896 /* 2897 * Here we must make sure that no processes are submitting requests 2898 * to target drivers i.e. no one may be executing 2899 * dm_split_and_process_bio from dm_submit_bio. 2900 * 2901 * To get all processes out of dm_split_and_process_bio in dm_submit_bio, 2902 * we take the write lock. To prevent any process from reentering 2903 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread 2904 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 2905 * flush_workqueue(md->wq). 2906 */ 2907 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2908 if (map) 2909 synchronize_srcu(&md->io_barrier); 2910 2911 /* 2912 * Stop md->queue before flushing md->wq in case request-based 2913 * dm defers requests to md->wq from md->queue. 
2914 */ 2915 if (dm_request_based(md)) 2916 dm_stop_queue(md->queue); 2917 2918 flush_workqueue(md->wq); 2919 2920 /* 2921 * At this point no more requests are entering target request routines. 2922 * We call dm_wait_for_completion to wait for all existing requests 2923 * to finish. 2924 */ 2925 r = dm_wait_for_completion(md, task_state); 2926 if (!r) 2927 set_bit(dmf_suspended_flag, &md->flags); 2928 2929 if (noflush) 2930 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2931 if (map) 2932 synchronize_srcu(&md->io_barrier); 2933 2934 /* were we interrupted ? */ 2935 if (r < 0) { 2936 dm_queue_flush(md); 2937 2938 if (dm_request_based(md)) 2939 dm_start_queue(md->queue); 2940 2941 unlock_fs(md); 2942 dm_table_presuspend_undo_targets(map); 2943 /* pushback list is already flushed, so skip flush */ 2944 } 2945 2946 return r; 2947 } 2948 2949 /* 2950 * We need to be able to change a mapping table under a mounted 2951 * filesystem. For example we might want to move some data in 2952 * the background. Before the table can be swapped with 2953 * dm_bind_table, dm_suspend must be called to flush any in 2954 * flight bios and ensure that any further io gets deferred. 2955 */ 2956 /* 2957 * Suspend mechanism in request-based dm. 2958 * 2959 * 1. Flush all I/Os by lock_fs() if needed. 2960 * 2. Stop dispatching any I/O by stopping the request_queue. 2961 * 3. Wait for all in-flight I/Os to be completed or requeued. 2962 * 2963 * To abort suspend, start the request_queue. 2964 */ 2965 int dm_suspend(struct mapped_device *md, unsigned int suspend_flags) 2966 { 2967 struct dm_table *map = NULL; 2968 int r = 0; 2969 2970 retry: 2971 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2972 2973 if (dm_suspended_md(md)) { 2974 r = -EINVAL; 2975 goto out_unlock; 2976 } 2977 2978 if (dm_suspended_internally_md(md)) { 2979 /* already internally suspended, wait for internal resume */ 2980 mutex_unlock(&md->suspend_lock); 2981 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2982 if (r) 2983 return r; 2984 goto retry; 2985 } 2986 2987 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2988 if (!map) { 2989 /* avoid deadlock with fs/namespace.c:do_mount() */ 2990 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; 2991 } 2992 2993 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2994 if (r) 2995 goto out_unlock; 2996 2997 set_bit(DMF_POST_SUSPENDING, &md->flags); 2998 dm_table_postsuspend_targets(map); 2999 clear_bit(DMF_POST_SUSPENDING, &md->flags); 3000 3001 out_unlock: 3002 mutex_unlock(&md->suspend_lock); 3003 return r; 3004 } 3005 3006 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 3007 { 3008 if (map) { 3009 int r = dm_table_resume_targets(map); 3010 3011 if (r) 3012 return r; 3013 } 3014 3015 dm_queue_flush(md); 3016 3017 /* 3018 * Flushing deferred I/Os must be done after targets are resumed 3019 * so that mapping of targets can work correctly. 3020 * Request-based dm is queueing the deferred I/Os in its request_queue. 
3021 */ 3022 if (dm_request_based(md)) 3023 dm_start_queue(md->queue); 3024 3025 unlock_fs(md); 3026 3027 return 0; 3028 } 3029 3030 int dm_resume(struct mapped_device *md) 3031 { 3032 int r; 3033 struct dm_table *map = NULL; 3034 3035 retry: 3036 r = -EINVAL; 3037 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3038 3039 if (!dm_suspended_md(md)) 3040 goto out; 3041 3042 if (dm_suspended_internally_md(md)) { 3043 /* already internally suspended, wait for internal resume */ 3044 mutex_unlock(&md->suspend_lock); 3045 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3046 if (r) 3047 return r; 3048 goto retry; 3049 } 3050 3051 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3052 if (!map || !dm_table_get_size(map)) 3053 goto out; 3054 3055 r = __dm_resume(md, map); 3056 if (r) 3057 goto out; 3058 3059 clear_bit(DMF_SUSPENDED, &md->flags); 3060 out: 3061 mutex_unlock(&md->suspend_lock); 3062 3063 return r; 3064 } 3065 3066 /* 3067 * Internal suspend/resume works like userspace-driven suspend. It waits 3068 * until all bios finish and prevents issuing new bios to the target drivers. 3069 * It may be used only from the kernel. 3070 */ 3071 3072 static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags) 3073 { 3074 struct dm_table *map = NULL; 3075 3076 lockdep_assert_held(&md->suspend_lock); 3077 3078 if (md->internal_suspend_count++) 3079 return; /* nested internal suspend */ 3080 3081 if (dm_suspended_md(md)) { 3082 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3083 return; /* nest suspend */ 3084 } 3085 3086 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3087 3088 /* 3089 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 3090 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 3091 * would require changing .presuspend to return an error -- avoid this 3092 * until there is a need for more elaborate variants of internal suspend. 3093 */ 3094 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 3095 DMF_SUSPENDED_INTERNALLY); 3096 3097 set_bit(DMF_POST_SUSPENDING, &md->flags); 3098 dm_table_postsuspend_targets(map); 3099 clear_bit(DMF_POST_SUSPENDING, &md->flags); 3100 } 3101 3102 static void __dm_internal_resume(struct mapped_device *md) 3103 { 3104 int r; 3105 struct dm_table *map; 3106 3107 BUG_ON(!md->internal_suspend_count); 3108 3109 if (--md->internal_suspend_count) 3110 return; /* resume from nested internal suspend */ 3111 3112 if (dm_suspended_md(md)) 3113 goto done; /* resume from nested suspend */ 3114 3115 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3116 r = __dm_resume(md, map); 3117 if (r) { 3118 /* 3119 * If a preresume method of some target failed, we are in a 3120 * tricky situation. We can't return an error to the caller. We 3121 * can't fake success because then the "resume" and 3122 * "postsuspend" methods would not be paired correctly, and it 3123 * would break various targets, for example it would cause list 3124 * corruption in the "origin" target. 3125 * 3126 * So, we fake normal suspend here, to make sure that the 3127 * "resume" and "postsuspend" methods will be paired correctly. 
3128 */ 3129 DMERR("Preresume method failed: %d", r); 3130 set_bit(DMF_SUSPENDED, &md->flags); 3131 } 3132 done: 3133 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3134 smp_mb__after_atomic(); 3135 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 3136 } 3137 3138 void dm_internal_suspend_noflush(struct mapped_device *md) 3139 { 3140 mutex_lock(&md->suspend_lock); 3141 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 3142 mutex_unlock(&md->suspend_lock); 3143 } 3144 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 3145 3146 void dm_internal_resume(struct mapped_device *md) 3147 { 3148 mutex_lock(&md->suspend_lock); 3149 __dm_internal_resume(md); 3150 mutex_unlock(&md->suspend_lock); 3151 } 3152 EXPORT_SYMBOL_GPL(dm_internal_resume); 3153 3154 /* 3155 * Fast variants of internal suspend/resume hold md->suspend_lock, 3156 * which prevents interaction with userspace-driven suspend. 3157 */ 3158 3159 void dm_internal_suspend_fast(struct mapped_device *md) 3160 { 3161 mutex_lock(&md->suspend_lock); 3162 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3163 return; 3164 3165 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3166 synchronize_srcu(&md->io_barrier); 3167 flush_workqueue(md->wq); 3168 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3169 } 3170 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 3171 3172 void dm_internal_resume_fast(struct mapped_device *md) 3173 { 3174 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3175 goto done; 3176 3177 dm_queue_flush(md); 3178 3179 done: 3180 mutex_unlock(&md->suspend_lock); 3181 } 3182 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 3183 3184 /* 3185 *--------------------------------------------------------------- 3186 * Event notification. 3187 *--------------------------------------------------------------- 3188 */ 3189 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 3190 unsigned int cookie, bool need_resize_uevent) 3191 { 3192 int r; 3193 unsigned int noio_flag; 3194 char udev_cookie[DM_COOKIE_LENGTH]; 3195 char *envp[3] = { NULL, NULL, NULL }; 3196 char **envpp = envp; 3197 if (cookie) { 3198 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 3199 DM_COOKIE_ENV_VAR_NAME, cookie); 3200 *envpp++ = udev_cookie; 3201 } 3202 if (need_resize_uevent) { 3203 *envpp++ = "RESIZE=1"; 3204 } 3205 3206 noio_flag = memalloc_noio_save(); 3207 3208 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); 3209 3210 memalloc_noio_restore(noio_flag); 3211 3212 return r; 3213 } 3214 3215 uint32_t dm_next_uevent_seq(struct mapped_device *md) 3216 { 3217 return atomic_add_return(1, &md->uevent_seq); 3218 } 3219 3220 uint32_t dm_get_event_nr(struct mapped_device *md) 3221 { 3222 return atomic_read(&md->event_nr); 3223 } 3224 3225 int dm_wait_event(struct mapped_device *md, int event_nr) 3226 { 3227 return wait_event_interruptible(md->eventq, 3228 (event_nr != atomic_read(&md->event_nr))); 3229 } 3230 3231 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 3232 { 3233 unsigned long flags; 3234 3235 spin_lock_irqsave(&md->uevent_lock, flags); 3236 list_add(elist, &md->uevent_list); 3237 spin_unlock_irqrestore(&md->uevent_lock, flags); 3238 } 3239 3240 /* 3241 * The gendisk is only valid as long as you have a reference 3242 * count on 'md'. 
3243 */ 3244 struct gendisk *dm_disk(struct mapped_device *md) 3245 { 3246 return md->disk; 3247 } 3248 EXPORT_SYMBOL_GPL(dm_disk); 3249 3250 struct kobject *dm_kobject(struct mapped_device *md) 3251 { 3252 return &md->kobj_holder.kobj; 3253 } 3254 3255 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3256 { 3257 struct mapped_device *md; 3258 3259 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3260 3261 spin_lock(&_minor_lock); 3262 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 3263 md = NULL; 3264 goto out; 3265 } 3266 dm_get(md); 3267 out: 3268 spin_unlock(&_minor_lock); 3269 3270 return md; 3271 } 3272 3273 int dm_suspended_md(struct mapped_device *md) 3274 { 3275 return test_bit(DMF_SUSPENDED, &md->flags); 3276 } 3277 3278 static int dm_post_suspending_md(struct mapped_device *md) 3279 { 3280 return test_bit(DMF_POST_SUSPENDING, &md->flags); 3281 } 3282 3283 int dm_suspended_internally_md(struct mapped_device *md) 3284 { 3285 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3286 } 3287 3288 int dm_test_deferred_remove_flag(struct mapped_device *md) 3289 { 3290 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 3291 } 3292 3293 int dm_suspended(struct dm_target *ti) 3294 { 3295 return dm_suspended_md(ti->table->md); 3296 } 3297 EXPORT_SYMBOL_GPL(dm_suspended); 3298 3299 int dm_post_suspending(struct dm_target *ti) 3300 { 3301 return dm_post_suspending_md(ti->table->md); 3302 } 3303 EXPORT_SYMBOL_GPL(dm_post_suspending); 3304 3305 int dm_noflush_suspending(struct dm_target *ti) 3306 { 3307 return __noflush_suspending(ti->table->md); 3308 } 3309 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 3310 3311 void dm_free_md_mempools(struct dm_md_mempools *pools) 3312 { 3313 if (!pools) 3314 return; 3315 3316 bioset_exit(&pools->bs); 3317 bioset_exit(&pools->io_bs); 3318 3319 kfree(pools); 3320 } 3321 3322 struct dm_pr { 3323 u64 old_key; 3324 u64 new_key; 3325 u32 flags; 3326 bool abort; 3327 bool fail_early; 3328 int ret; 3329 enum pr_type type; 3330 struct pr_keys *read_keys; 3331 struct pr_held_reservation *rsv; 3332 }; 3333 3334 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 3335 struct dm_pr *pr) 3336 { 3337 struct mapped_device *md = bdev->bd_disk->private_data; 3338 struct dm_table *table; 3339 struct dm_target *ti; 3340 int ret = -ENOTTY, srcu_idx; 3341 3342 table = dm_get_live_table(md, &srcu_idx); 3343 if (!table || !dm_table_get_size(table)) 3344 goto out; 3345 3346 /* We only support devices that have a single target */ 3347 if (table->num_targets != 1) 3348 goto out; 3349 ti = dm_table_get_target(table, 0); 3350 3351 if (dm_suspended_md(md)) { 3352 ret = -EAGAIN; 3353 goto out; 3354 } 3355 3356 ret = -EINVAL; 3357 if (!ti->type->iterate_devices) 3358 goto out; 3359 3360 ti->type->iterate_devices(ti, fn, pr); 3361 ret = 0; 3362 out: 3363 dm_put_live_table(md, srcu_idx); 3364 return ret; 3365 } 3366 3367 /* 3368 * For register / unregister we need to manually call out to every path. 
3369 */ 3370 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 3371 sector_t start, sector_t len, void *data) 3372 { 3373 struct dm_pr *pr = data; 3374 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3375 int ret; 3376 3377 if (!ops || !ops->pr_register) { 3378 pr->ret = -EOPNOTSUPP; 3379 return -1; 3380 } 3381 3382 ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 3383 if (!ret) 3384 return 0; 3385 3386 if (!pr->ret) 3387 pr->ret = ret; 3388 3389 if (pr->fail_early) 3390 return -1; 3391 3392 return 0; 3393 } 3394 3395 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 3396 u32 flags) 3397 { 3398 struct dm_pr pr = { 3399 .old_key = old_key, 3400 .new_key = new_key, 3401 .flags = flags, 3402 .fail_early = true, 3403 .ret = 0, 3404 }; 3405 int ret; 3406 3407 ret = dm_call_pr(bdev, __dm_pr_register, &pr); 3408 if (ret) { 3409 /* Didn't even get to register a path */ 3410 return ret; 3411 } 3412 3413 if (!pr.ret) 3414 return 0; 3415 ret = pr.ret; 3416 3417 if (!new_key) 3418 return ret; 3419 3420 /* unregister all paths if we failed to register any path */ 3421 pr.old_key = new_key; 3422 pr.new_key = 0; 3423 pr.flags = 0; 3424 pr.fail_early = false; 3425 (void) dm_call_pr(bdev, __dm_pr_register, &pr); 3426 return ret; 3427 } 3428 3429 3430 static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev, 3431 sector_t start, sector_t len, void *data) 3432 { 3433 struct dm_pr *pr = data; 3434 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3435 3436 if (!ops || !ops->pr_reserve) { 3437 pr->ret = -EOPNOTSUPP; 3438 return -1; 3439 } 3440 3441 pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags); 3442 if (!pr->ret) 3443 return -1; 3444 3445 return 0; 3446 } 3447 3448 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 3449 u32 flags) 3450 { 3451 struct dm_pr pr = { 3452 .old_key = key, 3453 .flags = flags, 3454 .type = type, 3455 .fail_early = false, 3456 .ret = 0, 3457 }; 3458 int ret; 3459 3460 ret = dm_call_pr(bdev, __dm_pr_reserve, &pr); 3461 if (ret) 3462 return ret; 3463 3464 return pr.ret; 3465 } 3466 3467 /* 3468 * If there is a non-All Registrants type of reservation, the release must be 3469 * sent down the holding path. For the cases where there is no reservation or 3470 * the path is not the holder the device will also return success, so we must 3471 * try each path to make sure we got the correct path. 
3472 */ 3473 static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev, 3474 sector_t start, sector_t len, void *data) 3475 { 3476 struct dm_pr *pr = data; 3477 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3478 3479 if (!ops || !ops->pr_release) { 3480 pr->ret = -EOPNOTSUPP; 3481 return -1; 3482 } 3483 3484 pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type); 3485 if (pr->ret) 3486 return -1; 3487 3488 return 0; 3489 } 3490 3491 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 3492 { 3493 struct dm_pr pr = { 3494 .old_key = key, 3495 .type = type, 3496 .fail_early = false, 3497 }; 3498 int ret; 3499 3500 ret = dm_call_pr(bdev, __dm_pr_release, &pr); 3501 if (ret) 3502 return ret; 3503 3504 return pr.ret; 3505 } 3506 3507 static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev, 3508 sector_t start, sector_t len, void *data) 3509 { 3510 struct dm_pr *pr = data; 3511 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3512 3513 if (!ops || !ops->pr_preempt) { 3514 pr->ret = -EOPNOTSUPP; 3515 return -1; 3516 } 3517 3518 pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type, 3519 pr->abort); 3520 if (!pr->ret) 3521 return -1; 3522 3523 return 0; 3524 } 3525 3526 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 3527 enum pr_type type, bool abort) 3528 { 3529 struct dm_pr pr = { 3530 .new_key = new_key, 3531 .old_key = old_key, 3532 .type = type, 3533 .fail_early = false, 3534 }; 3535 int ret; 3536 3537 ret = dm_call_pr(bdev, __dm_pr_preempt, &pr); 3538 if (ret) 3539 return ret; 3540 3541 return pr.ret; 3542 } 3543 3544 static int dm_pr_clear(struct block_device *bdev, u64 key) 3545 { 3546 struct mapped_device *md = bdev->bd_disk->private_data; 3547 const struct pr_ops *ops; 3548 int r, srcu_idx; 3549 3550 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3551 if (r < 0) 3552 goto out; 3553 3554 ops = bdev->bd_disk->fops->pr_ops; 3555 if (ops && ops->pr_clear) 3556 r = ops->pr_clear(bdev, key); 3557 else 3558 r = -EOPNOTSUPP; 3559 out: 3560 dm_unprepare_ioctl(md, srcu_idx); 3561 return r; 3562 } 3563 3564 static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev, 3565 sector_t start, sector_t len, void *data) 3566 { 3567 struct dm_pr *pr = data; 3568 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3569 3570 if (!ops || !ops->pr_read_keys) { 3571 pr->ret = -EOPNOTSUPP; 3572 return -1; 3573 } 3574 3575 pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys); 3576 if (!pr->ret) 3577 return -1; 3578 3579 return 0; 3580 } 3581 3582 static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys) 3583 { 3584 struct dm_pr pr = { 3585 .read_keys = keys, 3586 }; 3587 int ret; 3588 3589 ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr); 3590 if (ret) 3591 return ret; 3592 3593 return pr.ret; 3594 } 3595 3596 static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev, 3597 sector_t start, sector_t len, void *data) 3598 { 3599 struct dm_pr *pr = data; 3600 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3601 3602 if (!ops || !ops->pr_read_reservation) { 3603 pr->ret = -EOPNOTSUPP; 3604 return -1; 3605 } 3606 3607 pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv); 3608 if (!pr->ret) 3609 return -1; 3610 3611 return 0; 3612 } 3613 3614 static int dm_pr_read_reservation(struct block_device *bdev, 3615 struct pr_held_reservation *rsv) 3616 { 3617 struct dm_pr pr = { 3618 .rsv = rsv, 3619 }; 3620 int ret; 3621 3622 ret = 
dm_call_pr(bdev, __dm_pr_read_reservation, &pr); 3623 if (ret) 3624 return ret; 3625 3626 return pr.ret; 3627 } 3628 3629 static const struct pr_ops dm_pr_ops = { 3630 .pr_register = dm_pr_register, 3631 .pr_reserve = dm_pr_reserve, 3632 .pr_release = dm_pr_release, 3633 .pr_preempt = dm_pr_preempt, 3634 .pr_clear = dm_pr_clear, 3635 .pr_read_keys = dm_pr_read_keys, 3636 .pr_read_reservation = dm_pr_read_reservation, 3637 }; 3638 3639 static const struct block_device_operations dm_blk_dops = { 3640 .submit_bio = dm_submit_bio, 3641 .poll_bio = dm_poll_bio, 3642 .open = dm_blk_open, 3643 .release = dm_blk_close, 3644 .ioctl = dm_blk_ioctl, 3645 .getgeo = dm_blk_getgeo, 3646 .report_zones = dm_blk_report_zones, 3647 .pr_ops = &dm_pr_ops, 3648 .owner = THIS_MODULE 3649 }; 3650 3651 static const struct block_device_operations dm_rq_blk_dops = { 3652 .open = dm_blk_open, 3653 .release = dm_blk_close, 3654 .ioctl = dm_blk_ioctl, 3655 .getgeo = dm_blk_getgeo, 3656 .pr_ops = &dm_pr_ops, 3657 .owner = THIS_MODULE 3658 }; 3659 3660 static const struct dax_operations dm_dax_ops = { 3661 .direct_access = dm_dax_direct_access, 3662 .zero_page_range = dm_dax_zero_page_range, 3663 .recovery_write = dm_dax_recovery_write, 3664 }; 3665 3666 /* 3667 * module hooks 3668 */ 3669 module_init(dm_init); 3670 module_exit(dm_exit); 3671 3672 module_param(major, uint, 0); 3673 MODULE_PARM_DESC(major, "The major number of the device mapper"); 3674 3675 module_param(reserved_bio_based_ios, uint, 0644); 3676 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3677 3678 module_param(dm_numa_node, int, 0644); 3679 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3680 3681 module_param(swap_bios, int, 0644); 3682 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); 3683 3684 MODULE_DESCRIPTION(DM_NAME " driver"); 3685 MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>"); 3686 MODULE_LICENSE("GPL"); 3687
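/*
 * Example, with hypothetical values: when built as a module, the writable
 * parameters above can be set at load time, e.g.
 *
 *	modprobe dm_mod reserved_bio_based_ios=32 dm_numa_node=0
 *
 * or adjusted later through /sys/module/dm_mod/parameters/.
 */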