// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/bio-integrity.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

/*
 * For a REQ_POLLED fs bio, this flag is set if the mapped underlying dm_io
 * structures are linked into one list, with bio->bi_private reused as the
 * list head.  The original ->bi_private is restored before this fs bio is
 * ended.
 */
#define REQ_DM_POLL_LIST REQ_DRV

static const char *_name = DM_NAME;

static unsigned int major;
static unsigned int _major;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);

/*
 * One of these is allocated (on-stack) per original bio.
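 * It is declared on the stack of dm_split_and_process_bio() and is only
 * valid for the duration of that call.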
82 */ 83 struct clone_info { 84 struct dm_table *map; 85 struct bio *bio; 86 struct dm_io *io; 87 sector_t sector; 88 unsigned int sector_count; 89 bool is_abnormal_io:1; 90 bool submit_as_polled:1; 91 }; 92 93 static inline struct dm_target_io *clone_to_tio(struct bio *clone) 94 { 95 return container_of(clone, struct dm_target_io, clone); 96 } 97 98 void *dm_per_bio_data(struct bio *bio, size_t data_size) 99 { 100 if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO)) 101 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; 102 return (char *)bio - DM_IO_BIO_OFFSET - data_size; 103 } 104 EXPORT_SYMBOL_GPL(dm_per_bio_data); 105 106 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) 107 { 108 struct dm_io *io = (struct dm_io *)((char *)data + data_size); 109 110 if (io->magic == DM_IO_MAGIC) 111 return (struct bio *)((char *)io + DM_IO_BIO_OFFSET); 112 BUG_ON(io->magic != DM_TIO_MAGIC); 113 return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET); 114 } 115 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); 116 117 unsigned int dm_bio_get_target_bio_nr(const struct bio *bio) 118 { 119 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; 120 } 121 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); 122 123 #define MINOR_ALLOCED ((void *)-1) 124 125 #define DM_NUMA_NODE NUMA_NO_NODE 126 static int dm_numa_node = DM_NUMA_NODE; 127 128 #define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) 129 static int swap_bios = DEFAULT_SWAP_BIOS; 130 static int get_swap_bios(void) 131 { 132 int latch = READ_ONCE(swap_bios); 133 134 if (unlikely(latch <= 0)) 135 latch = DEFAULT_SWAP_BIOS; 136 return latch; 137 } 138 139 struct table_device { 140 struct list_head list; 141 refcount_t count; 142 struct dm_dev dm_dev; 143 }; 144 145 /* 146 * Bio-based DM's mempools' reserved IOs set by the user. 
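 * A value of 0 falls back to the default of RESERVED_BIO_BASED_IOS and
 * anything above DM_RESERVED_MAX_IOS is clamped; see
 * dm_get_reserved_bio_based_ios() and __dm_get_module_param() below.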
147 */ 148 #define RESERVED_BIO_BASED_IOS 16 149 static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 150 151 static int __dm_get_module_param_int(int *module_param, int min, int max) 152 { 153 int param = READ_ONCE(*module_param); 154 int modified_param = 0; 155 bool modified = true; 156 157 if (param < min) 158 modified_param = min; 159 else if (param > max) 160 modified_param = max; 161 else 162 modified = false; 163 164 if (modified) { 165 (void)cmpxchg(module_param, param, modified_param); 166 param = modified_param; 167 } 168 169 return param; 170 } 171 172 unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max) 173 { 174 unsigned int param = READ_ONCE(*module_param); 175 unsigned int modified_param = 0; 176 177 if (!param) 178 modified_param = def; 179 else if (param > max) 180 modified_param = max; 181 182 if (modified_param) { 183 (void)cmpxchg(module_param, param, modified_param); 184 param = modified_param; 185 } 186 187 return param; 188 } 189 190 unsigned int dm_get_reserved_bio_based_ios(void) 191 { 192 return __dm_get_module_param(&reserved_bio_based_ios, 193 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); 194 } 195 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 196 197 static unsigned int dm_get_numa_node(void) 198 { 199 return __dm_get_module_param_int(&dm_numa_node, 200 DM_NUMA_NODE, num_online_nodes() - 1); 201 } 202 203 static int __init local_init(void) 204 { 205 int r; 206 207 r = dm_uevent_init(); 208 if (r) 209 return r; 210 211 deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0); 212 if (!deferred_remove_workqueue) { 213 r = -ENOMEM; 214 goto out_uevent_exit; 215 } 216 217 _major = major; 218 r = register_blkdev(_major, _name); 219 if (r < 0) 220 goto out_free_workqueue; 221 222 if (!_major) 223 _major = r; 224 225 return 0; 226 227 out_free_workqueue: 228 destroy_workqueue(deferred_remove_workqueue); 229 out_uevent_exit: 230 dm_uevent_exit(); 231 232 return r; 233 } 234 235 static void local_exit(void) 236 { 237 destroy_workqueue(deferred_remove_workqueue); 238 239 unregister_blkdev(_major, _name); 240 dm_uevent_exit(); 241 242 _major = 0; 243 244 DMINFO("cleaned up"); 245 } 246 247 static int (*_inits[])(void) __initdata = { 248 local_init, 249 dm_target_init, 250 dm_linear_init, 251 dm_stripe_init, 252 dm_io_init, 253 dm_kcopyd_init, 254 dm_interface_init, 255 dm_statistics_init, 256 }; 257 258 static void (*_exits[])(void) = { 259 local_exit, 260 dm_target_exit, 261 dm_linear_exit, 262 dm_stripe_exit, 263 dm_io_exit, 264 dm_kcopyd_exit, 265 dm_interface_exit, 266 dm_statistics_exit, 267 }; 268 269 static int __init dm_init(void) 270 { 271 const int count = ARRAY_SIZE(_inits); 272 int r, i; 273 274 #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) 275 DMINFO("CONFIG_IMA_DISABLE_HTABLE is disabled." 276 " Duplicate IMA measurements will not be recorded in the IMA log."); 277 #endif 278 279 for (i = 0; i < count; i++) { 280 r = _inits[i](); 281 if (r) 282 goto bad; 283 } 284 285 return 0; 286 bad: 287 while (i--) 288 _exits[i](); 289 290 return r; 291 } 292 293 static void __exit dm_exit(void) 294 { 295 int i = ARRAY_SIZE(_exits); 296 297 while (i--) 298 _exits[i](); 299 300 /* 301 * Should be empty by this point. 
302 */ 303 idr_destroy(&_minor_idr); 304 } 305 306 /* 307 * Block device functions 308 */ 309 int dm_deleting_md(struct mapped_device *md) 310 { 311 return test_bit(DMF_DELETING, &md->flags); 312 } 313 314 static int dm_blk_open(struct gendisk *disk, blk_mode_t mode) 315 { 316 struct mapped_device *md; 317 318 spin_lock(&_minor_lock); 319 320 md = disk->private_data; 321 if (!md) 322 goto out; 323 324 if (test_bit(DMF_FREEING, &md->flags) || 325 dm_deleting_md(md)) { 326 md = NULL; 327 goto out; 328 } 329 330 dm_get(md); 331 atomic_inc(&md->open_count); 332 out: 333 spin_unlock(&_minor_lock); 334 335 return md ? 0 : -ENXIO; 336 } 337 338 static void dm_blk_close(struct gendisk *disk) 339 { 340 struct mapped_device *md; 341 342 spin_lock(&_minor_lock); 343 344 md = disk->private_data; 345 if (WARN_ON(!md)) 346 goto out; 347 348 if (atomic_dec_and_test(&md->open_count) && 349 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 350 queue_work(deferred_remove_workqueue, &deferred_remove_work); 351 352 dm_put(md); 353 out: 354 spin_unlock(&_minor_lock); 355 } 356 357 int dm_open_count(struct mapped_device *md) 358 { 359 return atomic_read(&md->open_count); 360 } 361 362 /* 363 * Guarantees nothing is using the device before it's deleted. 364 */ 365 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 366 { 367 int r = 0; 368 369 spin_lock(&_minor_lock); 370 371 if (dm_open_count(md)) { 372 r = -EBUSY; 373 if (mark_deferred) 374 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 375 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 376 r = -EEXIST; 377 else 378 set_bit(DMF_DELETING, &md->flags); 379 380 spin_unlock(&_minor_lock); 381 382 return r; 383 } 384 385 int dm_cancel_deferred_remove(struct mapped_device *md) 386 { 387 int r = 0; 388 389 spin_lock(&_minor_lock); 390 391 if (test_bit(DMF_DELETING, &md->flags)) 392 r = -EBUSY; 393 else 394 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 395 396 spin_unlock(&_minor_lock); 397 398 return r; 399 } 400 401 static void do_deferred_remove(struct work_struct *w) 402 { 403 dm_deferred_remove(); 404 } 405 406 static int dm_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo) 407 { 408 struct mapped_device *md = disk->private_data; 409 410 return dm_get_geometry(md, geo); 411 } 412 413 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, 414 struct block_device **bdev, unsigned int cmd, 415 unsigned long arg, bool *forward) 416 { 417 struct dm_target *ti; 418 struct dm_table *map; 419 int r; 420 421 retry: 422 r = -ENOTTY; 423 map = dm_get_live_table(md, srcu_idx); 424 if (!map || !dm_table_get_size(map)) 425 return r; 426 427 /* We only support devices that have a single target */ 428 if (map->num_targets != 1) 429 return r; 430 431 ti = dm_table_get_target(map, 0); 432 if (!ti->type->prepare_ioctl) 433 return r; 434 435 if (dm_suspended_md(md)) 436 return -EAGAIN; 437 438 r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward); 439 if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) { 440 dm_put_live_table(md, *srcu_idx); 441 fsleep(10000); 442 goto retry; 443 } 444 445 return r; 446 } 447 448 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) 449 { 450 dm_put_live_table(md, srcu_idx); 451 } 452 453 static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode, 454 unsigned int cmd, unsigned long arg) 455 { 456 struct mapped_device *md = bdev->bd_disk->private_data; 457 int r, srcu_idx; 458 bool forward = true; 459 460 r = dm_prepare_ioctl(md, 
&srcu_idx, &bdev, cmd, arg, &forward); 461 if (!forward || r < 0) 462 goto out; 463 464 if (r > 0) { 465 /* 466 * Target determined this ioctl is being issued against a 467 * subset of the parent bdev; require extra privileges. 468 */ 469 if (!capable(CAP_SYS_RAWIO)) { 470 DMDEBUG_LIMIT( 471 "%s: sending ioctl %x to DM device without required privilege.", 472 current->comm, cmd); 473 r = -ENOIOCTLCMD; 474 goto out; 475 } 476 } 477 478 if (!bdev->bd_disk->fops->ioctl) 479 r = -ENOTTY; 480 else 481 r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); 482 out: 483 dm_unprepare_ioctl(md, srcu_idx); 484 return r; 485 } 486 487 u64 dm_start_time_ns_from_clone(struct bio *bio) 488 { 489 return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time); 490 } 491 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); 492 493 static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio) 494 { 495 /* 496 * If REQ_PREFLUSH set, don't account payload, it will be 497 * submitted (and accounted) after this flush completes. 498 */ 499 if (io->requeue_flush_with_data) 500 return 0; 501 if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT))) 502 return io->sectors; 503 return bio_sectors(bio); 504 } 505 506 static void dm_io_acct(struct dm_io *io, bool end) 507 { 508 struct bio *bio = io->orig_bio; 509 510 if (dm_io_flagged(io, DM_IO_BLK_STAT)) { 511 if (!end) 512 bdev_start_io_acct(bio->bi_bdev, bio_op(bio), 513 io->start_time); 514 else 515 bdev_end_io_acct(bio->bi_bdev, bio_op(bio), 516 dm_io_sectors(io, bio), 517 io->start_time); 518 } 519 520 if (static_branch_unlikely(&stats_enabled) && 521 unlikely(dm_stats_used(&io->md->stats))) { 522 sector_t sector; 523 524 if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT))) 525 sector = bio_end_sector(bio) - io->sector_offset; 526 else 527 sector = bio->bi_iter.bi_sector; 528 529 dm_stats_account_io(&io->md->stats, bio_data_dir(bio), 530 sector, dm_io_sectors(io, bio), 531 end, io->start_time, &io->stats_aux); 532 } 533 } 534 535 static void __dm_start_io_acct(struct dm_io *io) 536 { 537 dm_io_acct(io, false); 538 } 539 540 static void dm_start_io_acct(struct dm_io *io, struct bio *clone) 541 { 542 /* 543 * Ensure IO accounting is only ever started once. 544 */ 545 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) 546 return; 547 548 /* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. 
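	 * For duplicate bios the same dm_io can reach this point more than
	 * once, which is why DM_IO_ACCOUNTED is re-checked under io->lock
	 * below.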
*/ 549 if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) { 550 dm_io_set_flag(io, DM_IO_ACCOUNTED); 551 } else { 552 unsigned long flags; 553 /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */ 554 spin_lock_irqsave(&io->lock, flags); 555 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) { 556 spin_unlock_irqrestore(&io->lock, flags); 557 return; 558 } 559 dm_io_set_flag(io, DM_IO_ACCOUNTED); 560 spin_unlock_irqrestore(&io->lock, flags); 561 } 562 563 __dm_start_io_acct(io); 564 } 565 566 static void dm_end_io_acct(struct dm_io *io) 567 { 568 dm_io_acct(io, true); 569 } 570 571 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask) 572 { 573 struct dm_io *io; 574 struct dm_target_io *tio; 575 struct bio *clone; 576 577 clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); 578 if (unlikely(!clone)) 579 return NULL; 580 tio = clone_to_tio(clone); 581 tio->flags = 0; 582 dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); 583 tio->io = NULL; 584 585 io = container_of(tio, struct dm_io, tio); 586 io->magic = DM_IO_MAGIC; 587 io->status = BLK_STS_OK; 588 io->requeue_flush_with_data = false; 589 590 /* one ref is for submission, the other is for completion */ 591 atomic_set(&io->io_count, 2); 592 this_cpu_inc(*md->pending_io); 593 io->orig_bio = bio; 594 io->md = md; 595 spin_lock_init(&io->lock); 596 io->start_time = jiffies; 597 io->flags = 0; 598 if (blk_queue_io_stat(md->queue)) 599 dm_io_set_flag(io, DM_IO_BLK_STAT); 600 601 if (static_branch_unlikely(&stats_enabled) && 602 unlikely(dm_stats_used(&md->stats))) 603 dm_stats_record_start(&md->stats, &io->stats_aux); 604 605 return io; 606 } 607 608 static void free_io(struct dm_io *io) 609 { 610 bio_put(&io->tio.clone); 611 } 612 613 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, 614 unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask) 615 { 616 struct mapped_device *md = ci->io->md; 617 struct dm_target_io *tio; 618 struct bio *clone; 619 620 if (!ci->io->tio.io) { 621 /* the dm_target_io embedded in ci->io is available */ 622 tio = &ci->io->tio; 623 /* alloc_io() already initialized embedded clone */ 624 clone = &tio->clone; 625 } else { 626 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, 627 &md->mempools->bs); 628 if (!clone) 629 return NULL; 630 631 /* REQ_DM_POLL_LIST shouldn't be inherited */ 632 clone->bi_opf &= ~REQ_DM_POLL_LIST; 633 634 tio = clone_to_tio(clone); 635 tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */ 636 } 637 638 tio->magic = DM_TIO_MAGIC; 639 tio->io = ci->io; 640 tio->ti = ti; 641 tio->target_bio_nr = target_bio_nr; 642 tio->len_ptr = len; 643 tio->old_sector = 0; 644 645 /* Set default bdev, but target must bio_set_dev() before issuing IO */ 646 clone->bi_bdev = md->disk->part0; 647 if (likely(ti != NULL) && unlikely(ti->needs_bio_set_dev)) 648 bio_set_dev(clone, md->disk->part0); 649 650 if (len) { 651 clone->bi_iter.bi_size = to_bytes(*len); 652 if (bio_integrity(clone)) 653 bio_integrity_trim(clone); 654 } 655 656 return clone; 657 } 658 659 static void free_tio(struct bio *clone) 660 { 661 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) 662 return; 663 bio_put(clone); 664 } 665 666 /* 667 * Add the bio to the list of deferred io. 
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
				   int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md,
		       int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static struct table_device *open_table_device(struct mapped_device *md,
		dev_t dev, blk_mode_t mode)
{
	struct table_device *td;
	struct file *bdev_file;
	struct block_device *bdev;
	u64 part_off;
	int r;

	td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
	if (!td)
		return ERR_PTR(-ENOMEM);
	refcount_set(&td->count, 1);

	bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
	if (IS_ERR(bdev_file)) {
		r = PTR_ERR(bdev_file);
		goto out_free_td;
	}

	bdev = file_bdev(bdev_file);

	/*
	 * We can be called before the dm disk is added.  In that case we
	 * can't register the holder relation here.  It will be done once
	 * add_disk() has been called.
	 */
	if (md->disk->slave_dir) {
		r = bd_link_disk_holder(bdev, md->disk);
		if (r)
			goto out_blkdev_put;
	}

	td->dm_dev.mode = mode;
	td->dm_dev.bdev = bdev;
	td->dm_dev.bdev_file = bdev_file;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off,
						NULL, NULL);
	format_dev_t(td->dm_dev.name, dev);
	list_add(&td->list, &md->table_devices);
	return td;

out_blkdev_put:
	__fput_sync(bdev_file);
out_free_td:
	kfree(td);
	return ERR_PTR(r);
}

/*
 * Close a table device that we've been using.
775 */ 776 static void close_table_device(struct table_device *td, struct mapped_device *md) 777 { 778 if (md->disk->slave_dir) 779 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); 780 781 /* Leverage async fput() if DMF_DEFERRED_REMOVE set */ 782 if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 783 fput(td->dm_dev.bdev_file); 784 else 785 __fput_sync(td->dm_dev.bdev_file); 786 787 put_dax(td->dm_dev.dax_dev); 788 list_del(&td->list); 789 kfree(td); 790 } 791 792 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 793 blk_mode_t mode) 794 { 795 struct table_device *td; 796 797 list_for_each_entry(td, l, list) 798 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 799 return td; 800 801 return NULL; 802 } 803 804 int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, 805 struct dm_dev **result) 806 { 807 struct table_device *td; 808 809 mutex_lock(&md->table_devices_lock); 810 td = find_table_device(&md->table_devices, dev, mode); 811 if (!td) { 812 td = open_table_device(md, dev, mode); 813 if (IS_ERR(td)) { 814 mutex_unlock(&md->table_devices_lock); 815 return PTR_ERR(td); 816 } 817 } else { 818 refcount_inc(&td->count); 819 } 820 mutex_unlock(&md->table_devices_lock); 821 822 *result = &td->dm_dev; 823 return 0; 824 } 825 826 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 827 { 828 struct table_device *td = container_of(d, struct table_device, dm_dev); 829 830 mutex_lock(&md->table_devices_lock); 831 if (refcount_dec_and_test(&td->count)) 832 close_table_device(td, md); 833 mutex_unlock(&md->table_devices_lock); 834 } 835 836 /* 837 * Get the geometry associated with a dm device 838 */ 839 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 840 { 841 *geo = md->geometry; 842 843 return 0; 844 } 845 846 /* 847 * Set the geometry of a device. 848 */ 849 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 850 { 851 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 852 853 if (geo->start > sz) { 854 DMERR("Start sector is beyond the geometry limits."); 855 return -EINVAL; 856 } 857 858 md->geometry = *geo; 859 860 return 0; 861 } 862 863 static int __noflush_suspending(struct mapped_device *md) 864 { 865 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 866 } 867 868 static void dm_requeue_add_io(struct dm_io *io, bool first_stage) 869 { 870 struct mapped_device *md = io->md; 871 872 if (first_stage) { 873 struct dm_io *next = md->requeue_list; 874 875 md->requeue_list = io; 876 io->next = next; 877 } else { 878 bio_list_add_head(&md->deferred, io->orig_bio); 879 } 880 } 881 882 static void dm_kick_requeue(struct mapped_device *md, bool first_stage) 883 { 884 if (first_stage) 885 queue_work(md->wq, &md->requeue_work); 886 else 887 queue_work(md->wq, &md->work); 888 } 889 890 /* 891 * Return true if the dm_io's original bio is requeued. 892 * io->status is updated with error if requeue disallowed. 
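 * Requeue is considered when io->status is BLK_STS_DM_REQUEUE, or when a
 * REQ_POLLED bio failed with BLK_STS_AGAIN.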
893 */ 894 static bool dm_handle_requeue(struct dm_io *io, bool first_stage) 895 { 896 struct bio *bio = io->orig_bio; 897 bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE); 898 bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) && 899 (bio->bi_opf & REQ_POLLED)); 900 struct mapped_device *md = io->md; 901 bool requeued = false; 902 903 if (handle_requeue || handle_polled_eagain) { 904 unsigned long flags; 905 906 if (bio->bi_opf & REQ_POLLED) { 907 /* 908 * Upper layer won't help us poll split bio 909 * (io->orig_bio may only reflect a subset of the 910 * pre-split original) so clear REQ_POLLED. 911 */ 912 bio_clear_polled(bio); 913 } 914 915 /* 916 * Target requested pushing back the I/O or 917 * polled IO hit BLK_STS_AGAIN. 918 */ 919 spin_lock_irqsave(&md->deferred_lock, flags); 920 if ((__noflush_suspending(md) && 921 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) || 922 handle_polled_eagain || first_stage) { 923 dm_requeue_add_io(io, first_stage); 924 requeued = true; 925 } else { 926 /* 927 * noflush suspend was interrupted or this is 928 * a write to a zoned target. 929 */ 930 io->status = BLK_STS_IOERR; 931 } 932 spin_unlock_irqrestore(&md->deferred_lock, flags); 933 } 934 935 if (requeued) 936 dm_kick_requeue(md, first_stage); 937 938 return requeued; 939 } 940 941 static void __dm_io_complete(struct dm_io *io, bool first_stage) 942 { 943 struct bio *bio = io->orig_bio; 944 struct mapped_device *md = io->md; 945 blk_status_t io_error; 946 bool requeued; 947 bool requeue_flush_with_data; 948 949 requeued = dm_handle_requeue(io, first_stage); 950 if (requeued && first_stage) 951 return; 952 953 io_error = io->status; 954 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) 955 dm_end_io_acct(io); 956 else if (!io_error) { 957 /* 958 * Must handle target that DM_MAPIO_SUBMITTED only to 959 * then bio_endio() rather than dm_submit_bio_remap() 960 */ 961 __dm_start_io_acct(io); 962 dm_end_io_acct(io); 963 } 964 requeue_flush_with_data = io->requeue_flush_with_data; 965 free_io(io); 966 smp_wmb(); 967 this_cpu_dec(*md->pending_io); 968 969 /* nudge anyone waiting on suspend queue */ 970 if (unlikely(wq_has_sleeper(&md->wait))) 971 wake_up(&md->wait); 972 973 /* Return early if the original bio was requeued */ 974 if (requeued) 975 return; 976 977 if (unlikely(requeue_flush_with_data)) { 978 /* 979 * Preflush done for flush with data, reissue 980 * without REQ_PREFLUSH. 981 */ 982 bio->bi_opf &= ~REQ_PREFLUSH; 983 queue_io(md, bio); 984 } else { 985 /* done with normal IO or empty flush */ 986 if (io_error) 987 bio->bi_status = io_error; 988 bio_endio(bio); 989 } 990 } 991 992 static void dm_wq_requeue_work(struct work_struct *work) 993 { 994 struct mapped_device *md = container_of(work, struct mapped_device, 995 requeue_work); 996 unsigned long flags; 997 struct dm_io *io; 998 999 /* reuse deferred lock to simplify dm_handle_requeue */ 1000 spin_lock_irqsave(&md->deferred_lock, flags); 1001 io = md->requeue_list; 1002 md->requeue_list = NULL; 1003 spin_unlock_irqrestore(&md->deferred_lock, flags); 1004 1005 while (io) { 1006 struct dm_io *next = io->next; 1007 1008 dm_io_rewind(io, &md->disk->bio_split); 1009 1010 io->next = NULL; 1011 __dm_io_complete(io, false); 1012 io = next; 1013 cond_resched(); 1014 } 1015 } 1016 1017 /* 1018 * Two staged requeue: 1019 * 1020 * 1) io->orig_bio points to the real original bio, and the part mapped to 1021 * this io must be requeued, instead of other parts of the original bio. 
 *
 * 2) io->orig_bio points to a new cloned bio which matches the requeued dm_io.
 */
static inline void dm_io_complete(struct dm_io *io)
{
	/*
	 * Only a dm_io that has been split needs the two-stage requeue;
	 * otherwise we may run into a long bio clone chain during suspend
	 * and OOM could be triggered.
	 *
	 * A dm_io for a flush with data also won't be marked DM_IO_WAS_SPLIT,
	 * so it is not handled via the first-stage requeue either.
	 */
	__dm_io_complete(io, dm_io_flagged(io, DM_IO_WAS_SPLIT));
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void __dm_io_dec_pending(struct dm_io *io)
{
	if (atomic_dec_and_test(&io->io_count))
		dm_io_complete(io);
}

static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	spin_lock_irqsave(&io->lock, flags);
	if (!(io->status == BLK_STS_DM_REQUEUE &&
	      __noflush_suspending(io->md))) {
		io->status = error;
	}
	spin_unlock_irqrestore(&io->lock, flags);
}

static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	if (unlikely(error))
		dm_io_set_error(io, error);

	__dm_io_dec_pending(io);
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.  Verification is _not_ imposed here, to avoid the cost
 * of an atomic_read().
 */
static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	return &md->queue->limits;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_target *ti = tio->ti;
	dm_endio_fn endio = likely(ti != NULL) ? ti->type->end_io : NULL;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(bio->bi_bdev))
			blk_queue_disable_discard(md->queue);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bdev_write_zeroes_sectors(bio->bi_bdev))
			blk_queue_disable_write_zeroes(md->queue);
	}

	if (static_branch_unlikely(&zoned_enabled) &&
	    unlikely(bdev_is_zoned(bio->bi_bdev)))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(ti, bio, &error);

		switch (r) {
		case DM_ENDIO_REQUEUE:
			if (static_branch_unlikely(&zoned_enabled)) {
				/*
				 * Requeuing writes to a sequential zone of a zoned
				 * target will break the sequential write pattern:
				 * fail such IO.
1115 */ 1116 if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) 1117 error = BLK_STS_IOERR; 1118 else 1119 error = BLK_STS_DM_REQUEUE; 1120 } else 1121 error = BLK_STS_DM_REQUEUE; 1122 fallthrough; 1123 case DM_ENDIO_DONE: 1124 break; 1125 case DM_ENDIO_INCOMPLETE: 1126 /* The target will handle the io */ 1127 return; 1128 default: 1129 DMCRIT("unimplemented target endio return value: %d", r); 1130 BUG(); 1131 } 1132 } 1133 1134 if (static_branch_unlikely(&swap_bios_enabled) && 1135 likely(ti != NULL) && unlikely(swap_bios_limit(ti, bio))) 1136 up(&md->swap_bios_semaphore); 1137 1138 free_tio(bio); 1139 dm_io_dec_pending(io, error); 1140 } 1141 1142 /* 1143 * Return maximum size of I/O possible at the supplied sector up to the current 1144 * target boundary. 1145 */ 1146 static inline sector_t max_io_len_target_boundary(struct dm_target *ti, 1147 sector_t target_offset) 1148 { 1149 return ti->len - target_offset; 1150 } 1151 1152 static sector_t __max_io_len(struct dm_target *ti, sector_t sector, 1153 unsigned int max_granularity, 1154 unsigned int max_sectors) 1155 { 1156 sector_t target_offset = dm_target_offset(ti, sector); 1157 sector_t len = max_io_len_target_boundary(ti, target_offset); 1158 1159 /* 1160 * Does the target need to split IO even further? 1161 * - varied (per target) IO splitting is a tenet of DM; this 1162 * explains why stacked chunk_sectors based splitting via 1163 * bio_split_to_limits() isn't possible here. 1164 */ 1165 if (!max_granularity) 1166 return len; 1167 return min_t(sector_t, len, 1168 min(max_sectors ? : queue_max_sectors(ti->table->md->queue), 1169 blk_boundary_sectors_left(target_offset, max_granularity))); 1170 } 1171 1172 static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) 1173 { 1174 return __max_io_len(ti, sector, ti->max_io_len, 0); 1175 } 1176 1177 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1178 { 1179 if (len > UINT_MAX) { 1180 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1181 (unsigned long long)len, UINT_MAX); 1182 ti->error = "Maximum size of target IO is too large"; 1183 return -EINVAL; 1184 } 1185 1186 ti->max_io_len = (uint32_t) len; 1187 1188 return 0; 1189 } 1190 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1191 1192 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, 1193 sector_t sector, int *srcu_idx) 1194 __acquires(md->io_barrier) 1195 { 1196 struct dm_table *map; 1197 struct dm_target *ti; 1198 1199 map = dm_get_live_table(md, srcu_idx); 1200 if (!map) 1201 return NULL; 1202 1203 ti = dm_table_find_target(map, sector); 1204 if (!ti) 1205 return NULL; 1206 1207 return ti; 1208 } 1209 1210 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 1211 long nr_pages, enum dax_access_mode mode, void **kaddr, 1212 unsigned long *pfn) 1213 { 1214 struct mapped_device *md = dax_get_private(dax_dev); 1215 sector_t sector = pgoff * PAGE_SECTORS; 1216 struct dm_target *ti; 1217 long len, ret = -EIO; 1218 int srcu_idx; 1219 1220 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1221 1222 if (!ti) 1223 goto out; 1224 if (!ti->type->direct_access) 1225 goto out; 1226 len = max_io_len(ti, sector) / PAGE_SECTORS; 1227 if (len < 1) 1228 goto out; 1229 nr_pages = min(len, nr_pages); 1230 ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn); 1231 1232 out: 1233 dm_put_live_table(md, srcu_idx); 1234 1235 return ret; 1236 } 1237 1238 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, 1239 size_t nr_pages) 
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation.  If we
		 * are here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int srcu_idx;
	long ret = 0;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti || !ti->type->dax_recovery_write)
		goto out;

	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated
 * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced
 * by __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs DM core that the target only wants to process
 * n_sectors additional sectors of the bio, and that the rest of the data
 * should be sent in a subsequent bio.
 *
 * A diagram that explains the arithmetic:
 *
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <----- bio_sectors ----->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or a similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
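 *
 * For illustration only (a hypothetical target, not code in this file):
 * a ->map method that can process at most some per-target number of sectors
 * at a time might use it like this:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned int max = example_max_sectors(ti);
 *
 *		if (bio_sectors(bio) > max)
 *			dm_accept_partial_bio(bio, max);
 *		... remap the (possibly shrunk) bio as usual ...
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * where example_map() and example_max_sectors() are made-up names.  DM core
 * resubmits the remainder (region 3 above) as a new bio, which will be
 * mapped again.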
1314 */ 1315 void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors) 1316 { 1317 struct dm_target_io *tio = clone_to_tio(bio); 1318 struct dm_io *io = tio->io; 1319 unsigned int bio_sectors = bio_sectors(bio); 1320 1321 BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); 1322 BUG_ON(bio_sectors > *tio->len_ptr); 1323 BUG_ON(n_sectors > bio_sectors); 1324 BUG_ON(bio->bi_opf & REQ_ATOMIC); 1325 1326 if (static_branch_unlikely(&zoned_enabled) && 1327 unlikely(bdev_is_zoned(bio->bi_bdev))) { 1328 enum req_op op = bio_op(bio); 1329 1330 BUG_ON(op_is_zone_mgmt(op)); 1331 BUG_ON(op == REQ_OP_WRITE); 1332 BUG_ON(op == REQ_OP_WRITE_ZEROES); 1333 BUG_ON(op == REQ_OP_ZONE_APPEND); 1334 } 1335 1336 *tio->len_ptr -= bio_sectors - n_sectors; 1337 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1338 1339 /* 1340 * __split_and_process_bio() may have already saved mapped part 1341 * for accounting but it is being reduced so update accordingly. 1342 */ 1343 dm_io_set_flag(io, DM_IO_WAS_SPLIT); 1344 io->sectors = n_sectors; 1345 io->sector_offset = bio_sectors(io->orig_bio); 1346 } 1347 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1348 1349 /* 1350 * @clone: clone bio that DM core passed to target's .map function 1351 * @tgt_clone: clone of @clone bio that target needs submitted 1352 * 1353 * Targets should use this interface to submit bios they take 1354 * ownership of when returning DM_MAPIO_SUBMITTED. 1355 * 1356 * Target should also enable ti->accounts_remapped_io 1357 */ 1358 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) 1359 { 1360 struct dm_target_io *tio = clone_to_tio(clone); 1361 struct dm_io *io = tio->io; 1362 1363 /* establish bio that will get submitted */ 1364 if (!tgt_clone) 1365 tgt_clone = clone; 1366 1367 /* 1368 * Account io->origin_bio to DM dev on behalf of target 1369 * that took ownership of IO with DM_MAPIO_SUBMITTED. 1370 */ 1371 dm_start_io_acct(io, clone); 1372 1373 trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk), 1374 tio->old_sector); 1375 submit_bio_noacct(tgt_clone); 1376 } 1377 EXPORT_SYMBOL_GPL(dm_submit_bio_remap); 1378 1379 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) 1380 { 1381 mutex_lock(&md->swap_bios_lock); 1382 while (latch < md->swap_bios) { 1383 cond_resched(); 1384 down(&md->swap_bios_semaphore); 1385 md->swap_bios--; 1386 } 1387 while (latch > md->swap_bios) { 1388 cond_resched(); 1389 up(&md->swap_bios_semaphore); 1390 md->swap_bios++; 1391 } 1392 mutex_unlock(&md->swap_bios_lock); 1393 } 1394 1395 static void __map_bio(struct bio *clone) 1396 { 1397 struct dm_target_io *tio = clone_to_tio(clone); 1398 struct dm_target *ti = tio->ti; 1399 struct dm_io *io = tio->io; 1400 struct mapped_device *md = io->md; 1401 int r; 1402 1403 clone->bi_end_io = clone_endio; 1404 1405 /* 1406 * Map the clone. 
1407 */ 1408 tio->old_sector = clone->bi_iter.bi_sector; 1409 1410 if (static_branch_unlikely(&swap_bios_enabled) && 1411 unlikely(swap_bios_limit(ti, clone))) { 1412 int latch = get_swap_bios(); 1413 1414 if (unlikely(latch != md->swap_bios)) 1415 __set_swap_bios_limit(md, latch); 1416 down(&md->swap_bios_semaphore); 1417 } 1418 1419 if (likely(ti->type->map == linear_map)) 1420 r = linear_map(ti, clone); 1421 else if (ti->type->map == stripe_map) 1422 r = stripe_map(ti, clone); 1423 else 1424 r = ti->type->map(ti, clone); 1425 1426 switch (r) { 1427 case DM_MAPIO_SUBMITTED: 1428 /* target has assumed ownership of this io */ 1429 if (!ti->accounts_remapped_io) 1430 dm_start_io_acct(io, clone); 1431 break; 1432 case DM_MAPIO_REMAPPED: 1433 dm_submit_bio_remap(clone, NULL); 1434 break; 1435 case DM_MAPIO_KILL: 1436 case DM_MAPIO_REQUEUE: 1437 if (static_branch_unlikely(&swap_bios_enabled) && 1438 unlikely(swap_bios_limit(ti, clone))) 1439 up(&md->swap_bios_semaphore); 1440 free_tio(clone); 1441 if (r == DM_MAPIO_KILL) 1442 dm_io_dec_pending(io, BLK_STS_IOERR); 1443 else 1444 dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); 1445 break; 1446 default: 1447 DMCRIT("unimplemented target map return value: %d", r); 1448 BUG(); 1449 } 1450 } 1451 1452 static void setup_split_accounting(struct clone_info *ci, unsigned int len) 1453 { 1454 struct dm_io *io = ci->io; 1455 1456 if (ci->sector_count > len) { 1457 /* 1458 * Split needed, save the mapped part for accounting. 1459 * NOTE: dm_accept_partial_bio() will update accordingly. 1460 */ 1461 dm_io_set_flag(io, DM_IO_WAS_SPLIT); 1462 io->sectors = len; 1463 io->sector_offset = bio_sectors(ci->bio); 1464 } 1465 } 1466 1467 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1468 struct dm_target *ti, unsigned int num_bios, 1469 unsigned *len) 1470 { 1471 struct bio *bio; 1472 int try; 1473 1474 for (try = 0; try < 2; try++) { 1475 int bio_nr; 1476 1477 if (try && num_bios > 1) 1478 mutex_lock(&ci->io->md->table_devices_lock); 1479 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1480 bio = alloc_tio(ci, ti, bio_nr, len, 1481 try ? GFP_NOIO : GFP_NOWAIT); 1482 if (!bio) 1483 break; 1484 1485 bio_list_add(blist, bio); 1486 } 1487 if (try && num_bios > 1) 1488 mutex_unlock(&ci->io->md->table_devices_lock); 1489 if (bio_nr == num_bios) 1490 return; 1491 1492 while ((bio = bio_list_pop(blist))) 1493 free_tio(bio); 1494 } 1495 } 1496 1497 static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1498 unsigned int num_bios, unsigned int *len) 1499 { 1500 struct bio_list blist = BIO_EMPTY_LIST; 1501 struct bio *clone; 1502 unsigned int ret = 0; 1503 1504 if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */ 1505 return 0; 1506 1507 /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ 1508 if (len) 1509 setup_split_accounting(ci, *len); 1510 1511 /* 1512 * Using alloc_multiple_bios(), even if num_bios is 1, to consistently 1513 * support allocating using GFP_NOWAIT with GFP_NOIO fallback. 
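	 * (alloc_multiple_bios() first tries GFP_NOWAIT for the whole batch
	 * and, if any allocation fails, frees what was allocated and retries
	 * with GFP_NOIO, holding md->table_devices_lock when more than one
	 * bio is needed.)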
1514 */ 1515 alloc_multiple_bios(&blist, ci, ti, num_bios, len); 1516 while ((clone = bio_list_pop(&blist))) { 1517 if (num_bios > 1) 1518 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); 1519 __map_bio(clone); 1520 ret += 1; 1521 } 1522 1523 return ret; 1524 } 1525 1526 static void __send_empty_flush(struct clone_info *ci) 1527 { 1528 struct dm_table *t = ci->map; 1529 struct bio flush_bio; 1530 blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1531 1532 if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) == 1533 (REQ_IDLE | REQ_SYNC)) 1534 opf |= REQ_IDLE; 1535 1536 /* 1537 * Use an on-stack bio for this, it's safe since we don't 1538 * need to reference it after submit. It's just used as 1539 * the basis for the clone(s). 1540 */ 1541 bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf); 1542 1543 ci->bio = &flush_bio; 1544 ci->sector_count = 0; 1545 ci->io->tio.clone.bi_iter.bi_size = 0; 1546 1547 if (!t->flush_bypasses_map) { 1548 for (unsigned int i = 0; i < t->num_targets; i++) { 1549 unsigned int bios; 1550 struct dm_target *ti = dm_table_get_target(t, i); 1551 1552 if (unlikely(ti->num_flush_bios == 0)) 1553 continue; 1554 1555 atomic_add(ti->num_flush_bios, &ci->io->io_count); 1556 bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, 1557 NULL); 1558 atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count); 1559 } 1560 } else { 1561 /* 1562 * Note that there's no need to grab t->devices_lock here 1563 * because the targets that support flush optimization don't 1564 * modify the list of devices. 1565 */ 1566 struct list_head *devices = dm_table_get_devices(t); 1567 unsigned int len = 0; 1568 struct dm_dev_internal *dd; 1569 list_for_each_entry(dd, devices, list) { 1570 struct bio *clone; 1571 /* 1572 * Note that the structure dm_target_io is not 1573 * associated with any target (because the device may be 1574 * used by multiple targets), so we set tio->ti = NULL. 1575 * We must check for NULL in the I/O processing path, to 1576 * avoid NULL pointer dereference. 
1577 */ 1578 clone = alloc_tio(ci, NULL, 0, &len, GFP_NOIO); 1579 atomic_add(1, &ci->io->io_count); 1580 bio_set_dev(clone, dd->dm_dev->bdev); 1581 clone->bi_end_io = clone_endio; 1582 dm_submit_bio_remap(clone, NULL); 1583 } 1584 } 1585 1586 /* 1587 * alloc_io() takes one extra reference for submission, so the 1588 * reference won't reach 0 without the following subtraction 1589 */ 1590 atomic_sub(1, &ci->io->io_count); 1591 1592 bio_uninit(ci->bio); 1593 } 1594 1595 static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti, 1596 unsigned int num_bios, unsigned int max_granularity, 1597 unsigned int max_sectors) 1598 { 1599 unsigned int len, bios; 1600 1601 len = min_t(sector_t, ci->sector_count, 1602 __max_io_len(ti, ci->sector, max_granularity, max_sectors)); 1603 1604 atomic_add(num_bios, &ci->io->io_count); 1605 bios = __send_duplicate_bios(ci, ti, num_bios, &len); 1606 /* 1607 * alloc_io() takes one extra reference for submission, so the 1608 * reference won't reach 0 without the following (+1) subtraction 1609 */ 1610 atomic_sub(num_bios - bios + 1, &ci->io->io_count); 1611 1612 ci->sector += len; 1613 ci->sector_count -= len; 1614 } 1615 1616 static bool is_abnormal_io(struct bio *bio) 1617 { 1618 switch (bio_op(bio)) { 1619 case REQ_OP_READ: 1620 case REQ_OP_WRITE: 1621 case REQ_OP_FLUSH: 1622 return false; 1623 case REQ_OP_DISCARD: 1624 case REQ_OP_SECURE_ERASE: 1625 case REQ_OP_WRITE_ZEROES: 1626 case REQ_OP_ZONE_RESET_ALL: 1627 return true; 1628 default: 1629 return false; 1630 } 1631 } 1632 1633 static blk_status_t __process_abnormal_io(struct clone_info *ci, 1634 struct dm_target *ti) 1635 { 1636 unsigned int num_bios = 0; 1637 unsigned int max_granularity = 0; 1638 unsigned int max_sectors = 0; 1639 struct queue_limits *limits = dm_get_queue_limits(ti->table->md); 1640 1641 switch (bio_op(ci->bio)) { 1642 case REQ_OP_DISCARD: 1643 num_bios = ti->num_discard_bios; 1644 max_sectors = limits->max_discard_sectors; 1645 if (ti->max_discard_granularity) 1646 max_granularity = max_sectors; 1647 break; 1648 case REQ_OP_SECURE_ERASE: 1649 num_bios = ti->num_secure_erase_bios; 1650 max_sectors = limits->max_secure_erase_sectors; 1651 break; 1652 case REQ_OP_WRITE_ZEROES: 1653 num_bios = ti->num_write_zeroes_bios; 1654 max_sectors = limits->max_write_zeroes_sectors; 1655 break; 1656 default: 1657 break; 1658 } 1659 1660 /* 1661 * Even though the device advertised support for this type of 1662 * request, that does not mean every target supports it, and 1663 * reconfiguration might also have changed that since the 1664 * check was performed. 1665 */ 1666 if (unlikely(!num_bios)) 1667 return BLK_STS_NOTSUPP; 1668 1669 __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors); 1670 1671 return BLK_STS_OK; 1672 } 1673 1674 /* 1675 * Reuse ->bi_private as dm_io list head for storing all dm_io instances 1676 * associated with this bio, and this bio's bi_private needs to be 1677 * stored in dm_io->data before the reuse. 1678 * 1679 * bio->bi_private is owned by fs or upper layer, so block layer won't 1680 * touch it after splitting. Meantime it won't be changed by anyone after 1681 * bio is submitted. So this reuse is safe. 
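 * The resulting layout: bio->bi_private points at the most recently queued
 * dm_io, each dm_io->next points at the previously queued one, and every
 * dm_io->data holds the saved original ->bi_private (restored in
 * dm_poll_bio()).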
1682 */ 1683 static inline struct dm_io **dm_poll_list_head(struct bio *bio) 1684 { 1685 return (struct dm_io **)&bio->bi_private; 1686 } 1687 1688 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io) 1689 { 1690 struct dm_io **head = dm_poll_list_head(bio); 1691 1692 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) { 1693 bio->bi_opf |= REQ_DM_POLL_LIST; 1694 /* 1695 * Save .bi_private into dm_io, so that we can reuse 1696 * .bi_private as dm_io list head for storing dm_io list 1697 */ 1698 io->data = bio->bi_private; 1699 1700 /* tell block layer to poll for completion */ 1701 bio->bi_cookie = ~BLK_QC_T_NONE; 1702 1703 io->next = NULL; 1704 } else { 1705 /* 1706 * bio recursed due to split, reuse original poll list, 1707 * and save bio->bi_private too. 1708 */ 1709 io->data = (*head)->data; 1710 io->next = *head; 1711 } 1712 1713 *head = io; 1714 } 1715 1716 /* 1717 * Select the correct strategy for processing a non-flush bio. 1718 */ 1719 static blk_status_t __split_and_process_bio(struct clone_info *ci) 1720 { 1721 struct bio *clone; 1722 struct dm_target *ti; 1723 unsigned int len; 1724 1725 ti = dm_table_find_target(ci->map, ci->sector); 1726 if (unlikely(!ti)) 1727 return BLK_STS_IOERR; 1728 1729 if (unlikely(ci->is_abnormal_io)) 1730 return __process_abnormal_io(ci, ti); 1731 1732 /* 1733 * Only support bio polling for normal IO, and the target io is 1734 * exactly inside the dm_io instance (verified in dm_poll_dm_io) 1735 */ 1736 ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); 1737 1738 len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); 1739 if (ci->bio->bi_opf & REQ_ATOMIC) { 1740 if (unlikely(!dm_target_supports_atomic_writes(ti->type))) 1741 return BLK_STS_IOERR; 1742 if (unlikely(len != ci->sector_count)) 1743 return BLK_STS_IOERR; 1744 } 1745 1746 setup_split_accounting(ci, len); 1747 1748 if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) { 1749 if (unlikely(!dm_target_supports_nowait(ti->type))) 1750 return BLK_STS_NOTSUPP; 1751 1752 clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); 1753 if (unlikely(!clone)) 1754 return BLK_STS_AGAIN; 1755 } else { 1756 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); 1757 } 1758 __map_bio(clone); 1759 1760 ci->sector += len; 1761 ci->sector_count -= len; 1762 1763 return BLK_STS_OK; 1764 } 1765 1766 static void init_clone_info(struct clone_info *ci, struct dm_io *io, 1767 struct dm_table *map, struct bio *bio, bool is_abnormal) 1768 { 1769 ci->map = map; 1770 ci->io = io; 1771 ci->bio = bio; 1772 ci->is_abnormal_io = is_abnormal; 1773 ci->submit_as_polled = false; 1774 ci->sector = bio->bi_iter.bi_sector; 1775 ci->sector_count = bio_sectors(bio); 1776 1777 /* Shouldn't happen but sector_count was being set to 0 so... */ 1778 if (static_branch_unlikely(&zoned_enabled) && 1779 WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count)) 1780 ci->sector_count = 0; 1781 } 1782 1783 #ifdef CONFIG_BLK_DEV_ZONED 1784 static inline bool dm_zone_bio_needs_split(struct bio *bio) 1785 { 1786 /* 1787 * Special case the zone operations that cannot or should not be split. 
1788 */ 1789 switch (bio_op(bio)) { 1790 case REQ_OP_ZONE_APPEND: 1791 case REQ_OP_ZONE_FINISH: 1792 case REQ_OP_ZONE_RESET: 1793 case REQ_OP_ZONE_RESET_ALL: 1794 return false; 1795 default: 1796 break; 1797 } 1798 1799 /* 1800 * When mapped devices use the block layer zone write plugging, we must 1801 * split any large BIO to the mapped device limits to not submit BIOs 1802 * that span zone boundaries and to avoid potential deadlocks with 1803 * queue freeze operations. 1804 */ 1805 return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio); 1806 } 1807 1808 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) 1809 { 1810 if (!bio_needs_zone_write_plugging(bio)) 1811 return false; 1812 return blk_zone_plug_bio(bio, 0); 1813 } 1814 1815 static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci, 1816 struct dm_target *ti) 1817 { 1818 struct bio_list blist = BIO_EMPTY_LIST; 1819 struct mapped_device *md = ci->io->md; 1820 unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors; 1821 unsigned long *need_reset; 1822 unsigned int i, nr_zones, nr_reset; 1823 unsigned int num_bios = 0; 1824 blk_status_t sts = BLK_STS_OK; 1825 sector_t sector = ti->begin; 1826 struct bio *clone; 1827 int ret; 1828 1829 nr_zones = ti->len >> ilog2(zone_sectors); 1830 need_reset = bitmap_zalloc(nr_zones, GFP_NOIO); 1831 if (!need_reset) 1832 return BLK_STS_RESOURCE; 1833 1834 ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin, 1835 nr_zones, need_reset); 1836 if (ret) { 1837 sts = BLK_STS_IOERR; 1838 goto free_bitmap; 1839 } 1840 1841 /* If we have no zone to reset, we are done. */ 1842 nr_reset = bitmap_weight(need_reset, nr_zones); 1843 if (!nr_reset) 1844 goto free_bitmap; 1845 1846 atomic_add(nr_zones, &ci->io->io_count); 1847 1848 for (i = 0; i < nr_zones; i++) { 1849 1850 if (!test_bit(i, need_reset)) { 1851 sector += zone_sectors; 1852 continue; 1853 } 1854 1855 if (bio_list_empty(&blist)) { 1856 /* This may take a while, so be nice to others */ 1857 if (num_bios) 1858 cond_resched(); 1859 1860 /* 1861 * We may need to reset thousands of zones, so let's 1862 * not go crazy with the clone allocation. 1863 */ 1864 alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32), 1865 NULL); 1866 } 1867 1868 /* Get a clone and change it to a regular reset operation. 
*/ 1869 clone = bio_list_pop(&blist); 1870 clone->bi_opf &= ~REQ_OP_MASK; 1871 clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC; 1872 clone->bi_iter.bi_sector = sector; 1873 clone->bi_iter.bi_size = 0; 1874 __map_bio(clone); 1875 1876 sector += zone_sectors; 1877 num_bios++; 1878 nr_reset--; 1879 } 1880 1881 WARN_ON_ONCE(!bio_list_empty(&blist)); 1882 atomic_sub(nr_zones - num_bios, &ci->io->io_count); 1883 ci->sector_count = 0; 1884 1885 free_bitmap: 1886 bitmap_free(need_reset); 1887 1888 return sts; 1889 } 1890 1891 static void __send_zone_reset_all_native(struct clone_info *ci, 1892 struct dm_target *ti) 1893 { 1894 unsigned int bios; 1895 1896 atomic_add(1, &ci->io->io_count); 1897 bios = __send_duplicate_bios(ci, ti, 1, NULL); 1898 atomic_sub(1 - bios, &ci->io->io_count); 1899 1900 ci->sector_count = 0; 1901 } 1902 1903 static blk_status_t __send_zone_reset_all(struct clone_info *ci) 1904 { 1905 struct dm_table *t = ci->map; 1906 blk_status_t sts = BLK_STS_OK; 1907 1908 for (unsigned int i = 0; i < t->num_targets; i++) { 1909 struct dm_target *ti = dm_table_get_target(t, i); 1910 1911 if (ti->zone_reset_all_supported) { 1912 __send_zone_reset_all_native(ci, ti); 1913 continue; 1914 } 1915 1916 sts = __send_zone_reset_all_emulated(ci, ti); 1917 if (sts != BLK_STS_OK) 1918 break; 1919 } 1920 1921 /* Release the reference that alloc_io() took for submission. */ 1922 atomic_sub(1, &ci->io->io_count); 1923 1924 return sts; 1925 } 1926 1927 #else 1928 static inline bool dm_zone_bio_needs_split(struct bio *bio) 1929 { 1930 return false; 1931 } 1932 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) 1933 { 1934 return false; 1935 } 1936 static blk_status_t __send_zone_reset_all(struct clone_info *ci) 1937 { 1938 return BLK_STS_NOTSUPP; 1939 } 1940 #endif 1941 1942 /* 1943 * Entry point to split a bio into clones and submit them to the targets. 1944 */ 1945 static void dm_split_and_process_bio(struct mapped_device *md, 1946 struct dm_table *map, struct bio *bio) 1947 { 1948 struct clone_info ci; 1949 struct dm_io *io; 1950 blk_status_t error = BLK_STS_OK; 1951 bool is_abnormal, need_split; 1952 1953 is_abnormal = is_abnormal_io(bio); 1954 if (static_branch_unlikely(&zoned_enabled)) { 1955 need_split = is_abnormal || dm_zone_bio_needs_split(bio); 1956 } else { 1957 need_split = is_abnormal; 1958 } 1959 1960 if (unlikely(need_split)) { 1961 /* 1962 * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc) 1963 * otherwise associated queue_limits won't be imposed. 1964 * Also split the BIO for mapped devices needing zone append 1965 * emulation to ensure that the BIO does not cross zone 1966 * boundaries. 1967 */ 1968 bio = bio_split_to_limits(bio); 1969 if (!bio) 1970 return; 1971 } 1972 1973 /* 1974 * Use the block layer zone write plugging for mapped devices that 1975 * need zone append emulation (e.g. dm-crypt). 1976 */ 1977 if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio)) 1978 return; 1979 1980 /* Only support nowait for normal IO */ 1981 if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) { 1982 /* 1983 * Don't support NOWAIT for FLUSH because it may allocate 1984 * multiple bios and there's no easy way how to undo the 1985 * allocations. 1986 */ 1987 if (bio->bi_opf & REQ_PREFLUSH) { 1988 bio_wouldblock_error(bio); 1989 return; 1990 } 1991 io = alloc_io(md, bio, GFP_NOWAIT); 1992 if (unlikely(!io)) { 1993 /* Unable to do anything without dm_io. 
*/ 1994 bio_wouldblock_error(bio); 1995 return; 1996 } 1997 } else { 1998 io = alloc_io(md, bio, GFP_NOIO); 1999 } 2000 init_clone_info(&ci, io, map, bio, is_abnormal); 2001 2002 if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) { 2003 /* 2004 * The "flush_bypasses_map" is set on targets where it is safe 2005 * to skip the map function and submit bios directly to the 2006 * underlying block devices - currently, it is set for dm-linear 2007 * and dm-stripe. 2008 * 2009 * If we have just one underlying device (i.e. there is one 2010 * linear target or multiple linear targets pointing to the same 2011 * device), we can send the flush with data directly to it. 2012 */ 2013 if (bio->bi_iter.bi_size && map->flush_bypasses_map) { 2014 struct list_head *devices = dm_table_get_devices(map); 2015 if (devices->next == devices->prev) 2016 goto send_preflush_with_data; 2017 } 2018 if (bio->bi_iter.bi_size) 2019 io->requeue_flush_with_data = true; 2020 __send_empty_flush(&ci); 2021 /* dm_io_complete submits any data associated with flush */ 2022 goto out; 2023 } 2024 2025 send_preflush_with_data: 2026 if (static_branch_unlikely(&zoned_enabled) && 2027 (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) { 2028 error = __send_zone_reset_all(&ci); 2029 goto out; 2030 } 2031 2032 error = __split_and_process_bio(&ci); 2033 if (error || !ci.sector_count) 2034 goto out; 2035 /* 2036 * Remainder must be passed to submit_bio_noacct() so it gets handled 2037 * *after* bios already submitted have been completely processed. 2038 */ 2039 bio_trim(bio, io->sectors, ci.sector_count); 2040 trace_block_split(bio, bio->bi_iter.bi_sector); 2041 bio_inc_remaining(bio); 2042 submit_bio_noacct(bio); 2043 out: 2044 /* 2045 * Drop the extra reference count for non-POLLED bio, and hold one 2046 * reference for POLLED bio, which will be released in dm_poll_bio 2047 * 2048 * Add every dm_io instance into the dm_io list head which is stored 2049 * in bio->bi_private, so that dm_poll_bio can poll them all. 
2050 */ 2051 if (error || !ci.submit_as_polled) { 2052 /* 2053 * In case of submission failure, the extra reference for 2054 * submitting io isn't consumed yet 2055 */ 2056 if (error) 2057 atomic_dec(&io->io_count); 2058 dm_io_dec_pending(io, error); 2059 } else 2060 dm_queue_poll_io(bio, io); 2061 } 2062 2063 static void dm_submit_bio(struct bio *bio) 2064 { 2065 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; 2066 int srcu_idx; 2067 struct dm_table *map; 2068 2069 map = dm_get_live_table(md, &srcu_idx); 2070 if (unlikely(!map)) { 2071 DMERR_LIMIT("%s: mapping table unavailable, erroring io", 2072 dm_device_name(md)); 2073 bio_io_error(bio); 2074 goto out; 2075 } 2076 2077 /* If suspended, queue this IO for later */ 2078 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 2079 if (bio->bi_opf & REQ_NOWAIT) 2080 bio_wouldblock_error(bio); 2081 else if (bio->bi_opf & REQ_RAHEAD) 2082 bio_io_error(bio); 2083 else 2084 queue_io(md, bio); 2085 goto out; 2086 } 2087 2088 dm_split_and_process_bio(md, map, bio); 2089 out: 2090 dm_put_live_table(md, srcu_idx); 2091 } 2092 2093 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, 2094 unsigned int flags) 2095 { 2096 WARN_ON_ONCE(!dm_tio_is_normal(&io->tio)); 2097 2098 /* don't poll if the mapped io is done */ 2099 if (atomic_read(&io->io_count) > 1) 2100 bio_poll(&io->tio.clone, iob, flags); 2101 2102 /* bio_poll holds the last reference */ 2103 return atomic_read(&io->io_count) == 1; 2104 } 2105 2106 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob, 2107 unsigned int flags) 2108 { 2109 struct dm_io **head = dm_poll_list_head(bio); 2110 struct dm_io *list = *head; 2111 struct dm_io *tmp = NULL; 2112 struct dm_io *curr, *next; 2113 2114 /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */ 2115 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) 2116 return 0; 2117 2118 WARN_ON_ONCE(!list); 2119 2120 /* 2121 * Restore .bi_private before possibly completing dm_io. 2122 * 2123 * bio_poll() is only possible once @bio has been completely 2124 * submitted via submit_bio_noacct()'s depth-first submission. 2125 * So there is no dm_queue_poll_io() race associated with 2126 * clearing REQ_DM_POLL_LIST here. 2127 */ 2128 bio->bi_opf &= ~REQ_DM_POLL_LIST; 2129 bio->bi_private = list->data; 2130 2131 for (curr = list, next = curr->next; curr; curr = next, next = 2132 curr ? curr->next : NULL) { 2133 if (dm_poll_dm_io(curr, iob, flags)) { 2134 /* 2135 * clone_endio() has already occurred, so no 2136 * error handling is needed here. 2137 */ 2138 __dm_io_dec_pending(curr); 2139 } else { 2140 curr->next = tmp; 2141 tmp = curr; 2142 } 2143 } 2144 2145 /* Not done? */ 2146 if (tmp) { 2147 bio->bi_opf |= REQ_DM_POLL_LIST; 2148 /* Reset bio->bi_private to dm_io list head */ 2149 *head = tmp; 2150 return 0; 2151 } 2152 return 1; 2153 } 2154 2155 /* 2156 *--------------------------------------------------------------- 2157 * An IDR is used to keep track of allocated minor numbers. 2158 *--------------------------------------------------------------- 2159 */ 2160 static void free_minor(int minor) 2161 { 2162 spin_lock(&_minor_lock); 2163 idr_remove(&_minor_idr, minor); 2164 spin_unlock(&_minor_lock); 2165 } 2166 2167 /* 2168 * See if the device with a specific minor # is free. 
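 * Both allocators below use the standard IDR idiom for allocating
 * under a spinlock: preload while it is still legal to sleep, then do
 * the actual allocation atomically:
 *
 *	idr_preload(GFP_KERNEL);	- may sleep, caches idr nodes
 *	spin_lock(&_minor_lock);
 *	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1,
 *		      GFP_NOWAIT);	- cannot sleep, uses the cache
 *	spin_unlock(&_minor_lock);
 *	idr_preload_end();
 *
 * MINOR_ALLOCED is only a placeholder; alloc_dev() replaces it with
 * the real mapped_device pointer (idr_replace()) once the device has
 * been fully set up.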
2169 */ 2170 static int specific_minor(int minor) 2171 { 2172 int r; 2173 2174 if (minor >= (1 << MINORBITS)) 2175 return -EINVAL; 2176 2177 idr_preload(GFP_KERNEL); 2178 spin_lock(&_minor_lock); 2179 2180 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 2181 2182 spin_unlock(&_minor_lock); 2183 idr_preload_end(); 2184 if (r < 0) 2185 return r == -ENOSPC ? -EBUSY : r; 2186 return 0; 2187 } 2188 2189 static int next_free_minor(int *minor) 2190 { 2191 int r; 2192 2193 idr_preload(GFP_KERNEL); 2194 spin_lock(&_minor_lock); 2195 2196 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 2197 2198 spin_unlock(&_minor_lock); 2199 idr_preload_end(); 2200 if (r < 0) 2201 return r; 2202 *minor = r; 2203 return 0; 2204 } 2205 2206 static const struct block_device_operations dm_blk_dops; 2207 static const struct block_device_operations dm_rq_blk_dops; 2208 static const struct dax_operations dm_dax_ops; 2209 2210 static void dm_wq_work(struct work_struct *work); 2211 2212 #ifdef CONFIG_BLK_INLINE_ENCRYPTION 2213 static void dm_queue_destroy_crypto_profile(struct request_queue *q) 2214 { 2215 dm_destroy_crypto_profile(q->crypto_profile); 2216 } 2217 2218 #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 2219 2220 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) 2221 { 2222 } 2223 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ 2224 2225 static void cleanup_mapped_device(struct mapped_device *md) 2226 { 2227 if (md->wq) 2228 destroy_workqueue(md->wq); 2229 dm_free_md_mempools(md->mempools); 2230 2231 if (md->dax_dev) { 2232 dax_remove_host(md->disk); 2233 kill_dax(md->dax_dev); 2234 put_dax(md->dax_dev); 2235 md->dax_dev = NULL; 2236 } 2237 2238 if (md->disk) { 2239 spin_lock(&_minor_lock); 2240 md->disk->private_data = NULL; 2241 spin_unlock(&_minor_lock); 2242 if (dm_get_md_type(md) != DM_TYPE_NONE) { 2243 struct table_device *td; 2244 2245 dm_sysfs_exit(md); 2246 list_for_each_entry(td, &md->table_devices, list) { 2247 bd_unlink_disk_holder(td->dm_dev.bdev, 2248 md->disk); 2249 } 2250 2251 /* 2252 * Hold lock to make sure del_gendisk() won't concurrent 2253 * with open/close_table_device(). 2254 */ 2255 mutex_lock(&md->table_devices_lock); 2256 del_gendisk(md->disk); 2257 mutex_unlock(&md->table_devices_lock); 2258 } 2259 dm_queue_destroy_crypto_profile(md->queue); 2260 put_disk(md->disk); 2261 } 2262 2263 if (md->pending_io) { 2264 free_percpu(md->pending_io); 2265 md->pending_io = NULL; 2266 } 2267 2268 cleanup_srcu_struct(&md->io_barrier); 2269 2270 mutex_destroy(&md->suspend_lock); 2271 mutex_destroy(&md->type_lock); 2272 mutex_destroy(&md->table_devices_lock); 2273 mutex_destroy(&md->swap_bios_lock); 2274 2275 dm_mq_cleanup_mapped_device(md); 2276 } 2277 2278 /* 2279 * Allocate and initialise a blank device with a given minor. 
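 * On failure alloc_dev() below unwinds in reverse order through the
 * bad_* labels at its tail.  Everything created after the SRCU
 * barrier funnels into the single "bad:" label, which works because
 * cleanup_mapped_device() above checks every member (md->wq,
 * md->dax_dev, md->disk, md->pending_io, ...) before tearing it down
 * and therefore copes with a partially constructed mapped_device.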
2280 */ 2281 static struct mapped_device *alloc_dev(int minor) 2282 { 2283 int r, numa_node_id = dm_get_numa_node(); 2284 struct dax_device *dax_dev; 2285 struct mapped_device *md; 2286 void *old_md; 2287 2288 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 2289 if (!md) { 2290 DMERR("unable to allocate device, out of memory."); 2291 return NULL; 2292 } 2293 2294 if (!try_module_get(THIS_MODULE)) 2295 goto bad_module_get; 2296 2297 /* get a minor number for the dev */ 2298 if (minor == DM_ANY_MINOR) 2299 r = next_free_minor(&minor); 2300 else 2301 r = specific_minor(minor); 2302 if (r < 0) 2303 goto bad_minor; 2304 2305 r = init_srcu_struct(&md->io_barrier); 2306 if (r < 0) 2307 goto bad_io_barrier; 2308 2309 md->numa_node_id = numa_node_id; 2310 md->init_tio_pdu = false; 2311 md->type = DM_TYPE_NONE; 2312 mutex_init(&md->suspend_lock); 2313 mutex_init(&md->type_lock); 2314 mutex_init(&md->table_devices_lock); 2315 spin_lock_init(&md->deferred_lock); 2316 atomic_set(&md->holders, 1); 2317 atomic_set(&md->open_count, 0); 2318 atomic_set(&md->event_nr, 0); 2319 atomic_set(&md->uevent_seq, 0); 2320 INIT_LIST_HEAD(&md->uevent_list); 2321 INIT_LIST_HEAD(&md->table_devices); 2322 spin_lock_init(&md->uevent_lock); 2323 2324 /* 2325 * default to bio-based until DM table is loaded and md->type 2326 * established. If request-based table is loaded: blk-mq will 2327 * override accordingly. 2328 */ 2329 md->disk = blk_alloc_disk(NULL, md->numa_node_id); 2330 if (IS_ERR(md->disk)) { 2331 md->disk = NULL; 2332 goto bad; 2333 } 2334 md->queue = md->disk->queue; 2335 2336 init_waitqueue_head(&md->wait); 2337 INIT_WORK(&md->work, dm_wq_work); 2338 INIT_WORK(&md->requeue_work, dm_wq_requeue_work); 2339 init_waitqueue_head(&md->eventq); 2340 init_completion(&md->kobj_holder.completion); 2341 2342 md->requeue_list = NULL; 2343 md->swap_bios = get_swap_bios(); 2344 sema_init(&md->swap_bios_semaphore, md->swap_bios); 2345 mutex_init(&md->swap_bios_lock); 2346 2347 md->disk->major = _major; 2348 md->disk->first_minor = minor; 2349 md->disk->minors = 1; 2350 md->disk->flags |= GENHD_FL_NO_PART; 2351 md->disk->fops = &dm_blk_dops; 2352 md->disk->private_data = md; 2353 sprintf(md->disk->disk_name, "dm-%d", minor); 2354 2355 dax_dev = alloc_dax(md, &dm_dax_ops); 2356 if (IS_ERR(dax_dev)) { 2357 if (PTR_ERR(dax_dev) != -EOPNOTSUPP) 2358 goto bad; 2359 } else { 2360 set_dax_nocache(dax_dev); 2361 set_dax_nomc(dax_dev); 2362 md->dax_dev = dax_dev; 2363 if (dax_add_host(dax_dev, md->disk)) 2364 goto bad; 2365 } 2366 2367 format_dev_t(md->name, MKDEV(_major, minor)); 2368 2369 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); 2370 if (!md->wq) 2371 goto bad; 2372 2373 md->pending_io = alloc_percpu(unsigned long); 2374 if (!md->pending_io) 2375 goto bad; 2376 2377 r = dm_stats_init(&md->stats); 2378 if (r < 0) 2379 goto bad; 2380 2381 /* Populate the mapping, nobody knows we exist yet */ 2382 spin_lock(&_minor_lock); 2383 old_md = idr_replace(&_minor_idr, md, minor); 2384 spin_unlock(&_minor_lock); 2385 2386 BUG_ON(old_md != MINOR_ALLOCED); 2387 2388 return md; 2389 2390 bad: 2391 cleanup_mapped_device(md); 2392 bad_io_barrier: 2393 free_minor(minor); 2394 bad_minor: 2395 module_put(THIS_MODULE); 2396 bad_module_get: 2397 kvfree(md); 2398 return NULL; 2399 } 2400 2401 static void unlock_fs(struct mapped_device *md); 2402 2403 static void free_dev(struct mapped_device *md) 2404 { 2405 int minor = MINOR(disk_devt(md->disk)); 2406 2407 unlock_fs(md); 2408 2409 cleanup_mapped_device(md); 2410 
2411 WARN_ON_ONCE(!list_empty(&md->table_devices)); 2412 dm_stats_cleanup(&md->stats); 2413 free_minor(minor); 2414 2415 module_put(THIS_MODULE); 2416 kvfree(md); 2417 } 2418 2419 /* 2420 * Bind a table to the device. 2421 */ 2422 static void event_callback(void *context) 2423 { 2424 unsigned long flags; 2425 LIST_HEAD(uevents); 2426 struct mapped_device *md = context; 2427 2428 spin_lock_irqsave(&md->uevent_lock, flags); 2429 list_splice_init(&md->uevent_list, &uevents); 2430 spin_unlock_irqrestore(&md->uevent_lock, flags); 2431 2432 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2433 2434 atomic_inc(&md->event_nr); 2435 wake_up(&md->eventq); 2436 dm_issue_global_event(); 2437 } 2438 2439 /* 2440 * Returns old map, which caller must destroy. 2441 */ 2442 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2443 struct queue_limits *limits) 2444 { 2445 struct dm_table *old_map; 2446 sector_t size, old_size; 2447 2448 lockdep_assert_held(&md->suspend_lock); 2449 2450 size = dm_table_get_size(t); 2451 2452 old_size = dm_get_size(md); 2453 2454 if (!dm_table_supports_size_change(t, old_size, size)) { 2455 old_map = ERR_PTR(-EINVAL); 2456 goto out; 2457 } 2458 2459 set_capacity(md->disk, size); 2460 2461 if (limits) { 2462 int ret = dm_table_set_restrictions(t, md->queue, limits); 2463 if (ret) { 2464 set_capacity(md->disk, old_size); 2465 old_map = ERR_PTR(ret); 2466 goto out; 2467 } 2468 } 2469 2470 /* 2471 * Wipe any geometry if the size of the table changed. 2472 */ 2473 if (size != old_size) 2474 memset(&md->geometry, 0, sizeof(md->geometry)); 2475 2476 dm_table_event_callback(t, event_callback, md); 2477 2478 if (dm_table_request_based(t)) { 2479 /* 2480 * Leverage the fact that request-based DM targets are 2481 * immutable singletons - used to optimize dm_mq_queue_rq. 2482 */ 2483 md->immutable_target = dm_table_get_immutable_target(t); 2484 2485 /* 2486 * There is no need to reload with request-based dm because the 2487 * size of front_pad doesn't change. 2488 * 2489 * Note for future: If you are to reload bioset, prep-ed 2490 * requests in the queue may refer to bio from the old bioset, 2491 * so you must walk through the queue to unprep. 2492 */ 2493 if (!md->mempools) 2494 md->mempools = t->mempools; 2495 else 2496 dm_free_md_mempools(t->mempools); 2497 } else { 2498 /* 2499 * The md may already have mempools that need changing. 2500 * If so, reload bioset because front_pad may have changed 2501 * because a different table was loaded. 2502 */ 2503 dm_free_md_mempools(md->mempools); 2504 md->mempools = t->mempools; 2505 } 2506 t->mempools = NULL; 2507 2508 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2509 rcu_assign_pointer(md->map, (void *)t); 2510 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2511 2512 if (old_map) 2513 dm_sync_table(md); 2514 out: 2515 return old_map; 2516 } 2517 2518 /* 2519 * Returns unbound table for the caller to free. 2520 */ 2521 static struct dm_table *__unbind(struct mapped_device *md) 2522 { 2523 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2524 2525 if (!map) 2526 return NULL; 2527 2528 dm_table_event_callback(map, NULL, NULL); 2529 RCU_INIT_POINTER(md->map, NULL); 2530 dm_sync_table(md); 2531 2532 return map; 2533 } 2534 2535 /* 2536 * Constructor for a new device. 
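 * In outline, the ioctl layer (dm-ioctl.c) brings a device to life
 * with the helpers below; error handling and reference counting are
 * omitted here:
 *
 *	dm_create(DM_ANY_MINOR, &md);		- DM_DEV_CREATE
 *	... build and validate a dm_table t ...	- DM_TABLE_LOAD
 *	dm_lock_md_type(md);
 *	dm_setup_md_queue(md, t);		- first table load only
 *	dm_unlock_md_type(md);
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, t);		- device must be suspended
 *	dm_resume(md);				- device goes live
 *
 * dm_swap_table() hands back the previous table (or an ERR_PTR) for
 * the caller to destroy.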
2537 */ 2538 int dm_create(int minor, struct mapped_device **result) 2539 { 2540 struct mapped_device *md; 2541 2542 md = alloc_dev(minor); 2543 if (!md) 2544 return -ENXIO; 2545 2546 dm_ima_reset_data(md); 2547 2548 *result = md; 2549 return 0; 2550 } 2551 2552 /* 2553 * Functions to manage md->type. 2554 * All are required to hold md->type_lock. 2555 */ 2556 void dm_lock_md_type(struct mapped_device *md) 2557 { 2558 mutex_lock(&md->type_lock); 2559 } 2560 2561 void dm_unlock_md_type(struct mapped_device *md) 2562 { 2563 mutex_unlock(&md->type_lock); 2564 } 2565 2566 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2567 { 2568 return md->type; 2569 } 2570 2571 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2572 { 2573 return md->immutable_target_type; 2574 } 2575 2576 /* 2577 * Setup the DM device's queue based on md's type 2578 */ 2579 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2580 { 2581 enum dm_queue_mode type = dm_table_get_type(t); 2582 struct queue_limits limits; 2583 struct table_device *td; 2584 int r; 2585 2586 WARN_ON_ONCE(type == DM_TYPE_NONE); 2587 2588 if (type == DM_TYPE_REQUEST_BASED) { 2589 md->disk->fops = &dm_rq_blk_dops; 2590 r = dm_mq_init_request_queue(md, t); 2591 if (r) { 2592 DMERR("Cannot initialize queue for request-based dm mapped device"); 2593 return r; 2594 } 2595 } 2596 2597 r = dm_calculate_queue_limits(t, &limits); 2598 if (r) { 2599 DMERR("Cannot calculate initial queue limits"); 2600 return r; 2601 } 2602 r = dm_table_set_restrictions(t, md->queue, &limits); 2603 if (r) 2604 return r; 2605 2606 /* 2607 * Hold lock to make sure add_disk() and del_gendisk() won't concurrent 2608 * with open_table_device() and close_table_device(). 2609 */ 2610 mutex_lock(&md->table_devices_lock); 2611 r = add_disk(md->disk); 2612 mutex_unlock(&md->table_devices_lock); 2613 if (r) 2614 return r; 2615 2616 /* 2617 * Register the holder relationship for devices added before the disk 2618 * was live. 
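 * The holder relationship is what makes the stacking visible in
 * sysfs: for every underlying device bd_link_disk_holder() creates
 *
 *	/sys/block/dm-<N>/slaves/<component>
 *	/sys/block/<component>/holders/dm-<N>
 *
 * and cleanup_mapped_device() undoes it with bd_unlink_disk_holder()
 * before the disk is deleted.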
2619 */ 2620 list_for_each_entry(td, &md->table_devices, list) { 2621 r = bd_link_disk_holder(td->dm_dev.bdev, md->disk); 2622 if (r) 2623 goto out_undo_holders; 2624 } 2625 2626 r = dm_sysfs_init(md); 2627 if (r) 2628 goto out_undo_holders; 2629 2630 md->type = type; 2631 return 0; 2632 2633 out_undo_holders: 2634 list_for_each_entry_continue_reverse(td, &md->table_devices, list) 2635 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); 2636 mutex_lock(&md->table_devices_lock); 2637 del_gendisk(md->disk); 2638 mutex_unlock(&md->table_devices_lock); 2639 return r; 2640 } 2641 2642 struct mapped_device *dm_get_md(dev_t dev) 2643 { 2644 struct mapped_device *md; 2645 unsigned int minor = MINOR(dev); 2646 2647 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2648 return NULL; 2649 2650 spin_lock(&_minor_lock); 2651 2652 md = idr_find(&_minor_idr, minor); 2653 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2654 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2655 md = NULL; 2656 goto out; 2657 } 2658 dm_get(md); 2659 out: 2660 spin_unlock(&_minor_lock); 2661 2662 return md; 2663 } 2664 EXPORT_SYMBOL_GPL(dm_get_md); 2665 2666 void *dm_get_mdptr(struct mapped_device *md) 2667 { 2668 return md->interface_ptr; 2669 } 2670 2671 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2672 { 2673 md->interface_ptr = ptr; 2674 } 2675 2676 void dm_get(struct mapped_device *md) 2677 { 2678 atomic_inc(&md->holders); 2679 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2680 } 2681 2682 int dm_hold(struct mapped_device *md) 2683 { 2684 spin_lock(&_minor_lock); 2685 if (test_bit(DMF_FREEING, &md->flags)) { 2686 spin_unlock(&_minor_lock); 2687 return -EBUSY; 2688 } 2689 dm_get(md); 2690 spin_unlock(&_minor_lock); 2691 return 0; 2692 } 2693 EXPORT_SYMBOL_GPL(dm_hold); 2694 2695 const char *dm_device_name(struct mapped_device *md) 2696 { 2697 return md->name; 2698 } 2699 EXPORT_SYMBOL_GPL(dm_device_name); 2700 2701 static void __dm_destroy(struct mapped_device *md, bool wait) 2702 { 2703 struct dm_table *map; 2704 int srcu_idx; 2705 2706 might_sleep(); 2707 2708 spin_lock(&_minor_lock); 2709 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2710 set_bit(DMF_FREEING, &md->flags); 2711 spin_unlock(&_minor_lock); 2712 2713 blk_mark_disk_dead(md->disk); 2714 2715 /* 2716 * Take suspend_lock so that presuspend and postsuspend methods 2717 * do not race with internal suspend. 2718 */ 2719 mutex_lock(&md->suspend_lock); 2720 map = dm_get_live_table(md, &srcu_idx); 2721 if (!dm_suspended_md(md)) { 2722 dm_table_presuspend_targets(map); 2723 set_bit(DMF_SUSPENDED, &md->flags); 2724 set_bit(DMF_POST_SUSPENDING, &md->flags); 2725 dm_table_postsuspend_targets(map); 2726 } 2727 /* dm_put_live_table must be before fsleep, otherwise deadlock is possible */ 2728 dm_put_live_table(md, srcu_idx); 2729 mutex_unlock(&md->suspend_lock); 2730 2731 /* 2732 * Rare, but there may be I/O requests still going to complete, 2733 * for example. Wait for all references to disappear. 2734 * No one should increment the reference count of the mapped_device, 2735 * after the mapped_device state becomes DMF_FREEING. 2736 */ 2737 if (wait) 2738 while (atomic_read(&md->holders)) 2739 fsleep(1000); 2740 else if (atomic_read(&md->holders)) 2741 DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", 2742 dm_device_name(md), atomic_read(&md->holders)); 2743 2744 dm_table_destroy(__unbind(md)); 2745 free_dev(md); 2746 } 2747 2748 void dm_destroy(struct mapped_device *md) 2749 { 2750 __dm_destroy(md, true); 2751 } 2752 2753 void dm_destroy_immediate(struct mapped_device *md) 2754 { 2755 __dm_destroy(md, false); 2756 } 2757 2758 void dm_put(struct mapped_device *md) 2759 { 2760 atomic_dec(&md->holders); 2761 } 2762 EXPORT_SYMBOL_GPL(dm_put); 2763 2764 static bool dm_in_flight_bios(struct mapped_device *md) 2765 { 2766 int cpu; 2767 unsigned long sum = 0; 2768 2769 for_each_possible_cpu(cpu) 2770 sum += *per_cpu_ptr(md->pending_io, cpu); 2771 2772 return sum != 0; 2773 } 2774 2775 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) 2776 { 2777 int r = 0; 2778 DEFINE_WAIT(wait); 2779 2780 while (true) { 2781 prepare_to_wait(&md->wait, &wait, task_state); 2782 2783 if (!dm_in_flight_bios(md)) 2784 break; 2785 2786 if (signal_pending_state(task_state, current)) { 2787 r = -ERESTARTSYS; 2788 break; 2789 } 2790 2791 io_schedule(); 2792 } 2793 finish_wait(&md->wait, &wait); 2794 2795 smp_rmb(); 2796 2797 return r; 2798 } 2799 2800 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) 2801 { 2802 int r = 0; 2803 2804 if (!queue_is_mq(md->queue)) 2805 return dm_wait_for_bios_completion(md, task_state); 2806 2807 while (true) { 2808 if (!blk_mq_queue_inflight(md->queue)) 2809 break; 2810 2811 if (signal_pending_state(task_state, current)) { 2812 r = -ERESTARTSYS; 2813 break; 2814 } 2815 2816 fsleep(5000); 2817 } 2818 2819 return r; 2820 } 2821 2822 /* 2823 * Process the deferred bios 2824 */ 2825 static void dm_wq_work(struct work_struct *work) 2826 { 2827 struct mapped_device *md = container_of(work, struct mapped_device, work); 2828 struct bio *bio; 2829 2830 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2831 spin_lock_irq(&md->deferred_lock); 2832 bio = bio_list_pop(&md->deferred); 2833 spin_unlock_irq(&md->deferred_lock); 2834 2835 if (!bio) 2836 break; 2837 2838 submit_bio_noacct(bio); 2839 cond_resched(); 2840 } 2841 } 2842 2843 static void dm_queue_flush(struct mapped_device *md) 2844 { 2845 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2846 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2847 smp_mb__after_atomic(); 2848 queue_work(md->wq, &md->work); 2849 } 2850 2851 /* 2852 * Swap in a new table, returning the old one for the caller to destroy. 2853 */ 2854 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2855 { 2856 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2857 struct queue_limits limits; 2858 bool update_limits = true; 2859 int r; 2860 2861 mutex_lock(&md->suspend_lock); 2862 2863 /* device must be suspended */ 2864 if (!dm_suspended_md(md)) 2865 goto out; 2866 2867 /* 2868 * To avoid a potential deadlock locking the queue limits, disallow 2869 * updating the queue limits during a table swap, when updating an 2870 * immutable request-based dm device (dm-multipath) during a noflush 2871 * suspend. It is userspace's responsibility to make sure that the new 2872 * table uses the same limits as the existing table, if it asks for a 2873 * noflush suspend. 2874 */ 2875 if (dm_request_based(md) && md->immutable_target && 2876 __noflush_suspending(md)) 2877 update_limits = false; 2878 /* 2879 * If the new table has no data devices, retain the existing limits. 
2880 * This helps multipath with queue_if_no_path if all paths disappear, 2881 * then new I/O is queued based on these limits, and then some paths 2882 * reappear. 2883 */ 2884 else if (dm_table_has_no_data_devices(table)) { 2885 live_map = dm_get_live_table_fast(md); 2886 if (live_map) 2887 limits = md->queue->limits; 2888 dm_put_live_table_fast(md); 2889 } 2890 2891 if (update_limits && !live_map) { 2892 r = dm_calculate_queue_limits(table, &limits); 2893 if (r) { 2894 map = ERR_PTR(r); 2895 goto out; 2896 } 2897 } 2898 2899 map = __bind(md, table, update_limits ? &limits : NULL); 2900 dm_issue_global_event(); 2901 2902 out: 2903 mutex_unlock(&md->suspend_lock); 2904 return map; 2905 } 2906 2907 /* 2908 * Functions to lock and unlock any filesystem running on the 2909 * device. 2910 */ 2911 static int lock_fs(struct mapped_device *md) 2912 { 2913 int r; 2914 2915 WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2916 2917 r = bdev_freeze(md->disk->part0); 2918 if (!r) 2919 set_bit(DMF_FROZEN, &md->flags); 2920 return r; 2921 } 2922 2923 static void unlock_fs(struct mapped_device *md) 2924 { 2925 if (!test_bit(DMF_FROZEN, &md->flags)) 2926 return; 2927 bdev_thaw(md->disk->part0); 2928 clear_bit(DMF_FROZEN, &md->flags); 2929 } 2930 2931 /* 2932 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2933 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2934 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2935 * 2936 * If __dm_suspend returns 0, the device is completely quiescent 2937 * now. There is no request-processing activity. All new requests 2938 * are being added to md->deferred list. 2939 */ 2940 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2941 unsigned int suspend_flags, unsigned int task_state, 2942 int dmf_suspended_flag) 2943 { 2944 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2945 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2946 int r = 0; 2947 2948 lockdep_assert_held(&md->suspend_lock); 2949 2950 /* 2951 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2952 */ 2953 if (noflush) 2954 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2955 else 2956 DMDEBUG("%s: suspending with flush", dm_device_name(md)); 2957 2958 /* 2959 * This gets reverted if there's an error later and the targets 2960 * provide the .presuspend_undo hook. 2961 */ 2962 dm_table_presuspend_targets(map); 2963 2964 /* 2965 * Flush I/O to the device. 2966 * Any I/O submitted after lock_fs() may not be flushed. 2967 * noflush takes precedence over do_lockfs. 2968 * (lock_fs() flushes I/Os and waits for them to complete.) 2969 */ 2970 if (!noflush && do_lockfs) { 2971 r = lock_fs(md); 2972 if (r) { 2973 dm_table_presuspend_undo_targets(map); 2974 return r; 2975 } 2976 } 2977 2978 /* 2979 * Here we must make sure that no processes are submitting requests 2980 * to target drivers i.e. no one may be executing 2981 * dm_split_and_process_bio from dm_submit_bio. 2982 * 2983 * To get all processes out of dm_split_and_process_bio in dm_submit_bio, 2984 * we take the write lock. To prevent any process from reentering 2985 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread 2986 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 2987 * flush_workqueue(md->wq). 2988 */ 2989 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2990 if (map) 2991 synchronize_srcu(&md->io_barrier); 2992 2993 /* 2994 * Stop md->queue before flushing md->wq in case request-based 2995 * dm defers requests to md->wq from md->queue. 
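 * Taken together, the quiesce sequence around this point is, in order:
 *
 *	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
 *	synchronize_srcu(&md->io_barrier);	- drain dm_submit_bio()
 *	dm_stop_queue(md->queue);		- request-based only
 *	flush_workqueue(md->wq);		- drain dm_wq_work()
 *	dm_wait_for_completion(md, task_state);	- wait for in-flight I/O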
2996 */ 2997 if (map && dm_request_based(md)) { 2998 dm_stop_queue(md->queue); 2999 set_bit(DMF_QUEUE_STOPPED, &md->flags); 3000 } 3001 3002 flush_workqueue(md->wq); 3003 3004 /* 3005 * At this point no more requests are entering target request routines. 3006 * We call dm_wait_for_completion to wait for all existing requests 3007 * to finish. 3008 */ 3009 if (map) 3010 r = dm_wait_for_completion(md, task_state); 3011 if (!r) 3012 set_bit(dmf_suspended_flag, &md->flags); 3013 3014 if (map) 3015 synchronize_srcu(&md->io_barrier); 3016 3017 /* were we interrupted ? */ 3018 if (r < 0) { 3019 dm_queue_flush(md); 3020 3021 if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) 3022 dm_start_queue(md->queue); 3023 3024 unlock_fs(md); 3025 dm_table_presuspend_undo_targets(map); 3026 /* pushback list is already flushed, so skip flush */ 3027 } 3028 3029 return r; 3030 } 3031 3032 /* 3033 * We need to be able to change a mapping table under a mounted 3034 * filesystem. For example we might want to move some data in 3035 * the background. Before the table can be swapped with 3036 * dm_bind_table, dm_suspend must be called to flush any in 3037 * flight bios and ensure that any further io gets deferred. 3038 */ 3039 /* 3040 * Suspend mechanism in request-based dm. 3041 * 3042 * 1. Flush all I/Os by lock_fs() if needed. 3043 * 2. Stop dispatching any I/O by stopping the request_queue. 3044 * 3. Wait for all in-flight I/Os to be completed or requeued. 3045 * 3046 * To abort suspend, start the request_queue. 3047 */ 3048 int dm_suspend(struct mapped_device *md, unsigned int suspend_flags) 3049 { 3050 struct dm_table *map = NULL; 3051 int r = 0; 3052 3053 retry: 3054 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3055 3056 if (dm_suspended_md(md)) { 3057 r = -EINVAL; 3058 goto out_unlock; 3059 } 3060 3061 if (dm_suspended_internally_md(md)) { 3062 /* already internally suspended, wait for internal resume */ 3063 mutex_unlock(&md->suspend_lock); 3064 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3065 if (r) 3066 return r; 3067 goto retry; 3068 } 3069 3070 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3071 if (!map) { 3072 /* avoid deadlock with fs/namespace.c:do_mount() */ 3073 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; 3074 } 3075 3076 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 3077 if (r) 3078 goto out_unlock; 3079 3080 set_bit(DMF_POST_SUSPENDING, &md->flags); 3081 dm_table_postsuspend_targets(map); 3082 clear_bit(DMF_POST_SUSPENDING, &md->flags); 3083 3084 out_unlock: 3085 mutex_unlock(&md->suspend_lock); 3086 return r; 3087 } 3088 3089 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 3090 { 3091 if (map) { 3092 int r = dm_table_resume_targets(map); 3093 3094 if (r) 3095 return r; 3096 } 3097 3098 dm_queue_flush(md); 3099 3100 /* 3101 * Flushing deferred I/Os must be done after targets are resumed 3102 * so that mapping of targets can work correctly. 3103 * Request-based dm is queueing the deferred I/Os in its request_queue. 
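 * The resume side is therefore the mirror image of __dm_suspend():
 *
 *	dm_table_resume_targets(map);	- ->preresume, then ->resume
 *	dm_queue_flush(md);		- clear the flags, kick md->wq
 *	dm_start_queue(md->queue);	- request-based only
 *	unlock_fs(md);			- bdev_thaw() if lock_fs() froze it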
3104 */ 3105 if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) 3106 dm_start_queue(md->queue); 3107 3108 unlock_fs(md); 3109 3110 return 0; 3111 } 3112 3113 int dm_resume(struct mapped_device *md) 3114 { 3115 int r; 3116 struct dm_table *map = NULL; 3117 3118 retry: 3119 r = -EINVAL; 3120 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3121 3122 if (!dm_suspended_md(md)) 3123 goto out; 3124 3125 if (dm_suspended_internally_md(md)) { 3126 /* already internally suspended, wait for internal resume */ 3127 mutex_unlock(&md->suspend_lock); 3128 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3129 if (r) 3130 return r; 3131 goto retry; 3132 } 3133 3134 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3135 if (!map || !dm_table_get_size(map)) 3136 goto out; 3137 3138 r = __dm_resume(md, map); 3139 if (r) 3140 goto out; 3141 3142 clear_bit(DMF_SUSPENDED, &md->flags); 3143 out: 3144 mutex_unlock(&md->suspend_lock); 3145 3146 return r; 3147 } 3148 3149 /* 3150 * Internal suspend/resume works like userspace-driven suspend. It waits 3151 * until all bios finish and prevents issuing new bios to the target drivers. 3152 * It may be used only from the kernel. 3153 */ 3154 3155 static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags) 3156 { 3157 struct dm_table *map = NULL; 3158 3159 lockdep_assert_held(&md->suspend_lock); 3160 3161 if (md->internal_suspend_count++) 3162 return; /* nested internal suspend */ 3163 3164 if (dm_suspended_md(md)) { 3165 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3166 return; /* nest suspend */ 3167 } 3168 3169 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3170 3171 /* 3172 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 3173 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 3174 * would require changing .presuspend to return an error -- avoid this 3175 * until there is a need for more elaborate variants of internal suspend. 3176 */ 3177 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 3178 DMF_SUSPENDED_INTERNALLY); 3179 3180 set_bit(DMF_POST_SUSPENDING, &md->flags); 3181 dm_table_postsuspend_targets(map); 3182 clear_bit(DMF_POST_SUSPENDING, &md->flags); 3183 } 3184 3185 static void __dm_internal_resume(struct mapped_device *md) 3186 { 3187 int r; 3188 struct dm_table *map; 3189 3190 BUG_ON(!md->internal_suspend_count); 3191 3192 if (--md->internal_suspend_count) 3193 return; /* resume from nested internal suspend */ 3194 3195 if (dm_suspended_md(md)) 3196 goto done; /* resume from nested suspend */ 3197 3198 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3199 r = __dm_resume(md, map); 3200 if (r) { 3201 /* 3202 * If a preresume method of some target failed, we are in a 3203 * tricky situation. We can't return an error to the caller. We 3204 * can't fake success because then the "resume" and 3205 * "postsuspend" methods would not be paired correctly, and it 3206 * would break various targets, for example it would cause list 3207 * corruption in the "origin" target. 3208 * 3209 * So, we fake normal suspend here, to make sure that the 3210 * "resume" and "postsuspend" methods will be paired correctly. 
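 * In other words, after a failed preresume the target has seen
 * ->presuspend(), ->postsuspend() and a failing ->preresume(); because
 * DMF_SUSPENDED is set here, a later dm_resume() goes through
 * dm_table_resume_targets() again, so ->preresume() gets another
 * chance before ->resume() runs and the callbacks stay balanced.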
3211 */ 3212 DMERR("Preresume method failed: %d", r); 3213 set_bit(DMF_SUSPENDED, &md->flags); 3214 } 3215 done: 3216 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3217 smp_mb__after_atomic(); 3218 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 3219 } 3220 3221 void dm_internal_suspend_noflush(struct mapped_device *md) 3222 { 3223 mutex_lock(&md->suspend_lock); 3224 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 3225 mutex_unlock(&md->suspend_lock); 3226 } 3227 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 3228 3229 void dm_internal_resume(struct mapped_device *md) 3230 { 3231 mutex_lock(&md->suspend_lock); 3232 __dm_internal_resume(md); 3233 mutex_unlock(&md->suspend_lock); 3234 } 3235 EXPORT_SYMBOL_GPL(dm_internal_resume); 3236 3237 /* 3238 * Fast variants of internal suspend/resume hold md->suspend_lock, 3239 * which prevents interaction with userspace-driven suspend. 3240 */ 3241 3242 void dm_internal_suspend_fast(struct mapped_device *md) 3243 { 3244 mutex_lock(&md->suspend_lock); 3245 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3246 return; 3247 3248 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3249 synchronize_srcu(&md->io_barrier); 3250 flush_workqueue(md->wq); 3251 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3252 } 3253 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 3254 3255 void dm_internal_resume_fast(struct mapped_device *md) 3256 { 3257 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3258 goto done; 3259 3260 dm_queue_flush(md); 3261 3262 done: 3263 mutex_unlock(&md->suspend_lock); 3264 } 3265 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 3266 3267 /* 3268 *--------------------------------------------------------------- 3269 * Event notification. 3270 *--------------------------------------------------------------- 3271 */ 3272 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 3273 unsigned int cookie, bool need_resize_uevent) 3274 { 3275 int r; 3276 unsigned int noio_flag; 3277 char udev_cookie[DM_COOKIE_LENGTH]; 3278 char *envp[3] = { NULL, NULL, NULL }; 3279 char **envpp = envp; 3280 if (cookie) { 3281 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 3282 DM_COOKIE_ENV_VAR_NAME, cookie); 3283 *envpp++ = udev_cookie; 3284 } 3285 if (need_resize_uevent) { 3286 *envpp++ = "RESIZE=1"; 3287 } 3288 3289 noio_flag = memalloc_noio_save(); 3290 3291 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); 3292 3293 memalloc_noio_restore(noio_flag); 3294 3295 return r; 3296 } 3297 3298 uint32_t dm_next_uevent_seq(struct mapped_device *md) 3299 { 3300 return atomic_add_return(1, &md->uevent_seq); 3301 } 3302 3303 uint32_t dm_get_event_nr(struct mapped_device *md) 3304 { 3305 return atomic_read(&md->event_nr); 3306 } 3307 3308 int dm_wait_event(struct mapped_device *md, int event_nr) 3309 { 3310 return wait_event_interruptible(md->eventq, 3311 (event_nr != atomic_read(&md->event_nr))); 3312 } 3313 3314 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 3315 { 3316 unsigned long flags; 3317 3318 spin_lock_irqsave(&md->uevent_lock, flags); 3319 list_add(elist, &md->uevent_list); 3320 spin_unlock_irqrestore(&md->uevent_lock, flags); 3321 } 3322 3323 /* 3324 * The gendisk is only valid as long as you have a reference 3325 * count on 'md'. 
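 * A typical lookup therefore brackets every use of the disk with a
 * reference:
 *
 *	md = dm_get_md(dev);		- takes a reference, or NULL
 *	if (md) {
 *		disk = dm_disk(md);	- valid while the reference is held
 *		...
 *		dm_put(md);
 *	}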
3326 */ 3327 struct gendisk *dm_disk(struct mapped_device *md) 3328 { 3329 return md->disk; 3330 } 3331 EXPORT_SYMBOL_GPL(dm_disk); 3332 3333 struct kobject *dm_kobject(struct mapped_device *md) 3334 { 3335 return &md->kobj_holder.kobj; 3336 } 3337 3338 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3339 { 3340 struct mapped_device *md; 3341 3342 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3343 3344 spin_lock(&_minor_lock); 3345 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 3346 md = NULL; 3347 goto out; 3348 } 3349 dm_get(md); 3350 out: 3351 spin_unlock(&_minor_lock); 3352 3353 return md; 3354 } 3355 3356 int dm_suspended_md(struct mapped_device *md) 3357 { 3358 return test_bit(DMF_SUSPENDED, &md->flags); 3359 } 3360 3361 static int dm_post_suspending_md(struct mapped_device *md) 3362 { 3363 return test_bit(DMF_POST_SUSPENDING, &md->flags); 3364 } 3365 3366 int dm_suspended_internally_md(struct mapped_device *md) 3367 { 3368 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3369 } 3370 3371 int dm_test_deferred_remove_flag(struct mapped_device *md) 3372 { 3373 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 3374 } 3375 3376 int dm_suspended(struct dm_target *ti) 3377 { 3378 return dm_suspended_md(ti->table->md); 3379 } 3380 EXPORT_SYMBOL_GPL(dm_suspended); 3381 3382 int dm_post_suspending(struct dm_target *ti) 3383 { 3384 return dm_post_suspending_md(ti->table->md); 3385 } 3386 EXPORT_SYMBOL_GPL(dm_post_suspending); 3387 3388 int dm_noflush_suspending(struct dm_target *ti) 3389 { 3390 return __noflush_suspending(ti->table->md); 3391 } 3392 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 3393 3394 void dm_free_md_mempools(struct dm_md_mempools *pools) 3395 { 3396 if (!pools) 3397 return; 3398 3399 bioset_exit(&pools->bs); 3400 bioset_exit(&pools->io_bs); 3401 3402 kfree(pools); 3403 } 3404 3405 struct dm_blkdev_id { 3406 u8 *id; 3407 enum blk_unique_id type; 3408 }; 3409 3410 static int __dm_get_unique_id(struct dm_target *ti, struct dm_dev *dev, 3411 sector_t start, sector_t len, void *data) 3412 { 3413 struct dm_blkdev_id *dm_id = data; 3414 const struct block_device_operations *fops = dev->bdev->bd_disk->fops; 3415 3416 if (!fops->get_unique_id) 3417 return 0; 3418 3419 return fops->get_unique_id(dev->bdev->bd_disk, dm_id->id, dm_id->type); 3420 } 3421 3422 /* 3423 * Allow access to get_unique_id() for the first device returning a 3424 * non-zero result. Reasonable use expects all devices to have the 3425 * same unique id. 
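 * This works because of the iterate_devices contract: the callout is
 * invoked once per underlying device and the walk stops as soon as it
 * returns a non-zero value, which is then propagated to the caller.
 * __dm_get_unique_id() above keeps the walk going (returns 0) for
 * devices without ->get_unique_id() and otherwise returns whatever
 * length or error the underlying disk reports.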
3426 */ 3427 static int dm_blk_get_unique_id(struct gendisk *disk, u8 *id, 3428 enum blk_unique_id type) 3429 { 3430 struct mapped_device *md = disk->private_data; 3431 struct dm_table *table; 3432 struct dm_target *ti; 3433 int ret = 0, srcu_idx; 3434 3435 struct dm_blkdev_id dm_id = { 3436 .id = id, 3437 .type = type, 3438 }; 3439 3440 table = dm_get_live_table(md, &srcu_idx); 3441 if (!table || !dm_table_get_size(table)) 3442 goto out; 3443 3444 /* We only support devices that have a single target */ 3445 if (table->num_targets != 1) 3446 goto out; 3447 ti = dm_table_get_target(table, 0); 3448 3449 if (!ti->type->iterate_devices) 3450 goto out; 3451 3452 ret = ti->type->iterate_devices(ti, __dm_get_unique_id, &dm_id); 3453 out: 3454 dm_put_live_table(md, srcu_idx); 3455 return ret; 3456 } 3457 3458 struct dm_pr { 3459 u64 old_key; 3460 u64 new_key; 3461 u32 flags; 3462 bool abort; 3463 bool fail_early; 3464 int ret; 3465 enum pr_type type; 3466 struct pr_keys *read_keys; 3467 struct pr_held_reservation *rsv; 3468 }; 3469 3470 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 3471 struct dm_pr *pr) 3472 { 3473 struct mapped_device *md = bdev->bd_disk->private_data; 3474 struct dm_table *table; 3475 struct dm_target *ti; 3476 int ret = -ENOTTY, srcu_idx; 3477 3478 table = dm_get_live_table(md, &srcu_idx); 3479 if (!table || !dm_table_get_size(table)) 3480 goto out; 3481 3482 /* We only support devices that have a single target */ 3483 if (table->num_targets != 1) 3484 goto out; 3485 ti = dm_table_get_target(table, 0); 3486 3487 if (dm_suspended_md(md)) { 3488 ret = -EAGAIN; 3489 goto out; 3490 } 3491 3492 ret = -EINVAL; 3493 if (!ti->type->iterate_devices) 3494 goto out; 3495 3496 ti->type->iterate_devices(ti, fn, pr); 3497 ret = 0; 3498 out: 3499 dm_put_live_table(md, srcu_idx); 3500 return ret; 3501 } 3502 3503 /* 3504 * For register / unregister we need to manually call out to every path. 
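 * Note the error convention shared by all the __dm_pr_* callouts
 * below: the real status lives in dm_pr.ret, while the callout's
 * return value only steers the walk - 0 continues with the next path,
 * -1 stops it (fail_early, or because going on would be pointless).
 * dm_call_pr() itself only fails for "could not even start" reasons:
 * no live table or more than one target (-ENOTTY), a suspended device
 * (-EAGAIN), or a target without ->iterate_devices (-EINVAL).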
3505 */ 3506 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 3507 sector_t start, sector_t len, void *data) 3508 { 3509 struct dm_pr *pr = data; 3510 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3511 int ret; 3512 3513 if (!ops || !ops->pr_register) { 3514 pr->ret = -EOPNOTSUPP; 3515 return -1; 3516 } 3517 3518 ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 3519 if (!ret) 3520 return 0; 3521 3522 if (!pr->ret) 3523 pr->ret = ret; 3524 3525 if (pr->fail_early) 3526 return -1; 3527 3528 return 0; 3529 } 3530 3531 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 3532 u32 flags) 3533 { 3534 struct dm_pr pr = { 3535 .old_key = old_key, 3536 .new_key = new_key, 3537 .flags = flags, 3538 .fail_early = true, 3539 .ret = 0, 3540 }; 3541 int ret; 3542 3543 ret = dm_call_pr(bdev, __dm_pr_register, &pr); 3544 if (ret) { 3545 /* Didn't even get to register a path */ 3546 return ret; 3547 } 3548 3549 if (!pr.ret) 3550 return 0; 3551 ret = pr.ret; 3552 3553 if (!new_key) 3554 return ret; 3555 3556 /* unregister all paths if we failed to register any path */ 3557 pr.old_key = new_key; 3558 pr.new_key = 0; 3559 pr.flags = 0; 3560 pr.fail_early = false; 3561 (void) dm_call_pr(bdev, __dm_pr_register, &pr); 3562 return ret; 3563 } 3564 3565 3566 static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev, 3567 sector_t start, sector_t len, void *data) 3568 { 3569 struct dm_pr *pr = data; 3570 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3571 3572 if (!ops || !ops->pr_reserve) { 3573 pr->ret = -EOPNOTSUPP; 3574 return -1; 3575 } 3576 3577 pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags); 3578 if (!pr->ret) 3579 return -1; 3580 3581 return 0; 3582 } 3583 3584 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 3585 u32 flags) 3586 { 3587 struct dm_pr pr = { 3588 .old_key = key, 3589 .flags = flags, 3590 .type = type, 3591 .fail_early = false, 3592 .ret = 0, 3593 }; 3594 int ret; 3595 3596 ret = dm_call_pr(bdev, __dm_pr_reserve, &pr); 3597 if (ret) 3598 return ret; 3599 3600 return pr.ret; 3601 } 3602 3603 /* 3604 * If there is a non-All Registrants type of reservation, the release must be 3605 * sent down the holding path. For the cases where there is no reservation or 3606 * the path is not the holder the device will also return success, so we must 3607 * try each path to make sure we got the correct path. 
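 * The callout below is therefore the inverse of __dm_pr_reserve(): it
 * keeps walking while pr_release() keeps succeeding and only stops on
 * the first failure, whose status is reported back to the caller.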
3608 */ 3609 static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev, 3610 sector_t start, sector_t len, void *data) 3611 { 3612 struct dm_pr *pr = data; 3613 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3614 3615 if (!ops || !ops->pr_release) { 3616 pr->ret = -EOPNOTSUPP; 3617 return -1; 3618 } 3619 3620 pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type); 3621 if (pr->ret) 3622 return -1; 3623 3624 return 0; 3625 } 3626 3627 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 3628 { 3629 struct dm_pr pr = { 3630 .old_key = key, 3631 .type = type, 3632 .fail_early = false, 3633 }; 3634 int ret; 3635 3636 ret = dm_call_pr(bdev, __dm_pr_release, &pr); 3637 if (ret) 3638 return ret; 3639 3640 return pr.ret; 3641 } 3642 3643 static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev, 3644 sector_t start, sector_t len, void *data) 3645 { 3646 struct dm_pr *pr = data; 3647 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3648 3649 if (!ops || !ops->pr_preempt) { 3650 pr->ret = -EOPNOTSUPP; 3651 return -1; 3652 } 3653 3654 pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type, 3655 pr->abort); 3656 if (!pr->ret) 3657 return -1; 3658 3659 return 0; 3660 } 3661 3662 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 3663 enum pr_type type, bool abort) 3664 { 3665 struct dm_pr pr = { 3666 .new_key = new_key, 3667 .old_key = old_key, 3668 .type = type, 3669 .fail_early = false, 3670 }; 3671 int ret; 3672 3673 ret = dm_call_pr(bdev, __dm_pr_preempt, &pr); 3674 if (ret) 3675 return ret; 3676 3677 return pr.ret; 3678 } 3679 3680 static int dm_pr_clear(struct block_device *bdev, u64 key) 3681 { 3682 struct mapped_device *md = bdev->bd_disk->private_data; 3683 const struct pr_ops *ops; 3684 int r, srcu_idx; 3685 bool forward = true; 3686 3687 /* Not a real ioctl, but targets must not interpret non-DM ioctls */ 3688 r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward); 3689 if (r < 0) 3690 goto out; 3691 WARN_ON_ONCE(!forward); 3692 3693 ops = bdev->bd_disk->fops->pr_ops; 3694 if (ops && ops->pr_clear) 3695 r = ops->pr_clear(bdev, key); 3696 else 3697 r = -EOPNOTSUPP; 3698 out: 3699 dm_unprepare_ioctl(md, srcu_idx); 3700 return r; 3701 } 3702 3703 static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev, 3704 sector_t start, sector_t len, void *data) 3705 { 3706 struct dm_pr *pr = data; 3707 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3708 3709 if (!ops || !ops->pr_read_keys) { 3710 pr->ret = -EOPNOTSUPP; 3711 return -1; 3712 } 3713 3714 pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys); 3715 if (!pr->ret) 3716 return -1; 3717 3718 return 0; 3719 } 3720 3721 static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys) 3722 { 3723 struct dm_pr pr = { 3724 .read_keys = keys, 3725 }; 3726 int ret; 3727 3728 ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr); 3729 if (ret) 3730 return ret; 3731 3732 return pr.ret; 3733 } 3734 3735 static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev, 3736 sector_t start, sector_t len, void *data) 3737 { 3738 struct dm_pr *pr = data; 3739 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3740 3741 if (!ops || !ops->pr_read_reservation) { 3742 pr->ret = -EOPNOTSUPP; 3743 return -1; 3744 } 3745 3746 pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv); 3747 if (!pr->ret) 3748 return -1; 3749 3750 return 0; 3751 } 3752 3753 static int dm_pr_read_reservation(struct 
block_device *bdev, 3754 struct pr_held_reservation *rsv) 3755 { 3756 struct dm_pr pr = { 3757 .rsv = rsv, 3758 }; 3759 int ret; 3760 3761 ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr); 3762 if (ret) 3763 return ret; 3764 3765 return pr.ret; 3766 } 3767 3768 static const struct pr_ops dm_pr_ops = { 3769 .pr_register = dm_pr_register, 3770 .pr_reserve = dm_pr_reserve, 3771 .pr_release = dm_pr_release, 3772 .pr_preempt = dm_pr_preempt, 3773 .pr_clear = dm_pr_clear, 3774 .pr_read_keys = dm_pr_read_keys, 3775 .pr_read_reservation = dm_pr_read_reservation, 3776 }; 3777 3778 static const struct block_device_operations dm_blk_dops = { 3779 .submit_bio = dm_submit_bio, 3780 .poll_bio = dm_poll_bio, 3781 .open = dm_blk_open, 3782 .release = dm_blk_close, 3783 .ioctl = dm_blk_ioctl, 3784 .getgeo = dm_blk_getgeo, 3785 .report_zones = dm_blk_report_zones, 3786 .get_unique_id = dm_blk_get_unique_id, 3787 .pr_ops = &dm_pr_ops, 3788 .owner = THIS_MODULE 3789 }; 3790 3791 static const struct block_device_operations dm_rq_blk_dops = { 3792 .open = dm_blk_open, 3793 .release = dm_blk_close, 3794 .ioctl = dm_blk_ioctl, 3795 .getgeo = dm_blk_getgeo, 3796 .get_unique_id = dm_blk_get_unique_id, 3797 .pr_ops = &dm_pr_ops, 3798 .owner = THIS_MODULE 3799 }; 3800 3801 static const struct dax_operations dm_dax_ops = { 3802 .direct_access = dm_dax_direct_access, 3803 .zero_page_range = dm_dax_zero_page_range, 3804 .recovery_write = dm_dax_recovery_write, 3805 }; 3806 3807 /* 3808 * module hooks 3809 */ 3810 module_init(dm_init); 3811 module_exit(dm_exit); 3812 3813 module_param(major, uint, 0); 3814 MODULE_PARM_DESC(major, "The major number of the device mapper"); 3815 3816 module_param(reserved_bio_based_ios, uint, 0644); 3817 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3818 3819 module_param(dm_numa_node, int, 0644); 3820 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3821 3822 module_param(swap_bios, int, 0644); 3823 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); 3824 3825 MODULE_DESCRIPTION(DM_NAME " driver"); 3826 MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>"); 3827 MODULE_LICENSE("GPL"); 3828
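
/*
 * The writable module parameters above (reserved_bio_based_ios,
 * dm_numa_node, swap_bios - all registered with mode 0644) can also be
 * changed at runtime, e.g.:
 *
 *	echo 1024 > /sys/module/dm_mod/parameters/swap_bios
 *
 * "major" (mode 0) can only be given at module load time, e.g.
 * "modprobe dm_mod major=240"; leaving it at 0 keeps the dynamically
 * allocated major number.
 */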