1 /* 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm-core.h" 9 #include "dm-rq.h" 10 #include "dm-uevent.h" 11 12 #include <linux/init.h> 13 #include <linux/module.h> 14 #include <linux/mutex.h> 15 #include <linux/sched/signal.h> 16 #include <linux/blkpg.h> 17 #include <linux/bio.h> 18 #include <linux/mempool.h> 19 #include <linux/dax.h> 20 #include <linux/slab.h> 21 #include <linux/idr.h> 22 #include <linux/uio.h> 23 #include <linux/hdreg.h> 24 #include <linux/delay.h> 25 #include <linux/wait.h> 26 #include <linux/pr.h> 27 #include <linux/refcount.h> 28 #include <linux/part_stat.h> 29 30 #define DM_MSG_PREFIX "core" 31 32 /* 33 * Cookies are numeric values sent with CHANGE and REMOVE 34 * uevents while resuming, removing or renaming the device. 35 */ 36 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 37 #define DM_COOKIE_LENGTH 24 38 39 static const char *_name = DM_NAME; 40 41 static unsigned int major = 0; 42 static unsigned int _major = 0; 43 44 static DEFINE_IDR(_minor_idr); 45 46 static DEFINE_SPINLOCK(_minor_lock); 47 48 static void do_deferred_remove(struct work_struct *w); 49 50 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 51 52 static struct workqueue_struct *deferred_remove_workqueue; 53 54 atomic_t dm_global_event_nr = ATOMIC_INIT(0); 55 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); 56 57 void dm_issue_global_event(void) 58 { 59 atomic_inc(&dm_global_event_nr); 60 wake_up(&dm_global_eventq); 61 } 62 63 /* 64 * One of these is allocated (on-stack) per original bio. 65 */ 66 struct clone_info { 67 struct dm_table *map; 68 struct bio *bio; 69 struct dm_io *io; 70 sector_t sector; 71 unsigned sector_count; 72 }; 73 74 /* 75 * One of these is allocated per clone bio. 76 */ 77 #define DM_TIO_MAGIC 7282014 78 struct dm_target_io { 79 unsigned magic; 80 struct dm_io *io; 81 struct dm_target *ti; 82 unsigned target_bio_nr; 83 unsigned *len_ptr; 84 bool inside_dm_io; 85 struct bio clone; 86 }; 87 88 /* 89 * One of these is allocated per original bio. 90 * It contains the first clone used for that original. 
91 */ 92 #define DM_IO_MAGIC 5191977 93 struct dm_io { 94 unsigned magic; 95 struct mapped_device *md; 96 blk_status_t status; 97 atomic_t io_count; 98 struct bio *orig_bio; 99 unsigned long start_time; 100 spinlock_t endio_lock; 101 struct dm_stats_aux stats_aux; 102 /* last member of dm_target_io is 'struct bio' */ 103 struct dm_target_io tio; 104 }; 105 106 void *dm_per_bio_data(struct bio *bio, size_t data_size) 107 { 108 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 109 if (!tio->inside_dm_io) 110 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; 111 return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size; 112 } 113 EXPORT_SYMBOL_GPL(dm_per_bio_data); 114 115 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) 116 { 117 struct dm_io *io = (struct dm_io *)((char *)data + data_size); 118 if (io->magic == DM_IO_MAGIC) 119 return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone)); 120 BUG_ON(io->magic != DM_TIO_MAGIC); 121 return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone)); 122 } 123 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); 124 125 unsigned dm_bio_get_target_bio_nr(const struct bio *bio) 126 { 127 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; 128 } 129 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); 130 131 #define MINOR_ALLOCED ((void *)-1) 132 133 /* 134 * Bits for the md->flags field. 135 */ 136 #define DMF_BLOCK_IO_FOR_SUSPEND 0 137 #define DMF_SUSPENDED 1 138 #define DMF_FROZEN 2 139 #define DMF_FREEING 3 140 #define DMF_DELETING 4 141 #define DMF_NOFLUSH_SUSPENDING 5 142 #define DMF_DEFERRED_REMOVE 6 143 #define DMF_SUSPENDED_INTERNALLY 7 144 145 #define DM_NUMA_NODE NUMA_NO_NODE 146 static int dm_numa_node = DM_NUMA_NODE; 147 148 /* 149 * For mempools pre-allocation at the table loading time. 150 */ 151 struct dm_md_mempools { 152 struct bio_set bs; 153 struct bio_set io_bs; 154 }; 155 156 struct table_device { 157 struct list_head list; 158 refcount_t count; 159 struct dm_dev dm_dev; 160 }; 161 162 /* 163 * Bio-based DM's mempools' reserved IOs set by the user. 
164 */ 165 #define RESERVED_BIO_BASED_IOS 16 166 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 167 168 static int __dm_get_module_param_int(int *module_param, int min, int max) 169 { 170 int param = READ_ONCE(*module_param); 171 int modified_param = 0; 172 bool modified = true; 173 174 if (param < min) 175 modified_param = min; 176 else if (param > max) 177 modified_param = max; 178 else 179 modified = false; 180 181 if (modified) { 182 (void)cmpxchg(module_param, param, modified_param); 183 param = modified_param; 184 } 185 186 return param; 187 } 188 189 unsigned __dm_get_module_param(unsigned *module_param, 190 unsigned def, unsigned max) 191 { 192 unsigned param = READ_ONCE(*module_param); 193 unsigned modified_param = 0; 194 195 if (!param) 196 modified_param = def; 197 else if (param > max) 198 modified_param = max; 199 200 if (modified_param) { 201 (void)cmpxchg(module_param, param, modified_param); 202 param = modified_param; 203 } 204 205 return param; 206 } 207 208 unsigned dm_get_reserved_bio_based_ios(void) 209 { 210 return __dm_get_module_param(&reserved_bio_based_ios, 211 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); 212 } 213 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 214 215 static unsigned dm_get_numa_node(void) 216 { 217 return __dm_get_module_param_int(&dm_numa_node, 218 DM_NUMA_NODE, num_online_nodes() - 1); 219 } 220 221 static int __init local_init(void) 222 { 223 int r; 224 225 r = dm_uevent_init(); 226 if (r) 227 return r; 228 229 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 230 if (!deferred_remove_workqueue) { 231 r = -ENOMEM; 232 goto out_uevent_exit; 233 } 234 235 _major = major; 236 r = register_blkdev(_major, _name); 237 if (r < 0) 238 goto out_free_workqueue; 239 240 if (!_major) 241 _major = r; 242 243 return 0; 244 245 out_free_workqueue: 246 destroy_workqueue(deferred_remove_workqueue); 247 out_uevent_exit: 248 dm_uevent_exit(); 249 250 return r; 251 } 252 253 static void local_exit(void) 254 { 255 flush_scheduled_work(); 256 destroy_workqueue(deferred_remove_workqueue); 257 258 unregister_blkdev(_major, _name); 259 dm_uevent_exit(); 260 261 _major = 0; 262 263 DMINFO("cleaned up"); 264 } 265 266 static int (*_inits[])(void) __initdata = { 267 local_init, 268 dm_target_init, 269 dm_linear_init, 270 dm_stripe_init, 271 dm_io_init, 272 dm_kcopyd_init, 273 dm_interface_init, 274 dm_statistics_init, 275 }; 276 277 static void (*_exits[])(void) = { 278 local_exit, 279 dm_target_exit, 280 dm_linear_exit, 281 dm_stripe_exit, 282 dm_io_exit, 283 dm_kcopyd_exit, 284 dm_interface_exit, 285 dm_statistics_exit, 286 }; 287 288 static int __init dm_init(void) 289 { 290 const int count = ARRAY_SIZE(_inits); 291 292 int r, i; 293 294 for (i = 0; i < count; i++) { 295 r = _inits[i](); 296 if (r) 297 goto bad; 298 } 299 300 return 0; 301 302 bad: 303 while (i--) 304 _exits[i](); 305 306 return r; 307 } 308 309 static void __exit dm_exit(void) 310 { 311 int i = ARRAY_SIZE(_exits); 312 313 while (i--) 314 _exits[i](); 315 316 /* 317 * Should be empty by this point. 
318 */ 319 idr_destroy(&_minor_idr); 320 } 321 322 /* 323 * Block device functions 324 */ 325 int dm_deleting_md(struct mapped_device *md) 326 { 327 return test_bit(DMF_DELETING, &md->flags); 328 } 329 330 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 331 { 332 struct mapped_device *md; 333 334 spin_lock(&_minor_lock); 335 336 md = bdev->bd_disk->private_data; 337 if (!md) 338 goto out; 339 340 if (test_bit(DMF_FREEING, &md->flags) || 341 dm_deleting_md(md)) { 342 md = NULL; 343 goto out; 344 } 345 346 dm_get(md); 347 atomic_inc(&md->open_count); 348 out: 349 spin_unlock(&_minor_lock); 350 351 return md ? 0 : -ENXIO; 352 } 353 354 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 355 { 356 struct mapped_device *md; 357 358 spin_lock(&_minor_lock); 359 360 md = disk->private_data; 361 if (WARN_ON(!md)) 362 goto out; 363 364 if (atomic_dec_and_test(&md->open_count) && 365 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 366 queue_work(deferred_remove_workqueue, &deferred_remove_work); 367 368 dm_put(md); 369 out: 370 spin_unlock(&_minor_lock); 371 } 372 373 int dm_open_count(struct mapped_device *md) 374 { 375 return atomic_read(&md->open_count); 376 } 377 378 /* 379 * Guarantees nothing is using the device before it's deleted. 380 */ 381 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 382 { 383 int r = 0; 384 385 spin_lock(&_minor_lock); 386 387 if (dm_open_count(md)) { 388 r = -EBUSY; 389 if (mark_deferred) 390 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 391 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 392 r = -EEXIST; 393 else 394 set_bit(DMF_DELETING, &md->flags); 395 396 spin_unlock(&_minor_lock); 397 398 return r; 399 } 400 401 int dm_cancel_deferred_remove(struct mapped_device *md) 402 { 403 int r = 0; 404 405 spin_lock(&_minor_lock); 406 407 if (test_bit(DMF_DELETING, &md->flags)) 408 r = -EBUSY; 409 else 410 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 411 412 spin_unlock(&_minor_lock); 413 414 return r; 415 } 416 417 static void do_deferred_remove(struct work_struct *w) 418 { 419 dm_deferred_remove(); 420 } 421 422 sector_t dm_get_size(struct mapped_device *md) 423 { 424 return get_capacity(md->disk); 425 } 426 427 struct request_queue *dm_get_md_queue(struct mapped_device *md) 428 { 429 return md->queue; 430 } 431 432 struct dm_stats *dm_get_stats(struct mapped_device *md) 433 { 434 return &md->stats; 435 } 436 437 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 438 { 439 struct mapped_device *md = bdev->bd_disk->private_data; 440 441 return dm_get_geometry(md, geo); 442 } 443 444 #ifdef CONFIG_BLK_DEV_ZONED 445 int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data) 446 { 447 struct dm_report_zones_args *args = data; 448 sector_t sector_diff = args->tgt->begin - args->start; 449 450 /* 451 * Ignore zones beyond the target range. 452 */ 453 if (zone->start >= args->start + args->tgt->len) 454 return 0; 455 456 /* 457 * Remap the start sector and write pointer position of the zone 458 * to match its position in the target range. 
459 */ 460 zone->start += sector_diff; 461 if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { 462 if (zone->cond == BLK_ZONE_COND_FULL) 463 zone->wp = zone->start + zone->len; 464 else if (zone->cond == BLK_ZONE_COND_EMPTY) 465 zone->wp = zone->start; 466 else 467 zone->wp += sector_diff; 468 } 469 470 args->next_sector = zone->start + zone->len; 471 return args->orig_cb(zone, args->zone_idx++, args->orig_data); 472 } 473 EXPORT_SYMBOL_GPL(dm_report_zones_cb); 474 475 static int dm_blk_report_zones(struct gendisk *disk, sector_t sector, 476 unsigned int nr_zones, report_zones_cb cb, void *data) 477 { 478 struct mapped_device *md = disk->private_data; 479 struct dm_table *map; 480 int srcu_idx, ret; 481 struct dm_report_zones_args args = { 482 .next_sector = sector, 483 .orig_data = data, 484 .orig_cb = cb, 485 }; 486 487 if (dm_suspended_md(md)) 488 return -EAGAIN; 489 490 map = dm_get_live_table(md, &srcu_idx); 491 if (!map) 492 return -EIO; 493 494 do { 495 struct dm_target *tgt; 496 497 tgt = dm_table_find_target(map, args.next_sector); 498 if (WARN_ON_ONCE(!tgt->type->report_zones)) { 499 ret = -EIO; 500 goto out; 501 } 502 503 args.tgt = tgt; 504 ret = tgt->type->report_zones(tgt, &args, nr_zones); 505 if (ret < 0) 506 goto out; 507 } while (args.zone_idx < nr_zones && 508 args.next_sector < get_capacity(disk)); 509 510 ret = args.zone_idx; 511 out: 512 dm_put_live_table(md, srcu_idx); 513 return ret; 514 } 515 #else 516 #define dm_blk_report_zones NULL 517 #endif /* CONFIG_BLK_DEV_ZONED */ 518 519 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, 520 struct block_device **bdev) 521 __acquires(md->io_barrier) 522 { 523 struct dm_target *tgt; 524 struct dm_table *map; 525 int r; 526 527 retry: 528 r = -ENOTTY; 529 map = dm_get_live_table(md, srcu_idx); 530 if (!map || !dm_table_get_size(map)) 531 return r; 532 533 /* We only support devices that have a single target */ 534 if (dm_table_get_num_targets(map) != 1) 535 return r; 536 537 tgt = dm_table_get_target(map, 0); 538 if (!tgt->type->prepare_ioctl) 539 return r; 540 541 if (dm_suspended_md(md)) 542 return -EAGAIN; 543 544 r = tgt->type->prepare_ioctl(tgt, bdev); 545 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 546 dm_put_live_table(md, *srcu_idx); 547 msleep(10); 548 goto retry; 549 } 550 551 return r; 552 } 553 554 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) 555 __releases(md->io_barrier) 556 { 557 dm_put_live_table(md, srcu_idx); 558 } 559 560 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 561 unsigned int cmd, unsigned long arg) 562 { 563 struct mapped_device *md = bdev->bd_disk->private_data; 564 int r, srcu_idx; 565 566 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 567 if (r < 0) 568 goto out; 569 570 if (r > 0) { 571 /* 572 * Target determined this ioctl is being issued against a 573 * subset of the parent bdev; require extra privileges. 
574 */ 575 if (!capable(CAP_SYS_RAWIO)) { 576 DMWARN_LIMIT( 577 "%s: sending ioctl %x to DM device without required privilege.", 578 current->comm, cmd); 579 r = -ENOIOCTLCMD; 580 goto out; 581 } 582 } 583 584 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 585 out: 586 dm_unprepare_ioctl(md, srcu_idx); 587 return r; 588 } 589 590 static void start_io_acct(struct dm_io *io); 591 592 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) 593 { 594 struct dm_io *io; 595 struct dm_target_io *tio; 596 struct bio *clone; 597 598 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); 599 if (!clone) 600 return NULL; 601 602 tio = container_of(clone, struct dm_target_io, clone); 603 tio->inside_dm_io = true; 604 tio->io = NULL; 605 606 io = container_of(tio, struct dm_io, tio); 607 io->magic = DM_IO_MAGIC; 608 io->status = 0; 609 atomic_set(&io->io_count, 1); 610 io->orig_bio = bio; 611 io->md = md; 612 spin_lock_init(&io->endio_lock); 613 614 start_io_acct(io); 615 616 return io; 617 } 618 619 static void free_io(struct mapped_device *md, struct dm_io *io) 620 { 621 bio_put(&io->tio.clone); 622 } 623 624 static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti, 625 unsigned target_bio_nr, gfp_t gfp_mask) 626 { 627 struct dm_target_io *tio; 628 629 if (!ci->io->tio.io) { 630 /* the dm_target_io embedded in ci->io is available */ 631 tio = &ci->io->tio; 632 } else { 633 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); 634 if (!clone) 635 return NULL; 636 637 tio = container_of(clone, struct dm_target_io, clone); 638 tio->inside_dm_io = false; 639 } 640 641 tio->magic = DM_TIO_MAGIC; 642 tio->io = ci->io; 643 tio->ti = ti; 644 tio->target_bio_nr = target_bio_nr; 645 646 return tio; 647 } 648 649 static void free_tio(struct dm_target_io *tio) 650 { 651 if (tio->inside_dm_io) 652 return; 653 bio_put(&tio->clone); 654 } 655 656 static bool md_in_flight_bios(struct mapped_device *md) 657 { 658 int cpu; 659 struct hd_struct *part = &dm_disk(md)->part0; 660 long sum = 0; 661 662 for_each_possible_cpu(cpu) { 663 sum += part_stat_local_read_cpu(part, in_flight[0], cpu); 664 sum += part_stat_local_read_cpu(part, in_flight[1], cpu); 665 } 666 667 return sum != 0; 668 } 669 670 static bool md_in_flight(struct mapped_device *md) 671 { 672 if (queue_is_mq(md->queue)) 673 return blk_mq_queue_inflight(md->queue); 674 else 675 return md_in_flight_bios(md); 676 } 677 678 static void start_io_acct(struct dm_io *io) 679 { 680 struct mapped_device *md = io->md; 681 struct bio *bio = io->orig_bio; 682 683 io->start_time = jiffies; 684 685 generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio), 686 &dm_disk(md)->part0); 687 688 if (unlikely(dm_stats_used(&md->stats))) 689 dm_stats_account_io(&md->stats, bio_data_dir(bio), 690 bio->bi_iter.bi_sector, bio_sectors(bio), 691 false, 0, &io->stats_aux); 692 } 693 694 static void end_io_acct(struct dm_io *io) 695 { 696 struct mapped_device *md = io->md; 697 struct bio *bio = io->orig_bio; 698 unsigned long duration = jiffies - io->start_time; 699 700 generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0, 701 io->start_time); 702 703 if (unlikely(dm_stats_used(&md->stats))) 704 dm_stats_account_io(&md->stats, bio_data_dir(bio), 705 bio->bi_iter.bi_sector, bio_sectors(bio), 706 true, duration, &io->stats_aux); 707 708 /* nudge anyone waiting on suspend queue */ 709 if (unlikely(wq_has_sleeper(&md->wait))) 710 wake_up(&md->wait); 711 } 712 713 /* 714 * Add the bio to the list of deferred io. 
715 */ 716 static void queue_io(struct mapped_device *md, struct bio *bio) 717 { 718 unsigned long flags; 719 720 spin_lock_irqsave(&md->deferred_lock, flags); 721 bio_list_add(&md->deferred, bio); 722 spin_unlock_irqrestore(&md->deferred_lock, flags); 723 queue_work(md->wq, &md->work); 724 } 725 726 /* 727 * Everyone (including functions in this file), should use this 728 * function to access the md->map field, and make sure they call 729 * dm_put_live_table() when finished. 730 */ 731 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 732 { 733 *srcu_idx = srcu_read_lock(&md->io_barrier); 734 735 return srcu_dereference(md->map, &md->io_barrier); 736 } 737 738 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 739 { 740 srcu_read_unlock(&md->io_barrier, srcu_idx); 741 } 742 743 void dm_sync_table(struct mapped_device *md) 744 { 745 synchronize_srcu(&md->io_barrier); 746 synchronize_rcu_expedited(); 747 } 748 749 /* 750 * A fast alternative to dm_get_live_table/dm_put_live_table. 751 * The caller must not block between these two functions. 752 */ 753 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 754 { 755 rcu_read_lock(); 756 return rcu_dereference(md->map); 757 } 758 759 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 760 { 761 rcu_read_unlock(); 762 } 763 764 static char *_dm_claim_ptr = "I belong to device-mapper"; 765 766 /* 767 * Open a table device so we can use it as a map destination. 768 */ 769 static int open_table_device(struct table_device *td, dev_t dev, 770 struct mapped_device *md) 771 { 772 struct block_device *bdev; 773 774 int r; 775 776 BUG_ON(td->dm_dev.bdev); 777 778 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); 779 if (IS_ERR(bdev)) 780 return PTR_ERR(bdev); 781 782 r = bd_link_disk_holder(bdev, dm_disk(md)); 783 if (r) { 784 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 785 return r; 786 } 787 788 td->dm_dev.bdev = bdev; 789 td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 790 return 0; 791 } 792 793 /* 794 * Close a table device that we've been using. 
795 */ 796 static void close_table_device(struct table_device *td, struct mapped_device *md) 797 { 798 if (!td->dm_dev.bdev) 799 return; 800 801 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 802 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 803 put_dax(td->dm_dev.dax_dev); 804 td->dm_dev.bdev = NULL; 805 td->dm_dev.dax_dev = NULL; 806 } 807 808 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 809 fmode_t mode) 810 { 811 struct table_device *td; 812 813 list_for_each_entry(td, l, list) 814 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 815 return td; 816 817 return NULL; 818 } 819 820 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 821 struct dm_dev **result) 822 { 823 int r; 824 struct table_device *td; 825 826 mutex_lock(&md->table_devices_lock); 827 td = find_table_device(&md->table_devices, dev, mode); 828 if (!td) { 829 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 830 if (!td) { 831 mutex_unlock(&md->table_devices_lock); 832 return -ENOMEM; 833 } 834 835 td->dm_dev.mode = mode; 836 td->dm_dev.bdev = NULL; 837 838 if ((r = open_table_device(td, dev, md))) { 839 mutex_unlock(&md->table_devices_lock); 840 kfree(td); 841 return r; 842 } 843 844 format_dev_t(td->dm_dev.name, dev); 845 846 refcount_set(&td->count, 1); 847 list_add(&td->list, &md->table_devices); 848 } else { 849 refcount_inc(&td->count); 850 } 851 mutex_unlock(&md->table_devices_lock); 852 853 *result = &td->dm_dev; 854 return 0; 855 } 856 EXPORT_SYMBOL_GPL(dm_get_table_device); 857 858 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 859 { 860 struct table_device *td = container_of(d, struct table_device, dm_dev); 861 862 mutex_lock(&md->table_devices_lock); 863 if (refcount_dec_and_test(&td->count)) { 864 close_table_device(td, md); 865 list_del(&td->list); 866 kfree(td); 867 } 868 mutex_unlock(&md->table_devices_lock); 869 } 870 EXPORT_SYMBOL(dm_put_table_device); 871 872 static void free_table_devices(struct list_head *devices) 873 { 874 struct list_head *tmp, *next; 875 876 list_for_each_safe(tmp, next, devices) { 877 struct table_device *td = list_entry(tmp, struct table_device, list); 878 879 DMWARN("dm_destroy: %s still exists with %d references", 880 td->dm_dev.name, refcount_read(&td->count)); 881 kfree(td); 882 } 883 } 884 885 /* 886 * Get the geometry associated with a dm device 887 */ 888 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 889 { 890 *geo = md->geometry; 891 892 return 0; 893 } 894 895 /* 896 * Set the geometry of a device. 897 */ 898 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 899 { 900 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 901 902 if (geo->start > sz) { 903 DMWARN("Start sector is beyond the geometry limits."); 904 return -EINVAL; 905 } 906 907 md->geometry = *geo; 908 909 return 0; 910 } 911 912 static int __noflush_suspending(struct mapped_device *md) 913 { 914 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 915 } 916 917 /* 918 * Decrements the number of outstanding ios that a bio has been 919 * cloned into, completing the original io if necc. 
920 */ 921 static void dec_pending(struct dm_io *io, blk_status_t error) 922 { 923 unsigned long flags; 924 blk_status_t io_error; 925 struct bio *bio; 926 struct mapped_device *md = io->md; 927 928 /* Push-back supersedes any I/O errors */ 929 if (unlikely(error)) { 930 spin_lock_irqsave(&io->endio_lock, flags); 931 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) 932 io->status = error; 933 spin_unlock_irqrestore(&io->endio_lock, flags); 934 } 935 936 if (atomic_dec_and_test(&io->io_count)) { 937 if (io->status == BLK_STS_DM_REQUEUE) { 938 /* 939 * Target requested pushing back the I/O. 940 */ 941 spin_lock_irqsave(&md->deferred_lock, flags); 942 if (__noflush_suspending(md)) 943 /* NOTE early return due to BLK_STS_DM_REQUEUE below */ 944 bio_list_add_head(&md->deferred, io->orig_bio); 945 else 946 /* noflush suspend was interrupted. */ 947 io->status = BLK_STS_IOERR; 948 spin_unlock_irqrestore(&md->deferred_lock, flags); 949 } 950 951 io_error = io->status; 952 bio = io->orig_bio; 953 end_io_acct(io); 954 free_io(md, io); 955 956 if (io_error == BLK_STS_DM_REQUEUE) 957 return; 958 959 if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { 960 /* 961 * Preflush done for flush with data, reissue 962 * without REQ_PREFLUSH. 963 */ 964 bio->bi_opf &= ~REQ_PREFLUSH; 965 queue_io(md, bio); 966 } else { 967 /* done with normal IO or empty flush */ 968 if (io_error) 969 bio->bi_status = io_error; 970 bio_endio(bio); 971 } 972 } 973 } 974 975 void disable_discard(struct mapped_device *md) 976 { 977 struct queue_limits *limits = dm_get_queue_limits(md); 978 979 /* device doesn't really support DISCARD, disable it */ 980 limits->max_discard_sectors = 0; 981 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); 982 } 983 984 void disable_write_same(struct mapped_device *md) 985 { 986 struct queue_limits *limits = dm_get_queue_limits(md); 987 988 /* device doesn't really support WRITE SAME, disable it */ 989 limits->max_write_same_sectors = 0; 990 } 991 992 void disable_write_zeroes(struct mapped_device *md) 993 { 994 struct queue_limits *limits = dm_get_queue_limits(md); 995 996 /* device doesn't really support WRITE ZEROES, disable it */ 997 limits->max_write_zeroes_sectors = 0; 998 } 999 1000 static void clone_endio(struct bio *bio) 1001 { 1002 blk_status_t error = bio->bi_status; 1003 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1004 struct dm_io *io = tio->io; 1005 struct mapped_device *md = tio->io->md; 1006 dm_endio_fn endio = tio->ti->type->end_io; 1007 1008 if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { 1009 if (bio_op(bio) == REQ_OP_DISCARD && 1010 !bio->bi_disk->queue->limits.max_discard_sectors) 1011 disable_discard(md); 1012 else if (bio_op(bio) == REQ_OP_WRITE_SAME && 1013 !bio->bi_disk->queue->limits.max_write_same_sectors) 1014 disable_write_same(md); 1015 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 1016 !bio->bi_disk->queue->limits.max_write_zeroes_sectors) 1017 disable_write_zeroes(md); 1018 } 1019 1020 if (endio) { 1021 int r = endio(tio->ti, bio, &error); 1022 switch (r) { 1023 case DM_ENDIO_REQUEUE: 1024 error = BLK_STS_DM_REQUEUE; 1025 /*FALLTHRU*/ 1026 case DM_ENDIO_DONE: 1027 break; 1028 case DM_ENDIO_INCOMPLETE: 1029 /* The target will handle the io */ 1030 return; 1031 default: 1032 DMWARN("unimplemented target endio return value: %d", r); 1033 BUG(); 1034 } 1035 } 1036 1037 free_tio(tio); 1038 dec_pending(io, error); 1039 } 1040 1041 /* 1042 * Return maximum size of I/O possible at the 
supplied sector up to the current 1043 * target boundary. 1044 */ 1045 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 1046 { 1047 sector_t target_offset = dm_target_offset(ti, sector); 1048 1049 return ti->len - target_offset; 1050 } 1051 1052 static sector_t max_io_len(sector_t sector, struct dm_target *ti) 1053 { 1054 sector_t len = max_io_len_target_boundary(sector, ti); 1055 sector_t offset, max_len; 1056 1057 /* 1058 * Does the target need to split even further? 1059 */ 1060 if (ti->max_io_len) { 1061 offset = dm_target_offset(ti, sector); 1062 if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 1063 max_len = sector_div(offset, ti->max_io_len); 1064 else 1065 max_len = offset & (ti->max_io_len - 1); 1066 max_len = ti->max_io_len - max_len; 1067 1068 if (len > max_len) 1069 len = max_len; 1070 } 1071 1072 return len; 1073 } 1074 1075 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1076 { 1077 if (len > UINT_MAX) { 1078 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1079 (unsigned long long)len, UINT_MAX); 1080 ti->error = "Maximum size of target IO is too large"; 1081 return -EINVAL; 1082 } 1083 1084 ti->max_io_len = (uint32_t) len; 1085 1086 return 0; 1087 } 1088 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1089 1090 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, 1091 sector_t sector, int *srcu_idx) 1092 __acquires(md->io_barrier) 1093 { 1094 struct dm_table *map; 1095 struct dm_target *ti; 1096 1097 map = dm_get_live_table(md, srcu_idx); 1098 if (!map) 1099 return NULL; 1100 1101 ti = dm_table_find_target(map, sector); 1102 if (!ti) 1103 return NULL; 1104 1105 return ti; 1106 } 1107 1108 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 1109 long nr_pages, void **kaddr, pfn_t *pfn) 1110 { 1111 struct mapped_device *md = dax_get_private(dax_dev); 1112 sector_t sector = pgoff * PAGE_SECTORS; 1113 struct dm_target *ti; 1114 long len, ret = -EIO; 1115 int srcu_idx; 1116 1117 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1118 1119 if (!ti) 1120 goto out; 1121 if (!ti->type->direct_access) 1122 goto out; 1123 len = max_io_len(sector, ti) / PAGE_SECTORS; 1124 if (len < 1) 1125 goto out; 1126 nr_pages = min(len, nr_pages); 1127 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); 1128 1129 out: 1130 dm_put_live_table(md, srcu_idx); 1131 1132 return ret; 1133 } 1134 1135 static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev, 1136 int blocksize, sector_t start, sector_t len) 1137 { 1138 struct mapped_device *md = dax_get_private(dax_dev); 1139 struct dm_table *map; 1140 int srcu_idx; 1141 bool ret; 1142 1143 map = dm_get_live_table(md, &srcu_idx); 1144 if (!map) 1145 return false; 1146 1147 ret = dm_table_supports_dax(map, device_supports_dax, &blocksize); 1148 1149 dm_put_live_table(md, srcu_idx); 1150 1151 return ret; 1152 } 1153 1154 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, 1155 void *addr, size_t bytes, struct iov_iter *i) 1156 { 1157 struct mapped_device *md = dax_get_private(dax_dev); 1158 sector_t sector = pgoff * PAGE_SECTORS; 1159 struct dm_target *ti; 1160 long ret = 0; 1161 int srcu_idx; 1162 1163 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1164 1165 if (!ti) 1166 goto out; 1167 if (!ti->type->dax_copy_from_iter) { 1168 ret = copy_from_iter(addr, bytes, i); 1169 goto out; 1170 } 1171 ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); 1172 out: 1173 
dm_put_live_table(md, srcu_idx); 1174 1175 return ret; 1176 } 1177 1178 static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, 1179 void *addr, size_t bytes, struct iov_iter *i) 1180 { 1181 struct mapped_device *md = dax_get_private(dax_dev); 1182 sector_t sector = pgoff * PAGE_SECTORS; 1183 struct dm_target *ti; 1184 long ret = 0; 1185 int srcu_idx; 1186 1187 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1188 1189 if (!ti) 1190 goto out; 1191 if (!ti->type->dax_copy_to_iter) { 1192 ret = copy_to_iter(addr, bytes, i); 1193 goto out; 1194 } 1195 ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i); 1196 out: 1197 dm_put_live_table(md, srcu_idx); 1198 1199 return ret; 1200 } 1201 1202 /* 1203 * A target may call dm_accept_partial_bio only from the map routine. It is 1204 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET, 1205 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH. 1206 * 1207 * dm_accept_partial_bio informs the dm that the target only wants to process 1208 * additional n_sectors sectors of the bio and the rest of the data should be 1209 * sent in a next bio. 1210 * 1211 * A diagram that explains the arithmetics: 1212 * +--------------------+---------------+-------+ 1213 * | 1 | 2 | 3 | 1214 * +--------------------+---------------+-------+ 1215 * 1216 * <-------------- *tio->len_ptr ---------------> 1217 * <------- bi_size -------> 1218 * <-- n_sectors --> 1219 * 1220 * Region 1 was already iterated over with bio_advance or similar function. 1221 * (it may be empty if the target doesn't use bio_advance) 1222 * Region 2 is the remaining bio size that the target wants to process. 1223 * (it may be empty if region 1 is non-empty, although there is no reason 1224 * to make it empty) 1225 * The target requires that region 3 is to be sent in the next bio. 1226 * 1227 * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 1228 * the partially processed part (the sum of regions 1+2) must be the same for all 1229 * copies of the bio. 1230 */ 1231 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 1232 { 1233 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1234 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 1235 BUG_ON(bio->bi_opf & REQ_PREFLUSH); 1236 BUG_ON(bi_size > *tio->len_ptr); 1237 BUG_ON(n_sectors > bi_size); 1238 *tio->len_ptr -= bi_size - n_sectors; 1239 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1240 } 1241 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1242 1243 static blk_qc_t __map_bio(struct dm_target_io *tio) 1244 { 1245 int r; 1246 sector_t sector; 1247 struct bio *clone = &tio->clone; 1248 struct dm_io *io = tio->io; 1249 struct mapped_device *md = io->md; 1250 struct dm_target *ti = tio->ti; 1251 blk_qc_t ret = BLK_QC_T_NONE; 1252 1253 clone->bi_end_io = clone_endio; 1254 1255 /* 1256 * Map the clone. If r == 0 we don't need to do 1257 * anything, the target has assumed ownership of 1258 * this io. 
1259 */ 1260 atomic_inc(&io->io_count); 1261 sector = clone->bi_iter.bi_sector; 1262 1263 r = ti->type->map(ti, clone); 1264 switch (r) { 1265 case DM_MAPIO_SUBMITTED: 1266 break; 1267 case DM_MAPIO_REMAPPED: 1268 /* the bio has been remapped so dispatch it */ 1269 trace_block_bio_remap(clone->bi_disk->queue, clone, 1270 bio_dev(io->orig_bio), sector); 1271 if (md->type == DM_TYPE_NVME_BIO_BASED) 1272 ret = direct_make_request(clone); 1273 else 1274 ret = generic_make_request(clone); 1275 break; 1276 case DM_MAPIO_KILL: 1277 free_tio(tio); 1278 dec_pending(io, BLK_STS_IOERR); 1279 break; 1280 case DM_MAPIO_REQUEUE: 1281 free_tio(tio); 1282 dec_pending(io, BLK_STS_DM_REQUEUE); 1283 break; 1284 default: 1285 DMWARN("unimplemented target map return value: %d", r); 1286 BUG(); 1287 } 1288 1289 return ret; 1290 } 1291 1292 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1293 { 1294 bio->bi_iter.bi_sector = sector; 1295 bio->bi_iter.bi_size = to_bytes(len); 1296 } 1297 1298 /* 1299 * Creates a bio that consists of range of complete bvecs. 1300 */ 1301 static int clone_bio(struct dm_target_io *tio, struct bio *bio, 1302 sector_t sector, unsigned len) 1303 { 1304 struct bio *clone = &tio->clone; 1305 1306 __bio_clone_fast(clone, bio); 1307 1308 if (bio_integrity(bio)) { 1309 int r; 1310 1311 if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1312 !dm_target_passes_integrity(tio->ti->type))) { 1313 DMWARN("%s: the target %s doesn't support integrity data.", 1314 dm_device_name(tio->io->md), 1315 tio->ti->type->name); 1316 return -EIO; 1317 } 1318 1319 r = bio_integrity_clone(clone, bio, GFP_NOIO); 1320 if (r < 0) 1321 return r; 1322 } 1323 1324 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1325 clone->bi_iter.bi_size = to_bytes(len); 1326 1327 if (bio_integrity(bio)) 1328 bio_integrity_trim(clone); 1329 1330 return 0; 1331 } 1332 1333 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1334 struct dm_target *ti, unsigned num_bios) 1335 { 1336 struct dm_target_io *tio; 1337 int try; 1338 1339 if (!num_bios) 1340 return; 1341 1342 if (num_bios == 1) { 1343 tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1344 bio_list_add(blist, &tio->clone); 1345 return; 1346 } 1347 1348 for (try = 0; try < 2; try++) { 1349 int bio_nr; 1350 struct bio *bio; 1351 1352 if (try) 1353 mutex_lock(&ci->io->md->table_devices_lock); 1354 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1355 tio = alloc_tio(ci, ti, bio_nr, try ? 
GFP_NOIO : GFP_NOWAIT); 1356 if (!tio) 1357 break; 1358 1359 bio_list_add(blist, &tio->clone); 1360 } 1361 if (try) 1362 mutex_unlock(&ci->io->md->table_devices_lock); 1363 if (bio_nr == num_bios) 1364 return; 1365 1366 while ((bio = bio_list_pop(blist))) { 1367 tio = container_of(bio, struct dm_target_io, clone); 1368 free_tio(tio); 1369 } 1370 } 1371 } 1372 1373 static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1374 struct dm_target_io *tio, unsigned *len) 1375 { 1376 struct bio *clone = &tio->clone; 1377 1378 tio->len_ptr = len; 1379 1380 __bio_clone_fast(clone, ci->bio); 1381 if (len) 1382 bio_setup_sector(clone, ci->sector, *len); 1383 1384 return __map_bio(tio); 1385 } 1386 1387 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1388 unsigned num_bios, unsigned *len) 1389 { 1390 struct bio_list blist = BIO_EMPTY_LIST; 1391 struct bio *bio; 1392 struct dm_target_io *tio; 1393 1394 alloc_multiple_bios(&blist, ci, ti, num_bios); 1395 1396 while ((bio = bio_list_pop(&blist))) { 1397 tio = container_of(bio, struct dm_target_io, clone); 1398 (void) __clone_and_map_simple_bio(ci, tio, len); 1399 } 1400 } 1401 1402 static int __send_empty_flush(struct clone_info *ci) 1403 { 1404 unsigned target_nr = 0; 1405 struct dm_target *ti; 1406 1407 /* 1408 * Empty flush uses a statically initialized bio, as the base for 1409 * cloning. However, blkg association requires that a bdev is 1410 * associated with a gendisk, which doesn't happen until the bdev is 1411 * opened. So, blkg association is done at issue time of the flush 1412 * rather than when the device is created in alloc_dev(). 1413 */ 1414 bio_set_dev(ci->bio, ci->io->md->bdev); 1415 1416 BUG_ON(bio_has_data(ci->bio)); 1417 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1418 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1419 1420 bio_disassociate_blkg(ci->bio); 1421 1422 return 0; 1423 } 1424 1425 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1426 sector_t sector, unsigned *len) 1427 { 1428 struct bio *bio = ci->bio; 1429 struct dm_target_io *tio; 1430 int r; 1431 1432 tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1433 tio->len_ptr = len; 1434 r = clone_bio(tio, bio, sector, *len); 1435 if (r < 0) { 1436 free_tio(tio); 1437 return r; 1438 } 1439 (void) __map_bio(tio); 1440 1441 return 0; 1442 } 1443 1444 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 1445 1446 static unsigned get_num_discard_bios(struct dm_target *ti) 1447 { 1448 return ti->num_discard_bios; 1449 } 1450 1451 static unsigned get_num_secure_erase_bios(struct dm_target *ti) 1452 { 1453 return ti->num_secure_erase_bios; 1454 } 1455 1456 static unsigned get_num_write_same_bios(struct dm_target *ti) 1457 { 1458 return ti->num_write_same_bios; 1459 } 1460 1461 static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1462 { 1463 return ti->num_write_zeroes_bios; 1464 } 1465 1466 static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 1467 unsigned num_bios) 1468 { 1469 unsigned len; 1470 1471 /* 1472 * Even though the device advertised support for this type of 1473 * request, that does not mean every target supports it, and 1474 * reconfiguration might also have changed that since the 1475 * check was performed. 
1476 */ 1477 if (!num_bios) 1478 return -EOPNOTSUPP; 1479 1480 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1481 1482 __send_duplicate_bios(ci, ti, num_bios, &len); 1483 1484 ci->sector += len; 1485 ci->sector_count -= len; 1486 1487 return 0; 1488 } 1489 1490 static int __send_discard(struct clone_info *ci, struct dm_target *ti) 1491 { 1492 return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti)); 1493 } 1494 1495 static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) 1496 { 1497 return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti)); 1498 } 1499 1500 static int __send_write_same(struct clone_info *ci, struct dm_target *ti) 1501 { 1502 return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti)); 1503 } 1504 1505 static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) 1506 { 1507 return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti)); 1508 } 1509 1510 static bool is_abnormal_io(struct bio *bio) 1511 { 1512 bool r = false; 1513 1514 switch (bio_op(bio)) { 1515 case REQ_OP_DISCARD: 1516 case REQ_OP_SECURE_ERASE: 1517 case REQ_OP_WRITE_SAME: 1518 case REQ_OP_WRITE_ZEROES: 1519 r = true; 1520 break; 1521 } 1522 1523 return r; 1524 } 1525 1526 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 1527 int *result) 1528 { 1529 struct bio *bio = ci->bio; 1530 1531 if (bio_op(bio) == REQ_OP_DISCARD) 1532 *result = __send_discard(ci, ti); 1533 else if (bio_op(bio) == REQ_OP_SECURE_ERASE) 1534 *result = __send_secure_erase(ci, ti); 1535 else if (bio_op(bio) == REQ_OP_WRITE_SAME) 1536 *result = __send_write_same(ci, ti); 1537 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) 1538 *result = __send_write_zeroes(ci, ti); 1539 else 1540 return false; 1541 1542 return true; 1543 } 1544 1545 /* 1546 * Select the correct strategy for processing a non-flush bio. 1547 */ 1548 static int __split_and_process_non_flush(struct clone_info *ci) 1549 { 1550 struct dm_target *ti; 1551 unsigned len; 1552 int r; 1553 1554 ti = dm_table_find_target(ci->map, ci->sector); 1555 if (!ti) 1556 return -EIO; 1557 1558 if (__process_abnormal_io(ci, ti, &r)) 1559 return r; 1560 1561 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1562 1563 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1564 if (r < 0) 1565 return r; 1566 1567 ci->sector += len; 1568 ci->sector_count -= len; 1569 1570 return 0; 1571 } 1572 1573 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1574 struct dm_table *map, struct bio *bio) 1575 { 1576 ci->map = map; 1577 ci->io = alloc_io(md, bio); 1578 ci->sector = bio->bi_iter.bi_sector; 1579 } 1580 1581 #define __dm_part_stat_sub(part, field, subnd) \ 1582 (part_stat_get(part, field) -= (subnd)) 1583 1584 /* 1585 * Entry point to split a bio into clones and submit them to the targets. 1586 */ 1587 static blk_qc_t __split_and_process_bio(struct mapped_device *md, 1588 struct dm_table *map, struct bio *bio) 1589 { 1590 struct clone_info ci; 1591 blk_qc_t ret = BLK_QC_T_NONE; 1592 int error = 0; 1593 1594 init_clone_info(&ci, md, map, bio); 1595 1596 if (bio->bi_opf & REQ_PREFLUSH) { 1597 struct bio flush_bio; 1598 1599 /* 1600 * Use an on-stack bio for this, it's safe since we don't 1601 * need to reference it after submit. It's just used as 1602 * the basis for the clone(s). 
1603 */ 1604 bio_init(&flush_bio, NULL, 0); 1605 flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1606 ci.bio = &flush_bio; 1607 ci.sector_count = 0; 1608 error = __send_empty_flush(&ci); 1609 /* dec_pending submits any data associated with flush */ 1610 } else if (op_is_zone_mgmt(bio_op(bio))) { 1611 ci.bio = bio; 1612 ci.sector_count = 0; 1613 error = __split_and_process_non_flush(&ci); 1614 } else { 1615 ci.bio = bio; 1616 ci.sector_count = bio_sectors(bio); 1617 while (ci.sector_count && !error) { 1618 error = __split_and_process_non_flush(&ci); 1619 if (current->bio_list && ci.sector_count && !error) { 1620 /* 1621 * Remainder must be passed to generic_make_request() 1622 * so that it gets handled *after* bios already submitted 1623 * have been completely processed. 1624 * We take a clone of the original to store in 1625 * ci.io->orig_bio to be used by end_io_acct() and 1626 * for dec_pending to use for completion handling. 1627 */ 1628 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1629 GFP_NOIO, &md->queue->bio_split); 1630 ci.io->orig_bio = b; 1631 1632 /* 1633 * Adjust IO stats for each split, otherwise upon queue 1634 * reentry there will be redundant IO accounting. 1635 * NOTE: this is a stop-gap fix, a proper fix involves 1636 * significant refactoring of DM core's bio splitting 1637 * (by eliminating DM's splitting and just using bio_split) 1638 */ 1639 part_stat_lock(); 1640 __dm_part_stat_sub(&dm_disk(md)->part0, 1641 sectors[op_stat_group(bio_op(bio))], ci.sector_count); 1642 part_stat_unlock(); 1643 1644 bio_chain(b, bio); 1645 trace_block_split(md->queue, b, bio->bi_iter.bi_sector); 1646 ret = generic_make_request(bio); 1647 break; 1648 } 1649 } 1650 } 1651 1652 /* drop the extra reference count */ 1653 dec_pending(ci.io, errno_to_blk_status(error)); 1654 return ret; 1655 } 1656 1657 /* 1658 * Optimized variant of __split_and_process_bio that leverages the 1659 * fact that targets that use it do _not_ have a need to split bios. 1660 */ 1661 static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, 1662 struct bio *bio, struct dm_target *ti) 1663 { 1664 struct clone_info ci; 1665 blk_qc_t ret = BLK_QC_T_NONE; 1666 int error = 0; 1667 1668 init_clone_info(&ci, md, map, bio); 1669 1670 if (bio->bi_opf & REQ_PREFLUSH) { 1671 struct bio flush_bio; 1672 1673 /* 1674 * Use an on-stack bio for this, it's safe since we don't 1675 * need to reference it after submit. It's just used as 1676 * the basis for the clone(s). 
1677 */ 1678 bio_init(&flush_bio, NULL, 0); 1679 flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1680 ci.bio = &flush_bio; 1681 ci.sector_count = 0; 1682 error = __send_empty_flush(&ci); 1683 /* dec_pending submits any data associated with flush */ 1684 } else { 1685 struct dm_target_io *tio; 1686 1687 ci.bio = bio; 1688 ci.sector_count = bio_sectors(bio); 1689 if (__process_abnormal_io(&ci, ti, &error)) 1690 goto out; 1691 1692 tio = alloc_tio(&ci, ti, 0, GFP_NOIO); 1693 ret = __clone_and_map_simple_bio(&ci, tio, NULL); 1694 } 1695 out: 1696 /* drop the extra reference count */ 1697 dec_pending(ci.io, errno_to_blk_status(error)); 1698 return ret; 1699 } 1700 1701 static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) 1702 { 1703 unsigned len, sector_count; 1704 1705 sector_count = bio_sectors(*bio); 1706 len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); 1707 1708 if (sector_count > len) { 1709 struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split); 1710 1711 bio_chain(split, *bio); 1712 trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); 1713 generic_make_request(*bio); 1714 *bio = split; 1715 } 1716 } 1717 1718 static blk_qc_t dm_process_bio(struct mapped_device *md, 1719 struct dm_table *map, struct bio *bio) 1720 { 1721 blk_qc_t ret = BLK_QC_T_NONE; 1722 struct dm_target *ti = md->immutable_target; 1723 1724 if (unlikely(!map)) { 1725 bio_io_error(bio); 1726 return ret; 1727 } 1728 1729 if (!ti) { 1730 ti = dm_table_find_target(map, bio->bi_iter.bi_sector); 1731 if (unlikely(!ti)) { 1732 bio_io_error(bio); 1733 return ret; 1734 } 1735 } 1736 1737 /* 1738 * If in ->make_request_fn we need to use blk_queue_split(), otherwise 1739 * queue_limits for abnormal requests (e.g. discard, writesame, etc) 1740 * won't be imposed. 1741 */ 1742 if (current->bio_list) { 1743 blk_queue_split(md->queue, &bio); 1744 if (!is_abnormal_io(bio)) 1745 dm_queue_split(md, ti, &bio); 1746 } 1747 1748 if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) 1749 return __process_bio(md, map, bio, ti); 1750 else 1751 return __split_and_process_bio(md, map, bio); 1752 } 1753 1754 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1755 { 1756 struct mapped_device *md = q->queuedata; 1757 blk_qc_t ret = BLK_QC_T_NONE; 1758 int srcu_idx; 1759 struct dm_table *map; 1760 1761 map = dm_get_live_table(md, &srcu_idx); 1762 1763 /* if we're suspended, we have to queue this io for later */ 1764 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1765 dm_put_live_table(md, srcu_idx); 1766 1767 if (!(bio->bi_opf & REQ_RAHEAD)) 1768 queue_io(md, bio); 1769 else 1770 bio_io_error(bio); 1771 return ret; 1772 } 1773 1774 ret = dm_process_bio(md, map, bio); 1775 1776 dm_put_live_table(md, srcu_idx); 1777 return ret; 1778 } 1779 1780 static int dm_any_congested(void *congested_data, int bdi_bits) 1781 { 1782 int r = bdi_bits; 1783 struct mapped_device *md = congested_data; 1784 struct dm_table *map; 1785 1786 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1787 if (dm_request_based(md)) { 1788 /* 1789 * With request-based DM we only need to check the 1790 * top-level queue for congestion. 
1791 */ 1792 struct backing_dev_info *bdi = md->queue->backing_dev_info; 1793 r = bdi->wb.congested->state & bdi_bits; 1794 } else { 1795 map = dm_get_live_table_fast(md); 1796 if (map) 1797 r = dm_table_any_congested(map, bdi_bits); 1798 dm_put_live_table_fast(md); 1799 } 1800 } 1801 1802 return r; 1803 } 1804 1805 /*----------------------------------------------------------------- 1806 * An IDR is used to keep track of allocated minor numbers. 1807 *---------------------------------------------------------------*/ 1808 static void free_minor(int minor) 1809 { 1810 spin_lock(&_minor_lock); 1811 idr_remove(&_minor_idr, minor); 1812 spin_unlock(&_minor_lock); 1813 } 1814 1815 /* 1816 * See if the device with a specific minor # is free. 1817 */ 1818 static int specific_minor(int minor) 1819 { 1820 int r; 1821 1822 if (minor >= (1 << MINORBITS)) 1823 return -EINVAL; 1824 1825 idr_preload(GFP_KERNEL); 1826 spin_lock(&_minor_lock); 1827 1828 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 1829 1830 spin_unlock(&_minor_lock); 1831 idr_preload_end(); 1832 if (r < 0) 1833 return r == -ENOSPC ? -EBUSY : r; 1834 return 0; 1835 } 1836 1837 static int next_free_minor(int *minor) 1838 { 1839 int r; 1840 1841 idr_preload(GFP_KERNEL); 1842 spin_lock(&_minor_lock); 1843 1844 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 1845 1846 spin_unlock(&_minor_lock); 1847 idr_preload_end(); 1848 if (r < 0) 1849 return r; 1850 *minor = r; 1851 return 0; 1852 } 1853 1854 static const struct block_device_operations dm_blk_dops; 1855 static const struct dax_operations dm_dax_ops; 1856 1857 static void dm_wq_work(struct work_struct *work); 1858 1859 static void cleanup_mapped_device(struct mapped_device *md) 1860 { 1861 if (md->wq) 1862 destroy_workqueue(md->wq); 1863 bioset_exit(&md->bs); 1864 bioset_exit(&md->io_bs); 1865 1866 if (md->dax_dev) { 1867 kill_dax(md->dax_dev); 1868 put_dax(md->dax_dev); 1869 md->dax_dev = NULL; 1870 } 1871 1872 if (md->disk) { 1873 spin_lock(&_minor_lock); 1874 md->disk->private_data = NULL; 1875 spin_unlock(&_minor_lock); 1876 del_gendisk(md->disk); 1877 put_disk(md->disk); 1878 } 1879 1880 if (md->queue) 1881 blk_cleanup_queue(md->queue); 1882 1883 cleanup_srcu_struct(&md->io_barrier); 1884 1885 if (md->bdev) { 1886 bdput(md->bdev); 1887 md->bdev = NULL; 1888 } 1889 1890 mutex_destroy(&md->suspend_lock); 1891 mutex_destroy(&md->type_lock); 1892 mutex_destroy(&md->table_devices_lock); 1893 1894 dm_mq_cleanup_mapped_device(md); 1895 } 1896 1897 /* 1898 * Allocate and initialise a blank device with a given minor. 
1899 */ 1900 static struct mapped_device *alloc_dev(int minor) 1901 { 1902 int r, numa_node_id = dm_get_numa_node(); 1903 struct mapped_device *md; 1904 void *old_md; 1905 1906 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 1907 if (!md) { 1908 DMWARN("unable to allocate device, out of memory."); 1909 return NULL; 1910 } 1911 1912 if (!try_module_get(THIS_MODULE)) 1913 goto bad_module_get; 1914 1915 /* get a minor number for the dev */ 1916 if (minor == DM_ANY_MINOR) 1917 r = next_free_minor(&minor); 1918 else 1919 r = specific_minor(minor); 1920 if (r < 0) 1921 goto bad_minor; 1922 1923 r = init_srcu_struct(&md->io_barrier); 1924 if (r < 0) 1925 goto bad_io_barrier; 1926 1927 md->numa_node_id = numa_node_id; 1928 md->init_tio_pdu = false; 1929 md->type = DM_TYPE_NONE; 1930 mutex_init(&md->suspend_lock); 1931 mutex_init(&md->type_lock); 1932 mutex_init(&md->table_devices_lock); 1933 spin_lock_init(&md->deferred_lock); 1934 atomic_set(&md->holders, 1); 1935 atomic_set(&md->open_count, 0); 1936 atomic_set(&md->event_nr, 0); 1937 atomic_set(&md->uevent_seq, 0); 1938 INIT_LIST_HEAD(&md->uevent_list); 1939 INIT_LIST_HEAD(&md->table_devices); 1940 spin_lock_init(&md->uevent_lock); 1941 1942 /* 1943 * default to bio-based required ->make_request_fn until DM 1944 * table is loaded and md->type established. If request-based 1945 * table is loaded: blk-mq will override accordingly. 1946 */ 1947 md->queue = blk_alloc_queue(dm_make_request, numa_node_id); 1948 if (!md->queue) 1949 goto bad; 1950 md->queue->queuedata = md; 1951 1952 md->disk = alloc_disk_node(1, md->numa_node_id); 1953 if (!md->disk) 1954 goto bad; 1955 1956 init_waitqueue_head(&md->wait); 1957 INIT_WORK(&md->work, dm_wq_work); 1958 init_waitqueue_head(&md->eventq); 1959 init_completion(&md->kobj_holder.completion); 1960 1961 md->disk->major = _major; 1962 md->disk->first_minor = minor; 1963 md->disk->fops = &dm_blk_dops; 1964 md->disk->queue = md->queue; 1965 md->disk->private_data = md; 1966 sprintf(md->disk->disk_name, "dm-%d", minor); 1967 1968 if (IS_ENABLED(CONFIG_DAX_DRIVER)) { 1969 md->dax_dev = alloc_dax(md, md->disk->disk_name, 1970 &dm_dax_ops, 0); 1971 if (!md->dax_dev) 1972 goto bad; 1973 } 1974 1975 add_disk_no_queue_reg(md->disk); 1976 format_dev_t(md->name, MKDEV(_major, minor)); 1977 1978 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1979 if (!md->wq) 1980 goto bad; 1981 1982 md->bdev = bdget_disk(md->disk, 0); 1983 if (!md->bdev) 1984 goto bad; 1985 1986 dm_stats_init(&md->stats); 1987 1988 /* Populate the mapping, nobody knows we exist yet */ 1989 spin_lock(&_minor_lock); 1990 old_md = idr_replace(&_minor_idr, md, minor); 1991 spin_unlock(&_minor_lock); 1992 1993 BUG_ON(old_md != MINOR_ALLOCED); 1994 1995 return md; 1996 1997 bad: 1998 cleanup_mapped_device(md); 1999 bad_io_barrier: 2000 free_minor(minor); 2001 bad_minor: 2002 module_put(THIS_MODULE); 2003 bad_module_get: 2004 kvfree(md); 2005 return NULL; 2006 } 2007 2008 static void unlock_fs(struct mapped_device *md); 2009 2010 static void free_dev(struct mapped_device *md) 2011 { 2012 int minor = MINOR(disk_devt(md->disk)); 2013 2014 unlock_fs(md); 2015 2016 cleanup_mapped_device(md); 2017 2018 free_table_devices(&md->table_devices); 2019 dm_stats_cleanup(&md->stats); 2020 free_minor(minor); 2021 2022 module_put(THIS_MODULE); 2023 kvfree(md); 2024 } 2025 2026 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 2027 { 2028 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2029 int ret = 0; 2030 2031 if 
(dm_table_bio_based(t)) { 2032 /* 2033 * The md may already have mempools that need changing. 2034 * If so, reload bioset because front_pad may have changed 2035 * because a different table was loaded. 2036 */ 2037 bioset_exit(&md->bs); 2038 bioset_exit(&md->io_bs); 2039 2040 } else if (bioset_initialized(&md->bs)) { 2041 /* 2042 * There's no need to reload with request-based dm 2043 * because the size of front_pad doesn't change. 2044 * Note for future: If you are to reload bioset, 2045 * prep-ed requests in the queue may refer 2046 * to bio from the old bioset, so you must walk 2047 * through the queue to unprep. 2048 */ 2049 goto out; 2050 } 2051 2052 BUG_ON(!p || 2053 bioset_initialized(&md->bs) || 2054 bioset_initialized(&md->io_bs)); 2055 2056 ret = bioset_init_from_src(&md->bs, &p->bs); 2057 if (ret) 2058 goto out; 2059 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 2060 if (ret) 2061 bioset_exit(&md->bs); 2062 out: 2063 /* mempool bind completed, no longer need any mempools in the table */ 2064 dm_table_free_md_mempools(t); 2065 return ret; 2066 } 2067 2068 /* 2069 * Bind a table to the device. 2070 */ 2071 static void event_callback(void *context) 2072 { 2073 unsigned long flags; 2074 LIST_HEAD(uevents); 2075 struct mapped_device *md = (struct mapped_device *) context; 2076 2077 spin_lock_irqsave(&md->uevent_lock, flags); 2078 list_splice_init(&md->uevent_list, &uevents); 2079 spin_unlock_irqrestore(&md->uevent_lock, flags); 2080 2081 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2082 2083 atomic_inc(&md->event_nr); 2084 wake_up(&md->eventq); 2085 dm_issue_global_event(); 2086 } 2087 2088 /* 2089 * Protected by md->suspend_lock obtained by dm_swap_table(). 2090 */ 2091 static void __set_size(struct mapped_device *md, sector_t size) 2092 { 2093 lockdep_assert_held(&md->suspend_lock); 2094 2095 set_capacity(md->disk, size); 2096 2097 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 2098 } 2099 2100 /* 2101 * Returns old map, which caller must destroy. 2102 */ 2103 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2104 struct queue_limits *limits) 2105 { 2106 struct dm_table *old_map; 2107 struct request_queue *q = md->queue; 2108 bool request_based = dm_table_request_based(t); 2109 sector_t size; 2110 int ret; 2111 2112 lockdep_assert_held(&md->suspend_lock); 2113 2114 size = dm_table_get_size(t); 2115 2116 /* 2117 * Wipe any geometry if the size of the table changed. 2118 */ 2119 if (size != dm_get_size(md)) 2120 memset(&md->geometry, 0, sizeof(md->geometry)); 2121 2122 __set_size(md, size); 2123 2124 dm_table_event_callback(t, event_callback, md); 2125 2126 /* 2127 * The queue hasn't been stopped yet, if the old table type wasn't 2128 * for request-based during suspension. So stop it to prevent 2129 * I/O mapping before resume. 2130 * This must be done before setting the queue restrictions, 2131 * because request-based dm may be run just after the setting. 2132 */ 2133 if (request_based) 2134 dm_stop_queue(q); 2135 2136 if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { 2137 /* 2138 * Leverage the fact that request-based DM targets and 2139 * NVMe bio based targets are immutable singletons 2140 * - used to optimize both dm_request_fn and dm_mq_queue_rq; 2141 * and __process_bio. 
/*
 * Bind a table to the device.
 *
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	bool request_based = dm_table_request_based(t);
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * If the old table type wasn't request-based, the queue has not been
	 * stopped during suspension, so stop it now to prevent I/O from being
	 * mapped before the resume.
	 * This must be done before setting the queue restrictions, because
	 * request-based dm may start running right after they are set.
	 */
	if (request_based)
		dm_stop_queue(q);

	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
		/*
		 * Leverage the fact that request-based DM targets and
		 * NVMe bio based targets are immutable singletons
		 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
		 *   and __process_bio.
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	ret = __bind_mempools(md, t);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
	}

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (old_map)
		dm_sync_table(md);

out:
	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	int r;
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	r = dm_sysfs_init(md);
	if (r) {
		free_dev(md);
		return r;
	}

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

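/*
 * A sketch of the usual sequence from table load (the dm-ioctl layer),
 * assuming 't' is the table being loaded:
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);
 */
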
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

static void dm_init_congested_fn(struct mapped_device *md)
{
	md->queue->backing_dev_info->congested_data = md;
	md->queue->backing_dev_info->congested_fn = dm_any_congested;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	struct queue_limits limits;
	enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
			return r;
		}
		dm_init_congested_fn(md);
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		dm_init_congested_fn(md);
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);
		break;
	}

	r = dm_calculate_queue_limits(t, &limits);
	if (r) {
		DMERR("Cannot calculate initial queue limits");
		return r;
	}
	dm_table_set_restrictions(t, md->queue, &limits);
	blk_register_queue(md->disk);

	return 0;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

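/*
 * Tear down a mapped_device: mark it DMF_FREEING so that no new
 * references can be taken, suspend the live table, wait (or merely warn,
 * if !wait) for the remaining holders to drop, then unbind the table and
 * free everything.
 */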
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(md->queue);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		set_bit(DMF_SUSPENDED, &md->flags);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may still be I/O requests in flight that hold a
	 * reference.  Wait for all references to disappear; no one may take
	 * a new reference once the mapped_device is in the DMF_FREEING state.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			(void) generic_make_request(c);
		else
			(void) dm_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}

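/*
 * A sketch of the typical caller sequence for dm_swap_table() below
 * (roughly what the ioctl resume path does), assuming 'md' is already
 * suspended and 'new_map' is a fully constructed table:
 *
 *	struct dm_table *old_map = dm_swap_table(md, new_map);
 *	if (IS_ERR(old_map))
 *		return PTR_ERR(old_map);
 *	...
 *	dm_resume(md);
 *	if (old_map)
 *		dm_table_destroy(old_map);
 */
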
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);
	dm_issue_global_event();

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

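/*
 * Note that freeze_bdev() both syncs dirty data and blocks new writers,
 * so a successful lock_fs() leaves the filesystem quiescent before the
 * targets are suspended; DMF_FROZEN records that a thaw is owed.
 */
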
/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		pr_debug("%s: suspending with flush\n", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		dm_stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

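/*
 * Undo __dm_suspend(): resume the targets, restart deferred I/O and thaw
 * the filesystem if lock_fs() froze it.  Called with md->suspend_lock held.
 */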
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

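/*
 * Internal suspends nest (see internal_suspend_count above), so every
 * dm_internal_suspend_noflush() must be paired with a matching
 * dm_internal_resume().  A sketch of a typical kernel-side caller:
 *
 *	dm_internal_suspend_noflush(md);
 *	... operate on the device while no bios are in flight ...
 *	dm_internal_resume(md);
 */
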
/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

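/*
 * Sketch of how the ioctl layer uses dm_kobject_uevent() above on
 * resume/remove, assuming 'cookie' was supplied by userspace:
 *
 *	dm_kobject_uevent(md, KOBJ_CHANGE, cookie);
 */
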
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad, io_front_pad;
	int ret;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
			goto out;
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
		goto out;

	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);

	kfree(pools);
}

struct dm_pr {
	u64 old_key;
	u64 new_key;
	u32 flags;
	bool fail_early;
};

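/*
 * Look up the (single) target behind 'bdev' and invoke 'fn' on each of
 * its underlying devices; dm_pr_register() below uses this to fan a
 * reservation registration out to every path.
 */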
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key = old_key,
		.new_key = new_key,
		.flags = flags,
		.fail_early = true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register = dm_pr_register,
	.pr_reserve = dm_pr_reserve,
	.pr_release = dm_pr_release,
	.pr_preempt = dm_pr_preempt,
	.pr_clear = dm_pr_clear,
};

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.report_zones = dm_blk_report_zones,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.dax_supported = dm_dax_supported,
	.copy_from_iter = dm_dax_copy_from_iter,
	.copy_to_iter = dm_dax_copy_to_iter,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");