// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
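	 * The first @num sector_t entries hold the 'highs' keys and the
	 * struct dm_target array starts immediately after them, so the
	 * single kvfree(t->highs) (here and in dm_table_destroy()) releases
	 * both arrays.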
111 */ 112 n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t), 113 GFP_KERNEL); 114 if (!n_highs) 115 return -ENOMEM; 116 117 n_targets = (struct dm_target *) (n_highs + num); 118 119 memset(n_highs, -1, sizeof(*n_highs) * num); 120 kvfree(t->highs); 121 122 t->num_allocated = num; 123 t->highs = n_highs; 124 t->targets = n_targets; 125 126 return 0; 127 } 128 129 int dm_table_create(struct dm_table **result, blk_mode_t mode, 130 unsigned int num_targets, struct mapped_device *md) 131 { 132 struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); 133 134 if (!t) 135 return -ENOMEM; 136 137 INIT_LIST_HEAD(&t->devices); 138 init_rwsem(&t->devices_lock); 139 140 if (!num_targets) 141 num_targets = KEYS_PER_NODE; 142 143 num_targets = dm_round_up(num_targets, KEYS_PER_NODE); 144 145 if (!num_targets) { 146 kfree(t); 147 return -ENOMEM; 148 } 149 150 if (alloc_targets(t, num_targets)) { 151 kfree(t); 152 return -ENOMEM; 153 } 154 155 t->type = DM_TYPE_NONE; 156 t->mode = mode; 157 t->md = md; 158 *result = t; 159 return 0; 160 } 161 162 static void free_devices(struct list_head *devices, struct mapped_device *md) 163 { 164 struct list_head *tmp, *next; 165 166 list_for_each_safe(tmp, next, devices) { 167 struct dm_dev_internal *dd = 168 list_entry(tmp, struct dm_dev_internal, list); 169 DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s", 170 dm_device_name(md), dd->dm_dev->name); 171 dm_put_table_device(md, dd->dm_dev); 172 kfree(dd); 173 } 174 } 175 176 static void dm_table_destroy_crypto_profile(struct dm_table *t); 177 178 void dm_table_destroy(struct dm_table *t) 179 { 180 if (!t) 181 return; 182 183 /* free the indexes */ 184 if (t->depth >= 2) 185 kvfree(t->index[t->depth - 2]); 186 187 /* free the targets */ 188 for (unsigned int i = 0; i < t->num_targets; i++) { 189 struct dm_target *ti = dm_table_get_target(t, i); 190 191 if (ti->type->dtr) 192 ti->type->dtr(ti); 193 194 dm_put_target_type(ti->type); 195 } 196 197 kvfree(t->highs); 198 199 /* free the device list */ 200 free_devices(&t->devices, t->md); 201 202 dm_free_md_mempools(t->mempools); 203 204 dm_table_destroy_crypto_profile(t); 205 206 kfree(t); 207 } 208 209 /* 210 * See if we've already got a device in the list. 211 */ 212 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) 213 { 214 struct dm_dev_internal *dd; 215 216 list_for_each_entry(dd, l, list) 217 if (dd->dm_dev->bdev->bd_dev == dev) 218 return dd; 219 220 return NULL; 221 } 222 223 /* 224 * If possible, this checks an area of a destination device is invalid. 225 */ 226 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, 227 sector_t start, sector_t len, void *data) 228 { 229 struct queue_limits *limits = data; 230 struct block_device *bdev = dev->bdev; 231 sector_t dev_size = bdev_nr_sectors(bdev); 232 unsigned short logical_block_size_sectors = 233 limits->logical_block_size >> SECTOR_SHIFT; 234 235 if (!dev_size) 236 return 0; 237 238 if ((start >= dev_size) || (start + len > dev_size)) { 239 DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu", 240 dm_device_name(ti->table->md), bdev, 241 (unsigned long long)start, 242 (unsigned long long)len, 243 (unsigned long long)dev_size); 244 return 1; 245 } 246 247 /* 248 * If the target is mapped to zoned block device(s), check 249 * that the zones are not partially mapped. 
250 */ 251 if (bdev_is_zoned(bdev)) { 252 unsigned int zone_sectors = bdev_zone_sectors(bdev); 253 254 if (start & (zone_sectors - 1)) { 255 DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg", 256 dm_device_name(ti->table->md), 257 (unsigned long long)start, 258 zone_sectors, bdev); 259 return 1; 260 } 261 262 /* 263 * Note: The last zone of a zoned block device may be smaller 264 * than other zones. So for a target mapping the end of a 265 * zoned block device with such a zone, len would not be zone 266 * aligned. We do not allow such last smaller zone to be part 267 * of the mapping here to ensure that mappings with multiple 268 * devices do not end up with a smaller zone in the middle of 269 * the sector range. 270 */ 271 if (len & (zone_sectors - 1)) { 272 DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg", 273 dm_device_name(ti->table->md), 274 (unsigned long long)len, 275 zone_sectors, bdev); 276 return 1; 277 } 278 } 279 280 if (logical_block_size_sectors <= 1) 281 return 0; 282 283 if (start & (logical_block_size_sectors - 1)) { 284 DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg", 285 dm_device_name(ti->table->md), 286 (unsigned long long)start, 287 limits->logical_block_size, bdev); 288 return 1; 289 } 290 291 if (len & (logical_block_size_sectors - 1)) { 292 DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg", 293 dm_device_name(ti->table->md), 294 (unsigned long long)len, 295 limits->logical_block_size, bdev); 296 return 1; 297 } 298 299 return 0; 300 } 301 302 /* 303 * This upgrades the mode on an already open dm_dev, being 304 * careful to leave things as they were if we fail to reopen the 305 * device and not to touch the existing bdev field in case 306 * it is accessed concurrently. 307 */ 308 static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode, 309 struct mapped_device *md) 310 { 311 int r; 312 struct dm_dev *old_dev, *new_dev; 313 314 old_dev = dd->dm_dev; 315 316 r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev, 317 dd->dm_dev->mode | new_mode, &new_dev); 318 if (r) 319 return r; 320 321 dd->dm_dev = new_dev; 322 dm_put_table_device(md, old_dev); 323 324 return 0; 325 } 326 327 /* 328 * Add a device to the list, or just increment the usage count if 329 * it's already present. 330 * 331 * Note: the __ref annotation is because this function can call the __init 332 * marked early_lookup_bdev when called during early boot code from dm-init.c. 
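 *
 * Typical use from a target constructor (an illustrative sketch only; the
 * 'lc' private structure and the error string are hypothetical):
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *			  &lc->dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * with the matching dm_put_device() call made from the target's dtr.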
333 */ 334 int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, 335 struct dm_dev **result) 336 { 337 int r; 338 dev_t dev; 339 unsigned int major, minor; 340 char dummy; 341 struct dm_dev_internal *dd; 342 struct dm_table *t = ti->table; 343 344 BUG_ON(!t); 345 346 if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) { 347 /* Extract the major/minor numbers */ 348 dev = MKDEV(major, minor); 349 if (MAJOR(dev) != major || MINOR(dev) != minor) 350 return -EOVERFLOW; 351 } else { 352 r = lookup_bdev(path, &dev); 353 #ifndef MODULE 354 if (r && system_state < SYSTEM_RUNNING) 355 r = early_lookup_bdev(path, &dev); 356 #endif 357 if (r) 358 return r; 359 } 360 if (dev == disk_devt(t->md->disk)) 361 return -EINVAL; 362 363 down_write(&t->devices_lock); 364 365 dd = find_device(&t->devices, dev); 366 if (!dd) { 367 dd = kmalloc(sizeof(*dd), GFP_KERNEL); 368 if (!dd) { 369 r = -ENOMEM; 370 goto unlock_ret_r; 371 } 372 373 r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev); 374 if (r) { 375 kfree(dd); 376 goto unlock_ret_r; 377 } 378 379 refcount_set(&dd->count, 1); 380 list_add(&dd->list, &t->devices); 381 goto out; 382 383 } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { 384 r = upgrade_mode(dd, mode, t->md); 385 if (r) 386 goto unlock_ret_r; 387 } 388 refcount_inc(&dd->count); 389 out: 390 up_write(&t->devices_lock); 391 *result = dd->dm_dev; 392 return 0; 393 394 unlock_ret_r: 395 up_write(&t->devices_lock); 396 return r; 397 } 398 EXPORT_SYMBOL(dm_get_device); 399 400 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, 401 sector_t start, sector_t len, void *data) 402 { 403 struct queue_limits *limits = data; 404 struct block_device *bdev = dev->bdev; 405 struct request_queue *q = bdev_get_queue(bdev); 406 407 if (unlikely(!q)) { 408 DMWARN("%s: Cannot set limits for nonexistent device %pg", 409 dm_device_name(ti->table->md), bdev); 410 return 0; 411 } 412 413 if (blk_stack_limits(limits, &q->limits, 414 get_start_sect(bdev) + start) < 0) 415 DMWARN("%s: adding target device %pg caused an alignment inconsistency: " 416 "physical_block_size=%u, logical_block_size=%u, " 417 "alignment_offset=%u, start=%llu", 418 dm_device_name(ti->table->md), bdev, 419 q->limits.physical_block_size, 420 q->limits.logical_block_size, 421 q->limits.alignment_offset, 422 (unsigned long long) start << SECTOR_SHIFT); 423 return 0; 424 } 425 426 /* 427 * Decrement a device's use count and remove it if necessary. 428 */ 429 void dm_put_device(struct dm_target *ti, struct dm_dev *d) 430 { 431 int found = 0; 432 struct dm_table *t = ti->table; 433 struct list_head *devices = &t->devices; 434 struct dm_dev_internal *dd; 435 436 down_write(&t->devices_lock); 437 438 list_for_each_entry(dd, devices, list) { 439 if (dd->dm_dev == d) { 440 found = 1; 441 break; 442 } 443 } 444 if (!found) { 445 DMERR("%s: device %s not in table devices list", 446 dm_device_name(t->md), d->name); 447 goto unlock_ret; 448 } 449 if (refcount_dec_and_test(&dd->count)) { 450 dm_put_table_device(t->md, d); 451 list_del(&dd->list); 452 kfree(dd); 453 } 454 455 unlock_ret: 456 up_write(&t->devices_lock); 457 } 458 EXPORT_SYMBOL(dm_put_device); 459 460 /* 461 * Checks to see if the target joins onto the end of the table. 
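 * That is, the new target's 'begin' must equal the previous target's
 * begin + len, so the table stays gapless and in order.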
 */
static int adjoin(struct dm_table *t, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!t->num_targets)
		return !ti->begin;

	prev = &t->targets[t->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned int *size, char **old_argv)
{
	char **argv;
	unsigned int new_size;
	gfp_t gfp;

	if (*size) {
		new_size = *size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv && old_argv) {
		memcpy(argv, old_argv, *size * sizeof(*argv));
		*size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned int array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully. If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *t,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
595 */ 596 unsigned short remaining = 0; 597 598 struct dm_target *ti; 599 struct queue_limits ti_limits; 600 unsigned int i; 601 602 /* 603 * Check each entry in the table in turn. 604 */ 605 for (i = 0; i < t->num_targets; i++) { 606 ti = dm_table_get_target(t, i); 607 608 blk_set_stacking_limits(&ti_limits); 609 610 /* combine all target devices' limits */ 611 if (ti->type->iterate_devices) 612 ti->type->iterate_devices(ti, dm_set_device_limits, 613 &ti_limits); 614 615 /* 616 * If the remaining sectors fall entirely within this 617 * table entry are they compatible with its logical_block_size? 618 */ 619 if (remaining < ti->len && 620 remaining & ((ti_limits.logical_block_size >> 621 SECTOR_SHIFT) - 1)) 622 break; /* Error */ 623 624 next_target_start = 625 (unsigned short) ((next_target_start + ti->len) & 626 (device_logical_block_size_sects - 1)); 627 remaining = next_target_start ? 628 device_logical_block_size_sects - next_target_start : 0; 629 } 630 631 if (remaining) { 632 DMERR("%s: table line %u (start sect %llu len %llu) " 633 "not aligned to h/w logical block size %u", 634 dm_device_name(t->md), i, 635 (unsigned long long) ti->begin, 636 (unsigned long long) ti->len, 637 limits->logical_block_size); 638 return -EINVAL; 639 } 640 641 return 0; 642 } 643 644 int dm_table_add_target(struct dm_table *t, const char *type, 645 sector_t start, sector_t len, char *params) 646 { 647 int r = -EINVAL, argc; 648 char **argv; 649 struct dm_target *ti; 650 651 if (t->singleton) { 652 DMERR("%s: target type %s must appear alone in table", 653 dm_device_name(t->md), t->targets->type->name); 654 return -EINVAL; 655 } 656 657 BUG_ON(t->num_targets >= t->num_allocated); 658 659 ti = t->targets + t->num_targets; 660 memset(ti, 0, sizeof(*ti)); 661 662 if (!len) { 663 DMERR("%s: zero-length target", dm_device_name(t->md)); 664 return -EINVAL; 665 } 666 667 ti->type = dm_get_target_type(type); 668 if (!ti->type) { 669 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); 670 return -EINVAL; 671 } 672 673 if (dm_target_needs_singleton(ti->type)) { 674 if (t->num_targets) { 675 ti->error = "singleton target type must appear alone in table"; 676 goto bad; 677 } 678 t->singleton = true; 679 } 680 681 if (dm_target_always_writeable(ti->type) && 682 !(t->mode & BLK_OPEN_WRITE)) { 683 ti->error = "target type may not be included in a read-only table"; 684 goto bad; 685 } 686 687 if (t->immutable_target_type) { 688 if (t->immutable_target_type != ti->type) { 689 ti->error = "immutable target type cannot be mixed with other target types"; 690 goto bad; 691 } 692 } else if (dm_target_is_immutable(ti->type)) { 693 if (t->num_targets) { 694 ti->error = "immutable target type cannot be mixed with other target types"; 695 goto bad; 696 } 697 t->immutable_target_type = ti->type; 698 } 699 700 if (dm_target_has_integrity(ti->type)) 701 t->integrity_added = 1; 702 703 ti->table = t; 704 ti->begin = start; 705 ti->len = len; 706 ti->error = "Unknown error"; 707 708 /* 709 * Does this target adjoin the previous one ? 
710 */ 711 if (!adjoin(t, ti)) { 712 ti->error = "Gap in table"; 713 goto bad; 714 } 715 716 r = dm_split_args(&argc, &argv, params); 717 if (r) { 718 ti->error = "couldn't split parameters"; 719 goto bad; 720 } 721 722 r = ti->type->ctr(ti, argc, argv); 723 kfree(argv); 724 if (r) 725 goto bad; 726 727 t->highs[t->num_targets++] = ti->begin + ti->len - 1; 728 729 if (!ti->num_discard_bios && ti->discards_supported) 730 DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.", 731 dm_device_name(t->md), type); 732 733 if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key)) 734 static_branch_enable(&swap_bios_enabled); 735 736 return 0; 737 738 bad: 739 DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r)); 740 dm_put_target_type(ti->type); 741 return r; 742 } 743 744 /* 745 * Target argument parsing helpers. 746 */ 747 static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, 748 unsigned int *value, char **error, unsigned int grouped) 749 { 750 const char *arg_str = dm_shift_arg(arg_set); 751 char dummy; 752 753 if (!arg_str || 754 (sscanf(arg_str, "%u%c", value, &dummy) != 1) || 755 (*value < arg->min) || 756 (*value > arg->max) || 757 (grouped && arg_set->argc < *value)) { 758 *error = arg->error; 759 return -EINVAL; 760 } 761 762 return 0; 763 } 764 765 int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, 766 unsigned int *value, char **error) 767 { 768 return validate_next_arg(arg, arg_set, value, error, 0); 769 } 770 EXPORT_SYMBOL(dm_read_arg); 771 772 int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, 773 unsigned int *value, char **error) 774 { 775 return validate_next_arg(arg, arg_set, value, error, 1); 776 } 777 EXPORT_SYMBOL(dm_read_arg_group); 778 779 const char *dm_shift_arg(struct dm_arg_set *as) 780 { 781 char *r; 782 783 if (as->argc) { 784 as->argc--; 785 r = *as->argv; 786 as->argv++; 787 return r; 788 } 789 790 return NULL; 791 } 792 EXPORT_SYMBOL(dm_shift_arg); 793 794 void dm_consume_args(struct dm_arg_set *as, unsigned int num_args) 795 { 796 BUG_ON(as->argc < num_args); 797 as->argc -= num_args; 798 as->argv += num_args; 799 } 800 EXPORT_SYMBOL(dm_consume_args); 801 802 static bool __table_type_bio_based(enum dm_queue_mode table_type) 803 { 804 return (table_type == DM_TYPE_BIO_BASED || 805 table_type == DM_TYPE_DAX_BIO_BASED); 806 } 807 808 static bool __table_type_request_based(enum dm_queue_mode table_type) 809 { 810 return table_type == DM_TYPE_REQUEST_BASED; 811 } 812 813 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) 814 { 815 t->type = type; 816 } 817 EXPORT_SYMBOL_GPL(dm_table_set_type); 818 819 /* validate the dax capability of the target device span */ 820 static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, 821 sector_t start, sector_t len, void *data) 822 { 823 if (dev->dax_dev) 824 return false; 825 826 DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev); 827 return true; 828 } 829 830 /* Check devices support synchronous DAX */ 831 static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev, 832 sector_t start, sector_t len, void *data) 833 { 834 return !dev->dax_dev || !dax_synchronous(dev->dax_dev); 835 } 836 837 static bool dm_table_supports_dax(struct dm_table *t, 838 iterate_devices_callout_fn iterate_fn) 839 { 840 /* Ensure that all targets support DAX. 
*/ 841 for (unsigned int i = 0; i < t->num_targets; i++) { 842 struct dm_target *ti = dm_table_get_target(t, i); 843 844 if (!ti->type->direct_access) 845 return false; 846 847 if (dm_target_is_wildcard(ti->type) || 848 !ti->type->iterate_devices || 849 ti->type->iterate_devices(ti, iterate_fn, NULL)) 850 return false; 851 } 852 853 return true; 854 } 855 856 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, 857 sector_t start, sector_t len, void *data) 858 { 859 struct block_device *bdev = dev->bdev; 860 struct request_queue *q = bdev_get_queue(bdev); 861 862 /* request-based cannot stack on partitions! */ 863 if (bdev_is_partition(bdev)) 864 return false; 865 866 return queue_is_mq(q); 867 } 868 869 static int dm_table_determine_type(struct dm_table *t) 870 { 871 unsigned int bio_based = 0, request_based = 0, hybrid = 0; 872 struct dm_target *ti; 873 struct list_head *devices = dm_table_get_devices(t); 874 enum dm_queue_mode live_md_type = dm_get_md_type(t->md); 875 876 if (t->type != DM_TYPE_NONE) { 877 /* target already set the table's type */ 878 if (t->type == DM_TYPE_BIO_BASED) { 879 /* possibly upgrade to a variant of bio-based */ 880 goto verify_bio_based; 881 } 882 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); 883 goto verify_rq_based; 884 } 885 886 for (unsigned int i = 0; i < t->num_targets; i++) { 887 ti = dm_table_get_target(t, i); 888 if (dm_target_hybrid(ti)) 889 hybrid = 1; 890 else if (dm_target_request_based(ti)) 891 request_based = 1; 892 else 893 bio_based = 1; 894 895 if (bio_based && request_based) { 896 DMERR("Inconsistent table: different target types can't be mixed up"); 897 return -EINVAL; 898 } 899 } 900 901 if (hybrid && !bio_based && !request_based) { 902 /* 903 * The targets can work either way. 904 * Determine the type from the live device. 905 * Default to bio-based if device is new. 906 */ 907 if (__table_type_request_based(live_md_type)) 908 request_based = 1; 909 else 910 bio_based = 1; 911 } 912 913 if (bio_based) { 914 verify_bio_based: 915 /* We must use this table as bio-based */ 916 t->type = DM_TYPE_BIO_BASED; 917 if (dm_table_supports_dax(t, device_not_dax_capable) || 918 (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) { 919 t->type = DM_TYPE_DAX_BIO_BASED; 920 } 921 return 0; 922 } 923 924 BUG_ON(!request_based); /* No targets in this table */ 925 926 t->type = DM_TYPE_REQUEST_BASED; 927 928 verify_rq_based: 929 /* 930 * Request-based dm supports only tables that have a single target now. 931 * To support multiple targets, request splitting support is needed, 932 * and that needs lots of changes in the block-layer. 933 * (e.g. request completion process for partial completion.) 
934 */ 935 if (t->num_targets > 1) { 936 DMERR("request-based DM doesn't support multiple targets"); 937 return -EINVAL; 938 } 939 940 if (list_empty(devices)) { 941 int srcu_idx; 942 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx); 943 944 /* inherit live table's type */ 945 if (live_table) 946 t->type = live_table->type; 947 dm_put_live_table(t->md, srcu_idx); 948 return 0; 949 } 950 951 ti = dm_table_get_immutable_target(t); 952 if (!ti) { 953 DMERR("table load rejected: immutable target is required"); 954 return -EINVAL; 955 } else if (ti->max_io_len) { 956 DMERR("table load rejected: immutable target that splits IO is not supported"); 957 return -EINVAL; 958 } 959 960 /* Non-request-stackable devices can't be used for request-based dm */ 961 if (!ti->type->iterate_devices || 962 !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) { 963 DMERR("table load rejected: including non-request-stackable devices"); 964 return -EINVAL; 965 } 966 967 return 0; 968 } 969 970 enum dm_queue_mode dm_table_get_type(struct dm_table *t) 971 { 972 return t->type; 973 } 974 975 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) 976 { 977 return t->immutable_target_type; 978 } 979 980 struct dm_target *dm_table_get_immutable_target(struct dm_table *t) 981 { 982 /* Immutable target is implicitly a singleton */ 983 if (t->num_targets > 1 || 984 !dm_target_is_immutable(t->targets[0].type)) 985 return NULL; 986 987 return t->targets; 988 } 989 990 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) 991 { 992 for (unsigned int i = 0; i < t->num_targets; i++) { 993 struct dm_target *ti = dm_table_get_target(t, i); 994 995 if (dm_target_is_wildcard(ti->type)) 996 return ti; 997 } 998 999 return NULL; 1000 } 1001 1002 bool dm_table_bio_based(struct dm_table *t) 1003 { 1004 return __table_type_bio_based(dm_table_get_type(t)); 1005 } 1006 1007 bool dm_table_request_based(struct dm_table *t) 1008 { 1009 return __table_type_request_based(dm_table_get_type(t)); 1010 } 1011 1012 static bool dm_table_supports_poll(struct dm_table *t); 1013 1014 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) 1015 { 1016 enum dm_queue_mode type = dm_table_get_type(t); 1017 unsigned int per_io_data_size = 0, front_pad, io_front_pad; 1018 unsigned int min_pool_size = 0, pool_size; 1019 struct dm_md_mempools *pools; 1020 1021 if (unlikely(type == DM_TYPE_NONE)) { 1022 DMERR("no table type is set, can't allocate mempools"); 1023 return -EINVAL; 1024 } 1025 1026 pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 1027 if (!pools) 1028 return -ENOMEM; 1029 1030 if (type == DM_TYPE_REQUEST_BASED) { 1031 pool_size = dm_get_reserved_rq_based_ios(); 1032 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 1033 goto init_bs; 1034 } 1035 1036 for (unsigned int i = 0; i < t->num_targets; i++) { 1037 struct dm_target *ti = dm_table_get_target(t, i); 1038 1039 per_io_data_size = max(per_io_data_size, ti->per_io_data_size); 1040 min_pool_size = max(min_pool_size, ti->num_flush_bios); 1041 } 1042 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 1043 front_pad = roundup(per_io_data_size, 1044 __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET; 1045 1046 io_front_pad = roundup(per_io_data_size, 1047 __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; 1048 if (bioset_init(&pools->io_bs, pool_size, io_front_pad, 1049 dm_table_supports_poll(t) ? 
			BIOSET_PERCPU_CACHE : 0))
		goto out_free_pools;
	if (t->integrity_supported &&
	    bioset_integrity_create(&pools->io_bs, pool_size))
		goto out_free_pools;
init_bs:
	if (bioset_init(&pools->bs, pool_size, front_pad, 0))
		goto out_free_pools;
	if (t->integrity_supported &&
	    bioset_integrity_create(&pools->bs, pool_size))
		goto out_free_pools;

	t->mempools = pools;
	return 0;

out_free_pools:
	dm_free_md_mempools(pools);
	return -ENOMEM;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

static bool integrity_profile_exists(struct gendisk *disk)
{
	return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_passes_integrity(ti->type))
			goto no_integrity;
	}

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!integrity_profile_exists(template_disk))
			goto no_integrity;
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile. But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).
Consequently, stacked DM devices force a 2 stage integrity 1167 * profile validation: First pass during table load, final pass during 1168 * resume. 1169 */ 1170 static int dm_table_register_integrity(struct dm_table *t) 1171 { 1172 struct mapped_device *md = t->md; 1173 struct gendisk *template_disk = NULL; 1174 1175 /* If target handles integrity itself do not register it here. */ 1176 if (t->integrity_added) 1177 return 0; 1178 1179 template_disk = dm_table_get_integrity_disk(t); 1180 if (!template_disk) 1181 return 0; 1182 1183 if (!integrity_profile_exists(dm_disk(md))) { 1184 t->integrity_supported = true; 1185 /* 1186 * Register integrity profile during table load; we can do 1187 * this because the final profile must match during resume. 1188 */ 1189 blk_integrity_register(dm_disk(md), 1190 blk_get_integrity(template_disk)); 1191 return 0; 1192 } 1193 1194 /* 1195 * If DM device already has an initialized integrity 1196 * profile the new profile should not conflict. 1197 */ 1198 if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { 1199 DMERR("%s: conflict with existing integrity profile: %s profile mismatch", 1200 dm_device_name(t->md), 1201 template_disk->disk_name); 1202 return 1; 1203 } 1204 1205 /* Preserve existing integrity profile */ 1206 t->integrity_supported = true; 1207 return 0; 1208 } 1209 1210 #ifdef CONFIG_BLK_INLINE_ENCRYPTION 1211 1212 struct dm_crypto_profile { 1213 struct blk_crypto_profile profile; 1214 struct mapped_device *md; 1215 }; 1216 1217 static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev, 1218 sector_t start, sector_t len, void *data) 1219 { 1220 const struct blk_crypto_key *key = data; 1221 1222 blk_crypto_evict_key(dev->bdev, key); 1223 return 0; 1224 } 1225 1226 /* 1227 * When an inline encryption key is evicted from a device-mapper device, evict 1228 * it from all the underlying devices. 
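 * The eviction walks the live table and is attempted on every underlying
 * data device; this helper always reports success back to blk-crypto.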
1229 */ 1230 static int dm_keyslot_evict(struct blk_crypto_profile *profile, 1231 const struct blk_crypto_key *key, unsigned int slot) 1232 { 1233 struct mapped_device *md = 1234 container_of(profile, struct dm_crypto_profile, profile)->md; 1235 struct dm_table *t; 1236 int srcu_idx; 1237 1238 t = dm_get_live_table(md, &srcu_idx); 1239 if (!t) 1240 return 0; 1241 1242 for (unsigned int i = 0; i < t->num_targets; i++) { 1243 struct dm_target *ti = dm_table_get_target(t, i); 1244 1245 if (!ti->type->iterate_devices) 1246 continue; 1247 ti->type->iterate_devices(ti, dm_keyslot_evict_callback, 1248 (void *)key); 1249 } 1250 1251 dm_put_live_table(md, srcu_idx); 1252 return 0; 1253 } 1254 1255 static int 1256 device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev, 1257 sector_t start, sector_t len, void *data) 1258 { 1259 struct blk_crypto_profile *parent = data; 1260 struct blk_crypto_profile *child = 1261 bdev_get_queue(dev->bdev)->crypto_profile; 1262 1263 blk_crypto_intersect_capabilities(parent, child); 1264 return 0; 1265 } 1266 1267 void dm_destroy_crypto_profile(struct blk_crypto_profile *profile) 1268 { 1269 struct dm_crypto_profile *dmcp = container_of(profile, 1270 struct dm_crypto_profile, 1271 profile); 1272 1273 if (!profile) 1274 return; 1275 1276 blk_crypto_profile_destroy(profile); 1277 kfree(dmcp); 1278 } 1279 1280 static void dm_table_destroy_crypto_profile(struct dm_table *t) 1281 { 1282 dm_destroy_crypto_profile(t->crypto_profile); 1283 t->crypto_profile = NULL; 1284 } 1285 1286 /* 1287 * Constructs and initializes t->crypto_profile with a crypto profile that 1288 * represents the common set of crypto capabilities of the devices described by 1289 * the dm_table. However, if the constructed crypto profile doesn't support all 1290 * crypto capabilities that are supported by the current mapped_device, it 1291 * returns an error instead, since we don't support removing crypto capabilities 1292 * on table changes. Finally, if the constructed crypto profile is "empty" (has 1293 * no crypto capabilities at all), it just sets t->crypto_profile to NULL. 1294 */ 1295 static int dm_table_construct_crypto_profile(struct dm_table *t) 1296 { 1297 struct dm_crypto_profile *dmcp; 1298 struct blk_crypto_profile *profile; 1299 unsigned int i; 1300 bool empty_profile = true; 1301 1302 dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL); 1303 if (!dmcp) 1304 return -ENOMEM; 1305 dmcp->md = t->md; 1306 1307 profile = &dmcp->profile; 1308 blk_crypto_profile_init(profile, 0); 1309 profile->ll_ops.keyslot_evict = dm_keyslot_evict; 1310 profile->max_dun_bytes_supported = UINT_MAX; 1311 memset(profile->modes_supported, 0xFF, 1312 sizeof(profile->modes_supported)); 1313 1314 for (i = 0; i < t->num_targets; i++) { 1315 struct dm_target *ti = dm_table_get_target(t, i); 1316 1317 if (!dm_target_passes_crypto(ti->type)) { 1318 blk_crypto_intersect_capabilities(profile, NULL); 1319 break; 1320 } 1321 if (!ti->type->iterate_devices) 1322 continue; 1323 ti->type->iterate_devices(ti, 1324 device_intersect_crypto_capabilities, 1325 profile); 1326 } 1327 1328 if (t->md->queue && 1329 !blk_crypto_has_capabilities(profile, 1330 t->md->queue->crypto_profile)) { 1331 DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); 1332 dm_destroy_crypto_profile(profile); 1333 return -EINVAL; 1334 } 1335 1336 /* 1337 * If the new profile doesn't actually support any crypto capabilities, 1338 * we may as well represent it with a NULL profile. 
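	 * (That is what happens when any target does not pass through inline
	 * crypto: intersecting with a NULL profile in the loop above clears
	 * every supported mode.)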
1339 */ 1340 for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) { 1341 if (profile->modes_supported[i]) { 1342 empty_profile = false; 1343 break; 1344 } 1345 } 1346 1347 if (empty_profile) { 1348 dm_destroy_crypto_profile(profile); 1349 profile = NULL; 1350 } 1351 1352 /* 1353 * t->crypto_profile is only set temporarily while the table is being 1354 * set up, and it gets set to NULL after the profile has been 1355 * transferred to the request_queue. 1356 */ 1357 t->crypto_profile = profile; 1358 1359 return 0; 1360 } 1361 1362 static void dm_update_crypto_profile(struct request_queue *q, 1363 struct dm_table *t) 1364 { 1365 if (!t->crypto_profile) 1366 return; 1367 1368 /* Make the crypto profile less restrictive. */ 1369 if (!q->crypto_profile) { 1370 blk_crypto_register(t->crypto_profile, q); 1371 } else { 1372 blk_crypto_update_capabilities(q->crypto_profile, 1373 t->crypto_profile); 1374 dm_destroy_crypto_profile(t->crypto_profile); 1375 } 1376 t->crypto_profile = NULL; 1377 } 1378 1379 #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 1380 1381 static int dm_table_construct_crypto_profile(struct dm_table *t) 1382 { 1383 return 0; 1384 } 1385 1386 void dm_destroy_crypto_profile(struct blk_crypto_profile *profile) 1387 { 1388 } 1389 1390 static void dm_table_destroy_crypto_profile(struct dm_table *t) 1391 { 1392 } 1393 1394 static void dm_update_crypto_profile(struct request_queue *q, 1395 struct dm_table *t) 1396 { 1397 } 1398 1399 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ 1400 1401 /* 1402 * Prepares the table for use by building the indices, 1403 * setting the type, and allocating mempools. 1404 */ 1405 int dm_table_complete(struct dm_table *t) 1406 { 1407 int r; 1408 1409 r = dm_table_determine_type(t); 1410 if (r) { 1411 DMERR("unable to determine table type"); 1412 return r; 1413 } 1414 1415 r = dm_table_build_index(t); 1416 if (r) { 1417 DMERR("unable to build btrees"); 1418 return r; 1419 } 1420 1421 r = dm_table_register_integrity(t); 1422 if (r) { 1423 DMERR("could not register integrity profile."); 1424 return r; 1425 } 1426 1427 r = dm_table_construct_crypto_profile(t); 1428 if (r) { 1429 DMERR("could not construct crypto profile."); 1430 return r; 1431 } 1432 1433 r = dm_table_alloc_md_mempools(t, t->md); 1434 if (r) 1435 DMERR("unable to allocate mempools"); 1436 1437 return r; 1438 } 1439 1440 static DEFINE_MUTEX(_event_lock); 1441 void dm_table_event_callback(struct dm_table *t, 1442 void (*fn)(void *), void *context) 1443 { 1444 mutex_lock(&_event_lock); 1445 t->event_fn = fn; 1446 t->event_context = context; 1447 mutex_unlock(&_event_lock); 1448 } 1449 1450 void dm_table_event(struct dm_table *t) 1451 { 1452 mutex_lock(&_event_lock); 1453 if (t->event_fn) 1454 t->event_fn(t->event_context); 1455 mutex_unlock(&_event_lock); 1456 } 1457 EXPORT_SYMBOL(dm_table_event); 1458 1459 inline sector_t dm_table_get_size(struct dm_table *t) 1460 { 1461 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; 1462 } 1463 EXPORT_SYMBOL(dm_table_get_size); 1464 1465 /* 1466 * Search the btree for the correct target. 1467 * 1468 * Caller should check returned pointer for NULL 1469 * to trap I/O beyond end of device. 
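 *
 * Each index node holds KEYS_PER_NODE 'high' keys: the last sector covered
 * by the corresponding child node (or by the target itself at the leaf
 * level), so every level narrows the search to one node of the level below
 * until a leaf entry, which maps 1:1 onto t->targets[], is reached.
 *
 * Illustrative sketch with hypothetical numbers (KEYS_PER_NODE == 8 on a
 * 64-byte cache line): a table of 20 targets needs 3 leaf nodes, so
 * depth == 2 and the single root node carries each leaf's 'high' key; a
 * lookup then costs one scan of the root plus one scan of the chosen leaf.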
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	if (unlikely(sector >= dm_table_get_size(t)))
		return NULL;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
}

/*
 * type->iterate_devices() should be called when the sanity check needs to
 * iterate and check all underlying data devices. iterate_devices() will
 * iterate all underlying data devices until it encounters a non-zero return
 * code, returned either by the supplied iterate_devices_callout_fn or by
 * iterate_devices() itself internally.
 *
 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
 * iterate multiple underlying devices internally, in which case a non-zero
 * return code from the iterate_devices_callout_fn will stop the iteration
 * early.
 *
 * Cases requiring _any_ underlying device to support some kind of attribute
 * should use an iteration structure like dm_table_any_dev_attr(), or call
 * it directly. @func should handle the semantics of positive examples, e.g.
 * capable of something.
 *
 * Cases requiring _all_ underlying devices to support some kind of attribute
 * should use an iteration structure like dm_table_supports_nowait() or
 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
 * uses an @anti_func that handles the semantics of counter examples, e.g. not
 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
 */
static bool dm_table_any_dev_attr(struct dm_table *t,
				  iterate_devices_callout_fn func, void *data)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, func, data))
			return true;
	}

	return false;
}

static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned int *num_devices = data;

	(*num_devices)++;

	return 0;
}

static bool dm_table_supports_poll(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
			return false;
	}

	return true;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
1564 */ 1565 bool dm_table_has_no_data_devices(struct dm_table *t) 1566 { 1567 for (unsigned int i = 0; i < t->num_targets; i++) { 1568 struct dm_target *ti = dm_table_get_target(t, i); 1569 unsigned int num_devices = 0; 1570 1571 if (!ti->type->iterate_devices) 1572 return false; 1573 1574 ti->type->iterate_devices(ti, count_device, &num_devices); 1575 if (num_devices) 1576 return false; 1577 } 1578 1579 return true; 1580 } 1581 1582 static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, 1583 sector_t start, sector_t len, void *data) 1584 { 1585 struct request_queue *q = bdev_get_queue(dev->bdev); 1586 enum blk_zoned_model *zoned_model = data; 1587 1588 return blk_queue_zoned_model(q) != *zoned_model; 1589 } 1590 1591 static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev, 1592 sector_t start, sector_t len, void *data) 1593 { 1594 struct request_queue *q = bdev_get_queue(dev->bdev); 1595 1596 return blk_queue_zoned_model(q) != BLK_ZONED_NONE; 1597 } 1598 1599 /* 1600 * Check the device zoned model based on the target feature flag. If the target 1601 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are 1602 * also accepted but all devices must have the same zoned model. If the target 1603 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any 1604 * zoned model with all zoned devices having the same zone size. 1605 */ 1606 static bool dm_table_supports_zoned_model(struct dm_table *t, 1607 enum blk_zoned_model zoned_model) 1608 { 1609 for (unsigned int i = 0; i < t->num_targets; i++) { 1610 struct dm_target *ti = dm_table_get_target(t, i); 1611 1612 /* 1613 * For the wildcard target (dm-error), if we do not have a 1614 * backing device, we must always return false. If we have a 1615 * backing device, the result must depend on checking zoned 1616 * model, like for any other target. So for this, check directly 1617 * if the target backing device is zoned as we get "false" when 1618 * dm-error was set without a backing device. 1619 */ 1620 if (dm_target_is_wildcard(ti->type) && 1621 !ti->type->iterate_devices(ti, device_is_zoned_model, NULL)) 1622 return false; 1623 1624 if (dm_target_supports_zoned_hm(ti->type)) { 1625 if (!ti->type->iterate_devices || 1626 ti->type->iterate_devices(ti, device_not_zoned_model, 1627 &zoned_model)) 1628 return false; 1629 } else if (!dm_target_supports_mixed_zoned_model(ti->type)) { 1630 if (zoned_model == BLK_ZONED_HM) 1631 return false; 1632 } 1633 } 1634 1635 return true; 1636 } 1637 1638 static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, 1639 sector_t start, sector_t len, void *data) 1640 { 1641 unsigned int *zone_sectors = data; 1642 1643 if (!bdev_is_zoned(dev->bdev)) 1644 return 0; 1645 return bdev_zone_sectors(dev->bdev) != *zone_sectors; 1646 } 1647 1648 /* 1649 * Check consistency of zoned model and zone sectors across all targets. For 1650 * zone sectors, if the destination device is a zoned block device, it shall 1651 * have the specified zone_sectors. 
1652 */ 1653 static int validate_hardware_zoned_model(struct dm_table *t, 1654 enum blk_zoned_model zoned_model, 1655 unsigned int zone_sectors) 1656 { 1657 if (zoned_model == BLK_ZONED_NONE) 1658 return 0; 1659 1660 if (!dm_table_supports_zoned_model(t, zoned_model)) { 1661 DMERR("%s: zoned model is not consistent across all devices", 1662 dm_device_name(t->md)); 1663 return -EINVAL; 1664 } 1665 1666 /* Check zone size validity and compatibility */ 1667 if (!zone_sectors || !is_power_of_2(zone_sectors)) 1668 return -EINVAL; 1669 1670 if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) { 1671 DMERR("%s: zone sectors is not consistent across all zoned devices", 1672 dm_device_name(t->md)); 1673 return -EINVAL; 1674 } 1675 1676 return 0; 1677 } 1678 1679 /* 1680 * Establish the new table's queue_limits and validate them. 1681 */ 1682 int dm_calculate_queue_limits(struct dm_table *t, 1683 struct queue_limits *limits) 1684 { 1685 struct queue_limits ti_limits; 1686 enum blk_zoned_model zoned_model = BLK_ZONED_NONE; 1687 unsigned int zone_sectors = 0; 1688 1689 blk_set_stacking_limits(limits); 1690 1691 for (unsigned int i = 0; i < t->num_targets; i++) { 1692 struct dm_target *ti = dm_table_get_target(t, i); 1693 1694 blk_set_stacking_limits(&ti_limits); 1695 1696 if (!ti->type->iterate_devices) { 1697 /* Set I/O hints portion of queue limits */ 1698 if (ti->type->io_hints) 1699 ti->type->io_hints(ti, &ti_limits); 1700 goto combine_limits; 1701 } 1702 1703 /* 1704 * Combine queue limits of all the devices this target uses. 1705 */ 1706 ti->type->iterate_devices(ti, dm_set_device_limits, 1707 &ti_limits); 1708 1709 if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) { 1710 /* 1711 * After stacking all limits, validate all devices 1712 * in table support this zoned model and zone sectors. 1713 */ 1714 zoned_model = ti_limits.zoned; 1715 zone_sectors = ti_limits.chunk_sectors; 1716 } 1717 1718 /* Set I/O hints portion of queue limits */ 1719 if (ti->type->io_hints) 1720 ti->type->io_hints(ti, &ti_limits); 1721 1722 /* 1723 * Check each device area is consistent with the target's 1724 * overall queue limits. 1725 */ 1726 if (ti->type->iterate_devices(ti, device_area_is_invalid, 1727 &ti_limits)) 1728 return -EINVAL; 1729 1730 combine_limits: 1731 /* 1732 * Merge this target's queue limits into the overall limits 1733 * for the table. 1734 */ 1735 if (blk_stack_limits(limits, &ti_limits, 0) < 0) 1736 DMWARN("%s: adding target device (start sect %llu len %llu) " 1737 "caused an alignment inconsistency", 1738 dm_device_name(t->md), 1739 (unsigned long long) ti->begin, 1740 (unsigned long long) ti->len); 1741 } 1742 1743 /* 1744 * Verify that the zoned model and zone sectors, as determined before 1745 * any .io_hints override, are the same across all devices in the table. 1746 * - this is especially relevant if .io_hints is emulating a disk-managed 1747 * zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices. 1748 * BUT... 1749 */ 1750 if (limits->zoned != BLK_ZONED_NONE) { 1751 /* 1752 * ...IF the above limits stacking determined a zoned model 1753 * validate that all of the table's devices conform to it. 
1754 */ 1755 zoned_model = limits->zoned; 1756 zone_sectors = limits->chunk_sectors; 1757 } 1758 if (validate_hardware_zoned_model(t, zoned_model, zone_sectors)) 1759 return -EINVAL; 1760 1761 return validate_hardware_logical_block_alignment(t, limits); 1762 } 1763 1764 /* 1765 * Verify that all devices have an integrity profile that matches the 1766 * DM device's registered integrity profile. If the profiles don't 1767 * match then unregister the DM device's integrity profile. 1768 */ 1769 static void dm_table_verify_integrity(struct dm_table *t) 1770 { 1771 struct gendisk *template_disk = NULL; 1772 1773 if (t->integrity_added) 1774 return; 1775 1776 if (t->integrity_supported) { 1777 /* 1778 * Verify that the original integrity profile 1779 * matches all the devices in this table. 1780 */ 1781 template_disk = dm_table_get_integrity_disk(t); 1782 if (template_disk && 1783 blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) 1784 return; 1785 } 1786 1787 if (integrity_profile_exists(dm_disk(t->md))) { 1788 DMWARN("%s: unable to establish an integrity profile", 1789 dm_device_name(t->md)); 1790 blk_integrity_unregister(dm_disk(t->md)); 1791 } 1792 } 1793 1794 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, 1795 sector_t start, sector_t len, void *data) 1796 { 1797 unsigned long flush = (unsigned long) data; 1798 struct request_queue *q = bdev_get_queue(dev->bdev); 1799 1800 return (q->queue_flags & flush); 1801 } 1802 1803 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) 1804 { 1805 /* 1806 * Require at least one underlying device to support flushes. 1807 * t->devices includes internal dm devices such as mirror logs 1808 * so we need to use iterate_devices here, which targets 1809 * supporting flushes must provide. 
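	 * A target may also claim flush support directly by setting
	 * ti->flush_supported, in which case its devices are not consulted.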
1810 */ 1811 for (unsigned int i = 0; i < t->num_targets; i++) { 1812 struct dm_target *ti = dm_table_get_target(t, i); 1813 1814 if (!ti->num_flush_bios) 1815 continue; 1816 1817 if (ti->flush_supported) 1818 return true; 1819 1820 if (ti->type->iterate_devices && 1821 ti->type->iterate_devices(ti, device_flush_capable, (void *) flush)) 1822 return true; 1823 } 1824 1825 return false; 1826 } 1827 1828 static int device_dax_write_cache_enabled(struct dm_target *ti, 1829 struct dm_dev *dev, sector_t start, 1830 sector_t len, void *data) 1831 { 1832 struct dax_device *dax_dev = dev->dax_dev; 1833 1834 if (!dax_dev) 1835 return false; 1836 1837 if (dax_write_cache_enabled(dax_dev)) 1838 return true; 1839 return false; 1840 } 1841 1842 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, 1843 sector_t start, sector_t len, void *data) 1844 { 1845 return !bdev_nonrot(dev->bdev); 1846 } 1847 1848 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, 1849 sector_t start, sector_t len, void *data) 1850 { 1851 struct request_queue *q = bdev_get_queue(dev->bdev); 1852 1853 return !blk_queue_add_random(q); 1854 } 1855 1856 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev, 1857 sector_t start, sector_t len, void *data) 1858 { 1859 struct request_queue *q = bdev_get_queue(dev->bdev); 1860 1861 return !q->limits.max_write_zeroes_sectors; 1862 } 1863 1864 static bool dm_table_supports_write_zeroes(struct dm_table *t) 1865 { 1866 for (unsigned int i = 0; i < t->num_targets; i++) { 1867 struct dm_target *ti = dm_table_get_target(t, i); 1868 1869 if (!ti->num_write_zeroes_bios) 1870 return false; 1871 1872 if (!ti->type->iterate_devices || 1873 ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL)) 1874 return false; 1875 } 1876 1877 return true; 1878 } 1879 1880 static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev, 1881 sector_t start, sector_t len, void *data) 1882 { 1883 return !bdev_nowait(dev->bdev); 1884 } 1885 1886 static bool dm_table_supports_nowait(struct dm_table *t) 1887 { 1888 for (unsigned int i = 0; i < t->num_targets; i++) { 1889 struct dm_target *ti = dm_table_get_target(t, i); 1890 1891 if (!dm_target_supports_nowait(ti->type)) 1892 return false; 1893 1894 if (!ti->type->iterate_devices || 1895 ti->type->iterate_devices(ti, device_not_nowait_capable, NULL)) 1896 return false; 1897 } 1898 1899 return true; 1900 } 1901 1902 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, 1903 sector_t start, sector_t len, void *data) 1904 { 1905 return !bdev_max_discard_sectors(dev->bdev); 1906 } 1907 1908 static bool dm_table_supports_discards(struct dm_table *t) 1909 { 1910 for (unsigned int i = 0; i < t->num_targets; i++) { 1911 struct dm_target *ti = dm_table_get_target(t, i); 1912 1913 if (!ti->num_discard_bios) 1914 return false; 1915 1916 /* 1917 * Either the target provides discard support (as implied by setting 1918 * 'discards_supported') or it relies on _all_ data devices having 1919 * discard support. 
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}

static int device_not_secure_erase_capable(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{
	return !bdev_max_secure_erase_sectors(dev->bdev);
}

static bool dm_table_supports_secure_erase(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_secure_erase_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
			return false;
	}

	return true;
}

static int device_requires_stable_pages(struct dm_target *ti,
					struct dm_dev *dev, sector_t start,
					sector_t len, void *data)
{
	return bdev_stable_writes(dev->bdev);
}

int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *limits)
{
	bool wc = false, fua = false;
	int r;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (dm_table_supports_nowait(t))
		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);

	if (!dm_table_supports_discards(t)) {
		q->limits.max_discard_sectors = 0;
		q->limits.max_hw_discard_sectors = 0;
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		q->limits.discard_misaligned = 0;
	}

	if (!dm_table_supports_secure_erase(t))
		q->limits.max_secure_erase_sectors = 0;

	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
		wc = true;
		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
			fua = true;
	}
	blk_queue_write_cache(q, wc, fua);

	if (dm_table_supports_dax(t, device_not_dax_capable)) {
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
			set_dax_synchronous(t->md->dax_dev);
	} else
		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);

	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
		dax_write_cache(t->md->dax_dev, true);

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_zeroes(t))
		q->limits.max_write_zeroes_sectors = 0;

	dm_table_verify_integrity(t);

	/*
	 * Some devices don't use blk_integrity but still want stable pages
	 * because they do their own checksumming.
	 * If any underlying device requires stable pages, a table must require
	 * them as well. Only targets that support iterate_devices are considered:
	 * don't want error, zero, etc to require stable pages.
	 */
	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool. Only request-based targets use this.
2030 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not 2031 * have it set. 2032 */ 2033 if (blk_queue_add_random(q) && 2034 dm_table_any_dev_attr(t, device_is_not_random, NULL)) 2035 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2036 2037 /* 2038 * For a zoned target, setup the zones related queue attributes 2039 * and resources necessary for zone append emulation if necessary. 2040 */ 2041 if (blk_queue_is_zoned(q)) { 2042 r = dm_set_zones_restrictions(t, q); 2043 if (r) 2044 return r; 2045 if (!static_key_enabled(&zoned_enabled.key)) 2046 static_branch_enable(&zoned_enabled); 2047 } 2048 2049 dm_update_crypto_profile(q, t); 2050 disk_update_readahead(t->md->disk); 2051 2052 /* 2053 * Check for request-based device is left to 2054 * dm_mq_init_request_queue()->blk_mq_init_allocated_queue(). 2055 * 2056 * For bio-based device, only set QUEUE_FLAG_POLL when all 2057 * underlying devices supporting polling. 2058 */ 2059 if (__table_type_bio_based(t->type)) { 2060 if (dm_table_supports_poll(t)) 2061 blk_queue_flag_set(QUEUE_FLAG_POLL, q); 2062 else 2063 blk_queue_flag_clear(QUEUE_FLAG_POLL, q); 2064 } 2065 2066 return 0; 2067 } 2068 2069 struct list_head *dm_table_get_devices(struct dm_table *t) 2070 { 2071 return &t->devices; 2072 } 2073 2074 blk_mode_t dm_table_get_mode(struct dm_table *t) 2075 { 2076 return t->mode; 2077 } 2078 EXPORT_SYMBOL(dm_table_get_mode); 2079 2080 enum suspend_mode { 2081 PRESUSPEND, 2082 PRESUSPEND_UNDO, 2083 POSTSUSPEND, 2084 }; 2085 2086 static void suspend_targets(struct dm_table *t, enum suspend_mode mode) 2087 { 2088 lockdep_assert_held(&t->md->suspend_lock); 2089 2090 for (unsigned int i = 0; i < t->num_targets; i++) { 2091 struct dm_target *ti = dm_table_get_target(t, i); 2092 2093 switch (mode) { 2094 case PRESUSPEND: 2095 if (ti->type->presuspend) 2096 ti->type->presuspend(ti); 2097 break; 2098 case PRESUSPEND_UNDO: 2099 if (ti->type->presuspend_undo) 2100 ti->type->presuspend_undo(ti); 2101 break; 2102 case POSTSUSPEND: 2103 if (ti->type->postsuspend) 2104 ti->type->postsuspend(ti); 2105 break; 2106 } 2107 } 2108 } 2109 2110 void dm_table_presuspend_targets(struct dm_table *t) 2111 { 2112 if (!t) 2113 return; 2114 2115 suspend_targets(t, PRESUSPEND); 2116 } 2117 2118 void dm_table_presuspend_undo_targets(struct dm_table *t) 2119 { 2120 if (!t) 2121 return; 2122 2123 suspend_targets(t, PRESUSPEND_UNDO); 2124 } 2125 2126 void dm_table_postsuspend_targets(struct dm_table *t) 2127 { 2128 if (!t) 2129 return; 2130 2131 suspend_targets(t, POSTSUSPEND); 2132 } 2133 2134 int dm_table_resume_targets(struct dm_table *t) 2135 { 2136 unsigned int i; 2137 int r = 0; 2138 2139 lockdep_assert_held(&t->md->suspend_lock); 2140 2141 for (i = 0; i < t->num_targets; i++) { 2142 struct dm_target *ti = dm_table_get_target(t, i); 2143 2144 if (!ti->type->preresume) 2145 continue; 2146 2147 r = ti->type->preresume(ti); 2148 if (r) { 2149 DMERR("%s: %s: preresume failed, error = %d", 2150 dm_device_name(t->md), ti->type->name, r); 2151 return r; 2152 } 2153 } 2154 2155 for (i = 0; i < t->num_targets; i++) { 2156 struct dm_target *ti = dm_table_get_target(t, i); 2157 2158 if (ti->type->resume) 2159 ti->type->resume(ti); 2160 } 2161 2162 return 0; 2163 } 2164 2165 struct mapped_device *dm_table_get_md(struct dm_table *t) 2166 { 2167 return t->md; 2168 } 2169 EXPORT_SYMBOL(dm_table_get_md); 2170 2171 const char *dm_table_device_name(struct dm_table *t) 2172 { 2173 return dm_device_name(t->md); 2174 } 2175 EXPORT_SYMBOL_GPL(dm_table_device_name); 2176 2177 void 
dm_table_run_md_queue_async(struct dm_table *t)
{
	if (!dm_table_request_based(t))
		return;

	if (t->md->queue)
		blk_mq_run_hw_queues(t->md->queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);