// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
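
/*
 * Illustrative sizing example (assuming a 64-byte L1 cache line and an
 * 8-byte sector_t, so KEYS_PER_NODE = 8 and CHILDREN_PER_NODE = 9): a
 * table with 100 targets needs dm_div_up(100, 8) = 13 leaf nodes, and
 * int_log(13, 9) = 2 index levels above them, giving a btree of depth 3
 * for dm_table_find_target() to walk.
 */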

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 */
	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
			   GFP_KERNEL);
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	kvfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
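
/*
 * Layout note: the single allocation above holds the 'highs' array
 * (num sector_t entries, pre-filled with (sector_t)-1) immediately
 * followed by the 'targets' array (num struct dm_target entries),
 * which is why only t->highs is freed later.
 */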

int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md)
{
	struct dm_table *t;

	if (num_targets > DM_MAX_TARGETS)
		return -EOVERFLOW;

	t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	init_rwsem(&t->devices_lock);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -EOVERFLOW;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

static void dm_table_destroy_crypto_profile(struct dm_table *t);

void dm_table_destroy(struct dm_table *t)
{
	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		kvfree(t->index[t->depth - 2]);

	/* free the targets */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->dtr)
			ti->type->dtr(ti);

		dm_put_target_type(ti->type);
	}

	kvfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	dm_table_destroy_crypto_profile(t);

	kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * If possible, check whether an area of the destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size = bdev_nr_sectors(bdev);
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
		      dm_device_name(ti->table->md), bdev,
		      (unsigned long long)start,
		      (unsigned long long)len,
		      (unsigned long long)dev_size);
		return 1;
	}

	/*
	 * If the target is mapped to zoned block device(s), check
	 * that the zones are not partially mapped.
	 */
	if (bdev_is_zoned(bdev)) {
		unsigned int zone_sectors = bdev_zone_sectors(bdev);

		if (start & (zone_sectors - 1)) {
			DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
			      dm_device_name(ti->table->md),
			      (unsigned long long)start,
			      zone_sectors, bdev);
			return 1;
		}

		/*
		 * Note: The last zone of a zoned block device may be smaller
		 * than other zones. So for a target mapping the end of a
		 * zoned block device with such a zone, len would not be zone
		 * aligned. We do not allow such last smaller zone to be part
		 * of the mapping here to ensure that mappings with multiple
		 * devices do not end up with a smaller zone in the middle of
		 * the sector range.
		 */
		if (len & (zone_sectors - 1)) {
			DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
			      dm_device_name(ti->table->md),
			      (unsigned long long)len,
			      zone_sectors, bdev);
			return 1;
		}
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
		      dm_device_name(ti->table->md),
		      (unsigned long long)start,
		      limits->logical_block_size, bdev);
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
		      dm_device_name(ti->table->md),
		      (unsigned long long)len,
		      limits->logical_block_size, bdev);
		return 1;
	}

	return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 *
 * Note: the __ref annotation is because this function can call the __init
 * marked early_lookup_bdev when called during early boot code from dm-init.c.
 */
int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
			struct dm_dev **result)
{
	int r;
	dev_t dev;
	unsigned int major, minor;
	char dummy;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		r = lookup_bdev(path, &dev);
#ifndef MODULE
		if (r && system_state < SYSTEM_RUNNING)
			r = early_lookup_bdev(path, &dev);
#endif
		if (r)
			return r;
	}
	if (dev == disk_devt(t->md->disk))
		return -EINVAL;

	down_write(&t->devices_lock);

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd) {
			r = -ENOMEM;
			goto unlock_ret_r;
		}

		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
		if (r) {
			kfree(dd);
			goto unlock_ret_r;
		}

		refcount_set(&dd->count, 1);
		list_add(&dd->list, &t->devices);
		goto out;

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			goto unlock_ret_r;
	}
	refcount_inc(&dd->count);
out:
	up_write(&t->devices_lock);
	*result = dd->dm_dev;
	return 0;

unlock_ret_r:
	up_write(&t->devices_lock);
	return r;
}
EXPORT_SYMBOL(dm_get_device);

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %pg",
		       dm_device_name(ti->table->md), bdev);
		return 0;
	}

	if (blk_stack_limits(limits, &q->limits,
			     get_start_sect(bdev) + start) < 0)
		DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdev,
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	/*
	 * Only stack the integrity profile if the target doesn't have native
	 * integrity support.
	 */
	if (!dm_target_has_integrity(ti->type))
		queue_limits_stack_integrity_bdev(limits, bdev);
	return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct dm_table *t = ti->table;
	struct list_head *devices = &t->devices;
	struct dm_dev_internal *dd;

	down_write(&t->devices_lock);

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMERR("%s: device %s not in table devices list",
		      dm_device_name(t->md), d->name);
		goto unlock_ret;
	}
	if (refcount_dec_and_test(&dd->count)) {
		dm_put_table_device(t->md, d);
		list_del(&dd->list);
		kfree(dd);
	}

unlock_ret:
	up_write(&t->devices_lock);
}
EXPORT_SYMBOL(dm_put_device);
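
/*
 * Typical usage from a target constructor (an illustrative sketch, not a
 * definition from this file): the path argument may be "major:minor" or a
 * block device path, and the matching dm_put_device() belongs in the
 * target's destructor:
 *
 *	struct dm_dev *dev;
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *	...
 *	dm_put_device(ti, dev);
 */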

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *t, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!t->num_targets)
		return !ti->begin;

	prev = &t->targets[t->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned int *size, char **old_argv)
{
	char **argv;
	unsigned int new_size;
	gfp_t gfp;

	if (*size) {
		new_size = *size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv) {
		if (old_argv)
			memcpy(argv, old_argv, *size * sizeof(*argv));
		/*
		 * Record the new size even on the first allocation, so the
		 * caller does not immediately reallocate an array that is
		 * already big enough.
		 */
		*size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned int array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
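
/*
 * For example, the (writable) input string "0 409600 linear /dev/sda 8192"
 * is split in place into argc = 5 tokens; a backslash escapes the next
 * character, so "a\ b" becomes the single token "a b".
 */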

static void dm_set_stacking_limits(struct queue_limits *limits)
{
	blk_set_stacking_limits(limits);
	limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
}

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully. If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *t,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *ti;
	struct queue_limits ti_limits;
	unsigned int i;

	/*
	 * Check each entry in the table in turn.
	 */
	for (i = 0; i < t->num_targets; i++) {
		ti = dm_table_get_target(t, i);

		dm_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMERR("%s: table line %u (start sect %llu len %llu) "
		      "not aligned to h/w logical block size %u",
		      dm_device_name(t->md), i,
		      (unsigned long long) ti->begin,
		      (unsigned long long) ti->len,
		      limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
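
/*
 * Worked example: with a 4096-byte logical_block_size (8 sectors), a target
 * of len 12 sectors leaves next_target_start = 4 and remaining = 4, so the
 * following target's logical_block_size must divide that 4-sector piece and
 * the table must not end mid-block, otherwise it is rejected as misaligned.
 */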

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *ti;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	ti = t->targets + t->num_targets;
	memset(ti, 0, sizeof(*ti));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	ti->type = dm_get_target_type(type);
	if (!ti->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(ti->type)) {
		if (t->num_targets) {
			ti->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(ti->type) &&
	    !(t->mode & BLK_OPEN_WRITE)) {
		ti->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != ti->type) {
			ti->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(ti->type)) {
		if (t->num_targets) {
			ti->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = ti->type;
	}

	ti->table = t;
	ti->begin = start;
	ti->len = len;
	ti->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, ti)) {
		ti->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		ti->error = "couldn't split parameters";
		goto bad;
	}

	r = ti->type->ctr(ti, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = ti->begin + ti->len - 1;

	if (!ti->num_discard_bios && ti->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
		static_branch_enable(&swap_bios_enabled);

	return 0;

bad:
	DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r));
	dm_put_target_type(ti->type);
	return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned int *value, char **error, unsigned int grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
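
/*
 * Sketch of how a target constructor might use these helpers (the names
 * below are illustrative, not defined in this file):
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 4, "Invalid number of feature args"},
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned int num_features;
 *	char *error;
 *
 *	if (dm_read_arg_group(_args, &as, &num_features, &error))
 *		return -EINVAL;
 *	while (num_features--)
 *		... dm_shift_arg(&as) ...
 */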

static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
	return (table_type == DM_TYPE_BIO_BASED ||
		table_type == DM_TYPE_DAX_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
	return table_type == DM_TYPE_REQUEST_BASED;
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
	t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

/* validate the dax capability of the target device span */
static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	if (dev->dax_dev)
		return false;

	DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
	return true;
}

/* Check devices support synchronous DAX */
static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
					      sector_t start, sector_t len, void *data)
{
	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
}

static bool dm_table_supports_dax(struct dm_table *t,
				  iterate_devices_callout_fn iterate_fn)
{
	/* Ensure that all targets support DAX. */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->direct_access)
			return false;

		if (dm_target_is_wildcard(ti->type) ||
		    !ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, iterate_fn, NULL))
			return false;
	}

	return true;
}

static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	/* request-based cannot stack on partitions! */
	if (bdev_is_partition(bdev))
		return false;

	return queue_is_mq(q);
}

static int dm_table_determine_type(struct dm_table *t)
{
	unsigned int bio_based = 0, request_based = 0, hybrid = 0;
	struct dm_target *ti;
	struct list_head *devices = dm_table_get_devices(t);
	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
		if (t->type == DM_TYPE_BIO_BASED) {
			/* possibly upgrade to a variant of bio-based */
			goto verify_bio_based;
		}
		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
		goto verify_rq_based;
	}

	for (unsigned int i = 0; i < t->num_targets; i++) {
		ti = dm_table_get_target(t, i);
		if (dm_target_hybrid(ti))
			hybrid = 1;
		else if (dm_target_request_based(ti))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMERR("Inconsistent table: different target types can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		if (__table_type_request_based(live_md_type))
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
verify_bio_based:
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		if (dm_table_supports_dax(t, device_not_dax_capable) ||
		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
			t->type = DM_TYPE_DAX_BIO_BASED;
		}
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMERR("request-based DM doesn't support multiple targets");
		return -EINVAL;
	}

	if (list_empty(devices)) {
		int srcu_idx;
		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

		/* inherit live table's type */
		if (live_table)
			t->type = live_table->type;
		dm_put_live_table(t->md, srcu_idx);
		return 0;
	}

	ti = dm_table_get_immutable_target(t);
	if (!ti) {
		DMERR("table load rejected: immutable target is required");
		return -EINVAL;
	} else if (ti->max_io_len) {
		DMERR("table load rejected: immutable target that splits IO is not supported");
		return -EINVAL;
	}

	/* Non-request-stackable devices can't be used for request-based dm */
	if (!ti->type->iterate_devices ||
	    !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
		DMERR("table load rejected: including non-request-stackable devices");
		return -EINVAL;
	}

	return 0;
}
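
/*
 * In practice dm-multipath is the canonical request-based (and immutable)
 * target that reaches verify_rq_based above; bio-based tables (linear,
 * striped, crypt, thin, ...) are the common case and may contain many
 * targets.
 */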

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
	/* Immutable target is implicitly a singleton */
	if (t->num_targets > 1 ||
	    !dm_target_is_immutable(t->targets[0].type))
		return NULL;

	return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (dm_target_is_wildcard(ti->type))
			return ti;
	}

	return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
	return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
	return __table_type_request_based(dm_table_get_type(t));
}

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	enum dm_queue_mode type = dm_table_get_type(t);
	unsigned int per_io_data_size = 0, front_pad, io_front_pad;
	unsigned int min_pool_size = 0, pool_size;
	struct dm_md_mempools *pools;
	unsigned int bioset_flags = 0;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMERR("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	if (!pools)
		return -ENOMEM;

	if (type == DM_TYPE_REQUEST_BASED) {
		pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		goto init_bs;
	}

	if (md->queue->limits.features & BLK_FEAT_POLL)
		bioset_flags |= BIOSET_PERCPU_CACHE;

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
		min_pool_size = max(min_pool_size, ti->num_flush_bios);
	}
	pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
	front_pad = roundup(per_io_data_size,
			    __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;

	io_front_pad = roundup(per_io_data_size,
			       __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
	if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
		goto out_free_pools;
	if (t->integrity_supported &&
	    bioset_integrity_create(&pools->io_bs, pool_size))
		goto out_free_pools;
init_bs:
	if (bioset_init(&pools->bs, pool_size, front_pad, 0))
		goto out_free_pools;
	if (t->integrity_supported &&
	    bioset_integrity_create(&pools->bs, pool_size))
		goto out_free_pools;

	t->mempools = pools;
	return 0;

out_free_pools:
	dm_free_md_mempools(pools);
	return -ENOMEM;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

struct dm_crypto_profile {
	struct blk_crypto_profile profile;
	struct mapped_device *md;
};

static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	const struct blk_crypto_key *key = data;

	blk_crypto_evict_key(dev->bdev, key);
	return 0;
}

/*
 * When an inline encryption key is evicted from a device-mapper device, evict
 * it from all the underlying devices.
 */
static int dm_keyslot_evict(struct blk_crypto_profile *profile,
			    const struct blk_crypto_key *key, unsigned int slot)
{
	struct mapped_device *md =
		container_of(profile, struct dm_crypto_profile, profile)->md;
	struct dm_table *t;
	int srcu_idx;

	t = dm_get_live_table(md, &srcu_idx);
	if (!t)
		return 0;

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices)
			continue;
		ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
					  (void *)key);
	}

	dm_put_live_table(md, srcu_idx);
	return 0;
}

static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	struct blk_crypto_profile *parent = data;
	struct blk_crypto_profile *child =
		bdev_get_queue(dev->bdev)->crypto_profile;

	blk_crypto_intersect_capabilities(parent, child);
	return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
	struct dm_crypto_profile *dmcp = container_of(profile,
						      struct dm_crypto_profile,
						      profile);

	if (!profile)
		return;

	blk_crypto_profile_destroy(profile);
	kfree(dmcp);
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
	dm_destroy_crypto_profile(t->crypto_profile);
	t->crypto_profile = NULL;
}

/*
 * Constructs and initializes t->crypto_profile with a crypto profile that
 * represents the common set of crypto capabilities of the devices described by
 * the dm_table. However, if the constructed crypto profile doesn't support all
 * crypto capabilities that are supported by the current mapped_device, it
 * returns an error instead, since we don't support removing crypto capabilities
 * on table changes. Finally, if the constructed crypto profile is "empty" (has
 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
 */
static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	struct dm_crypto_profile *dmcp;
	struct blk_crypto_profile *profile;
	unsigned int i;
	bool empty_profile = true;

	dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
	if (!dmcp)
		return -ENOMEM;
	dmcp->md = t->md;

	profile = &dmcp->profile;
	blk_crypto_profile_init(profile, 0);
	profile->ll_ops.keyslot_evict = dm_keyslot_evict;
	profile->max_dun_bytes_supported = UINT_MAX;
	memset(profile->modes_supported, 0xFF,
	       sizeof(profile->modes_supported));

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_passes_crypto(ti->type)) {
			blk_crypto_intersect_capabilities(profile, NULL);
			break;
		}
		if (!ti->type->iterate_devices)
			continue;
		ti->type->iterate_devices(ti,
					  device_intersect_crypto_capabilities,
					  profile);
	}

	if (t->md->queue &&
	    !blk_crypto_has_capabilities(profile,
					 t->md->queue->crypto_profile)) {
		DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
		dm_destroy_crypto_profile(profile);
		return -EINVAL;
	}

	/*
	 * If the new profile doesn't actually support any crypto capabilities,
	 * we may as well represent it with a NULL profile.
	 */
	for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
		if (profile->modes_supported[i]) {
			empty_profile = false;
			break;
		}
	}

	if (empty_profile) {
		dm_destroy_crypto_profile(profile);
		profile = NULL;
	}

	/*
	 * t->crypto_profile is only set temporarily while the table is being
	 * set up, and it gets set to NULL after the profile has been
	 * transferred to the request_queue.
	 */
	t->crypto_profile = profile;

	return 0;
}

static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
	if (!t->crypto_profile)
		return;

	/* Make the crypto profile less restrictive. */
	if (!q->crypto_profile) {
		blk_crypto_register(t->crypto_profile, q);
	} else {
		blk_crypto_update_capabilities(q->crypto_profile,
					       t->crypto_profile);
		dm_destroy_crypto_profile(t->crypto_profile);
	}
	t->crypto_profile = NULL;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}

static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
}

#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_construct_crypto_profile(t);
	if (r) {
		DMERR("could not construct crypto profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}
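
/*
 * A table is typically built by the DM_TABLE_LOAD ioctl path roughly as:
 * dm_table_create(), then one dm_table_add_target() per table line, then
 * dm_table_complete(); it only becomes live when the device is resumed and
 * the previous table (if any) is swapped out and dm_table_destroy()ed.
 */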

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

inline sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer for NULL
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	if (unlikely(sector >= dm_table_get_size(t)))
		return NULL;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
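
/*
 * Example walk (KEYS_PER_NODE = 8, depth = 2): at level 0 the root node's
 * keys are the highest sector handled via each child leaf; the first key
 * >= the lookup sector selects a leaf at level 1, and the first leaf key
 * >= the sector selects the matching entry in t->targets[].
 */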

/*
 * type->iterate_devices() should be called when the sanity check needs to
 * iterate and check all underlying data devices. iterate_devices() will
 * iterate all underlying data devices until it encounters a non-zero return
 * code, returned either by the input iterate_devices_callout_fn or by
 * iterate_devices() itself internally.
 *
 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
 * iterate multiple underlying devices internally, in which case a non-zero
 * return code returned by iterate_devices_callout_fn will stop the iteration
 * in advance.
 *
 * Cases requiring _any_ underlying device supporting some kind of attribute,
 * should use the iteration structure like dm_table_any_dev_attr(), or call
 * it directly. @func should handle semantics of positive examples, e.g.
 * capable of something.
 *
 * Cases requiring _all_ underlying devices supporting some kind of attribute,
 * should use the iteration structure like dm_table_supports_nowait() or
 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
 * uses an @anti_func that handles semantics of counter examples, e.g. not
 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
 */
static bool dm_table_any_dev_attr(struct dm_table *t,
				  iterate_devices_callout_fn func, void *data)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, func, data))
			return true;
	}

	return false;
}

static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned int *num_devices = data;

	(*num_devices)++;

	return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);
		unsigned int num_devices = 0;

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	bool *zoned = data;

	return bdev_is_zoned(dev->bdev) != *zoned;
}

static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{
	return bdev_is_zoned(dev->bdev);
}

/*
 * Check the device zoned model based on the target feature flag. If the target
 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
 * also accepted but all devices must have the same zoned model. If the target
 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
 * zoned model with all zoned devices having the same zone size.
 */
static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/*
		 * For the wildcard target (dm-error), if we do not have a
		 * backing device, we must always return false. If we have a
		 * backing device, the result must depend on checking zoned
		 * model, like for any other target. So for this, check directly
		 * if the target backing device is zoned as we get "false" when
		 * dm-error was set without a backing device.
		 */
		if (dm_target_is_wildcard(ti->type) &&
		    !ti->type->iterate_devices(ti, device_is_zoned_model, NULL))
			return false;

		if (dm_target_supports_zoned_hm(ti->type)) {
			if (!ti->type->iterate_devices ||
			    ti->type->iterate_devices(ti, device_not_zoned,
						      &zoned))
				return false;
		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
			if (zoned)
				return false;
		}
	}

	return true;
}

static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	unsigned int *zone_sectors = data;

	if (!bdev_is_zoned(dev->bdev))
		return 0;
	return bdev_zone_sectors(dev->bdev) != *zone_sectors;
}

/*
 * Check consistency of zoned model and zone sectors across all targets. For
 * zone sectors, if the destination device is a zoned block device, it shall
 * have the specified zone_sectors.
 */
static int validate_hardware_zoned(struct dm_table *t, bool zoned,
				   unsigned int zone_sectors)
{
	if (!zoned)
		return 0;

	if (!dm_table_supports_zoned(t, zoned)) {
		DMERR("%s: zoned model is not consistent across all devices",
		      dm_device_name(t->md));
		return -EINVAL;
	}

	/* Check zone size validity and compatibility */
	if (!zone_sectors || !is_power_of_2(zone_sectors))
		return -EINVAL;

	if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
		DMERR("%s: zone sectors is not consistent across all zoned devices",
		      dm_device_name(t->md));
		return -EINVAL;
	}

	return 0;
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *t,
			      struct queue_limits *limits)
{
	struct queue_limits ti_limits;
	unsigned int zone_sectors = 0;
	bool zoned = false;

	dm_set_stacking_limits(limits);

	t->integrity_supported = true;
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_passes_integrity(ti->type))
			t->integrity_supported = false;
	}

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		dm_set_stacking_limits(&ti_limits);

		if (!ti->type->iterate_devices) {
			/* Set I/O hints portion of queue limits */
			if (ti->type->io_hints)
				ti->type->io_hints(ti, &ti_limits);
			goto combine_limits;
		}

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		if (!zoned && (ti_limits.features & BLK_FEAT_ZONED)) {
			/*
			 * After stacking all limits, validate all devices
			 * in table support this zoned model and zone sectors.
			 */
			zoned = (ti_limits.features & BLK_FEAT_ZONED);
			zone_sectors = ti_limits.chunk_sectors;
		}

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device (start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(t->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);

		if (t->integrity_supported ||
		    dm_target_has_integrity(ti->type)) {
			if (!queue_limits_stack_integrity(limits, &ti_limits)) {
				DMWARN("%s: adding target device (start sect %llu len %llu) "
				       "disabled integrity support due to incompatibility",
				       dm_device_name(t->md),
				       (unsigned long long) ti->begin,
				       (unsigned long long) ti->len);
				t->integrity_supported = false;
			}
		}
	}

	/*
	 * Verify that the zoned model and zone sectors, as determined before
	 * any .io_hints override, are the same across all devices in the table.
	 * - this is especially relevant if .io_hints is emulating a disk-managed
	 *   zoned model on host-managed zoned block devices.
	 * BUT...
	 */
	if (limits->features & BLK_FEAT_ZONED) {
		/*
		 * ...IF the above limits stacking determined a zoned model
		 * validate that all of the table's devices conform to it.
		 */
		zoned = limits->features & BLK_FEAT_ZONED;
		zone_sectors = limits->chunk_sectors;
	}
	if (validate_hardware_zoned(t, zoned, zone_sectors))
		return -EINVAL;

	return validate_hardware_logical_block_alignment(t, limits);
}

/*
 * Check if a target requires flush support even if none of the underlying
 * devices need it (e.g. to persist target-specific metadata).
 */
static bool dm_table_supports_flush(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->num_flush_bios && ti->flush_supported)
			return true;
	}

	return false;
}

static int device_dax_write_cache_enabled(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	struct dax_device *dax_dev = dev->dax_dev;

	if (!dax_dev)
		return false;

	if (dax_write_cache_enabled(dax_dev))
		return true;
	return false;
}

static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !q->limits.max_write_zeroes_sectors;
}

static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_write_zeroes_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
			return false;
	}

	return true;
}

static bool dm_table_supports_nowait(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_supports_nowait(ti->type))
			return false;
	}

	return true;
}

static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	return !bdev_max_discard_sectors(dev->bdev);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_discard_bios)
			return false;

		/*
		 * Either the target provides discard support (as implied by setting
		 * 'discards_supported') or it relies on _all_ data devices having
		 * discard support.
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}

static int device_not_secure_erase_capable(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{
	return !bdev_max_secure_erase_sectors(dev->bdev);
}

static bool dm_table_supports_secure_erase(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_secure_erase_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
			return false;
	}

	return true;
}

int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *limits)
{
	int r;

	if (!dm_table_supports_nowait(t))
		limits->features &= ~BLK_FEAT_NOWAIT;

	/*
	 * The current polling implementation does not support request based
	 * stacking.
	 */
	if (!__table_type_bio_based(t->type))
		limits->features &= ~BLK_FEAT_POLL;

	if (!dm_table_supports_discards(t)) {
		limits->max_hw_discard_sectors = 0;
		limits->discard_granularity = 0;
		limits->discard_alignment = 0;
	}

	if (!dm_table_supports_write_zeroes(t))
		limits->max_write_zeroes_sectors = 0;

	if (!dm_table_supports_secure_erase(t))
		limits->max_secure_erase_sectors = 0;

	if (dm_table_supports_flush(t))
		limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;

	if (dm_table_supports_dax(t, device_not_dax_capable)) {
		limits->features |= BLK_FEAT_DAX;
		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
			set_dax_synchronous(t->md->dax_dev);
	} else
		limits->features &= ~BLK_FEAT_DAX;

	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
		dax_write_cache(t->md->dax_dev, true);

	/* For a zoned table, setup the zone related queue attributes. */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    (limits->features & BLK_FEAT_ZONED)) {
		r = dm_set_zones_restrictions(t, q, limits);
		if (r)
			return r;
	}

	r = queue_limits_set(q, limits);
	if (r)
		return r;

	/*
	 * Now that the limits are set, check the zones mapped by the table
	 * and setup the resources for zone append emulation if necessary.
	 */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    (limits->features & BLK_FEAT_ZONED)) {
		r = dm_revalidate_zones(t, q);
		if (r)
			return r;
	}

	dm_update_crypto_profile(q, t);
	return 0;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

blk_mode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);

enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};

static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	lockdep_assert_held(&t->md->suspend_lock);

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, POSTSUSPEND);
}

int dm_table_resume_targets(struct dm_table *t)
{
	unsigned int i;
	int r = 0;

	lockdep_assert_held(&t->md->suspend_lock);

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
			return r;
		}
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

const char *dm_table_device_name(struct dm_table *t)
{
	return dm_device_name(t->md);
}
EXPORT_SYMBOL_GPL(dm_table_device_name);

void dm_table_run_md_queue_async(struct dm_table *t)
{
	if (!dm_table_request_based(t))
		return;

	if (t->md->queue)
		blk_mq_run_hw_queues(t->md->queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);