xref: /linux/drivers/md/dm-table.c (revision 5e3b7009f116f684ac6b93d8924506154f3b1f6d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2001 Sistina Software (UK) Limited.
4  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include "dm-core.h"
10 #include "dm-rq.h"
11 
12 #include <linux/module.h>
13 #include <linux/vmalloc.h>
14 #include <linux/blkdev.h>
15 #include <linux/blk-integrity.h>
16 #include <linux/namei.h>
17 #include <linux/ctype.h>
18 #include <linux/string.h>
19 #include <linux/slab.h>
20 #include <linux/interrupt.h>
21 #include <linux/mutex.h>
22 #include <linux/delay.h>
23 #include <linux/atomic.h>
24 #include <linux/blk-mq.h>
25 #include <linux/mount.h>
26 #include <linux/dax.h>
27 
28 #define DM_MSG_PREFIX "table"
29 
30 #define NODE_SIZE L1_CACHE_BYTES
31 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
32 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
33 
34 /*
35  * Similar to ceiling(log_base(n))
36  */
37 static unsigned int int_log(unsigned int n, unsigned int base)
38 {
39 	int result = 0;
40 
41 	while (n > 1) {
42 		n = dm_div_up(n, base);
43 		result++;
44 	}
45 
46 	return result;
47 }
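
/*
 * Worked example (illustrative; assumes 64-byte cache lines and an 8-byte
 * sector_t, so KEYS_PER_NODE == 8 and CHILDREN_PER_NODE == 9):
 *
 *	int_log(100, 9) == 3:
 *	n = 100 -> dm_div_up(100, 9) == 12	(result == 1)
 *	n =  12 -> dm_div_up(12, 9)  ==  2	(result == 2)
 *	n =   2 -> dm_div_up(2, 9)   ==  1	(result == 3)
 *
 * which matches ceil(log_9(100)) == ceil(2.1) == 3.
 */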
48 
49 /*
50  * Calculate the index of the child node for the n'th node's k'th key.
51  */
52 static inline unsigned int get_child(unsigned int n, unsigned int k)
53 {
54 	return (n * CHILDREN_PER_NODE) + k;
55 }
56 
57 /*
58  * Return the n'th node of level l from table t.
59  */
60 static inline sector_t *get_node(struct dm_table *t,
61 				 unsigned int l, unsigned int n)
62 {
63 	return t->index[l] + (n * KEYS_PER_NODE);
64 }
65 
66 /*
67  * Return the highest key that you could look up from the n'th
68  * node on level l of the btree.
69  */
70 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
71 {
72 	for (; l < t->depth - 1; l++)
73 		n = get_child(n, CHILDREN_PER_NODE - 1);
74 
75 	if (n >= t->counts[l])
76 		return (sector_t) -1;
77 
78 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
79 }
80 
81 /*
82  * Fills in a level of the btree based on the highs of the level
83  * below it.
84  */
85 static int setup_btree_index(unsigned int l, struct dm_table *t)
86 {
87 	unsigned int n, k;
88 	sector_t *node;
89 
90 	for (n = 0U; n < t->counts[l]; n++) {
91 		node = get_node(t, l, n);
92 
93 		for (k = 0U; k < KEYS_PER_NODE; k++)
94 			node[k] = high(t, l + 1, get_child(n, k));
95 	}
96 
97 	return 0;
98 }
99 
100 /*
101  * highs and targets are managed as dynamic arrays during a
102  * table load.
103  */
104 static int alloc_targets(struct dm_table *t, unsigned int num)
105 {
106 	sector_t *n_highs;
107 	struct dm_target *n_targets;
108 
109 	/*
110 	 * Allocate both the target array and offset array at once.
111 	 */
112 	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
113 			   GFP_KERNEL);
114 	if (!n_highs)
115 		return -ENOMEM;
116 
117 	n_targets = (struct dm_target *) (n_highs + num);
118 
119 	memset(n_highs, -1, sizeof(*n_highs) * num);
120 	kvfree(t->highs);
121 
122 	t->num_allocated = num;
123 	t->highs = n_highs;
124 	t->targets = n_targets;
125 
126 	return 0;
127 }
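
/*
 * The resulting layout of the single allocation made above (with num == n):
 *
 *	t->highs:   | sector_t 0  | ... | sector_t n-1  |
 *	t->targets: | dm_target 0 | ... | dm_target n-1 |
 *
 * t->targets begins immediately after the last element of t->highs, so a
 * single kvfree(t->highs) in dm_table_destroy() releases both arrays.
 */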
128 
129 int dm_table_create(struct dm_table **result, blk_mode_t mode,
130 		    unsigned int num_targets, struct mapped_device *md)
131 {
132 	struct dm_table *t;
133 
134 	if (num_targets > DM_MAX_TARGETS)
135 		return -EOVERFLOW;
136 
137 	t = kzalloc(sizeof(*t), GFP_KERNEL);
138 
139 	if (!t)
140 		return -ENOMEM;
141 
142 	INIT_LIST_HEAD(&t->devices);
143 	init_rwsem(&t->devices_lock);
144 
145 	if (!num_targets)
146 		num_targets = KEYS_PER_NODE;
147 
148 	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
149 
150 	if (!num_targets) {
151 		kfree(t);
152 		return -EOVERFLOW;
153 	}
154 
155 	if (alloc_targets(t, num_targets)) {
156 		kfree(t);
157 		return -ENOMEM;
158 	}
159 
160 	t->type = DM_TYPE_NONE;
161 	t->mode = mode;
162 	t->md = md;
163 	*result = t;
164 	return 0;
165 }
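
/*
 * Minimal usage sketch (illustrative only; error handling is abbreviated,
 * and "md", "len" and "params" are assumed to be supplied by the caller):
 *
 *	struct dm_table *t;
 *	int r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *
 *	if (r)
 *		return r;
 *	r = dm_table_add_target(t, "linear", 0, len, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 */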
166 
167 static void free_devices(struct list_head *devices, struct mapped_device *md)
168 {
169 	struct list_head *tmp, *next;
170 
171 	list_for_each_safe(tmp, next, devices) {
172 		struct dm_dev_internal *dd =
173 		    list_entry(tmp, struct dm_dev_internal, list);
174 		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
175 		       dm_device_name(md), dd->dm_dev->name);
176 		dm_put_table_device(md, dd->dm_dev);
177 		kfree(dd);
178 	}
179 }
180 
181 static void dm_table_destroy_crypto_profile(struct dm_table *t);
182 
183 void dm_table_destroy(struct dm_table *t)
184 {
185 	if (!t)
186 		return;
187 
188 	/* free the indexes */
189 	if (t->depth >= 2)
190 		kvfree(t->index[t->depth - 2]);
191 
192 	/* free the targets */
193 	for (unsigned int i = 0; i < t->num_targets; i++) {
194 		struct dm_target *ti = dm_table_get_target(t, i);
195 
196 		if (ti->type->dtr)
197 			ti->type->dtr(ti);
198 
199 		dm_put_target_type(ti->type);
200 	}
201 
202 	kvfree(t->highs);
203 
204 	/* free the device list */
205 	free_devices(&t->devices, t->md);
206 
207 	dm_free_md_mempools(t->mempools);
208 
209 	dm_table_destroy_crypto_profile(t);
210 
211 	kfree(t);
212 }
213 
214 /*
215  * See if we've already got a device in the list.
216  */
217 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
218 {
219 	struct dm_dev_internal *dd;
220 
221 	list_for_each_entry(dd, l, list)
222 		if (dd->dm_dev->bdev->bd_dev == dev)
223 			return dd;
224 
225 	return NULL;
226 }
227 
228 /*
229  * If possible, this checks whether an area of the destination device is invalid.
230  */
231 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
232 				  sector_t start, sector_t len, void *data)
233 {
234 	struct queue_limits *limits = data;
235 	struct block_device *bdev = dev->bdev;
236 	sector_t dev_size = bdev_nr_sectors(bdev);
237 	unsigned short logical_block_size_sectors =
238 		limits->logical_block_size >> SECTOR_SHIFT;
239 
240 	if (!dev_size)
241 		return 0;
242 
243 	if ((start >= dev_size) || (start + len > dev_size)) {
244 		DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
245 		      dm_device_name(ti->table->md), bdev,
246 		      (unsigned long long)start,
247 		      (unsigned long long)len,
248 		      (unsigned long long)dev_size);
249 		return 1;
250 	}
251 
252 	/*
253 	 * If the target is mapped to zoned block device(s), check
254 	 * that the zones are not partially mapped.
255 	 */
256 	if (bdev_is_zoned(bdev)) {
257 		unsigned int zone_sectors = bdev_zone_sectors(bdev);
258 
259 		if (start & (zone_sectors - 1)) {
260 			DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
261 			      dm_device_name(ti->table->md),
262 			      (unsigned long long)start,
263 			      zone_sectors, bdev);
264 			return 1;
265 		}
266 
267 		/*
268 		 * Note: The last zone of a zoned block device may be smaller
269 		 * than other zones. So for a target mapping the end of a
270 		 * zoned block device with such a zone, len would not be zone
271 		 * aligned. We do not allow such last smaller zone to be part
272 		 * of the mapping here to ensure that mappings with multiple
273 		 * devices do not end up with a smaller zone in the middle of
274 		 * the sector range.
275 		 */
276 		if (len & (zone_sectors - 1)) {
277 			DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
278 			      dm_device_name(ti->table->md),
279 			      (unsigned long long)len,
280 			      zone_sectors, bdev);
281 			return 1;
282 		}
283 	}
284 
285 	if (logical_block_size_sectors <= 1)
286 		return 0;
287 
288 	if (start & (logical_block_size_sectors - 1)) {
289 		DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
290 		      dm_device_name(ti->table->md),
291 		      (unsigned long long)start,
292 		      limits->logical_block_size, bdev);
293 		return 1;
294 	}
295 
296 	if (len & (logical_block_size_sectors - 1)) {
297 		DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
298 		      dm_device_name(ti->table->md),
299 		      (unsigned long long)len,
300 		      limits->logical_block_size, bdev);
301 		return 1;
302 	}
303 
304 	return 0;
305 }
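
/*
 * Worked example (illustrative): with a 4096-byte logical block size,
 * logical_block_size_sectors == 8, so start and len must be multiples of
 * 8 sectors; start == 15 fails the "start & (8 - 1)" test while start == 16
 * passes.  The zone checks above use the same power-of-two masking.
 */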
306 
307 /*
308  * This upgrades the mode on an already open dm_dev, being
309  * careful to leave things as they were if we fail to reopen the
310  * device and not to touch the existing bdev field in case
311  * it is accessed concurrently.
312  */
313 static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode,
314 			struct mapped_device *md)
315 {
316 	int r;
317 	struct dm_dev *old_dev, *new_dev;
318 
319 	old_dev = dd->dm_dev;
320 
321 	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
322 				dd->dm_dev->mode | new_mode, &new_dev);
323 	if (r)
324 		return r;
325 
326 	dd->dm_dev = new_dev;
327 	dm_put_table_device(md, old_dev);
328 
329 	return 0;
330 }
331 
332 /*
333  * Add a device to the list, or just increment the usage count if
334  * it's already present.
335  *
336  * Note: the __ref annotation is because this function can call the __init
337  * marked early_lookup_bdev when called during early boot from dm-init.c.
338  */
339 int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
340 		  struct dm_dev **result)
341 {
342 	int r;
343 	dev_t dev;
344 	unsigned int major, minor;
345 	char dummy;
346 	struct dm_dev_internal *dd;
347 	struct dm_table *t = ti->table;
348 
349 	BUG_ON(!t);
350 
351 	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
352 		/* Extract the major/minor numbers */
353 		dev = MKDEV(major, minor);
354 		if (MAJOR(dev) != major || MINOR(dev) != minor)
355 			return -EOVERFLOW;
356 	} else {
357 		r = lookup_bdev(path, &dev);
358 #ifndef MODULE
359 		if (r && system_state < SYSTEM_RUNNING)
360 			r = early_lookup_bdev(path, &dev);
361 #endif
362 		if (r)
363 			return r;
364 	}
365 	if (dev == disk_devt(t->md->disk))
366 		return -EINVAL;
367 
368 	down_write(&t->devices_lock);
369 
370 	dd = find_device(&t->devices, dev);
371 	if (!dd) {
372 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
373 		if (!dd) {
374 			r = -ENOMEM;
375 			goto unlock_ret_r;
376 		}
377 
378 		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
379 		if (r) {
380 			kfree(dd);
381 			goto unlock_ret_r;
382 		}
383 
384 		refcount_set(&dd->count, 1);
385 		list_add(&dd->list, &t->devices);
386 		goto out;
387 
388 	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
389 		r = upgrade_mode(dd, mode, t->md);
390 		if (r)
391 			goto unlock_ret_r;
392 	}
393 	refcount_inc(&dd->count);
394 out:
395 	up_write(&t->devices_lock);
396 	*result = dd->dm_dev;
397 	return 0;
398 
399 unlock_ret_r:
400 	up_write(&t->devices_lock);
401 	return r;
402 }
403 EXPORT_SYMBOL(dm_get_device);
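
/*
 * Typical use of dm_get_device() from a target constructor (a sketch of the
 * pattern used by targets such as dm-linear; "lc" is a hypothetical
 * per-target private structure):
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *			  &lc->dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * The matching dm_put_device() call belongs in the target's destructor.
 */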
404 
405 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
406 				sector_t start, sector_t len, void *data)
407 {
408 	struct queue_limits *limits = data;
409 	struct block_device *bdev = dev->bdev;
410 	struct request_queue *q = bdev_get_queue(bdev);
411 
412 	if (unlikely(!q)) {
413 		DMWARN("%s: Cannot set limits for nonexistent device %pg",
414 		       dm_device_name(ti->table->md), bdev);
415 		return 0;
416 	}
417 
418 	if (blk_stack_limits(limits, &q->limits,
419 			get_start_sect(bdev) + start) < 0)
420 		DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
421 		       "physical_block_size=%u, logical_block_size=%u, "
422 		       "alignment_offset=%u, start=%llu",
423 		       dm_device_name(ti->table->md), bdev,
424 		       q->limits.physical_block_size,
425 		       q->limits.logical_block_size,
426 		       q->limits.alignment_offset,
427 		       (unsigned long long) start << SECTOR_SHIFT);
428 
429 	/*
430 	 * Only stack the integrity profile if the target doesn't have native
431 	 * integrity support.
432 	 */
433 	if (!dm_target_has_integrity(ti->type))
434 		queue_limits_stack_integrity_bdev(limits, bdev);
435 	return 0;
436 }
437 
438 /*
439  * Decrement a device's use count and remove it if necessary.
440  */
441 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
442 {
443 	int found = 0;
444 	struct dm_table *t = ti->table;
445 	struct list_head *devices = &t->devices;
446 	struct dm_dev_internal *dd;
447 
448 	down_write(&t->devices_lock);
449 
450 	list_for_each_entry(dd, devices, list) {
451 		if (dd->dm_dev == d) {
452 			found = 1;
453 			break;
454 		}
455 	}
456 	if (!found) {
457 		DMERR("%s: device %s not in table devices list",
458 		      dm_device_name(t->md), d->name);
459 		goto unlock_ret;
460 	}
461 	if (refcount_dec_and_test(&dd->count)) {
462 		dm_put_table_device(t->md, d);
463 		list_del(&dd->list);
464 		kfree(dd);
465 	}
466 
467 unlock_ret:
468 	up_write(&t->devices_lock);
469 }
470 EXPORT_SYMBOL(dm_put_device);
471 
472 /*
473  * Checks to see if the target joins onto the end of the table.
474  */
475 static int adjoin(struct dm_table *t, struct dm_target *ti)
476 {
477 	struct dm_target *prev;
478 
479 	if (!t->num_targets)
480 		return !ti->begin;
481 
482 	prev = &t->targets[t->num_targets - 1];
483 	return (ti->begin == (prev->begin + prev->len));
484 }
485 
486 /*
487  * Used to dynamically allocate the arg array.
488  *
489  * We do the first allocation with GFP_NOIO because dm-mpath and dm-thin must
490  * process messages even if some device is suspended. These messages have a
491  * small fixed number of arguments.
492  *
493  * On the other hand, dm-switch needs to process bulk data using messages and
494  * excessive use of GFP_NOIO could cause trouble.
495  */
496 static char **realloc_argv(unsigned int *size, char **old_argv)
497 {
498 	char **argv;
499 	unsigned int new_size;
500 	gfp_t gfp;
501 
502 	if (*size) {
503 		new_size = *size * 2;
504 		gfp = GFP_KERNEL;
505 	} else {
506 		new_size = 8;
507 		gfp = GFP_NOIO;
508 	}
509 	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
510 	if (argv) {
511 		if (old_argv)
512 			memcpy(argv, old_argv, *size * sizeof(*argv));
513 		*size = new_size;
514 	}
514 
515 	kfree(old_argv);
516 	return argv;
517 }
518 
519 /*
520  * Destructively splits up the argument list to pass to ctr.
521  */
522 int dm_split_args(int *argc, char ***argvp, char *input)
523 {
524 	char *start, *end = input, *out, **argv = NULL;
525 	unsigned int array_size = 0;
526 
527 	*argc = 0;
528 
529 	if (!input) {
530 		*argvp = NULL;
531 		return 0;
532 	}
533 
534 	argv = realloc_argv(&array_size, argv);
535 	if (!argv)
536 		return -ENOMEM;
537 
538 	while (1) {
539 		/* Skip whitespace */
540 		start = skip_spaces(end);
541 
542 		if (!*start)
543 			break;	/* success, we hit the end */
544 
545 		/* 'out' is used to remove any backslash escapes */
546 		end = out = start;
547 		while (*end) {
548 			/* Everything apart from '\0' can be quoted */
549 			if (*end == '\\' && *(end + 1)) {
550 				*out++ = *(end + 1);
551 				end += 2;
552 				continue;
553 			}
554 
555 			if (isspace(*end))
556 				break;	/* end of token */
557 
558 			*out++ = *end++;
559 		}
560 
561 		/* have we already filled the array? */
562 		if ((*argc + 1) > array_size) {
563 			argv = realloc_argv(&array_size, argv);
564 			if (!argv)
565 				return -ENOMEM;
566 		}
567 
568 		/* we know this is whitespace */
569 		if (*end)
570 			end++;
571 
572 		/* terminate the string and put it in the array */
573 		*out = '\0';
574 		argv[*argc] = start;
575 		(*argc)++;
576 	}
577 
578 	*argvp = argv;
579 	return 0;
580 }
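
/*
 * Example (illustrative): dm_split_args() turns the input "a b\ c  d" into
 * argc == 3 with argv == { "a", "b c", "d" }; the backslash keeps "b c" a
 * single argument.  The caller must kfree(argv) when done; the strings
 * themselves point into the (modified) input buffer.
 */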
581 
582 /*
583  * Impose necessary and sufficient conditions on a device's table such
584  * that any incoming bio which respects its logical_block_size can be
585  * processed successfully.  If it falls across the boundary between
586  * two or more targets, the size of each piece it gets split into must
587  * be compatible with the logical_block_size of the target processing it.
588  */
589 static int validate_hardware_logical_block_alignment(struct dm_table *t,
590 						     struct queue_limits *limits)
591 {
592 	/*
593 	 * This function uses arithmetic modulo the logical_block_size
594 	 * (in units of 512-byte sectors).
595 	 */
596 	unsigned short device_logical_block_size_sects =
597 		limits->logical_block_size >> SECTOR_SHIFT;
598 
599 	/*
600 	 * Offset of the start of the next table entry, mod logical_block_size.
601 	 */
602 	unsigned short next_target_start = 0;
603 
604 	/*
605 	 * Given an aligned bio that extends beyond the end of a
606 	 * target, how many sectors must the next target handle?
607 	 */
608 	unsigned short remaining = 0;
609 
610 	struct dm_target *ti;
611 	struct queue_limits ti_limits;
612 	unsigned int i;
613 
614 	/*
615 	 * Check each entry in the table in turn.
616 	 */
617 	for (i = 0; i < t->num_targets; i++) {
618 		ti = dm_table_get_target(t, i);
619 
620 		blk_set_stacking_limits(&ti_limits);
621 
622 		/* combine all target devices' limits */
623 		if (ti->type->iterate_devices)
624 			ti->type->iterate_devices(ti, dm_set_device_limits,
625 						  &ti_limits);
626 
627 		/*
628 		 * If the remaining sectors fall entirely within this
629 		 * table entry, are they compatible with its logical_block_size?
630 		 */
631 		if (remaining < ti->len &&
632 		    remaining & ((ti_limits.logical_block_size >>
633 				  SECTOR_SHIFT) - 1))
634 			break;	/* Error */
635 
636 		next_target_start =
637 		    (unsigned short) ((next_target_start + ti->len) &
638 				      (device_logical_block_size_sects - 1));
639 		remaining = next_target_start ?
640 		    device_logical_block_size_sects - next_target_start : 0;
641 	}
642 
643 	if (remaining) {
644 		DMERR("%s: table line %u (start sect %llu len %llu) "
645 		      "not aligned to h/w logical block size %u",
646 		      dm_device_name(t->md), i,
647 		      (unsigned long long) ti->begin,
648 		      (unsigned long long) ti->len,
649 		      limits->logical_block_size);
650 		return -EINVAL;
651 	}
652 
653 	return 0;
654 }
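
/*
 * Worked example (illustrative): with a 4KiB logical block size
 * (device_logical_block_size_sects == 8), a first target of len == 20
 * sectors leaves next_target_start == 4 and remaining == 4.  The next
 * target must then accept those 4 sectors with a compatible smaller
 * logical block size (e.g. 512 bytes or 2KiB), otherwise the table is
 * rejected.
 */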
655 
656 int dm_table_add_target(struct dm_table *t, const char *type,
657 			sector_t start, sector_t len, char *params)
658 {
659 	int r = -EINVAL, argc;
660 	char **argv;
661 	struct dm_target *ti;
662 
663 	if (t->singleton) {
664 		DMERR("%s: target type %s must appear alone in table",
665 		      dm_device_name(t->md), t->targets->type->name);
666 		return -EINVAL;
667 	}
668 
669 	BUG_ON(t->num_targets >= t->num_allocated);
670 
671 	ti = t->targets + t->num_targets;
672 	memset(ti, 0, sizeof(*ti));
673 
674 	if (!len) {
675 		DMERR("%s: zero-length target", dm_device_name(t->md));
676 		return -EINVAL;
677 	}
678 
679 	ti->type = dm_get_target_type(type);
680 	if (!ti->type) {
681 		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
682 		return -EINVAL;
683 	}
684 
685 	if (dm_target_needs_singleton(ti->type)) {
686 		if (t->num_targets) {
687 			ti->error = "singleton target type must appear alone in table";
688 			goto bad;
689 		}
690 		t->singleton = true;
691 	}
692 
693 	if (dm_target_always_writeable(ti->type) &&
694 	    !(t->mode & BLK_OPEN_WRITE)) {
695 		ti->error = "target type may not be included in a read-only table";
696 		goto bad;
697 	}
698 
699 	if (t->immutable_target_type) {
700 		if (t->immutable_target_type != ti->type) {
701 			ti->error = "immutable target type cannot be mixed with other target types";
702 			goto bad;
703 		}
704 	} else if (dm_target_is_immutable(ti->type)) {
705 		if (t->num_targets) {
706 			ti->error = "immutable target type cannot be mixed with other target types";
707 			goto bad;
708 		}
709 		t->immutable_target_type = ti->type;
710 	}
711 
712 	ti->table = t;
713 	ti->begin = start;
714 	ti->len = len;
715 	ti->error = "Unknown error";
716 
717 	/*
718 	 * Does this target adjoin the previous one?
719 	 */
720 	if (!adjoin(t, ti)) {
721 		ti->error = "Gap in table";
722 		goto bad;
723 	}
724 
725 	r = dm_split_args(&argc, &argv, params);
726 	if (r) {
727 		ti->error = "couldn't split parameters";
728 		goto bad;
729 	}
730 
731 	r = ti->type->ctr(ti, argc, argv);
732 	kfree(argv);
733 	if (r)
734 		goto bad;
735 
736 	t->highs[t->num_targets++] = ti->begin + ti->len - 1;
737 
738 	if (!ti->num_discard_bios && ti->discards_supported)
739 		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
740 		       dm_device_name(t->md), type);
741 
742 	if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
743 		static_branch_enable(&swap_bios_enabled);
744 
745 	return 0;
746 
747  bad:
748 	DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r));
749 	dm_put_target_type(ti->type);
750 	return r;
751 }
752 
753 /*
754  * Target argument parsing helpers.
755  */
756 static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
757 			     unsigned int *value, char **error, unsigned int grouped)
758 {
759 	const char *arg_str = dm_shift_arg(arg_set);
760 	char dummy;
761 
762 	if (!arg_str ||
763 	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
764 	    (*value < arg->min) ||
765 	    (*value > arg->max) ||
766 	    (grouped && arg_set->argc < *value)) {
767 		*error = arg->error;
768 		return -EINVAL;
769 	}
770 
771 	return 0;
772 }
773 
774 int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
775 		unsigned int *value, char **error)
776 {
777 	return validate_next_arg(arg, arg_set, value, error, 0);
778 }
779 EXPORT_SYMBOL(dm_read_arg);
780 
781 int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
782 		      unsigned int *value, char **error)
783 {
784 	return validate_next_arg(arg, arg_set, value, error, 1);
785 }
786 EXPORT_SYMBOL(dm_read_arg_group);
787 
788 const char *dm_shift_arg(struct dm_arg_set *as)
789 {
790 	char *r;
791 
792 	if (as->argc) {
793 		as->argc--;
794 		r = *as->argv;
795 		as->argv++;
796 		return r;
797 	}
798 
799 	return NULL;
800 }
801 EXPORT_SYMBOL(dm_shift_arg);
802 
803 void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
804 {
805 	BUG_ON(as->argc < num_args);
806 	as->argc -= num_args;
807 	as->argv += num_args;
808 }
809 EXPORT_SYMBOL(dm_consume_args);
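
/*
 * Typical argument-parsing pattern in a target constructor (a sketch; the
 * bounds and error string are illustrative):
 *
 *	static const struct dm_arg _args[] = {
 *		{ 0, 4, "Invalid number of feature args" },
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned int num_features;
 *
 *	if (dm_read_arg_group(_args, &as, &num_features, &ti->error))
 *		return -EINVAL;
 *
 * dm_read_arg_group() additionally checks that at least num_features
 * arguments remain in the set, which can then be consumed with
 * dm_shift_arg() or dm_consume_args().
 */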
810 
811 static bool __table_type_bio_based(enum dm_queue_mode table_type)
812 {
813 	return (table_type == DM_TYPE_BIO_BASED ||
814 		table_type == DM_TYPE_DAX_BIO_BASED);
815 }
816 
817 static bool __table_type_request_based(enum dm_queue_mode table_type)
818 {
819 	return table_type == DM_TYPE_REQUEST_BASED;
820 }
821 
822 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
823 {
824 	t->type = type;
825 }
826 EXPORT_SYMBOL_GPL(dm_table_set_type);
827 
828 /* validate the dax capability of the target device span */
829 static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
830 			sector_t start, sector_t len, void *data)
831 {
832 	if (dev->dax_dev)
833 		return false;
834 
835 	DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
836 	return true;
837 }
838 
839 /* Check that all devices support synchronous DAX */
840 static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
841 					      sector_t start, sector_t len, void *data)
842 {
843 	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
844 }
845 
846 static bool dm_table_supports_dax(struct dm_table *t,
847 				  iterate_devices_callout_fn iterate_fn)
848 {
849 	/* Ensure that all targets support DAX. */
850 	for (unsigned int i = 0; i < t->num_targets; i++) {
851 		struct dm_target *ti = dm_table_get_target(t, i);
852 
853 		if (!ti->type->direct_access)
854 			return false;
855 
856 		if (dm_target_is_wildcard(ti->type) ||
857 		    !ti->type->iterate_devices ||
858 		    ti->type->iterate_devices(ti, iterate_fn, NULL))
859 			return false;
860 	}
861 
862 	return true;
863 }
864 
865 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
866 				  sector_t start, sector_t len, void *data)
867 {
868 	struct block_device *bdev = dev->bdev;
869 	struct request_queue *q = bdev_get_queue(bdev);
870 
871 	/* request-based cannot stack on partitions! */
872 	if (bdev_is_partition(bdev))
873 		return false;
874 
875 	return queue_is_mq(q);
876 }
877 
878 static int dm_table_determine_type(struct dm_table *t)
879 {
880 	unsigned int bio_based = 0, request_based = 0, hybrid = 0;
881 	struct dm_target *ti;
882 	struct list_head *devices = dm_table_get_devices(t);
883 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
884 
885 	if (t->type != DM_TYPE_NONE) {
886 		/* target already set the table's type */
887 		if (t->type == DM_TYPE_BIO_BASED) {
888 			/* possibly upgrade to a variant of bio-based */
889 			goto verify_bio_based;
890 		}
891 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
892 		goto verify_rq_based;
893 	}
894 
895 	for (unsigned int i = 0; i < t->num_targets; i++) {
896 		ti = dm_table_get_target(t, i);
897 		if (dm_target_hybrid(ti))
898 			hybrid = 1;
899 		else if (dm_target_request_based(ti))
900 			request_based = 1;
901 		else
902 			bio_based = 1;
903 
904 		if (bio_based && request_based) {
905 			DMERR("Inconsistent table: different target types can't be mixed up");
906 			return -EINVAL;
907 		}
908 	}
909 
910 	if (hybrid && !bio_based && !request_based) {
911 		/*
912 		 * The targets can work either way.
913 		 * Determine the type from the live device.
914 		 * Default to bio-based if device is new.
915 		 */
916 		if (__table_type_request_based(live_md_type))
917 			request_based = 1;
918 		else
919 			bio_based = 1;
920 	}
921 
922 	if (bio_based) {
923 verify_bio_based:
924 		/* We must use this table as bio-based */
925 		t->type = DM_TYPE_BIO_BASED;
926 		if (dm_table_supports_dax(t, device_not_dax_capable) ||
927 		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
928 			t->type = DM_TYPE_DAX_BIO_BASED;
929 		}
930 		return 0;
931 	}
932 
933 	BUG_ON(!request_based); /* No targets in this table */
934 
935 	t->type = DM_TYPE_REQUEST_BASED;
936 
937 verify_rq_based:
938 	/*
939 	 * Request-based dm supports only tables that have a single target now.
940 	 * To support multiple targets, request splitting support is needed,
941 	 * and that needs lots of changes in the block-layer.
942 	 * (e.g. request completion process for partial completion.)
943 	 */
944 	if (t->num_targets > 1) {
945 		DMERR("request-based DM doesn't support multiple targets");
946 		return -EINVAL;
947 	}
948 
949 	if (list_empty(devices)) {
950 		int srcu_idx;
951 		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
952 
953 		/* inherit live table's type */
954 		if (live_table)
955 			t->type = live_table->type;
956 		dm_put_live_table(t->md, srcu_idx);
957 		return 0;
958 	}
959 
960 	ti = dm_table_get_immutable_target(t);
961 	if (!ti) {
962 		DMERR("table load rejected: immutable target is required");
963 		return -EINVAL;
964 	} else if (ti->max_io_len) {
965 		DMERR("table load rejected: immutable target that splits IO is not supported");
966 		return -EINVAL;
967 	}
968 
969 	/* Non-request-stackable devices can't be used for request-based dm */
970 	if (!ti->type->iterate_devices ||
971 	    !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
972 		DMERR("table load rejected: including non-request-stackable devices");
973 		return -EINVAL;
974 	}
975 
976 	return 0;
977 }
978 
979 enum dm_queue_mode dm_table_get_type(struct dm_table *t)
980 {
981 	return t->type;
982 }
983 
984 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
985 {
986 	return t->immutable_target_type;
987 }
988 
989 struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
990 {
991 	/* Immutable target is implicitly a singleton */
992 	if (t->num_targets > 1 ||
993 	    !dm_target_is_immutable(t->targets[0].type))
994 		return NULL;
995 
996 	return t->targets;
997 }
998 
999 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
1000 {
1001 	for (unsigned int i = 0; i < t->num_targets; i++) {
1002 		struct dm_target *ti = dm_table_get_target(t, i);
1003 
1004 		if (dm_target_is_wildcard(ti->type))
1005 			return ti;
1006 	}
1007 
1008 	return NULL;
1009 }
1010 
1011 bool dm_table_bio_based(struct dm_table *t)
1012 {
1013 	return __table_type_bio_based(dm_table_get_type(t));
1014 }
1015 
1016 bool dm_table_request_based(struct dm_table *t)
1017 {
1018 	return __table_type_request_based(dm_table_get_type(t));
1019 }
1020 
1021 static bool dm_table_supports_poll(struct dm_table *t);
1022 
1023 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1024 {
1025 	enum dm_queue_mode type = dm_table_get_type(t);
1026 	unsigned int per_io_data_size = 0, front_pad, io_front_pad;
1027 	unsigned int min_pool_size = 0, pool_size;
1028 	struct dm_md_mempools *pools;
1029 
1030 	if (unlikely(type == DM_TYPE_NONE)) {
1031 		DMERR("no table type is set, can't allocate mempools");
1032 		return -EINVAL;
1033 	}
1034 
1035 	pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
1036 	if (!pools)
1037 		return -ENOMEM;
1038 
1039 	if (type == DM_TYPE_REQUEST_BASED) {
1040 		pool_size = dm_get_reserved_rq_based_ios();
1041 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
1042 		goto init_bs;
1043 	}
1044 
1045 	for (unsigned int i = 0; i < t->num_targets; i++) {
1046 		struct dm_target *ti = dm_table_get_target(t, i);
1047 
1048 		per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1049 		min_pool_size = max(min_pool_size, ti->num_flush_bios);
1050 	}
1051 	pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
1052 	front_pad = roundup(per_io_data_size,
1053 		__alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
1054 
1055 	io_front_pad = roundup(per_io_data_size,
1056 		__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
1057 	if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
1058 			dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
1059 		goto out_free_pools;
1060 	if (t->integrity_supported &&
1061 	    bioset_integrity_create(&pools->io_bs, pool_size))
1062 		goto out_free_pools;
1063 init_bs:
1064 	if (bioset_init(&pools->bs, pool_size, front_pad, 0))
1065 		goto out_free_pools;
1066 	if (t->integrity_supported &&
1067 	    bioset_integrity_create(&pools->bs, pool_size))
1068 		goto out_free_pools;
1069 
1070 	t->mempools = pools;
1071 	return 0;
1072 
1073 out_free_pools:
1074 	dm_free_md_mempools(pools);
1075 	return -ENOMEM;
1076 }
1077 
1078 static int setup_indexes(struct dm_table *t)
1079 {
1080 	int i;
1081 	unsigned int total = 0;
1082 	sector_t *indexes;
1083 
1084 	/* allocate the space for *all* the indexes */
1085 	for (i = t->depth - 2; i >= 0; i--) {
1086 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1087 		total += t->counts[i];
1088 	}
1089 
1090 	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
1091 	if (!indexes)
1092 		return -ENOMEM;
1093 
1094 	/* set up internal nodes, bottom-up */
1095 	for (i = t->depth - 2; i >= 0; i--) {
1096 		t->index[i] = indexes;
1097 		indexes += (KEYS_PER_NODE * t->counts[i]);
1098 		setup_btree_index(i, t);
1099 	}
1100 
1101 	return 0;
1102 }
1103 
1104 /*
1105  * Builds the btree to index the map.
1106  */
1107 static int dm_table_build_index(struct dm_table *t)
1108 {
1109 	int r = 0;
1110 	unsigned int leaf_nodes;
1111 
1112 	/* how many indexes will the btree have? */
1113 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1114 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1115 
1116 	/* leaf layer has already been set up */
1117 	t->counts[t->depth - 1] = leaf_nodes;
1118 	t->index[t->depth - 1] = t->highs;
1119 
1120 	if (t->depth >= 2)
1121 		r = setup_indexes(t);
1122 
1123 	return r;
1124 }
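
/*
 * Worked example (illustrative; assumes KEYS_PER_NODE == 8 and therefore
 * CHILDREN_PER_NODE == 9): for a table with 1000 targets,
 *
 *	leaf_nodes   = dm_div_up(1000, 8) == 125	-> t->counts[3]
 *	t->depth     = 1 + int_log(125, 9) == 4
 *	t->counts[2] = dm_div_up(125, 9) == 14
 *	t->counts[1] = dm_div_up(14, 9)  == 2
 *	t->counts[0] = dm_div_up(2, 9)   == 1
 *
 * so setup_indexes() allocates 1 + 2 + 14 == 17 internal nodes.
 */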
1125 
1126 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1127 
1128 struct dm_crypto_profile {
1129 	struct blk_crypto_profile profile;
1130 	struct mapped_device *md;
1131 };
1132 
1133 static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
1134 				     sector_t start, sector_t len, void *data)
1135 {
1136 	const struct blk_crypto_key *key = data;
1137 
1138 	blk_crypto_evict_key(dev->bdev, key);
1139 	return 0;
1140 }
1141 
1142 /*
1143  * When an inline encryption key is evicted from a device-mapper device, evict
1144  * it from all the underlying devices.
1145  */
1146 static int dm_keyslot_evict(struct blk_crypto_profile *profile,
1147 			    const struct blk_crypto_key *key, unsigned int slot)
1148 {
1149 	struct mapped_device *md =
1150 		container_of(profile, struct dm_crypto_profile, profile)->md;
1151 	struct dm_table *t;
1152 	int srcu_idx;
1153 
1154 	t = dm_get_live_table(md, &srcu_idx);
1155 	if (!t)
1156 		return 0;
1157 
1158 	for (unsigned int i = 0; i < t->num_targets; i++) {
1159 		struct dm_target *ti = dm_table_get_target(t, i);
1160 
1161 		if (!ti->type->iterate_devices)
1162 			continue;
1163 		ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
1164 					  (void *)key);
1165 	}
1166 
1167 	dm_put_live_table(md, srcu_idx);
1168 	return 0;
1169 }
1170 
1171 static int
1172 device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
1173 				     sector_t start, sector_t len, void *data)
1174 {
1175 	struct blk_crypto_profile *parent = data;
1176 	struct blk_crypto_profile *child =
1177 		bdev_get_queue(dev->bdev)->crypto_profile;
1178 
1179 	blk_crypto_intersect_capabilities(parent, child);
1180 	return 0;
1181 }
1182 
1183 void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
1184 {
1185 	struct dm_crypto_profile *dmcp = container_of(profile,
1186 						      struct dm_crypto_profile,
1187 						      profile);
1188 
1189 	if (!profile)
1190 		return;
1191 
1192 	blk_crypto_profile_destroy(profile);
1193 	kfree(dmcp);
1194 }
1195 
1196 static void dm_table_destroy_crypto_profile(struct dm_table *t)
1197 {
1198 	dm_destroy_crypto_profile(t->crypto_profile);
1199 	t->crypto_profile = NULL;
1200 }
1201 
1202 /*
1203  * Constructs and initializes t->crypto_profile with a crypto profile that
1204  * represents the common set of crypto capabilities of the devices described by
1205  * the dm_table.  However, if the constructed crypto profile doesn't support all
1206  * crypto capabilities that are supported by the current mapped_device, it
1207  * returns an error instead, since we don't support removing crypto capabilities
1208  * on table changes.  Finally, if the constructed crypto profile is "empty" (has
1209  * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
1210  */
1211 static int dm_table_construct_crypto_profile(struct dm_table *t)
1212 {
1213 	struct dm_crypto_profile *dmcp;
1214 	struct blk_crypto_profile *profile;
1215 	unsigned int i;
1216 	bool empty_profile = true;
1217 
1218 	dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
1219 	if (!dmcp)
1220 		return -ENOMEM;
1221 	dmcp->md = t->md;
1222 
1223 	profile = &dmcp->profile;
1224 	blk_crypto_profile_init(profile, 0);
1225 	profile->ll_ops.keyslot_evict = dm_keyslot_evict;
1226 	profile->max_dun_bytes_supported = UINT_MAX;
1227 	memset(profile->modes_supported, 0xFF,
1228 	       sizeof(profile->modes_supported));
1229 
1230 	for (i = 0; i < t->num_targets; i++) {
1231 		struct dm_target *ti = dm_table_get_target(t, i);
1232 
1233 		if (!dm_target_passes_crypto(ti->type)) {
1234 			blk_crypto_intersect_capabilities(profile, NULL);
1235 			break;
1236 		}
1237 		if (!ti->type->iterate_devices)
1238 			continue;
1239 		ti->type->iterate_devices(ti,
1240 					  device_intersect_crypto_capabilities,
1241 					  profile);
1242 	}
1243 
1244 	if (t->md->queue &&
1245 	    !blk_crypto_has_capabilities(profile,
1246 					 t->md->queue->crypto_profile)) {
1247 		DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
1248 		dm_destroy_crypto_profile(profile);
1249 		return -EINVAL;
1250 	}
1251 
1252 	/*
1253 	 * If the new profile doesn't actually support any crypto capabilities,
1254 	 * we may as well represent it with a NULL profile.
1255 	 */
1256 	for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
1257 		if (profile->modes_supported[i]) {
1258 			empty_profile = false;
1259 			break;
1260 		}
1261 	}
1262 
1263 	if (empty_profile) {
1264 		dm_destroy_crypto_profile(profile);
1265 		profile = NULL;
1266 	}
1267 
1268 	/*
1269 	 * t->crypto_profile is only set temporarily while the table is being
1270 	 * set up, and it gets set to NULL after the profile has been
1271 	 * transferred to the request_queue.
1272 	 */
1273 	t->crypto_profile = profile;
1274 
1275 	return 0;
1276 }
1277 
1278 static void dm_update_crypto_profile(struct request_queue *q,
1279 				     struct dm_table *t)
1280 {
1281 	if (!t->crypto_profile)
1282 		return;
1283 
1284 	/* Make the crypto profile less restrictive. */
1285 	if (!q->crypto_profile) {
1286 		blk_crypto_register(t->crypto_profile, q);
1287 	} else {
1288 		blk_crypto_update_capabilities(q->crypto_profile,
1289 					       t->crypto_profile);
1290 		dm_destroy_crypto_profile(t->crypto_profile);
1291 	}
1292 	t->crypto_profile = NULL;
1293 }
1294 
1295 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1296 
1297 static int dm_table_construct_crypto_profile(struct dm_table *t)
1298 {
1299 	return 0;
1300 }
1301 
1302 void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
1303 {
1304 }
1305 
1306 static void dm_table_destroy_crypto_profile(struct dm_table *t)
1307 {
1308 }
1309 
1310 static void dm_update_crypto_profile(struct request_queue *q,
1311 				     struct dm_table *t)
1312 {
1313 }
1314 
1315 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1316 
1317 /*
1318  * Prepares the table for use by determining its type, building the
1319  * indices, constructing the crypto profile, and allocating mempools.
1320  */
1321 int dm_table_complete(struct dm_table *t)
1322 {
1323 	int r;
1324 
1325 	r = dm_table_determine_type(t);
1326 	if (r) {
1327 		DMERR("unable to determine table type");
1328 		return r;
1329 	}
1330 
1331 	r = dm_table_build_index(t);
1332 	if (r) {
1333 		DMERR("unable to build btrees");
1334 		return r;
1335 	}
1336 
1337 	r = dm_table_construct_crypto_profile(t);
1338 	if (r) {
1339 		DMERR("could not construct crypto profile.");
1340 		return r;
1341 	}
1342 
1343 	r = dm_table_alloc_md_mempools(t, t->md);
1344 	if (r)
1345 		DMERR("unable to allocate mempools");
1346 
1347 	return r;
1348 }
1349 
1350 static DEFINE_MUTEX(_event_lock);
1351 void dm_table_event_callback(struct dm_table *t,
1352 			     void (*fn)(void *), void *context)
1353 {
1354 	mutex_lock(&_event_lock);
1355 	t->event_fn = fn;
1356 	t->event_context = context;
1357 	mutex_unlock(&_event_lock);
1358 }
1359 
1360 void dm_table_event(struct dm_table *t)
1361 {
1362 	mutex_lock(&_event_lock);
1363 	if (t->event_fn)
1364 		t->event_fn(t->event_context);
1365 	mutex_unlock(&_event_lock);
1366 }
1367 EXPORT_SYMBOL(dm_table_event);
1368 
1369 inline sector_t dm_table_get_size(struct dm_table *t)
1370 {
1371 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1372 }
1373 EXPORT_SYMBOL(dm_table_get_size);
1374 
1375 /*
1376  * Search the btree for the correct target.
1377  *
1378  * Caller should check returned pointer for NULL
1379  * to trap I/O beyond end of device.
1380  */
1381 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1382 {
1383 	unsigned int l, n = 0, k = 0;
1384 	sector_t *node;
1385 
1386 	if (unlikely(sector >= dm_table_get_size(t)))
1387 		return NULL;
1388 
1389 	for (l = 0; l < t->depth; l++) {
1390 		n = get_child(n, k);
1391 		node = get_node(t, l, n);
1392 
1393 		for (k = 0; k < KEYS_PER_NODE; k++)
1394 			if (node[k] >= sector)
1395 				break;
1396 	}
1397 
1398 	return &t->targets[(KEYS_PER_NODE * n) + k];
1399 }
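
/*
 * Lookup sketch (illustrative): for a depth-2 tree the walk scans the single
 * root node for the first key >= sector (index k), descends to leaf
 * n = get_child(0, k), repeats the scan there, and returns
 * &t->targets[KEYS_PER_NODE * n + k].  Each level costs at most
 * KEYS_PER_NODE comparisons, so a lookup is O(depth * KEYS_PER_NODE).
 */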
1400 
1401 static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
1402 				   sector_t start, sector_t len, void *data)
1403 {
1404 	struct request_queue *q = bdev_get_queue(dev->bdev);
1405 
1406 	return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
1407 }
1408 
1409 /*
1410  * type->iterate_devices() should be called when a sanity check needs to
1411  * iterate and check all underlying data devices.  iterate_devices() will
1412  * iterate all underlying data devices until it encounters a non-zero return
1413  * code, whether that is returned by the input iterate_devices_callout_fn or
1414  * by iterate_devices() itself internally.
1415  *
1416  * For some target types (e.g. dm-stripe), one call of iterate_devices() may
1417  * iterate multiple underlying devices internally, in which case a non-zero
1418  * return code from iterate_devices_callout_fn will stop the iteration
1419  * early.
1420  *
1421  * Cases requiring _any_ underlying device to support some kind of attribute
1422  * should use an iteration structure like dm_table_any_dev_attr(), or call
1423  * it directly.  Here @func should handle the semantics of positive examples,
1424  * e.g. being capable of something.
1425  *
1426  * Cases requiring _all_ underlying devices to support some kind of attribute
1427  * should use an iteration structure like dm_table_supports_nowait() or
1428  * dm_table_supports_discards().  Alternatively, introduce dm_table_all_devs_attr()
1429  * with an @anti_func that handles the semantics of counter examples, e.g. not
1430  * capable of something: return !dm_table_any_dev_attr(t, anti_func, data);
1431  */
1432 static bool dm_table_any_dev_attr(struct dm_table *t,
1433 				  iterate_devices_callout_fn func, void *data)
1434 {
1435 	for (unsigned int i = 0; i < t->num_targets; i++) {
1436 		struct dm_target *ti = dm_table_get_target(t, i);
1437 
1438 		if (ti->type->iterate_devices &&
1439 		    ti->type->iterate_devices(ti, func, data))
1440 			return true;
1441 	}
1442 
1443 	return false;
1444 }
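
/*
 * Example of the two idioms described above (both appear later in this
 * file): the "any" form is called directly, e.g.
 * dm_table_any_dev_attr(t, device_is_rotational, NULL), while the
 * "all devices support X" form iterates a negated callout such as
 * device_not_discard_capable and treats any non-zero return as
 * "not all capable".
 */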
1445 
1446 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1447 			sector_t start, sector_t len, void *data)
1448 {
1449 	unsigned int *num_devices = data;
1450 
1451 	(*num_devices)++;
1452 
1453 	return 0;
1454 }
1455 
1456 static bool dm_table_supports_poll(struct dm_table *t)
1457 {
1458 	for (unsigned int i = 0; i < t->num_targets; i++) {
1459 		struct dm_target *ti = dm_table_get_target(t, i);
1460 
1461 		if (!ti->type->iterate_devices ||
1462 		    ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
1463 			return false;
1464 	}
1465 
1466 	return true;
1467 }
1468 
1469 /*
1470  * Check whether a table has no data devices attached using each
1471  * target's iterate_devices method.
1472  * Returns false if the result is unknown because a target doesn't
1473  * support iterate_devices.
1474  */
1475 bool dm_table_has_no_data_devices(struct dm_table *t)
1476 {
1477 	for (unsigned int i = 0; i < t->num_targets; i++) {
1478 		struct dm_target *ti = dm_table_get_target(t, i);
1479 		unsigned int num_devices = 0;
1480 
1481 		if (!ti->type->iterate_devices)
1482 			return false;
1483 
1484 		ti->type->iterate_devices(ti, count_device, &num_devices);
1485 		if (num_devices)
1486 			return false;
1487 	}
1488 
1489 	return true;
1490 }
1491 
1492 static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
1493 			    sector_t start, sector_t len, void *data)
1494 {
1495 	bool *zoned = data;
1496 
1497 	return bdev_is_zoned(dev->bdev) != *zoned;
1498 }
1499 
1500 static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1501 				 sector_t start, sector_t len, void *data)
1502 {
1503 	return bdev_is_zoned(dev->bdev);
1504 }
1505 
1506 /*
1507  * Check the device zoned model based on the target feature flag. If the target
1508  * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
1509  * also accepted but all devices must have the same zoned model. If the target
1510  * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
1511  * zoned model with all zoned devices having the same zone size.
1512  */
1513 static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
1514 {
1515 	for (unsigned int i = 0; i < t->num_targets; i++) {
1516 		struct dm_target *ti = dm_table_get_target(t, i);
1517 
1518 		/*
1519 		 * For the wildcard target (dm-error), if we do not have a
1520 		 * backing device, the result must depend on the zoned model
1521 		 * check, like for any other target.  So check directly whether
1522 		 * the target's backing device is zoned, since iterate_devices()
1523 		 * returns "false" when dm-error was set up without one.
1524 		 * dm-error was set without a backing device.
1525 		 */
1526 		if (dm_target_is_wildcard(ti->type) &&
1527 		    !ti->type->iterate_devices(ti, device_is_zoned_model, NULL))
1528 			return false;
1529 
1530 		if (dm_target_supports_zoned_hm(ti->type)) {
1531 			if (!ti->type->iterate_devices ||
1532 			    ti->type->iterate_devices(ti, device_not_zoned,
1533 						      &zoned))
1534 				return false;
1535 		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
1536 			if (zoned)
1537 				return false;
1538 		}
1539 	}
1540 
1541 	return true;
1542 }
1543 
1544 static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1545 					   sector_t start, sector_t len, void *data)
1546 {
1547 	unsigned int *zone_sectors = data;
1548 
1549 	if (!bdev_is_zoned(dev->bdev))
1550 		return 0;
1551 	return bdev_zone_sectors(dev->bdev) != *zone_sectors;
1552 }
1553 
1554 /*
1555  * Check consistency of zoned model and zone sectors across all targets. For
1556  * zone sectors, if the destination device is a zoned block device, it shall
1557  * have the specified zone_sectors.
1558  */
1559 static int validate_hardware_zoned(struct dm_table *t, bool zoned,
1560 				   unsigned int zone_sectors)
1561 {
1562 	if (!zoned)
1563 		return 0;
1564 
1565 	if (!dm_table_supports_zoned(t, zoned)) {
1566 		DMERR("%s: zoned model is not consistent across all devices",
1567 		      dm_device_name(t->md));
1568 		return -EINVAL;
1569 	}
1570 
1571 	/* Check zone size validity and compatibility */
1572 	if (!zone_sectors || !is_power_of_2(zone_sectors))
1573 		return -EINVAL;
1574 
1575 	if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
1576 		DMERR("%s: zone sectors is not consistent across all zoned devices",
1577 		      dm_device_name(t->md));
1578 		return -EINVAL;
1579 	}
1580 
1581 	return 0;
1582 }
1583 
1584 /*
1585  * Establish the new table's queue_limits and validate them.
1586  */
1587 int dm_calculate_queue_limits(struct dm_table *t,
1588 			      struct queue_limits *limits)
1589 {
1590 	struct queue_limits ti_limits;
1591 	unsigned int zone_sectors = 0;
1592 	bool zoned = false;
1593 
1594 	blk_set_stacking_limits(limits);
1595 
1596 	t->integrity_supported = true;
1597 	for (unsigned int i = 0; i < t->num_targets; i++) {
1598 		struct dm_target *ti = dm_table_get_target(t, i);
1599 
1600 		if (!dm_target_passes_integrity(ti->type))
1601 			t->integrity_supported = false;
1602 	}
1603 
1604 	for (unsigned int i = 0; i < t->num_targets; i++) {
1605 		struct dm_target *ti = dm_table_get_target(t, i);
1606 
1607 		blk_set_stacking_limits(&ti_limits);
1608 
1609 		if (!ti->type->iterate_devices) {
1610 			/* Set I/O hints portion of queue limits */
1611 			if (ti->type->io_hints)
1612 				ti->type->io_hints(ti, &ti_limits);
1613 			goto combine_limits;
1614 		}
1615 
1616 		/*
1617 		 * Combine queue limits of all the devices this target uses.
1618 		 */
1619 		ti->type->iterate_devices(ti, dm_set_device_limits,
1620 					  &ti_limits);
1621 
1622 		if (!zoned && ti_limits.zoned) {
1623 			/*
1624 			 * After stacking all limits, validate all devices
1625 			 * in table support this zoned model and zone sectors.
1626 			 */
1627 			zoned = ti_limits.zoned;
1628 			zone_sectors = ti_limits.chunk_sectors;
1629 		}
1630 
1631 		/* Set I/O hints portion of queue limits */
1632 		if (ti->type->io_hints)
1633 			ti->type->io_hints(ti, &ti_limits);
1634 
1635 		/*
1636 		 * Check each device area is consistent with the target's
1637 		 * overall queue limits.
1638 		 */
1639 		if (ti->type->iterate_devices(ti, device_area_is_invalid,
1640 					      &ti_limits))
1641 			return -EINVAL;
1642 
1643 combine_limits:
1644 		/*
1645 		 * Merge this target's queue limits into the overall limits
1646 		 * for the table.
1647 		 */
1648 		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1649 			DMWARN("%s: adding target device (start sect %llu len %llu) "
1650 			       "caused an alignment inconsistency",
1651 			       dm_device_name(t->md),
1652 			       (unsigned long long) ti->begin,
1653 			       (unsigned long long) ti->len);
1654 
1655 		if (t->integrity_supported ||
1656 		    dm_target_has_integrity(ti->type)) {
1657 			if (!queue_limits_stack_integrity(limits, &ti_limits)) {
1658 				DMWARN("%s: adding target device (start sect %llu len %llu) "
1659 				       "disabled integrity support due to incompatibility",
1660 				       dm_device_name(t->md),
1661 				       (unsigned long long) ti->begin,
1662 				       (unsigned long long) ti->len);
1663 				t->integrity_supported = false;
1664 			}
1665 		}
1666 	}
1667 
1668 	/*
1669 	 * Verify that the zoned model and zone sectors, as determined before
1670 	 * any .io_hints override, are the same across all devices in the table.
1671 	 * - this is especially relevant if .io_hints is emulating a disk-managed
1672 	 *   zoned model on host-managed zoned block devices.
1673 	 * BUT...
1674 	 */
1675 	if (limits->zoned) {
1676 		/*
1677 		 * ...IF the above limits stacking determined a zoned model
1678 		 * validate that all of the table's devices conform to it.
1679 		 */
1680 		zoned = limits->zoned;
1681 		zone_sectors = limits->chunk_sectors;
1682 	}
1683 	if (validate_hardware_zoned(t, zoned, zone_sectors))
1684 		return -EINVAL;
1685 
1686 	return validate_hardware_logical_block_alignment(t, limits);
1687 }
1688 
1689 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1690 				sector_t start, sector_t len, void *data)
1691 {
1692 	unsigned long flush = (unsigned long) data;
1693 	struct request_queue *q = bdev_get_queue(dev->bdev);
1694 
1695 	return (q->queue_flags & flush);
1696 }
1697 
1698 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1699 {
1700 	/*
1701 	 * Require at least one underlying device to support flushes.
1702 	 * t->devices includes internal dm devices such as mirror logs
1703 	 * so we need to use iterate_devices here, which targets
1704 	 * supporting flushes must provide.
1705 	 */
1706 	for (unsigned int i = 0; i < t->num_targets; i++) {
1707 		struct dm_target *ti = dm_table_get_target(t, i);
1708 
1709 		if (!ti->num_flush_bios)
1710 			continue;
1711 
1712 		if (ti->flush_supported)
1713 			return true;
1714 
1715 		if (ti->type->iterate_devices &&
1716 		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1717 			return true;
1718 	}
1719 
1720 	return false;
1721 }
1722 
1723 static int device_dax_write_cache_enabled(struct dm_target *ti,
1724 					  struct dm_dev *dev, sector_t start,
1725 					  sector_t len, void *data)
1726 {
1727 	struct dax_device *dax_dev = dev->dax_dev;
1728 
1729 	if (!dax_dev)
1730 		return false;
1731 
1732 	if (dax_write_cache_enabled(dax_dev))
1733 		return true;
1734 	return false;
1735 }
1736 
1737 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
1738 				sector_t start, sector_t len, void *data)
1739 {
1740 	return !bdev_nonrot(dev->bdev);
1741 }
1742 
1743 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1744 			     sector_t start, sector_t len, void *data)
1745 {
1746 	struct request_queue *q = bdev_get_queue(dev->bdev);
1747 
1748 	return !blk_queue_add_random(q);
1749 }
1750 
1751 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1752 					   sector_t start, sector_t len, void *data)
1753 {
1754 	struct request_queue *q = bdev_get_queue(dev->bdev);
1755 
1756 	return !q->limits.max_write_zeroes_sectors;
1757 }
1758 
1759 static bool dm_table_supports_write_zeroes(struct dm_table *t)
1760 {
1761 	for (unsigned int i = 0; i < t->num_targets; i++) {
1762 		struct dm_target *ti = dm_table_get_target(t, i);
1763 
1764 		if (!ti->num_write_zeroes_bios)
1765 			return false;
1766 
1767 		if (!ti->type->iterate_devices ||
1768 		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1769 			return false;
1770 	}
1771 
1772 	return true;
1773 }
1774 
1775 static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
1776 				     sector_t start, sector_t len, void *data)
1777 {
1778 	return !bdev_nowait(dev->bdev);
1779 }
1780 
1781 static bool dm_table_supports_nowait(struct dm_table *t)
1782 {
1783 	for (unsigned int i = 0; i < t->num_targets; i++) {
1784 		struct dm_target *ti = dm_table_get_target(t, i);
1785 
1786 		if (!dm_target_supports_nowait(ti->type))
1787 			return false;
1788 
1789 		if (!ti->type->iterate_devices ||
1790 		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
1791 			return false;
1792 	}
1793 
1794 	return true;
1795 }
1796 
1797 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1798 				      sector_t start, sector_t len, void *data)
1799 {
1800 	return !bdev_max_discard_sectors(dev->bdev);
1801 }
1802 
1803 static bool dm_table_supports_discards(struct dm_table *t)
1804 {
1805 	for (unsigned int i = 0; i < t->num_targets; i++) {
1806 		struct dm_target *ti = dm_table_get_target(t, i);
1807 
1808 		if (!ti->num_discard_bios)
1809 			return false;
1810 
1811 		/*
1812 		 * Either the target provides discard support (as implied by setting
1813 		 * 'discards_supported') or it relies on _all_ data devices having
1814 		 * discard support.
1815 		 */
1816 		if (!ti->discards_supported &&
1817 		    (!ti->type->iterate_devices ||
1818 		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1819 			return false;
1820 	}
1821 
1822 	return true;
1823 }
1824 
1825 static int device_not_secure_erase_capable(struct dm_target *ti,
1826 					   struct dm_dev *dev, sector_t start,
1827 					   sector_t len, void *data)
1828 {
1829 	return !bdev_max_secure_erase_sectors(dev->bdev);
1830 }
1831 
1832 static bool dm_table_supports_secure_erase(struct dm_table *t)
1833 {
1834 	for (unsigned int i = 0; i < t->num_targets; i++) {
1835 		struct dm_target *ti = dm_table_get_target(t, i);
1836 
1837 		if (!ti->num_secure_erase_bios)
1838 			return false;
1839 
1840 		if (!ti->type->iterate_devices ||
1841 		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1842 			return false;
1843 	}
1844 
1845 	return true;
1846 }
1847 
1848 static int device_requires_stable_pages(struct dm_target *ti,
1849 					struct dm_dev *dev, sector_t start,
1850 					sector_t len, void *data)
1851 {
1852 	return bdev_stable_writes(dev->bdev);
1853 }
1854 
1855 int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1856 			      struct queue_limits *limits)
1857 {
1858 	bool wc = false, fua = false;
1859 	int r;
1860 
1861 	if (dm_table_supports_nowait(t))
1862 		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
1863 	else
1864 		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
1865 
1866 	if (!dm_table_supports_discards(t)) {
1867 		limits->max_hw_discard_sectors = 0;
1868 		limits->discard_granularity = 0;
1869 		limits->discard_alignment = 0;
1870 		limits->discard_misaligned = 0;
1871 	}
1872 
1873 	if (!dm_table_supports_write_zeroes(t))
1874 		limits->max_write_zeroes_sectors = 0;
1875 
1876 	if (!dm_table_supports_secure_erase(t))
1877 		limits->max_secure_erase_sectors = 0;
1878 
1879 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
1880 		wc = true;
1881 		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
1882 			fua = true;
1883 	}
1884 	blk_queue_write_cache(q, wc, fua);
1885 
1886 	if (dm_table_supports_dax(t, device_not_dax_capable)) {
1887 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
1888 		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
1889 			set_dax_synchronous(t->md->dax_dev);
1890 	} else
1891 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
1892 
1893 	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
1894 		dax_write_cache(t->md->dax_dev, true);
1895 
1896 	/* Ensure that all underlying devices are non-rotational. */
1897 	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
1898 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
1899 	else
1900 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
1901 
1902 	/*
1903 	 * Some devices don't use blk_integrity but still want stable pages
1904 	 * because they do their own checksumming.
1905 	 * If any underlying device requires stable pages, a table must require
1906 	 * them as well.  Only targets that support iterate_devices are considered:
1907 	 * we don't want error, zero, etc. to require stable pages.
1908 	 */
1909 	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
1910 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
1911 	else
1912 		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
1913 
1914 	/*
1915 	 * Determine whether or not this queue's I/O timings contribute
1916 	 * to the entropy pool.  Only request-based targets use this.
1917 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
1918 	 * have it set.
1919 	 */
1920 	if (blk_queue_add_random(q) &&
1921 	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
1922 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
1923 
1924 	/*
1925 	 * For a zoned target, set up the zone-related queue attributes and,
1926 	 * if needed, the resources for zone append emulation.
1927 	 */
1928 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
1929 		r = dm_set_zones_restrictions(t, q, limits);
1930 		if (r)
1931 			return r;
1932 	}
1933 
1934 	r = queue_limits_set(q, limits);
1935 	if (r)
1936 		return r;
1937 
1938 	dm_update_crypto_profile(q, t);
1939 
1940 	/*
1941 	 * The check for a request-based device is left to
1942 	 * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
1943 	 *
1944 	 * For a bio-based device, only set QUEUE_FLAG_POLL when all
1945 	 * underlying devices support polling.
1946 	 */
1947 	if (__table_type_bio_based(t->type)) {
1948 		if (dm_table_supports_poll(t))
1949 			blk_queue_flag_set(QUEUE_FLAG_POLL, q);
1950 		else
1951 			blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
1952 	}
1953 
1954 	return 0;
1955 }
1956 
1957 struct list_head *dm_table_get_devices(struct dm_table *t)
1958 {
1959 	return &t->devices;
1960 }
1961 
1962 blk_mode_t dm_table_get_mode(struct dm_table *t)
1963 {
1964 	return t->mode;
1965 }
1966 EXPORT_SYMBOL(dm_table_get_mode);
1967 
1968 enum suspend_mode {
1969 	PRESUSPEND,
1970 	PRESUSPEND_UNDO,
1971 	POSTSUSPEND,
1972 };
1973 
1974 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
1975 {
1976 	lockdep_assert_held(&t->md->suspend_lock);
1977 
1978 	for (unsigned int i = 0; i < t->num_targets; i++) {
1979 		struct dm_target *ti = dm_table_get_target(t, i);
1980 
1981 		switch (mode) {
1982 		case PRESUSPEND:
1983 			if (ti->type->presuspend)
1984 				ti->type->presuspend(ti);
1985 			break;
1986 		case PRESUSPEND_UNDO:
1987 			if (ti->type->presuspend_undo)
1988 				ti->type->presuspend_undo(ti);
1989 			break;
1990 		case POSTSUSPEND:
1991 			if (ti->type->postsuspend)
1992 				ti->type->postsuspend(ti);
1993 			break;
1994 		}
1995 	}
1996 }
1997 
1998 void dm_table_presuspend_targets(struct dm_table *t)
1999 {
2000 	if (!t)
2001 		return;
2002 
2003 	suspend_targets(t, PRESUSPEND);
2004 }
2005 
2006 void dm_table_presuspend_undo_targets(struct dm_table *t)
2007 {
2008 	if (!t)
2009 		return;
2010 
2011 	suspend_targets(t, PRESUSPEND_UNDO);
2012 }
2013 
2014 void dm_table_postsuspend_targets(struct dm_table *t)
2015 {
2016 	if (!t)
2017 		return;
2018 
2019 	suspend_targets(t, POSTSUSPEND);
2020 }
2021 
2022 int dm_table_resume_targets(struct dm_table *t)
2023 {
2024 	unsigned int i;
2025 	int r = 0;
2026 
2027 	lockdep_assert_held(&t->md->suspend_lock);
2028 
2029 	for (i = 0; i < t->num_targets; i++) {
2030 		struct dm_target *ti = dm_table_get_target(t, i);
2031 
2032 		if (!ti->type->preresume)
2033 			continue;
2034 
2035 		r = ti->type->preresume(ti);
2036 		if (r) {
2037 			DMERR("%s: %s: preresume failed, error = %d",
2038 			      dm_device_name(t->md), ti->type->name, r);
2039 			return r;
2040 		}
2041 	}
2042 
2043 	for (i = 0; i < t->num_targets; i++) {
2044 		struct dm_target *ti = dm_table_get_target(t, i);
2045 
2046 		if (ti->type->resume)
2047 			ti->type->resume(ti);
2048 	}
2049 
2050 	return 0;
2051 }
2052 
2053 struct mapped_device *dm_table_get_md(struct dm_table *t)
2054 {
2055 	return t->md;
2056 }
2057 EXPORT_SYMBOL(dm_table_get_md);
2058 
2059 const char *dm_table_device_name(struct dm_table *t)
2060 {
2061 	return dm_device_name(t->md);
2062 }
2063 EXPORT_SYMBOL_GPL(dm_table_device_name);
2064 
2065 void dm_table_run_md_queue_async(struct dm_table *t)
2066 {
2067 	if (!dm_table_request_based(t))
2068 		return;
2069 
2070 	if (t->md->queue)
2071 		blk_mq_run_hw_queues(t->md->queue, true);
2072 }
2073 EXPORT_SYMBOL(dm_table_run_md_queue_async);
2074 
2075