xref: /linux/drivers/md/dm-table.c (revision c5aec4c76af1a2d89ee2f2d4d5463b2ad2d85de5)
1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm.h"
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/mutex.h>
19 #include <linux/delay.h>
20 #include <linux/atomic.h>
21 
22 #define DM_MSG_PREFIX "table"
23 
24 #define MAX_DEPTH 16
25 #define NODE_SIZE L1_CACHE_BYTES
26 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
27 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
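/*
 * A btree node fills one L1 cache line.  On a common configuration with a
 * 64-byte cache line and an 8-byte sector_t this works out to 8 keys and
 * 9 children per node; both values are derived from the defines above
 * rather than tuned independently.
 */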
28 
29 struct dm_table {
30 	struct mapped_device *md;
31 	unsigned type;
32 
33 	/* btree table */
34 	unsigned int depth;
35 	unsigned int counts[MAX_DEPTH];	/* in nodes */
36 	sector_t *index[MAX_DEPTH];
37 
38 	unsigned int num_targets;
39 	unsigned int num_allocated;
40 	sector_t *highs;
41 	struct dm_target *targets;
42 
43 	struct target_type *immutable_target_type;
44 	unsigned integrity_supported:1;
45 	unsigned singleton:1;
46 
47 	/*
48 	 * Indicates the rw permissions for the new logical
49 	 * device.  This should be a combination of FMODE_READ
50 	 * and FMODE_WRITE.
51 	 */
52 	fmode_t mode;
53 
54 	/* a list of devices used by this table */
55 	struct list_head devices;
56 
57 	/* events get handed up using this callback */
58 	void (*event_fn)(void *);
59 	void *event_context;
60 
61 	struct dm_md_mempools *mempools;
62 
63 	struct list_head target_callbacks;
64 };
65 
66 /*
67  * Similar to ceiling(log_base(n))
68  */
69 static unsigned int int_log(unsigned int n, unsigned int base)
70 {
71 	int result = 0;
72 
73 	while (n > 1) {
74 		n = dm_div_up(n, base);
75 		result++;
76 	}
77 
78 	return result;
79 }
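/*
 * Example: int_log(1000, 9) iterates 1000 -> 112 -> 13 -> 2 -> 1 and
 * returns 4, matching ceil(log_9(1000)).
 */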
80 
81 /*
82  * Calculate the index of the child node of the n'th node k'th key.
83  */
84 static inline unsigned int get_child(unsigned int n, unsigned int k)
85 {
86 	return (n * CHILDREN_PER_NODE) + k;
87 }
88 
89 /*
90  * Return the n'th node of level l from table t.
91  */
92 static inline sector_t *get_node(struct dm_table *t,
93 				 unsigned int l, unsigned int n)
94 {
95 	return t->index[l] + (n * KEYS_PER_NODE);
96 }
97 
98 /*
99  * Return the highest key that you could look up from the n'th
100  * node on level l of the btree.
101  */
102 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
103 {
104 	for (; l < t->depth - 1; l++)
105 		n = get_child(n, CHILDREN_PER_NODE - 1);
106 
107 	if (n >= t->counts[l])
108 		return (sector_t) - 1;
109 
110 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
111 }
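/*
 * Note: high() descends through the right-most child at each level, so the
 * value returned is the last key stored in the right-most leaf below node n;
 * (sector_t) -1 is returned for a subtree that has no leaf nodes at all.
 */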
112 
113 /*
114  * Fills in a level of the btree based on the highs of the level
115  * below it.
116  */
117 static int setup_btree_index(unsigned int l, struct dm_table *t)
118 {
119 	unsigned int n, k;
120 	sector_t *node;
121 
122 	for (n = 0U; n < t->counts[l]; n++) {
123 		node = get_node(t, l, n);
124 
125 		for (k = 0U; k < KEYS_PER_NODE; k++)
126 			node[k] = high(t, l + 1, get_child(n, k));
127 	}
128 
129 	return 0;
130 }
131 
132 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
133 {
134 	unsigned long size;
135 	void *addr;
136 
137 	/*
138 	 * Check that we're not going to overflow.
139 	 */
140 	if (nmemb > (ULONG_MAX / elem_size))
141 		return NULL;
142 
143 	size = nmemb * elem_size;
144 	addr = vzalloc(size);
145 
146 	return addr;
147 }
148 EXPORT_SYMBOL(dm_vcalloc);
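/*
 * The overflow check above matters on 32-bit kernels: for example,
 * dm_vcalloc(0x20000000, 16) returns NULL instead of silently wrapping
 * the size calculation and allocating a too-small buffer.
 */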
149 
150 /*
151  * The highs and targets arrays are managed as dynamic arrays during a
152  * table load.
153  */
154 static int alloc_targets(struct dm_table *t, unsigned int num)
155 {
156 	sector_t *n_highs;
157 	struct dm_target *n_targets;
158 
159 	/*
160 	 * Allocate both the target array and offset array at once.
161 	 * Append an empty entry to catch sectors beyond the end of
162 	 * the device.
163 	 */
164 	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
165 					  sizeof(sector_t));
166 	if (!n_highs)
167 		return -ENOMEM;
168 
169 	n_targets = (struct dm_target *) (n_highs + num);
170 
171 	memset(n_highs, -1, sizeof(*n_highs) * num);
172 	vfree(t->highs);
173 
174 	t->num_allocated = num;
175 	t->highs = n_highs;
176 	t->targets = n_targets;
177 
178 	return 0;
179 }
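/*
 * Layout note: the highs and targets arrays share one vzalloc'd block
 * (n_targets starts immediately after the num sector_t highs), so the
 * single vfree(t->highs) in dm_table_destroy() releases both arrays.
 */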
180 
181 int dm_table_create(struct dm_table **result, fmode_t mode,
182 		    unsigned num_targets, struct mapped_device *md)
183 {
184 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
185 
186 	if (!t)
187 		return -ENOMEM;
188 
189 	INIT_LIST_HEAD(&t->devices);
190 	INIT_LIST_HEAD(&t->target_callbacks);
191 
192 	if (!num_targets)
193 		num_targets = KEYS_PER_NODE;
194 
195 	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
196 
197 	if (!num_targets) {
198 		kfree(t);
199 		return -ENOMEM;
200 	}
201 
202 	if (alloc_targets(t, num_targets)) {
203 		kfree(t);
204 		return -ENOMEM;
205 	}
206 
207 	t->mode = mode;
208 	t->md = md;
209 	*result = t;
210 	return 0;
211 }
212 
213 static void free_devices(struct list_head *devices)
214 {
215 	struct list_head *tmp, *next;
216 
217 	list_for_each_safe(tmp, next, devices) {
218 		struct dm_dev_internal *dd =
219 		    list_entry(tmp, struct dm_dev_internal, list);
220 		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
221 		       dd->dm_dev.name);
222 		kfree(dd);
223 	}
224 }
225 
226 void dm_table_destroy(struct dm_table *t)
227 {
228 	unsigned int i;
229 
230 	if (!t)
231 		return;
232 
233 	/* free the indexes */
234 	if (t->depth >= 2)
235 		vfree(t->index[t->depth - 2]);
236 
237 	/* free the targets */
238 	for (i = 0; i < t->num_targets; i++) {
239 		struct dm_target *tgt = t->targets + i;
240 
241 		if (tgt->type->dtr)
242 			tgt->type->dtr(tgt);
243 
244 		dm_put_target_type(tgt->type);
245 	}
246 
247 	vfree(t->highs);
248 
249 	/* free the device list */
250 	free_devices(&t->devices);
251 
252 	dm_free_md_mempools(t->mempools);
253 
254 	kfree(t);
255 }
256 
257 /*
258  * See if we've already got a device in the list.
259  */
260 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
261 {
262 	struct dm_dev_internal *dd;
263 
264 	list_for_each_entry (dd, l, list)
265 		if (dd->dm_dev.bdev->bd_dev == dev)
266 			return dd;
267 
268 	return NULL;
269 }
270 
271 /*
272  * Open a device so we can use it as a map destination.
273  */
274 static int open_dev(struct dm_dev_internal *d, dev_t dev,
275 		    struct mapped_device *md)
276 {
277 	static char *_claim_ptr = "I belong to device-mapper";
278 	struct block_device *bdev;
279 
280 	int r;
281 
282 	BUG_ON(d->dm_dev.bdev);
283 
284 	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
285 	if (IS_ERR(bdev))
286 		return PTR_ERR(bdev);
287 
288 	r = bd_link_disk_holder(bdev, dm_disk(md));
289 	if (r) {
290 		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
291 		return r;
292 	}
293 
294 	d->dm_dev.bdev = bdev;
295 	return 0;
296 }
297 
298 /*
299  * Close a device that we've been using.
300  */
301 static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
302 {
303 	if (!d->dm_dev.bdev)
304 		return;
305 
306 	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
307 	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
308 	d->dm_dev.bdev = NULL;
309 }
310 
311 /*
312  * If possible, this checks whether an area of a destination device is invalid.
313  */
314 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
315 				  sector_t start, sector_t len, void *data)
316 {
317 	struct request_queue *q;
318 	struct queue_limits *limits = data;
319 	struct block_device *bdev = dev->bdev;
320 	sector_t dev_size =
321 		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
322 	unsigned short logical_block_size_sectors =
323 		limits->logical_block_size >> SECTOR_SHIFT;
324 	char b[BDEVNAME_SIZE];
325 
326 	/*
327 	 * Some devices exist without request functions,
328 	 * such as loop devices not yet bound to backing files.
329 	 * Forbid the use of such devices.
330 	 */
331 	q = bdev_get_queue(bdev);
332 	if (!q || !q->make_request_fn) {
333 		DMWARN("%s: %s is not yet initialised: "
334 		       "start=%llu, len=%llu, dev_size=%llu",
335 		       dm_device_name(ti->table->md), bdevname(bdev, b),
336 		       (unsigned long long)start,
337 		       (unsigned long long)len,
338 		       (unsigned long long)dev_size);
339 		return 1;
340 	}
341 
342 	if (!dev_size)
343 		return 0;
344 
345 	if ((start >= dev_size) || (start + len > dev_size)) {
346 		DMWARN("%s: %s too small for target: "
347 		       "start=%llu, len=%llu, dev_size=%llu",
348 		       dm_device_name(ti->table->md), bdevname(bdev, b),
349 		       (unsigned long long)start,
350 		       (unsigned long long)len,
351 		       (unsigned long long)dev_size);
352 		return 1;
353 	}
354 
355 	if (logical_block_size_sectors <= 1)
356 		return 0;
357 
358 	if (start & (logical_block_size_sectors - 1)) {
359 		DMWARN("%s: start=%llu not aligned to h/w "
360 		       "logical block size %u of %s",
361 		       dm_device_name(ti->table->md),
362 		       (unsigned long long)start,
363 		       limits->logical_block_size, bdevname(bdev, b));
364 		return 1;
365 	}
366 
367 	if (len & (logical_block_size_sectors - 1)) {
368 		DMWARN("%s: len=%llu not aligned to h/w "
369 		       "logical block size %u of %s",
370 		       dm_device_name(ti->table->md),
371 		       (unsigned long long)len,
372 		       limits->logical_block_size, bdevname(bdev, b));
373 		return 1;
374 	}
375 
376 	return 0;
377 }
378 
379 /*
380  * This upgrades the mode on an already open dm_dev, being
381  * careful to leave things as they were if we fail to reopen the
382  * device and not to touch the existing bdev field in case
383  * it is accessed concurrently inside dm_table_any_congested().
384  */
385 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
386 			struct mapped_device *md)
387 {
388 	int r;
389 	struct dm_dev_internal dd_new, dd_old;
390 
391 	dd_new = dd_old = *dd;
392 
393 	dd_new.dm_dev.mode |= new_mode;
394 	dd_new.dm_dev.bdev = NULL;
395 
396 	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
397 	if (r)
398 		return r;
399 
400 	dd->dm_dev.mode |= new_mode;
401 	close_dev(&dd_old, md);
402 
403 	return 0;
404 }
405 
406 /*
407  * Add a device to the list, or just increment the usage count if
408  * it's already present.
409  */
410 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
411 		  struct dm_dev **result)
412 {
413 	int r;
414 	dev_t uninitialized_var(dev);
415 	struct dm_dev_internal *dd;
416 	unsigned int major, minor;
417 	struct dm_table *t = ti->table;
418 	char dummy;
419 
420 	BUG_ON(!t);
421 
422 	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
423 		/* Extract the major/minor numbers */
424 		dev = MKDEV(major, minor);
425 		if (MAJOR(dev) != major || MINOR(dev) != minor)
426 			return -EOVERFLOW;
427 	} else {
428 		/* convert the path to a device */
429 		struct block_device *bdev = lookup_bdev(path);
430 
431 		if (IS_ERR(bdev))
432 			return PTR_ERR(bdev);
433 		dev = bdev->bd_dev;
434 		bdput(bdev);
435 	}
436 
437 	dd = find_device(&t->devices, dev);
438 	if (!dd) {
439 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
440 		if (!dd)
441 			return -ENOMEM;
442 
443 		dd->dm_dev.mode = mode;
444 		dd->dm_dev.bdev = NULL;
445 
446 		if ((r = open_dev(dd, dev, t->md))) {
447 			kfree(dd);
448 			return r;
449 		}
450 
451 		format_dev_t(dd->dm_dev.name, dev);
452 
453 		atomic_set(&dd->count, 0);
454 		list_add(&dd->list, &t->devices);
455 
456 	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
457 		r = upgrade_mode(dd, mode, t->md);
458 		if (r)
459 			return r;
460 	}
461 	atomic_inc(&dd->count);
462 
463 	*result = &dd->dm_dev;
464 	return 0;
465 }
466 EXPORT_SYMBOL(dm_get_device);
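/*
 * Typical use from a target constructor (an illustrative sketch, not a
 * quote from any particular target):
 *
 *	struct dm_dev *dev;
 *	int r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
 *	if (r) {
 *		ti->error = "Device lookup failed";
 *		return r;
 *	}
 *	...
 *	dm_put_device(ti, dev);
 *
 * dm_put_device() is the matching release, called from the target's
 * destructor or on a later error path.  The device argument may be given
 * either as "major:minor" or as a path such as /dev/sdb1.
 */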
467 
468 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
469 			 sector_t start, sector_t len, void *data)
470 {
471 	struct queue_limits *limits = data;
472 	struct block_device *bdev = dev->bdev;
473 	struct request_queue *q = bdev_get_queue(bdev);
474 	char b[BDEVNAME_SIZE];
475 
476 	if (unlikely(!q)) {
477 		DMWARN("%s: Cannot set limits for nonexistent device %s",
478 		       dm_device_name(ti->table->md), bdevname(bdev, b));
479 		return 0;
480 	}
481 
482 	if (bdev_stack_limits(limits, bdev, start) < 0)
483 		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
484 		       "physical_block_size=%u, logical_block_size=%u, "
485 		       "alignment_offset=%u, start=%llu",
486 		       dm_device_name(ti->table->md), bdevname(bdev, b),
487 		       q->limits.physical_block_size,
488 		       q->limits.logical_block_size,
489 		       q->limits.alignment_offset,
490 		       (unsigned long long) start << SECTOR_SHIFT);
491 
492 	/*
493 	 * Check if merge fn is supported.
494 	 * If not we'll force DM to use PAGE_SIZE or
495 	 * smaller I/O, just to be safe.
496 	 */
497 	if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
498 		blk_limits_max_hw_sectors(limits,
499 					  (unsigned int) (PAGE_SIZE >> 9));
500 	return 0;
501 }
502 EXPORT_SYMBOL_GPL(dm_set_device_limits);
503 
504 /*
505  * Decrement a device's use count and remove it if necessary.
506  */
507 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
508 {
509 	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
510 						  dm_dev);
511 
512 	if (atomic_dec_and_test(&dd->count)) {
513 		close_dev(dd, ti->table->md);
514 		list_del(&dd->list);
515 		kfree(dd);
516 	}
517 }
518 EXPORT_SYMBOL(dm_put_device);
519 
520 /*
521  * Checks to see if the target joins onto the end of the table.
522  */
523 static int adjoin(struct dm_table *table, struct dm_target *ti)
524 {
525 	struct dm_target *prev;
526 
527 	if (!table->num_targets)
528 		return !ti->begin;
529 
530 	prev = &table->targets[table->num_targets - 1];
531 	return (ti->begin == (prev->begin + prev->len));
532 }
533 
534 /*
535  * Used to dynamically allocate the arg array.
536  *
537  * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
538  * process messages even if some device is suspended. These messages have a
539  * small fixed number of arguments.
540  *
541  * On the other hand, dm-switch needs to process bulk data using messages and
542  * excessive use of GFP_NOIO could cause trouble.
543  */
544 static char **realloc_argv(unsigned *array_size, char **old_argv)
545 {
546 	char **argv;
547 	unsigned new_size;
548 	gfp_t gfp;
549 
550 	if (*array_size) {
551 		new_size = *array_size * 2;
552 		gfp = GFP_KERNEL;
553 	} else {
554 		new_size = 8;
555 		gfp = GFP_NOIO;
556 	}
557 	argv = kmalloc(new_size * sizeof(*argv), gfp);
558 	if (argv) {
559 		memcpy(argv, old_argv, *array_size * sizeof(*argv));
560 		*array_size = new_size;
561 	}
562 
563 	kfree(old_argv);
564 	return argv;
565 }
566 
567 /*
568  * Destructively splits up the argument list to pass to ctr.
569  */
570 int dm_split_args(int *argc, char ***argvp, char *input)
571 {
572 	char *start, *end = input, *out, **argv = NULL;
573 	unsigned array_size = 0;
574 
575 	*argc = 0;
576 
577 	if (!input) {
578 		*argvp = NULL;
579 		return 0;
580 	}
581 
582 	argv = realloc_argv(&array_size, argv);
583 	if (!argv)
584 		return -ENOMEM;
585 
586 	while (1) {
587 		/* Skip whitespace */
588 		start = skip_spaces(end);
589 
590 		if (!*start)
591 			break;	/* success, we hit the end */
592 
593 		/* 'out' is used to strip backslash escape characters */
594 		end = out = start;
595 		while (*end) {
596 			/* Everything apart from '\0' can be quoted */
597 			if (*end == '\\' && *(end + 1)) {
598 				*out++ = *(end + 1);
599 				end += 2;
600 				continue;
601 			}
602 
603 			if (isspace(*end))
604 				break;	/* end of token */
605 
606 			*out++ = *end++;
607 		}
608 
609 		/* have we already filled the array ? */
610 		if ((*argc + 1) > array_size) {
611 			argv = realloc_argv(&array_size, argv);
612 			if (!argv)
613 				return -ENOMEM;
614 		}
615 
616 		/* we know this is whitespace */
617 		if (*end)
618 			end++;
619 
620 		/* terminate the string and put it in the array */
621 		*out = '\0';
622 		argv[*argc] = start;
623 		(*argc)++;
624 	}
625 
626 	*argvp = argv;
627 	return 0;
628 }
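/*
 * Example (illustrative): given the writable string "core 2 64 nosync",
 * dm_split_args() sets *argc to 4 and argv[] to the four NUL-terminated
 * words in place inside 'input'.  A backslash escapes the following
 * character, so "a\ b" becomes the single argument "a b".  The caller
 * kfree()s argv but must not free the individual words.
 */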
629 
630 /*
631  * Impose necessary and sufficient conditions on a device's table such
632  * that any incoming bio which respects its logical_block_size can be
633  * processed successfully.  If it falls across the boundary between
634  * two or more targets, the size of each piece it gets split into must
635  * be compatible with the logical_block_size of the target processing it.
636  */
637 static int validate_hardware_logical_block_alignment(struct dm_table *table,
638 						 struct queue_limits *limits)
639 {
640 	/*
641 	 * This function uses arithmetic modulo the logical_block_size
642 	 * (in units of 512-byte sectors).
643 	 */
644 	unsigned short device_logical_block_size_sects =
645 		limits->logical_block_size >> SECTOR_SHIFT;
646 
647 	/*
648 	 * Offset of the start of the next table entry, mod logical_block_size.
649 	 */
650 	unsigned short next_target_start = 0;
651 
652 	/*
653 	 * Given an aligned bio that extends beyond the end of a
654 	 * target, how many sectors must the next target handle?
655 	 */
656 	unsigned short remaining = 0;
657 
658 	struct dm_target *uninitialized_var(ti);
659 	struct queue_limits ti_limits;
660 	unsigned i = 0;
661 
662 	/*
663 	 * Check each entry in the table in turn.
664 	 */
665 	while (i < dm_table_get_num_targets(table)) {
666 		ti = dm_table_get_target(table, i++);
667 
668 		blk_set_stacking_limits(&ti_limits);
669 
670 		/* combine all target devices' limits */
671 		if (ti->type->iterate_devices)
672 			ti->type->iterate_devices(ti, dm_set_device_limits,
673 						  &ti_limits);
674 
675 		/*
676 		 * If the remaining sectors fall entirely within this
677 		 * table entry, are they compatible with its logical_block_size?
678 		 */
679 		if (remaining < ti->len &&
680 		    remaining & ((ti_limits.logical_block_size >>
681 				  SECTOR_SHIFT) - 1))
682 			break;	/* Error */
683 
684 		next_target_start =
685 		    (unsigned short) ((next_target_start + ti->len) &
686 				      (device_logical_block_size_sects - 1));
687 		remaining = next_target_start ?
688 		    device_logical_block_size_sects - next_target_start : 0;
689 	}
690 
691 	if (remaining) {
692 		DMWARN("%s: table line %u (start sect %llu len %llu) "
693 		       "not aligned to h/w logical block size %u",
694 		       dm_device_name(table->md), i,
695 		       (unsigned long long) ti->begin,
696 		       (unsigned long long) ti->len,
697 		       limits->logical_block_size);
698 		return -EINVAL;
699 	}
700 
701 	return 0;
702 }
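/*
 * Worked example (assuming a 4096-byte logical block size, i.e. 8 sectors):
 * if the first target covers sectors 0..1027, the next target starts 4
 * sectors into a logical block, so an aligned 4 KiB bio straddling the
 * boundary would leave a 4-sector fragment for the second target; the
 * function above rejects such a table with -EINVAL.
 */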
703 
704 int dm_table_add_target(struct dm_table *t, const char *type,
705 			sector_t start, sector_t len, char *params)
706 {
707 	int r = -EINVAL, argc;
708 	char **argv;
709 	struct dm_target *tgt;
710 
711 	if (t->singleton) {
712 		DMERR("%s: target type %s must appear alone in table",
713 		      dm_device_name(t->md), t->targets->type->name);
714 		return -EINVAL;
715 	}
716 
717 	BUG_ON(t->num_targets >= t->num_allocated);
718 
719 	tgt = t->targets + t->num_targets;
720 	memset(tgt, 0, sizeof(*tgt));
721 
722 	if (!len) {
723 		DMERR("%s: zero-length target", dm_device_name(t->md));
724 		return -EINVAL;
725 	}
726 
727 	tgt->type = dm_get_target_type(type);
728 	if (!tgt->type) {
729 		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
730 		      type);
731 		return -EINVAL;
732 	}
733 
734 	if (dm_target_needs_singleton(tgt->type)) {
735 		if (t->num_targets) {
736 			DMERR("%s: target type %s must appear alone in table",
737 			      dm_device_name(t->md), type);
738 			return -EINVAL;
739 		}
740 		t->singleton = 1;
741 	}
742 
743 	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
744 		DMERR("%s: target type %s may not be included in read-only tables",
745 		      dm_device_name(t->md), type);
746 		return -EINVAL;
747 	}
748 
749 	if (t->immutable_target_type) {
750 		if (t->immutable_target_type != tgt->type) {
751 			DMERR("%s: immutable target type %s cannot be mixed with other target types",
752 			      dm_device_name(t->md), t->immutable_target_type->name);
753 			return -EINVAL;
754 		}
755 	} else if (dm_target_is_immutable(tgt->type)) {
756 		if (t->num_targets) {
757 			DMERR("%s: immutable target type %s cannot be mixed with other target types",
758 			      dm_device_name(t->md), tgt->type->name);
759 			return -EINVAL;
760 		}
761 		t->immutable_target_type = tgt->type;
762 	}
763 
764 	tgt->table = t;
765 	tgt->begin = start;
766 	tgt->len = len;
767 	tgt->error = "Unknown error";
768 
769 	/*
770 	 * Does this target adjoin the previous one ?
771 	 */
772 	if (!adjoin(t, tgt)) {
773 		tgt->error = "Gap in table";
774 		r = -EINVAL;
775 		goto bad;
776 	}
777 
778 	r = dm_split_args(&argc, &argv, params);
779 	if (r) {
780 		tgt->error = "couldn't split parameters (insufficient memory)";
781 		goto bad;
782 	}
783 
784 	r = tgt->type->ctr(tgt, argc, argv);
785 	kfree(argv);
786 	if (r)
787 		goto bad;
788 
789 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
790 
791 	if (!tgt->num_discard_bios && tgt->discards_supported)
792 		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
793 		       dm_device_name(t->md), type);
794 
795 	return 0;
796 
797  bad:
798 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
799 	dm_put_target_type(tgt->type);
800 	return r;
801 }
802 
803 /*
804  * Target argument parsing helpers.
805  */
806 static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
807 			     unsigned *value, char **error, unsigned grouped)
808 {
809 	const char *arg_str = dm_shift_arg(arg_set);
810 	char dummy;
811 
812 	if (!arg_str ||
813 	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
814 	    (*value < arg->min) ||
815 	    (*value > arg->max) ||
816 	    (grouped && arg_set->argc < *value)) {
817 		*error = arg->error;
818 		return -EINVAL;
819 	}
820 
821 	return 0;
822 }
823 
824 int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
825 		unsigned *value, char **error)
826 {
827 	return validate_next_arg(arg, arg_set, value, error, 0);
828 }
829 EXPORT_SYMBOL(dm_read_arg);
830 
831 int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
832 		      unsigned *value, char **error)
833 {
834 	return validate_next_arg(arg, arg_set, value, error, 1);
835 }
836 EXPORT_SYMBOL(dm_read_arg_group);
837 
838 const char *dm_shift_arg(struct dm_arg_set *as)
839 {
840 	char *r;
841 
842 	if (as->argc) {
843 		as->argc--;
844 		r = *as->argv;
845 		as->argv++;
846 		return r;
847 	}
848 
849 	return NULL;
850 }
851 EXPORT_SYMBOL(dm_shift_arg);
852 
853 void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
854 {
855 	BUG_ON(as->argc < num_args);
856 	as->argc -= num_args;
857 	as->argv += num_args;
858 }
859 EXPORT_SYMBOL(dm_consume_args);
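/*
 * Illustrative use of the argument helpers above (the names and limits
 * here are made up for the example):
 *
 *	static struct dm_arg _args[] = {
 *		{0, 4, "Invalid number of feature arguments"},
 *	};
 *	unsigned nr_features;
 *	int r = dm_read_arg_group(_args, &as, &nr_features, &ti->error);
 *	if (r)
 *		return r;
 *
 * On success nr_features is within 0..4 and 'as' is guaranteed to still
 * hold at least that many arguments, which the target can then take with
 * dm_shift_arg() or dm_consume_args().
 */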
860 
861 static int dm_table_set_type(struct dm_table *t)
862 {
863 	unsigned i;
864 	unsigned bio_based = 0, request_based = 0, hybrid = 0;
865 	struct dm_target *tgt;
866 	struct dm_dev_internal *dd;
867 	struct list_head *devices;
868 	unsigned live_md_type;
869 
870 	for (i = 0; i < t->num_targets; i++) {
871 		tgt = t->targets + i;
872 		if (dm_target_hybrid(tgt))
873 			hybrid = 1;
874 		else if (dm_target_request_based(tgt))
875 			request_based = 1;
876 		else
877 			bio_based = 1;
878 
879 		if (bio_based && request_based) {
880 			DMWARN("Inconsistent table: different target types"
881 			       " can't be mixed up");
882 			return -EINVAL;
883 		}
884 	}
885 
886 	if (hybrid && !bio_based && !request_based) {
887 		/*
888 		 * The targets can work either way.
889 		 * Determine the type from the live device.
890 		 * Default to bio-based if device is new.
891 		 */
892 		live_md_type = dm_get_md_type(t->md);
893 		if (live_md_type == DM_TYPE_REQUEST_BASED)
894 			request_based = 1;
895 		else
896 			bio_based = 1;
897 	}
898 
899 	if (bio_based) {
900 		/* We must use this table as bio-based */
901 		t->type = DM_TYPE_BIO_BASED;
902 		return 0;
903 	}
904 
905 	BUG_ON(!request_based); /* No targets in this table */
906 
907 	/* Non-request-stackable devices can't be used for request-based dm */
908 	devices = dm_table_get_devices(t);
909 	list_for_each_entry(dd, devices, list) {
910 		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
911 			DMWARN("table load rejected: including"
912 			       " non-request-stackable devices");
913 			return -EINVAL;
914 		}
915 	}
916 
917 	/*
918 	 * Request-based dm currently supports only tables that have a single target.
919 	 * To support multiple targets, request splitting support is needed,
920 	 * and that needs lots of changes in the block-layer.
921 	 * (e.g. request completion process for partial completion.)
922 	 */
923 	if (t->num_targets > 1) {
924 		DMWARN("Request-based dm doesn't support multiple targets yet");
925 		return -EINVAL;
926 	}
927 
928 	t->type = DM_TYPE_REQUEST_BASED;
929 
930 	return 0;
931 }
932 
933 unsigned dm_table_get_type(struct dm_table *t)
934 {
935 	return t->type;
936 }
937 
938 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
939 {
940 	return t->immutable_target_type;
941 }
942 
943 bool dm_table_request_based(struct dm_table *t)
944 {
945 	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
946 }
947 
948 static int dm_table_alloc_md_mempools(struct dm_table *t)
949 {
950 	unsigned type = dm_table_get_type(t);
951 	unsigned per_bio_data_size = 0;
952 	struct dm_target *tgt;
953 	unsigned i;
954 
955 	if (unlikely(type == DM_TYPE_NONE)) {
956 		DMWARN("no table type is set, can't allocate mempools");
957 		return -EINVAL;
958 	}
959 
960 	if (type == DM_TYPE_BIO_BASED)
961 		for (i = 0; i < t->num_targets; i++) {
962 			tgt = t->targets + i;
963 			per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
964 		}
965 
966 	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
967 	if (!t->mempools)
968 		return -ENOMEM;
969 
970 	return 0;
971 }
972 
973 void dm_table_free_md_mempools(struct dm_table *t)
974 {
975 	dm_free_md_mempools(t->mempools);
976 	t->mempools = NULL;
977 }
978 
979 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
980 {
981 	return t->mempools;
982 }
983 
984 static int setup_indexes(struct dm_table *t)
985 {
986 	int i;
987 	unsigned int total = 0;
988 	sector_t *indexes;
989 
990 	/* allocate the space for *all* the indexes */
991 	for (i = t->depth - 2; i >= 0; i--) {
992 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
993 		total += t->counts[i];
994 	}
995 
996 	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
997 	if (!indexes)
998 		return -ENOMEM;
999 
1000 	/* set up internal nodes, bottom-up */
1001 	for (i = t->depth - 2; i >= 0; i--) {
1002 		t->index[i] = indexes;
1003 		indexes += (KEYS_PER_NODE * t->counts[i]);
1004 		setup_btree_index(i, t);
1005 	}
1006 
1007 	return 0;
1008 }
1009 
1010 /*
1011  * Builds the btree to index the map.
1012  */
1013 static int dm_table_build_index(struct dm_table *t)
1014 {
1015 	int r = 0;
1016 	unsigned int leaf_nodes;
1017 
1018 	/* how many indexes will the btree have ? */
1019 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1020 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1021 
1022 	/* leaf layer has already been set up */
1023 	t->counts[t->depth - 1] = leaf_nodes;
1024 	t->index[t->depth - 1] = t->highs;
1025 
1026 	if (t->depth >= 2)
1027 		r = setup_indexes(t);
1028 
1029 	return r;
1030 }
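/*
 * Example (assuming 8 keys and 9 children per node): a table with 1000
 * targets needs ceil(1000 / 8) = 125 leaf nodes, giving a depth of
 * 1 + int_log(125, 9) = 4; levels 0..2 are the internal index nodes that
 * setup_indexes() allocates and fills.
 */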
1031 
1032 /*
1033  * Get a disk whose integrity profile reflects the table's profile.
1034  * If %match_all is true, all devices' profiles must match.
1035  * If %match_all is false, all devices must at least have an
1036  * allocated integrity profile; but uninitialized is ok.
1037  * Returns NULL if integrity support was inconsistent or unavailable.
1038  */
1039 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
1040 						    bool match_all)
1041 {
1042 	struct list_head *devices = dm_table_get_devices(t);
1043 	struct dm_dev_internal *dd = NULL;
1044 	struct gendisk *prev_disk = NULL, *template_disk = NULL;
1045 
1046 	list_for_each_entry(dd, devices, list) {
1047 		template_disk = dd->dm_dev.bdev->bd_disk;
1048 		if (!blk_get_integrity(template_disk))
1049 			goto no_integrity;
1050 		if (!match_all && !blk_integrity_is_initialized(template_disk))
1051 			continue; /* skip uninitialized profiles */
1052 		else if (prev_disk &&
1053 			 blk_integrity_compare(prev_disk, template_disk) < 0)
1054 			goto no_integrity;
1055 		prev_disk = template_disk;
1056 	}
1057 
1058 	return template_disk;
1059 
1060 no_integrity:
1061 	if (prev_disk)
1062 		DMWARN("%s: integrity not set: %s and %s profile mismatch",
1063 		       dm_device_name(t->md),
1064 		       prev_disk->disk_name,
1065 		       template_disk->disk_name);
1066 	return NULL;
1067 }
1068 
1069 /*
1070  * Register the mapped device for blk_integrity support if
1071  * the underlying devices have an integrity profile.  But all devices
1072  * may not have matching profiles (checking all devices isn't reliable
1073  * during table load because this table may use other DM device(s) which
1074  * must be resumed before they will have an initialized integrity profile).
1075  * Stacked DM devices force a 2 stage integrity profile validation:
1076  * 1 - during load, validate all initialized integrity profiles match
1077  * 2 - during resume, validate all integrity profiles match
1078  */
1079 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
1080 {
1081 	struct gendisk *template_disk = NULL;
1082 
1083 	template_disk = dm_table_get_integrity_disk(t, false);
1084 	if (!template_disk)
1085 		return 0;
1086 
1087 	if (!blk_integrity_is_initialized(dm_disk(md))) {
1088 		t->integrity_supported = 1;
1089 		return blk_integrity_register(dm_disk(md), NULL);
1090 	}
1091 
1092 	/*
1093 	 * If the DM device already has an initialized integrity
1094 	 * profile, the new profile should not conflict with it.
1095 	 */
1096 	if (blk_integrity_is_initialized(template_disk) &&
1097 	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1098 		DMWARN("%s: conflict with existing integrity profile: "
1099 		       "%s profile mismatch",
1100 		       dm_device_name(t->md),
1101 		       template_disk->disk_name);
1102 		return 1;
1103 	}
1104 
1105 	/* Preserve existing initialized integrity profile */
1106 	t->integrity_supported = 1;
1107 	return 0;
1108 }
1109 
1110 /*
1111  * Prepares the table for use by building the indices,
1112  * setting the type, and allocating mempools.
1113  */
1114 int dm_table_complete(struct dm_table *t)
1115 {
1116 	int r;
1117 
1118 	r = dm_table_set_type(t);
1119 	if (r) {
1120 		DMERR("unable to set table type");
1121 		return r;
1122 	}
1123 
1124 	r = dm_table_build_index(t);
1125 	if (r) {
1126 		DMERR("unable to build btrees");
1127 		return r;
1128 	}
1129 
1130 	r = dm_table_prealloc_integrity(t, t->md);
1131 	if (r) {
1132 		DMERR("could not register integrity profile.");
1133 		return r;
1134 	}
1135 
1136 	r = dm_table_alloc_md_mempools(t);
1137 	if (r)
1138 		DMERR("unable to allocate mempools");
1139 
1140 	return r;
1141 }
1142 
1143 static DEFINE_MUTEX(_event_lock);
1144 void dm_table_event_callback(struct dm_table *t,
1145 			     void (*fn)(void *), void *context)
1146 {
1147 	mutex_lock(&_event_lock);
1148 	t->event_fn = fn;
1149 	t->event_context = context;
1150 	mutex_unlock(&_event_lock);
1151 }
1152 
1153 void dm_table_event(struct dm_table *t)
1154 {
1155 	/*
1156 	 * You can no longer call dm_table_event() from interrupt
1157 	 * context; use a bottom half instead.
1158 	 */
1159 	BUG_ON(in_interrupt());
1160 
1161 	mutex_lock(&_event_lock);
1162 	if (t->event_fn)
1163 		t->event_fn(t->event_context);
1164 	mutex_unlock(&_event_lock);
1165 }
1166 EXPORT_SYMBOL(dm_table_event);
1167 
1168 sector_t dm_table_get_size(struct dm_table *t)
1169 {
1170 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1171 }
1172 EXPORT_SYMBOL(dm_table_get_size);
1173 
1174 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1175 {
1176 	if (index >= t->num_targets)
1177 		return NULL;
1178 
1179 	return t->targets + index;
1180 }
1181 
1182 /*
1183  * Search the btree for the correct target.
1184  *
1185  * Caller should check returned pointer with dm_target_is_valid()
1186  * to trap I/O beyond end of device.
1187  */
1188 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1189 {
1190 	unsigned int l, n = 0, k = 0;
1191 	sector_t *node;
1192 
1193 	for (l = 0; l < t->depth; l++) {
1194 		n = get_child(n, k);
1195 		node = get_node(t, l, n);
1196 
1197 		for (k = 0; k < KEYS_PER_NODE; k++)
1198 			if (node[k] >= sector)
1199 				break;
1200 	}
1201 
1202 	return &t->targets[(KEYS_PER_NODE * n) + k];
1203 }
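/*
 * The lookup above visits one node per level, choosing at each node the
 * first key >= sector, so the target returned is the first one whose
 * 'high' boundary covers the sector.  A sector beyond the end of the
 * device lands on the empty sentinel entry past the last real target,
 * which is why callers must check the result with dm_target_is_valid().
 */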
1204 
1205 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1206 			sector_t start, sector_t len, void *data)
1207 {
1208 	unsigned *num_devices = data;
1209 
1210 	(*num_devices)++;
1211 
1212 	return 0;
1213 }
1214 
1215 /*
1216  * Check whether a table has no data devices attached using each
1217  * target's iterate_devices method.
1218  * Returns false if the result is unknown because a target doesn't
1219  * support iterate_devices.
1220  */
1221 bool dm_table_has_no_data_devices(struct dm_table *table)
1222 {
1223 	struct dm_target *uninitialized_var(ti);
1224 	unsigned i = 0, num_devices = 0;
1225 
1226 	while (i < dm_table_get_num_targets(table)) {
1227 		ti = dm_table_get_target(table, i++);
1228 
1229 		if (!ti->type->iterate_devices)
1230 			return false;
1231 
1232 		ti->type->iterate_devices(ti, count_device, &num_devices);
1233 		if (num_devices)
1234 			return false;
1235 	}
1236 
1237 	return true;
1238 }
1239 
1240 /*
1241  * Establish the new table's queue_limits and validate them.
1242  */
1243 int dm_calculate_queue_limits(struct dm_table *table,
1244 			      struct queue_limits *limits)
1245 {
1246 	struct dm_target *uninitialized_var(ti);
1247 	struct queue_limits ti_limits;
1248 	unsigned i = 0;
1249 
1250 	blk_set_stacking_limits(limits);
1251 
1252 	while (i < dm_table_get_num_targets(table)) {
1253 		blk_set_stacking_limits(&ti_limits);
1254 
1255 		ti = dm_table_get_target(table, i++);
1256 
1257 		if (!ti->type->iterate_devices)
1258 			goto combine_limits;
1259 
1260 		/*
1261 		 * Combine queue limits of all the devices this target uses.
1262 		 */
1263 		ti->type->iterate_devices(ti, dm_set_device_limits,
1264 					  &ti_limits);
1265 
1266 		/* Set I/O hints portion of queue limits */
1267 		if (ti->type->io_hints)
1268 			ti->type->io_hints(ti, &ti_limits);
1269 
1270 		/*
1271 		 * Check each device area is consistent with the target's
1272 		 * overall queue limits.
1273 		 */
1274 		if (ti->type->iterate_devices(ti, device_area_is_invalid,
1275 					      &ti_limits))
1276 			return -EINVAL;
1277 
1278 combine_limits:
1279 		/*
1280 		 * Merge this target's queue limits into the overall limits
1281 		 * for the table.
1282 		 */
1283 		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1284 			DMWARN("%s: adding target device "
1285 			       "(start sect %llu len %llu) "
1286 			       "caused an alignment inconsistency",
1287 			       dm_device_name(table->md),
1288 			       (unsigned long long) ti->begin,
1289 			       (unsigned long long) ti->len);
1290 	}
1291 
1292 	return validate_hardware_logical_block_alignment(table, limits);
1293 }
1294 
1295 /*
1296  * Set the integrity profile for this device if all devices used have
1297  * matching profiles.  We're quite deep in the resume path but still
1298  * don't know if all devices (particularly DM devices this device
1299  * may be stacked on) have matching profiles.  Even if the profiles
1300  * don't match we have no way to fail (to resume) at this point.
1301  */
1302 static void dm_table_set_integrity(struct dm_table *t)
1303 {
1304 	struct gendisk *template_disk = NULL;
1305 
1306 	if (!blk_get_integrity(dm_disk(t->md)))
1307 		return;
1308 
1309 	template_disk = dm_table_get_integrity_disk(t, true);
1310 	if (template_disk)
1311 		blk_integrity_register(dm_disk(t->md),
1312 				       blk_get_integrity(template_disk));
1313 	else if (blk_integrity_is_initialized(dm_disk(t->md)))
1314 		DMWARN("%s: device no longer has a valid integrity profile",
1315 		       dm_device_name(t->md));
1316 	else
1317 		DMWARN("%s: unable to establish an integrity profile",
1318 		       dm_device_name(t->md));
1319 }
1320 
1321 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1322 				sector_t start, sector_t len, void *data)
1323 {
1324 	unsigned flush = (*(unsigned *)data);
1325 	struct request_queue *q = bdev_get_queue(dev->bdev);
1326 
1327 	return q && (q->flush_flags & flush);
1328 }
1329 
1330 static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
1331 {
1332 	struct dm_target *ti;
1333 	unsigned i = 0;
1334 
1335 	/*
1336 	 * Require at least one underlying device to support flushes.
1337 	 * t->devices includes internal dm devices such as mirror logs
1338 	 * so we need to use iterate_devices here, which targets
1339 	 * supporting flushes must provide.
1340 	 */
1341 	while (i < dm_table_get_num_targets(t)) {
1342 		ti = dm_table_get_target(t, i++);
1343 
1344 		if (!ti->num_flush_bios)
1345 			continue;
1346 
1347 		if (ti->flush_supported)
1348 			return 1;
1349 
1350 		if (ti->type->iterate_devices &&
1351 		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
1352 			return 1;
1353 	}
1354 
1355 	return 0;
1356 }
1357 
1358 static bool dm_table_discard_zeroes_data(struct dm_table *t)
1359 {
1360 	struct dm_target *ti;
1361 	unsigned i = 0;
1362 
1363 	/* Ensure that all targets support discard_zeroes_data. */
1364 	while (i < dm_table_get_num_targets(t)) {
1365 		ti = dm_table_get_target(t, i++);
1366 
1367 		if (ti->discard_zeroes_data_unsupported)
1368 			return 0;
1369 	}
1370 
1371 	return 1;
1372 }
1373 
1374 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1375 			    sector_t start, sector_t len, void *data)
1376 {
1377 	struct request_queue *q = bdev_get_queue(dev->bdev);
1378 
1379 	return q && blk_queue_nonrot(q);
1380 }
1381 
1382 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1383 			     sector_t start, sector_t len, void *data)
1384 {
1385 	struct request_queue *q = bdev_get_queue(dev->bdev);
1386 
1387 	return q && !blk_queue_add_random(q);
1388 }
1389 
1390 static bool dm_table_all_devices_attribute(struct dm_table *t,
1391 					   iterate_devices_callout_fn func)
1392 {
1393 	struct dm_target *ti;
1394 	unsigned i = 0;
1395 
1396 	while (i < dm_table_get_num_targets(t)) {
1397 		ti = dm_table_get_target(t, i++);
1398 
1399 		if (!ti->type->iterate_devices ||
1400 		    !ti->type->iterate_devices(ti, func, NULL))
1401 			return 0;
1402 	}
1403 
1404 	return 1;
1405 }
1406 
1407 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
1408 					 sector_t start, sector_t len, void *data)
1409 {
1410 	struct request_queue *q = bdev_get_queue(dev->bdev);
1411 
1412 	return q && !q->limits.max_write_same_sectors;
1413 }
1414 
1415 static bool dm_table_supports_write_same(struct dm_table *t)
1416 {
1417 	struct dm_target *ti;
1418 	unsigned i = 0;
1419 
1420 	while (i < dm_table_get_num_targets(t)) {
1421 		ti = dm_table_get_target(t, i++);
1422 
1423 		if (!ti->num_write_same_bios)
1424 			return false;
1425 
1426 		if (!ti->type->iterate_devices ||
1427 		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1428 			return false;
1429 	}
1430 
1431 	return true;
1432 }
1433 
1434 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1435 			       struct queue_limits *limits)
1436 {
1437 	unsigned flush = 0;
1438 
1439 	/*
1440 	 * Copy table's limits to the DM device's request_queue
1441 	 */
1442 	q->limits = *limits;
1443 
1444 	if (!dm_table_supports_discards(t))
1445 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
1446 	else
1447 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1448 
1449 	if (dm_table_supports_flush(t, REQ_FLUSH)) {
1450 		flush |= REQ_FLUSH;
1451 		if (dm_table_supports_flush(t, REQ_FUA))
1452 			flush |= REQ_FUA;
1453 	}
1454 	blk_queue_flush(q, flush);
1455 
1456 	if (!dm_table_discard_zeroes_data(t))
1457 		q->limits.discard_zeroes_data = 0;
1458 
1459 	/* Ensure that all underlying devices are non-rotational. */
1460 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
1461 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
1462 	else
1463 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
1464 
1465 	if (!dm_table_supports_write_same(t))
1466 		q->limits.max_write_same_sectors = 0;
1467 
1468 	dm_table_set_integrity(t);
1469 
1470 	/*
1471 	 * Determine whether or not this queue's I/O timings contribute
1472 	 * to the entropy pool.  Only request-based targets use this.
1473 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
1474 	 * have it set.
1475 	 */
1476 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
1477 		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
1478 
1479 	/*
1480 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
1481 	 * visible to other CPUs because, once the flag is set, incoming bios
1482 	 * are processed by request-based dm, which refers to the queue
1483 	 * settings.
1484 	 * Until the flag is set, bios are passed to bio-based dm and queued to
1485 	 * md->deferred where queue settings are not needed yet.
1486 	 * Those bios are passed to request-based dm at resume time.
1487 	 */
1488 	smp_mb();
1489 	if (dm_table_request_based(t))
1490 		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
1491 }
1492 
1493 unsigned int dm_table_get_num_targets(struct dm_table *t)
1494 {
1495 	return t->num_targets;
1496 }
1497 
1498 struct list_head *dm_table_get_devices(struct dm_table *t)
1499 {
1500 	return &t->devices;
1501 }
1502 
1503 fmode_t dm_table_get_mode(struct dm_table *t)
1504 {
1505 	return t->mode;
1506 }
1507 EXPORT_SYMBOL(dm_table_get_mode);
1508 
1509 static void suspend_targets(struct dm_table *t, unsigned postsuspend)
1510 {
1511 	int i = t->num_targets;
1512 	struct dm_target *ti = t->targets;
1513 
1514 	while (i--) {
1515 		if (postsuspend) {
1516 			if (ti->type->postsuspend)
1517 				ti->type->postsuspend(ti);
1518 		} else if (ti->type->presuspend)
1519 			ti->type->presuspend(ti);
1520 
1521 		ti++;
1522 	}
1523 }
1524 
1525 void dm_table_presuspend_targets(struct dm_table *t)
1526 {
1527 	if (!t)
1528 		return;
1529 
1530 	suspend_targets(t, 0);
1531 }
1532 
1533 void dm_table_postsuspend_targets(struct dm_table *t)
1534 {
1535 	if (!t)
1536 		return;
1537 
1538 	suspend_targets(t, 1);
1539 }
1540 
1541 int dm_table_resume_targets(struct dm_table *t)
1542 {
1543 	int i, r = 0;
1544 
1545 	for (i = 0; i < t->num_targets; i++) {
1546 		struct dm_target *ti = t->targets + i;
1547 
1548 		if (!ti->type->preresume)
1549 			continue;
1550 
1551 		r = ti->type->preresume(ti);
1552 		if (r) {
1553 			DMERR("%s: %s: preresume failed, error = %d",
1554 			      dm_device_name(t->md), ti->type->name, r);
1555 			return r;
1556 		}
1557 	}
1558 
1559 	for (i = 0; i < t->num_targets; i++) {
1560 		struct dm_target *ti = t->targets + i;
1561 
1562 		if (ti->type->resume)
1563 			ti->type->resume(ti);
1564 	}
1565 
1566 	return 0;
1567 }
1568 
1569 void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
1570 {
1571 	list_add(&cb->list, &t->target_callbacks);
1572 }
1573 EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
1574 
1575 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
1576 {
1577 	struct dm_dev_internal *dd;
1578 	struct list_head *devices = dm_table_get_devices(t);
1579 	struct dm_target_callbacks *cb;
1580 	int r = 0;
1581 
1582 	list_for_each_entry(dd, devices, list) {
1583 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1584 		char b[BDEVNAME_SIZE];
1585 
1586 		if (likely(q))
1587 			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
1588 		else
1589 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
1590 				     dm_device_name(t->md),
1591 				     bdevname(dd->dm_dev.bdev, b));
1592 	}
1593 
1594 	list_for_each_entry(cb, &t->target_callbacks, list)
1595 		if (cb->congested_fn)
1596 			r |= cb->congested_fn(cb, bdi_bits);
1597 
1598 	return r;
1599 }
1600 
1601 int dm_table_any_busy_target(struct dm_table *t)
1602 {
1603 	unsigned i;
1604 	struct dm_target *ti;
1605 
1606 	for (i = 0; i < t->num_targets; i++) {
1607 		ti = t->targets + i;
1608 		if (ti->type->busy && ti->type->busy(ti))
1609 			return 1;
1610 	}
1611 
1612 	return 0;
1613 }
1614 
1615 struct mapped_device *dm_table_get_md(struct dm_table *t)
1616 {
1617 	return t->md;
1618 }
1619 EXPORT_SYMBOL(dm_table_get_md);
1620 
1621 void dm_table_run_md_queue_async(struct dm_table *t)
1622 {
1623 	struct mapped_device *md;
1624 	struct request_queue *queue;
1625 	unsigned long flags;
1626 
1627 	if (!dm_table_request_based(t))
1628 		return;
1629 
1630 	md = dm_table_get_md(t);
1631 	queue = dm_get_md_queue(md);
1632 	if (queue) {
1633 		spin_lock_irqsave(queue->queue_lock, flags);
1634 		blk_run_queue_async(queue);
1635 		spin_unlock_irqrestore(queue->queue_lock, flags);
1636 	}
1637 }
1638 EXPORT_SYMBOL(dm_table_run_md_queue_async);
1639 
1640 static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1641 				  sector_t start, sector_t len, void *data)
1642 {
1643 	struct request_queue *q = bdev_get_queue(dev->bdev);
1644 
1645 	return q && blk_queue_discard(q);
1646 }
1647 
1648 bool dm_table_supports_discards(struct dm_table *t)
1649 {
1650 	struct dm_target *ti;
1651 	unsigned i = 0;
1652 
1653 	/*
1654 	 * Unless any target used by the table set discards_supported,
1655 	 * require at least one underlying device to support discards.
1656 	 * t->devices includes internal dm devices such as mirror logs
1657 	 * so we need to use iterate_devices here, which targets
1658 	 * supporting discard selectively must provide.
1659 	 */
1660 	while (i < dm_table_get_num_targets(t)) {
1661 		ti = dm_table_get_target(t, i++);
1662 
1663 		if (!ti->num_discard_bios)
1664 			continue;
1665 
1666 		if (ti->discards_supported)
1667 			return 1;
1668 
1669 		if (ti->type->iterate_devices &&
1670 		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
1671 			return 1;
1672 	}
1673 
1674 	return 0;
1675 }
1676